Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-07 06:40:04 +06:00

125 lines
4.3 KiB
Python
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
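
# Tests for `evaluate` from transformers.tools.python_interpreter, the simple Python
# interpreter used by the Transformers tools/agents to run generated code. Each test
# passes a code snippet, a dict of allowed tools, and a `state` dict, then checks both
# the returned value and the resulting state.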

import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


# Fake function we will use as a tool
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
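    # Each test exercises `evaluate` on a single kind of Python node: assignments,
    # calls, constants, dicts, expressions, f-strings, if-blocks, lists, names and
    # subscripts.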
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
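
    # Control flow: evaluate runs the taken branch and still returns the value of the
    # last assignment it executed.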
    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
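
    # A trailing bare expression (not only an assignment) is also returned as the result.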
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})