chore: Rename tool call instances #1492

Open · wants to merge 12 commits into base: master
36 changes: 18 additions & 18 deletions camel/agents/chat_agent.py
@@ -86,7 +86,7 @@
from camel.utils import track_agent


class FunctionCallingRecord(BaseModel):
class ToolCallingRecord(BaseModel):
r"""Historical records of functions called in the conversation.

Attributes:
@@ -489,7 +489,7 @@ def get_info(
usage: Optional[Dict[str, int]],
termination_reasons: List[str],
num_tokens: int,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
) -> Dict[str, Any]:
r"""Returns a dictionary containing information about the chat session.
@@ -501,7 +501,7 @@ def get_info(
termination_reasons (List[str]): The reasons for the termination
of the chat session.
num_tokens (int): The number of tokens used in the chat session.
tool_calls (List[FunctionCallingRecord]): The list of function
tool_calls (List[ToolCallingRecord]): The list of function
calling records, containing the information of called tools.
external_tool_request
(Optional[ChatCompletionMessageToolCall], optional):
@@ -645,7 +645,7 @@ def _handle_step(
)

# Record function calls made during the session
tool_call_records: List[FunctionCallingRecord] = []
tool_call_records: List[ToolCallingRecord] = []

external_tool_request = None

@@ -885,7 +885,7 @@ async def step_async(

self.update_memory(input_message, OpenAIBackendRole.USER)

tool_call_records: List[FunctionCallingRecord] = []
tool_call_records: List[ToolCallingRecord] = []
while True:
try:
openai_messages, num_tokens = self.memory.get_context()
@@ -970,7 +970,7 @@ async def step_async(

def _step_tool_call_and_update(
self, response: ChatCompletion
) -> FunctionCallingRecord:
) -> ToolCallingRecord:
r"""Processes a function call within the chat completion response,
records the function call in the provided list of tool calls and
updates the memory of the current agent.
@@ -980,7 +980,7 @@ def _step_tool_call_and_update(
completion.

Returns:
FunctionCallingRecord: The record of calling the function.
ToolCallingRecord: The record of calling the function.
"""

# Perform function calling
@@ -996,7 +996,7 @@ def _step_tool_call_and_update(

async def _step_tool_call_and_update_async(
self, response: ChatCompletion
) -> FunctionCallingRecord:
) -> ToolCallingRecord:
(
func_assistant_msg,
func_result_msg,
@@ -1015,7 +1015,7 @@ def _structure_output_with_function(
List[str],
Dict[str, int],
str,
FunctionCallingRecord,
ToolCallingRecord,
int,
]:
r"""Internal function of structuring the output of the agent based on
@@ -1027,7 +1027,7 @@ def _structure_output_with_function(

Returns:
Tuple[List[BaseMessage], List[str], Dict[str, int], str,
FunctionCallingRecord, int]:
ToolCallingRecord, int]:
A tuple containing the output messages, finish reasons, usage
dictionary, response ID, function calling record, and number of
tokens.
@@ -1141,7 +1141,7 @@ def _step_get_info(
finish_reasons: List[str],
usage_dict: Dict[str, int],
response_id: str,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
num_tokens: int,
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
) -> Dict[str, Any]:
@@ -1160,7 +1160,7 @@ def _step_get_info(
usage_dict (Dict[str, int]): Dictionary containing token usage
information.
response_id (str): The ID of the response from the model.
tool_calls (List[FunctionCallingRecord]): Records of function calls
tool_calls (List[ToolCallingRecord]): Records of function calls
made during this step.
num_tokens (int): The number of tokens used in this step.
external_tool_request (Optional[ChatCompletionMessageToolCall]):
@@ -1335,15 +1335,15 @@ def handle_stream_response(
def _step_token_exceed(
self,
num_tokens: int,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
termination_reason: str,
) -> ChatAgentResponse:
r"""Return trivial response containing number of tokens and information
of called functions when the number of tokens exceeds.

Args:
num_tokens (int): Number of tokens in the messages.
tool_calls (List[FunctionCallingRecord]): List of information
tool_calls (List[ToolCallingRecord]): List of information
objects of functions called in the current step.
termination_reason (str): String of termination reason.

@@ -1372,7 +1372,7 @@ def _step_tool_call(
self,
response: ChatCompletion,
) -> Tuple[
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord
]:
r"""Execute the function with arguments following the model's response.

@@ -1418,7 +1418,7 @@ def _step_tool_call(
)

# Record information about this function call
func_record = FunctionCallingRecord(
func_record = ToolCallingRecord(
func_name=func_name,
args=args,
result=result,
@@ -1442,7 +1442,7 @@ async def step_tool_call_async(
self,
response: ChatCompletion,
) -> Tuple[
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord
]:
r"""Execute the async function with arguments following the model's
response.
@@ -1488,7 +1488,7 @@ async def step_tool_call_async(
)

# Record information about this function call
func_record = FunctionCallingRecord(
func_record = ToolCallingRecord(
func_name=func_name,
args=args,
result=result,
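For downstream code, the rename is a drop-in swap of the import and the type annotations; the diff leaves the record's fields untouched. A minimal sketch of the renamed model in use, assuming func_name, args, and result are its only required fields:

from camel.agents.chat_agent import ToolCallingRecord  # formerly FunctionCallingRecord

# Build a record the same way _step_tool_call does in the hunk above.
record = ToolCallingRecord(
    func_name="search_papers",
    args={"query": "attention is all you need"},
    result="papers downloaded successfully",
)
print(record.func_name, record.args, record.result)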
4 changes: 2 additions & 2 deletions docs/cookbooks/advanced_features/agents_tracking.ipynb
@@ -356,7 +356,7 @@
"\n",
"from colorama import Fore\n",
"\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
Review comment (Member): without updating camel's version, we shouldn't directly make changes to the output content of the cookbooks.

"from camel.configs import ChatGPTConfig\n",
"from camel.models import ModelFactory\n",
"from camel.societies import RolePlaying\n",
@@ -765,7 +765,7 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\")\n",
" tool_calls: List[FunctionCallingRecord] = assistant_response.info[\n",
" tool_calls: List[ToolCallingRecord] = assistant_response.info[\n",
" 'tool_calls'\n",
" ]\n",
" for func_record in tool_calls:\n",
6 changes: 3 additions & 3 deletions docs/cookbooks/advanced_features/agents_with_rag.ipynb
@@ -474,7 +474,7 @@
"from typing import List\n",
"from colorama import Fore\n",
"\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
"from camel.configs import ChatGPTConfig\n",
"from camel.toolkits import (\n",
" MathToolkit,\n",
@@ -568,8 +568,8 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\")\n",
" tool_calls: List[FunctionCallingRecord] = [\n",
" FunctionCallingRecord(**call.as_dict())\n",
" tool_calls: List[ToolCallingRecord] = [\n",
" ToolCallingRecord(**call.as_dict())\n",
" for call in assistant_response.info['tool_calls']\n",
" ]\n",
" for func_record in tool_calls:\n",
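The cookbook loops rebuild typed records from the raw entries in assistant_response.info['tool_calls']; a short sketch of that pattern as a standalone helper, assuming each raw entry exposes as_dict() as shown in the notebook diff:

from typing import Any, List

from camel.agents.chat_agent import ToolCallingRecord

def collect_tool_calls(assistant_response: Any) -> List[ToolCallingRecord]:
    # Mirror the notebook loop: convert each raw call entry into a typed record.
    return [
        ToolCallingRecord(**call.as_dict())
        for call in assistant_response.info['tool_calls']
    ]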
4 changes: 2 additions & 2 deletions docs/cookbooks/advanced_features/agents_with_tools.ipynb
@@ -378,7 +378,7 @@
"outputs": [],
"source": [
"from camel.societies import RolePlaying\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
"from camel.utils import print_text_animated\n",
"from colorama import Fore"
]
@@ -566,7 +566,7 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\")\n",
" tool_calls: list[FunctionCallingRecord] = assistant_response.info[\n",
" tool_calls: list[ToolCallingRecord] = assistant_response.info[\n",
" 'tool_calls'\n",
" ]\n",
" for func_record in tool_calls:\n",
@@ -235,7 +235,7 @@
"from colorama import Fore\n",
"from composio_camel import Action, ComposioToolSet\n",
"\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
"from camel.configs import ChatGPTConfig\n",
"from camel.models import ModelFactory\n",
"from camel.societies import RolePlaying\n",
@@ -486,7 +486,7 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\")\n",
" tool_calls: List[FunctionCallingRecord] = assistant_response.info[\n",
" tool_calls: List[ToolCallingRecord] = assistant_response.info[\n",
" 'tool_calls'\n",
" ]\n",
" for func_record in tool_calls:\n",
6 changes: 3 additions & 3 deletions docs/cookbooks/applications/dynamic_travel_planner.ipynb
@@ -379,7 +379,7 @@
"\n",
"from colorama import Fore\n",
"\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
"from camel.societies import RolePlaying\n",
"from camel.toolkits import FunctionTool\n",
"from camel.utils import print_text_animated"
@@ -896,8 +896,8 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\", 0.01)\n",
" tool_calls: List[FunctionCallingRecord] = [\n",
" FunctionCallingRecord(**call.as_dict())\n",
" tool_calls: List[ToolCallingRecord] = [\n",
" ToolCallingRecord(**call.as_dict())\n",
" for call in assistant_response.info['tool_calls']\n",
" ]\n",
" for func_record in tool_calls:\n",
6 changes: 3 additions & 3 deletions docs/cookbooks/applications/roleplaying_scraper.ipynb
@@ -840,7 +840,7 @@
"\n",
"from colorama import Fore\n",
"\n",
"from camel.agents.chat_agent import FunctionCallingRecord\n",
"from camel.agents.chat_agent import ToolCallingRecord\n",
"from camel.societies import RolePlaying\n",
"from camel.utils import print_text_animated"
]
@@ -1288,8 +1288,8 @@
" # Print output from the assistant, including any function\n",
" # execution information\n",
" print_text_animated(Fore.GREEN + \"AI Assistant:\", 0.01)\n",
" tool_calls: List[FunctionCallingRecord] = [\n",
" FunctionCallingRecord(**call.as_dict())\n",
" tool_calls: List[ToolCallingRecord] = [\n",
" ToolCallingRecord(**call.as_dict())\n",
" for call in assistant_response.info['tool_calls']\n",
" ]\n",
" for func_record in tool_calls:\n",
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_cohere.py
@@ -15,7 +15,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import CohereConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -120,8 +120,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_mistral.py
@@ -16,7 +16,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import MistralConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -120,8 +120,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_ollama.py
@@ -16,7 +16,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.models import ModelFactory
from camel.societies import RolePlaying
from camel.types import ModelPlatformType
@@ -100,8 +100,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_sambanova.py
@@ -17,7 +17,7 @@
import agentops
from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import SambaCloudAPIConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -128,8 +128,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
@@ -17,7 +17,7 @@
import agentops
from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -125,8 +125,8 @@
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
4 changes: 2 additions & 2 deletions examples/toolkits/arxiv_toolkit.py
@@ -55,7 +55,7 @@
print(str(response.info['tool_calls'])[:1000])
'''
===============================================================================
[FunctionCallingRecord(func_name='search_papers', args={'query': 'attention is
[ToolCallingRecord(func_name='search_papers', args={'query': 'attention is
all you need'}, result=[{'title': "Attention Is All You Need But You Don't
Need All Of It For Inference of Large Language Models", 'published_date':
'2024-07-22', 'authors': ['Georgy Tyukin', 'Gbetondji J-S Dovonon', 'Jean
@@ -85,7 +85,7 @@
print(str(response.info['tool_calls'])[:1000])
'''
===============================================================================
[FunctionCallingRecord(func_name='download_papers', args={'query': 'attention
[ToolCallingRecord(func_name='download_papers', args={'query': 'attention
is all you need', 'output_dir': '/Users/enrei/Desktop/camel0826/camel/examples/
tool_call', 'paper_ids': ['2407.15516v1', '2107.08000v1', '2306.01926v1',
'2112.05993v1', '1912.11959v2']}, result='papers downloaded successfully')]