Commit
add function to qwen
WHALEEYE committed Jan 13, 2025
1 parent 520c29a commit 8faf857
Showing 6 changed files with 82 additions and 61 deletions.
52 changes: 52 additions & 0 deletions camel/models/_utils.py
@@ -0,0 +1,52 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import textwrap
from typing import Optional, Type

from pydantic import BaseModel


def get_prompt_with_response_format(
    response_format: Optional[Type[BaseModel]],
    user_message: str,
) -> str:
    r"""Generates a prompt based on the provided Pydantic model and user
    message.

    Args:
        response_format (Optional[Type[BaseModel]]): The Pydantic model class.
        user_message (str): The user message to be used in the prompt.

    Returns:
        str: A prompt string for the LLM.
    """
    if response_format is None:
        return user_message

    json_schema = response_format.model_json_schema()
    updated_prompt = textwrap.dedent(
        f"""\
        Given the user message, please generate a JSON response adhering
        to the following JSON schema:
        {json_schema}
        Make sure the JSON response is valid and matches the EXACT structure
        defined in the schema. Your result should only be a valid json
        object, without any other text or comments.
        Following is the original user message:
        {user_message}
        """
    )
    return updated_prompt
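
Note: a minimal sketch of how the new helper behaves, using a hypothetical BoilingPoint model that is not part of this commit:

from pydantic import BaseModel

from camel.models._utils import get_prompt_with_response_format


class BoilingPoint(BaseModel):
    # hypothetical example model, not in the repository
    celsius: str
    fahrenheit: str


# The returned prompt embeds BoilingPoint's JSON schema followed by the
# original question, instructing the model to answer with JSON only.
prompt = get_prompt_with_response_format(
    BoilingPoint, "At what temperature does water boil?"
)
print(prompt)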
4 changes: 1 addition & 3 deletions camel/models/anthropic_model.py
@@ -169,9 +169,7 @@ async def _arun(
         )
 
         # format response to openai format
-        response = self._convert_response_from_anthropic_to_openai(response)
-
-        return response
+        return self._convert_response_from_anthropic_to_openai(response)
 
     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic
27 changes: 12 additions & 15 deletions camel/models/qwen_model.py
@@ -21,6 +21,7 @@
 from camel.configs import QWEN_API_PARAMS, QwenConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import get_prompt_with_response_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -148,25 +149,21 @@ def _prepare_request(
         tools: Optional[List[Dict[str, Any]]],
     ) -> Dict[str, Any]:
         request_config = self.model_config_dict.copy()
+        user_message = messages[-1]
 
-        if tools:
-            request_config["tools"] = tools
-
-        if response_format is None:
-            return request_config
+        if not isinstance(user_message["content"], str):
+            raise ValueError("Only text messages are supported")
 
-        # get all keys of the response_format
-        response_format_keys = response_format.model_fields.keys()
-        additional_prompt = (
-            "The response should be in JSON format with the following keys: "
-            f"{', '.join(response_format_keys)}."
-        )
-        user_message = messages[-1]
-        user_message["content"] = (
-            f"{user_message['content']}\n{additional_prompt}"
+        user_message["content"] = get_prompt_with_response_format(
+            response_format, user_message["content"]
         )
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            # Improve stability with native response format support
+            # This config will be unstable if being used with tools
+            request_config["response_format"] = {"type": "json_object"}
 
-        request_config["response_format"] = {"type": "json_object"}
         return request_config
 
     @property
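Note: the control flow above means tool definitions take precedence over Qwen's native JSON mode, since the two are unstable together; when tools are present, structured output relies entirely on the schema embedded in the prompt. A standalone sketch of that branching, with hypothetical names (prepare_request_sketch is not part of the commit):

from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel


def prepare_request_sketch(
    base_config: Dict[str, Any],
    response_format: Optional[Type[BaseModel]],
    tools: Optional[List[Dict[str, Any]]],
) -> Dict[str, Any]:
    # Mirrors the tools-vs-response_format branching in _prepare_request.
    config = base_config.copy()
    if tools:
        # Tools win; JSON mode is skipped because combining it with
        # tool calls is unstable.
        config["tools"] = tools
    elif response_format:
        # No tools: request a raw JSON object and rely on the schema
        # already embedded in the prompt for the exact structure.
        config["response_format"] = {"type": "json_object"}
    return config


# e.g. prepare_request_sketch({}, BaseModel, tools=None)
# -> {'response_format': {'type': 'json_object'}}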
2 changes: 0 additions & 2 deletions camel/utils/__init__.py
@@ -22,7 +22,6 @@
     download_github_subdirectory,
     download_tasks,
     func_string_to_callable,
-    generate_prompt_for_structured_output,
     get_first_int,
     get_prompt_template_key_words,
     get_pydantic_major_version,
@@ -81,5 +80,4 @@
"handle_http_error",
"get_pydantic_model",
"download_github_subdirectory",
"generate_prompt_for_structured_output",
]
36 changes: 0 additions & 36 deletions camel/utils/commons.py
@@ -692,39 +692,3 @@ def download_github_subdirectory(
         download_github_subdirectory(
             repo, f'{subdir}/{file["name"]}', file_path, branch
         )
-
-
-def generate_prompt_for_structured_output(
-    response_format: Optional[Type[BaseModel]],
-    user_message: str,
-) -> str:
-    """
-    This function generates a prompt based on the provided Pydantic model and
-    user message.
-    Args:
-        response_format (Type[BaseModel]): The Pydantic model class.
-        user_message (str): The user message to be used in the prompt.
-    Returns:
-        str: A prompt string for the LLM.
-    """
-    if response_format is None:
-        return user_message
-
-    json_schema = response_format.model_json_schema()
-    sys_prompt = (
-        "Given the user message, please generate a JSON response adhering "
-        "to the following JSON schema:\n"
-        f"{json_schema}\n"
-        "Make sure the JSON response is valid and matches the EXACT structure "
-        "defined in the schema. Your result should only be a valid json "
-        "object, without any other text or comments.\n"
-    )
-    user_prompt = f"User message: {user_message}\n"
-
-    final_prompt = f"""
-{sys_prompt}
-{user_prompt}
-"""
-    return final_prompt
22 changes: 17 additions & 5 deletions examples/simple_agent.py
@@ -1,8 +1,20 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from pydantic import BaseModel
 
 from camel.agents import ChatAgent
 from camel.models import ModelFactory
-from camel.toolkits import WeatherToolkit
 from camel.types import ModelPlatformType, ModelType
 
 model = ModelFactory.create(
@@ -12,14 +24,14 @@


 class ResponseFormat(BaseModel):
-    weather: str
-    time: str
+    celsius: str
+    fahrenheit: str
 
 
-agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data])
+agent = ChatAgent(model=model)
 
 resp = agent.step(
-    "What's the current weather in New York?",
+    "At what temperature does water boil?",
     response_format=ResponseFormat,
 )
 print(resp.msg.content)
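Note: since ResponseFormat declares celsius and fahrenheit as string fields, the printed content should be a bare JSON object with exactly those keys, for example (illustrative values, not captured output):

{"celsius": "100", "fahrenheit": "212"}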
