fix: update anthropic #1392

Merged · 5 commits · Jan 4, 2025
32 changes: 17 additions & 15 deletions — camel/configs/anthropic_config.py

```diff
@@ -13,10 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import List, Union
+from typing import Any, ClassVar, List, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
+from camel.types import NotGiven
 
 
 class AnthropicConfig(BaseConfig):
@@ -29,41 +29,43 @@ class AnthropicConfig(BaseConfig):
             generate before stopping. Note that Anthropic models may stop
             before reaching this maximum. This parameter only specifies the
             absolute maximum number of tokens to generate.
-            (default: :obj:`256`)
+            (default: :obj:`8192`)
         stop_sequences (List[str], optional): Sequences that will cause the
             model to stop generating completion text. Anthropic models stop
             on "\n\nHuman:", and may include additional built-in stop sequences
             in the future. By providing the stop_sequences parameter, you may
             include additional strings that will cause the model to stop
-            generating.
+            generating. (default: :obj:`[]`)
         temperature (float, optional): Amount of randomness injected into the
             response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
             for analytical / multiple choice, and closer to 1 for creative
-            and generative tasks.
-            (default: :obj:`1`)
+            and generative tasks. (default: :obj:`1`)
         top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
             compute the cumulative distribution over all the options for each
             subsequent token in decreasing probability order and cut it off
             once it reaches a particular probability specified by `top_p`.
             You should either alter `temperature` or `top_p`,
-            but not both.
-            (default: :obj:`0.7`)
+            but not both. (default: :obj:`0.7`)
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
-            responses.
-            (default: :obj:`5`)
+            responses. (default: :obj:`5`)
         metadata: An object describing metadata about the request.
         stream (bool, optional): Whether to incrementally stream the response
             using server-sent events. (default: :obj:`False`)
     """
 
-    max_tokens: int = 256
-    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
+    max_tokens: int = 8192
+    stop_sequences: ClassVar[Union[List[str], NotGiven]] = []
     temperature: float = 1
-    top_p: Union[float, NotGiven] = NOT_GIVEN
-    top_k: Union[int, NotGiven] = NOT_GIVEN
-    metadata: NotGiven = NOT_GIVEN
+    top_p: Union[float, NotGiven] = 0.7
+    top_k: Union[int, NotGiven] = 5
     stream: bool = False
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # TODO: Support tool calling.
+        return config_dict
 
 
 ANTHROPIC_API_PARAMS = {param for param in AnthropicConfig.model_fields.keys()}
```
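A quick sketch of how the new defaults and the added `as_dict()` override surface to callers — assuming a camel install at this PR's revision; the values below mirror the diff rather than independent documentation:

```python
# Minimal sketch of the updated config defaults. Assumes camel at this
# PR's revision; defaults are taken from the diff above.
from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig

config = AnthropicConfig()     # max_tokens now defaults to 8192 (was 256)
payload = config.as_dict()     # as_dict() strips any "tools" entry until
                               # tool calling is supported

print(payload["max_tokens"])   # -> 8192
print("tools" in payload)      # -> False
print("stream" in ANTHROPIC_API_PARAMS)  # param names come from model_fields
```

Note the design choice: rather than raising on a `tools` key inherited from `BaseConfig`, the override silently drops it, so existing call sites keep working while tool calling is unimplemented.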
23 changes: 1 addition & 22 deletions — camel/models/anthropic_model.py

```diff
@@ -12,7 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -102,27 +102,6 @@ def token_counter(self) -> BaseTokenCounter:
         self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-    @dependencies_required('anthropic')
-    def count_tokens_from_prompt(
-        self, prompt: str, role: Literal["user", "assistant"]
-    ) -> int:
-        r"""Count the number of tokens from a prompt.
-
-        Args:
-            prompt (str): The prompt string.
-            role (Literal["user", "assistant"]): The role of the message
-                sender, either "user" or "assistant".
-
-        Returns:
-            int: The number of tokens in the prompt.
-        """
-        from anthropic.types.beta import BetaMessageParam
-
-        return self.client.beta.messages.count_tokens(
-            messages=[BetaMessageParam(content=prompt, role=role)],
-            model=self.model_type,
-        ).input_tokens
-
     def run(
         self,
         messages: List[OpenAIMessage],
```
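With `count_tokens_from_prompt()` deleted, prompt-level counting presumably flows through the `token_counter` property kept above, which hands back an `AnthropicTokenCounter`. A hedged sketch of the replacement path:

```python
# Hedged sketch: counting tokens via the retained token_counter machinery
# instead of the deleted count_tokens_from_prompt() helper. Assumes camel
# at this PR's revision exports AnthropicTokenCounter from camel.utils and
# that ANTHROPIC_API_KEY is set (counting calls the Anthropic API).
from camel.types import ModelType
from camel.utils import AnthropicTokenCounter

counter = AnthropicTokenCounter(ModelType.CLAUDE_3_5_SONNET)
n_tokens = counter.count_tokens_from_messages(
    [{"role": "user", "content": "Hello, Claude"}]
)
print(n_tokens)
```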
6 changes: 3 additions & 3 deletions — camel/utils/token_counting.py

```diff
@@ -253,11 +253,11 @@ def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
         Returns:
             int: Number of tokens in the messages.
         """
-        from anthropic.types.beta import BetaMessageParam
+        from anthropic.types import MessageParam
 
-        return self.client.beta.messages.count_tokens(
+        return self.client.messages.count_tokens(
             messages=[
-                BetaMessageParam(
+                MessageParam(
                     content=str(msg["content"]),
                     role="user" if msg["role"] == "user" else "assistant",
                 )
```
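The substantive change here is moving off the beta token-counting surface of the `anthropic` SDK onto the stable one. In isolation, the endpoint this now targets looks roughly like this (sketch, assuming a recent `anthropic` SDK and `ANTHROPIC_API_KEY` in the environment; the model id string is illustrative):

```python
# Sketch of the stable token-counting endpoint this PR migrates to.
# Assumes a recent `anthropic` SDK release where messages.count_tokens
# is no longer beta; the model id is an illustrative placeholder.
from anthropic import Anthropic
from anthropic.types import MessageParam

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
result = client.messages.count_tokens(
    model="claude-3-5-sonnet-latest",
    messages=[MessageParam(role="user", content="Hello, Claude")],
)
print(result.input_tokens)
```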
2 changes: 1 addition & 1 deletion — examples/models/role_playing_with_claude.py

```diff
@@ -100,4 +100,4 @@ def main(model_type=None) -> None:
 
 
 if __name__ == "__main__":
-    main(model_type=ModelType.CLAUDE_2_0)
+    main(model_type=ModelType.CLAUDE_3_5_SONNET)
```
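The example swaps the deprecated Claude 2.0 for Claude 3.5 Sonnet; since `main(model_type=None)` keeps its signature, it can still be driven with any model type. Normally you would just run `python examples/models/role_playing_with_claude.py`; the direct import below is a hypothetical illustration:

```python
# Hypothetical direct invocation of the updated example — assumes the
# examples tree is importable from the repo root and ANTHROPIC_API_KEY
# is set. Running the script directly is the usual path.
from camel.types import ModelType
from examples.models.role_playing_with_claude import main

main(model_type=ModelType.CLAUDE_3_5_SONNET)
```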