Commit

Merge branch 'main' into CM-8549_add_error_handling_for_current_chain
jynx10 authored Nov 14, 2023
2 parents 8d9ceba + 0731a02 commit 9ac5468
Showing 12 changed files with 171 additions and 12 deletions.
2 changes: 2 additions & 0 deletions .github/PACKAGE_README.md
@@ -53,6 +53,8 @@ comet_llm.log_prompt(
 - [x] Visualize your prompts and responses in the UI.
 - [x] Log your chain execution down to the level of granularity that you need.
 - [x] Visualize your chain execution in the UI.
+- [x] Automatically track your prompts when using OpenAI chat models.
+- [x] Track and analyze user feedback.
 - [ ] Diff your prompts and chain execution in the UI.
 
 ## 👀 Examples
2 changes: 2 additions & 0 deletions README.md
@@ -56,6 +56,8 @@ comet_llm.log_prompt(
 - [x] Visualize your prompts and responses in the UI.
 - [x] Log your chain execution down to the level of granularity that you need.
 - [x] Visualize your chain execution in the UI.
+- [x] Automatically track your prompts when using OpenAI chat models.
+- [x] Track and analyze user feedback.
 - [ ] Diff your prompts and chain execution in the UI.
 
 ## 👀 Examples
2 changes: 1 addition & 1 deletion setup.py
@@ -59,7 +59,7 @@
     package_dir={"": "src"},
     url="https://www.comet.com",
     project_urls=project_urls,
-    version="1.5.0",
+    version="1.6.0",
     zip_safe=False,
     license="MIT",
 )
3 changes: 2 additions & 1 deletion src/comet_llm/api.py
@@ -15,12 +15,13 @@
 import logging
 from typing import Optional
 
-from . import experiment_info, logging_messages
+from . import config, exceptions, experiment_info, logging_messages
 from .experiment_api import ExperimentAPI
 
 LOGGER = logging.getLogger(__name__)
 
 
+@exceptions.filter(allow_raising=config.raising_enabled())
 def log_user_feedback(id: str, score: float, api_key: Optional[str] = None) -> None:
     """
     Logs user feedback for the provided Prompt or Chain ID. This will
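This decorator is the error-handling change the branch name refers to: a failure inside `log_user_feedback` is now logged and swallowed unless raising is enabled in `comet_llm.config`. A hedged usage sketch of the resulting behavior, assuming the function is re-exported at package level like `comet_llm.log_prompt`:

```python
import comet_llm

# With raising disabled in the config, a failed feedback call (bad ID,
# network error, missing API key) is logged by the SDK and returns None;
# with raising enabled, the same failure propagates to the caller.
comet_llm.log_user_feedback(id="some-prompt-or-chain-id", score=1.0)
```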
44 changes: 43 additions & 1 deletion src/comet_llm/autologgers/openai/chat_completion_parsers.py
@@ -13,15 +13,28 @@
 # *******************************************************
 
 import inspect
+import logging
 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Tuple, Union
 
+import comet_llm.logging
+
+from . import metadata
+
 if TYPE_CHECKING:
+    from openai import Stream
     from openai.openai_object import OpenAIObject
+    from openai.types.chat.chat_completion import ChatCompletion
 
 Inputs = Dict[str, Any]
 Outputs = Dict[str, Any]
 Metadata = Dict[str, Any]
 
+CreateCallResult = Union[
+    "ChatCompletion", "Stream", "OpenAIObject", Iterable["OpenAIObject"]
+]
+
+LOGGER = logging.getLogger(__file__)
+
 
 def create_arguments_supported(kwargs: Dict[str, Any]) -> bool:
     if "messages" not in kwargs:
@@ -43,7 +56,16 @@ def parse_create_arguments(kwargs: Dict[str, Any]) -> Tuple[Inputs, Metadata]:
     return inputs, metadata
 
 
-def parse_create_result(
+def parse_create_result(result: CreateCallResult) -> Tuple[Outputs, Metadata]:
+    openai_version = metadata.openai_version()
+
+    if openai_version is not None and openai_version.startswith("0."):
+        return _v0_x_x__parse_create_result(result)
+
+    return _v1_x_x__parse_create_result(result)
+
+
+def _v0_x_x__parse_create_result(
     result: Union["OpenAIObject", Iterable["OpenAIObject"]]
 ) -> Tuple[Outputs, Metadata]:
     if inspect.isgenerator(result):
@@ -60,3 +82,23 @@ def parse_create_result(
         metadata["output_model"] = metadata.pop("model")
 
     return outputs, metadata
+
+
+def _v1_x_x__parse_create_result(
+    result: Union["ChatCompletion", "Stream"]
+) -> Tuple[Outputs, Metadata]:
+    stream_mode = not hasattr(result, "model_dump")
+    if stream_mode:
+        choices = "Generation is not logged when using stream mode"
+        metadata = {}
+    else:
+        result_dict = result.model_dump()
+        choices: List[Dict[str, Any]] = result_dict.pop("choices")  # type: ignore
+        metadata = result_dict
+
+    outputs = {"choices": choices}
+
+    if "model" in metadata:
+        metadata["output_model"] = metadata.pop("model")
+
+    return outputs, metadata
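`parse_create_result` now dispatches on the installed SDK: 0.x results go through the old `to_dict()`-based parser, while 1.x results are read via `model_dump()`, with anything lacking that method treated as a stream and not logged. A small illustration of the non-stream v1 path; `FakeChatCompletion` is a hypothetical stand-in, not the OpenAI class:

```python
from typing import Any, Dict


class FakeChatCompletion:
    """Stand-in exposing the only ChatCompletion API the parser relies on."""

    def model_dump(self) -> Dict[str, Any]:
        return {
            "choices": [{"message": {"role": "assistant", "content": "Hi!"}}],
            "model": "gpt-3.5-turbo-0613",
            "usage": {"total_tokens": 12},
        }


# Mirrors the non-stream branch of _v1_x_x__parse_create_result.
result_dict = FakeChatCompletion().model_dump()
choices = result_dict.pop("choices")
metadata = result_dict
if "model" in metadata:
    metadata["output_model"] = metadata.pop("model")

print({"choices": choices})  # the logged outputs
print(metadata)  # {'usage': {'total_tokens': 12}, 'output_model': 'gpt-3.5-turbo-0613'}
```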
27 changes: 27 additions & 0 deletions src/comet_llm/autologgers/openai/metadata.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# *******************************************************
+#   ____                     _               _
+#  / ___|___  _ __ ___   ___| |_   _ __ ___ | |
+# | |   / _ \| '_ ` _ \ / _ \ __| | '_ ` _ \| |
+# | |__| (_) | | | | | |  __/ |_ _| | | | | | |
+#  \____\___/|_| |_| |_|\___|\__(_)_| |_| |_|_|
+#
+#  Sign up for free at https://www.comet.com
+#  Copyright (C) 2015-2023 Comet ML INC
+#  This source code is licensed under the MIT license found in the
+#  LICENSE file in the root directory of this package.
+# *******************************************************
+
+import functools
+from typing import Optional
+
+
+@functools.lru_cache(maxsize=1)
+def openai_version() -> Optional[str]:
+    try:
+        import openai
+
+        version: str = openai.__version__
+        return version
+    except Exception:
+        return None
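Because of `lru_cache(maxsize=1)`, the version string (or `None` when `openai` cannot be imported) is resolved once per process and reused for every subsequent `create` call. A tiny demonstration of that caching behavior, with a counter standing in for the import:

```python
import functools

calls = {"count": 0}


@functools.lru_cache(maxsize=1)
def cached_version() -> str:
    calls["count"] += 1  # stands in for the import and attribute lookup
    return "1.2.3"


cached_version()
cached_version()
assert calls["count"] == 1  # the lookup ran only once
```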
16 changes: 16 additions & 0 deletions src/comet_llm/autologgers/openai/patcher.py
@@ -30,3 +30,19 @@ def patch(registry: "registry.Registry") -> None:
     registry.register_after_exception(
         "openai", "ChatCompletion.create", hooks.after_exception_chat_completion_create
     )
+
+    registry.register_before(
+        "openai.resources.chat.completions",
+        "Completions.create",
+        hooks.before_chat_completion_create,
+    )
+    registry.register_after(
+        "openai.resources.chat.completions",
+        "Completions.create",
+        hooks.after_chat_completion_create,
+    )
+    registry.register_after_exception(
+        "openai.resources.chat.completions",
+        "Completions.create",
+        hooks.after_exception_chat_completion_create,
+    )
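The original `openai.ChatCompletion.create` target only exists in the 0.x SDK; in 1.x the chat endpoint lives at `openai.resources.chat.completions.Completions.create`, so the same three hooks are registered against both paths and whichever module exists at import time gets patched. A rough sketch of what a before/after hook pair does around a patched callable (simplified: the real registry patches through import hooks rather than wrapping directly):

```python
from typing import Any, Callable


def wrap_with_hooks(original: Callable, before: Callable, after: Callable) -> Callable:
    # Run the before-hook with the call arguments, invoke the original,
    # then hand the result to the after-hook for logging.
    def patched(*args: Any, **kwargs: Any) -> Any:
        before(*args, **kwargs)
        result = original(*args, **kwargs)
        after(result)
        return result

    return patched


def fake_create(**kwargs: Any) -> dict:  # pretend Completions.create
    return {"choices": [{"message": {"content": "hello"}}], "model": "fake-model"}


patched_create = wrap_with_hooks(
    fake_create,
    before=lambda **kw: print("log inputs:", kw["messages"]),
    after=lambda result: print("log outputs:", result["choices"]),
)
patched_create(messages=[{"role": "user", "content": "hi"}])
```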
9 changes: 6 additions & 3 deletions src/comet_llm/exceptions/filter_decorator.py
@@ -14,7 +14,7 @@
 
 import functools
 import logging
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any, Callable, Optional
 
 from comet_llm import logging as comet_logging
 
@@ -24,14 +24,17 @@
 LOGGER = logging.getLogger(__name__)
 
 
-def filter(allow_raising: bool, summary: "summary.Summary") -> Callable:
+def filter(
+    allow_raising: bool, summary: Optional["summary.Summary"] = None
+) -> Callable:
     def decorator(function: Callable) -> Callable:
         @functools.wraps(function)
         def wrapper(*args, **kwargs) -> Any:  # type: ignore
             try:
                 return function(*args, **kwargs)
             except Exception as exception:
-                summary.increment_failed()
+                if summary is not None:
+                    summary.increment_failed()
 
                 if allow_raising:
                     raise
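Making `summary` optional, with a `None` guard before `increment_failed()`, is what lets the same decorator wrap public entry points such as `log_user_feedback`, which have no per-experiment summary to update. A self-contained sketch of the resulting behavior (simplified: the real decorator routes messages through `comet_logging.log_once_at_level`):

```python
import functools
import logging
from typing import Any, Callable, Optional

LOGGER = logging.getLogger(__name__)


class Summary:
    def __init__(self) -> None:
        self.failed = 0

    def increment_failed(self) -> None:
        self.failed += 1


def filter(allow_raising: bool, summary: Optional[Summary] = None) -> Callable:
    def decorator(function: Callable) -> Callable:
        @functools.wraps(function)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return function(*args, **kwargs)
            except Exception:
                if summary is not None:  # the new guard
                    summary.increment_failed()
                if allow_raising:
                    raise
                LOGGER.error("call failed", exc_info=True)

        return wrapper

    return decorator


@filter(allow_raising=False)  # no summary: fine for public API functions
def flaky() -> None:
    raise RuntimeError("boom")


assert flaky() is None  # swallowed and logged instead of crashing the caller
```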
2 changes: 1 addition & 1 deletion src/comet_llm/import_hooks/patcher.py
@@ -30,7 +30,7 @@ def _get_object(module: ModuleType, callable_path: str) -> Any:
     for part in callable_path:
         try:
             current_object = getattr(current_object, part)
-        except AttributeError:
+        except Exception:
             return None
 
     return current_object
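Broadening the catch matters because attribute lookup can raise more than `AttributeError`: openai 1.x, for example, keeps legacy names such as `ChatCompletion` as stubs whose attribute access raises a dedicated removal error. Returning `None` keeps patching best-effort either way. A minimal illustration with a hypothetical proxy object:

```python
class RemovedNameProxy:
    # Mimics an object whose attribute lookup raises a non-AttributeError.
    def __getattr__(self, name: str):
        raise RuntimeError(f"{name} was removed in v1")


current_object = RemovedNameProxy()
try:
    current_object = getattr(current_object, "create")
except AttributeError:
    print("never reached: the error is a RuntimeError")
except Exception:
    print("caught: _get_object returns None instead of crashing")
```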
10 changes: 5 additions & 5 deletions tests/unit/autologgers/openai/test_chat_completion_parsers.py
@@ -61,10 +61,10 @@ def test_parse_create_arguments__only_messages_presented():
     )
 
 
-def test_parse_create_result__input_is_openai_object__input_parsed_successfully():
+def test_parse_create_result__input_is_ChatCompletion__input_parsed_successfully():
     create_result = Fake("create_result")
     with Scenario() as s:
-        s.create_result.to_dict() >> {
+        s.create_result.model_dump() >> {
             "choices": "the-choices",
             "some-key": "some-value",
         }
@@ -75,10 +75,10 @@ def test_parse_create_result__input_is_openai_object__input_parsed_successfully(
     assert metadata == {"some-key": "some-value"}
 
 
-def test_parse_create_result__input_is_openai_object__input_parsed_successfully__model_key_renamed_to_output_model():
+def test_parse_create_result__input_is_ChatCompletion__input_parsed_successfully__model_key_renamed_to_output_model():
     create_result = Fake("create_result")
     with Scenario() as s:
-        s.create_result.to_dict() >> {
+        s.create_result.model_dump() >> {
             "choices": "the-choices",
             "some-key": "some-value",
             "model": "the-model",
@@ -90,7 +90,7 @@ def test_parse_create_result__input_is_openai_object__input_parsed_successfully_
     assert metadata == {"some-key": "some-value", "output_model": "the-model"}
 
 
-def test_parse_create_result__input_is_generator_object__input_parsed_with_hardcoded_values_used():
+def test_parse_create_result__input_is_Stream__input_parsed_with_hardcoded_values_used():
     create_result = (x for x in [])
 
     outputs, metadata = chat_completion_parsers.parse_create_result(create_result)
51 changes: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+import box
+import pytest
+from testix import *
+
+from comet_llm.autologgers.openai import chat_completion_parsers
+
+
+@pytest.fixture(autouse=True)
+def mock_imports(patch_module):
+    patch_module(chat_completion_parsers, "metadata")
+
+def test_parse_create_result__input_is_openai_object__input_parsed_successfully():
+    create_result = Fake("create_result")
+    with Scenario() as s:
+        s.metadata.openai_version() >> "0.99.99"
+        s.create_result.to_dict() >> {
+            "choices": "the-choices",
+            "some-key": "some-value",
+        }
+
+        outputs, metadata = chat_completion_parsers.parse_create_result(create_result)
+
+    assert outputs == {"choices": "the-choices"}
+    assert metadata == {"some-key": "some-value"}
+
+
+def test_parse_create_result__input_is_openai_object__input_parsed_successfully__model_key_renamed_to_output_model():
+    create_result = Fake("create_result")
+    with Scenario() as s:
+        s.metadata.openai_version() >> "0.99.99"
+        s.create_result.to_dict() >> {
+            "choices": "the-choices",
+            "some-key": "some-value",
+            "model": "the-model",
+        }
+
+        outputs, metadata = chat_completion_parsers.parse_create_result(create_result)
+
+    assert outputs == {"choices": "the-choices"}
+    assert metadata == {"some-key": "some-value", "output_model": "the-model"}
+
+
+def test_parse_create_result__input_is_generator_object__input_parsed_with_hardcoded_values_used():
+    create_result = (x for x in [])
+
+    with Scenario() as s:
+        s.metadata.openai_version() >> "0.99.99"
+        outputs, metadata = chat_completion_parsers.parse_create_result(create_result)
+
+    assert outputs == {"choices": "Generation is not logged when using stream mode"}
+    assert metadata == {}
15 changes: 15 additions & 0 deletions tests/unit/exceptions/test_filter_decorator.py
@@ -61,3 +61,18 @@ def f():
             extra={"show_traceback": True}
         )
         assert f() is None
+
+
+def test_filter__upraising_not_allowed__summary_not_passed_to_filter__function_raised_exception__nothing_done_with_summary():
+    @filter_decorator.filter(allow_raising=False)
+    def f():
+        raise exceptions.CometLLMException("some-message", log_message_once=True)
+    with Scenario() as s:
+        s.comet_logging.log_once_at_level(
+            filter_decorator.LOGGER,
+            logging.ERROR,
+            "some-message",
+            exc_info=True,
+            extra={"show_traceback": True}
+        )
+        assert f() is None