Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add return_contexts #2

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 13 additions & 4 deletions backend/danswer/chat/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,24 +74,33 @@ class DanswerQuotes(BaseModel):
quotes: list[DanswerQuote]


class DanswerContext(BaseModel):
    """One retrieved document chunk returned to the caller as LLM context.

    Populated from a retrieved chunk (see the construction site in
    answer_question.py, where each field is copied from a ``context_doc``).
    """

    # Full text of the chunk handed to the LLM.
    content: str
    # Identifier of the source document the chunk came from.
    document_id: str
    # Human-readable name of the source document.
    semantic_identifier: str
    # Short preview/summary text for the document.
    blurb: str


class DanswerContexts(BaseModel):
    """Container for all context chunks used to answer a single query."""

    contexts: list[DanswerContext]


class DanswerAnswer(BaseModel):
    """Final answer text; None when no answer could be generated."""

    answer: str | None


class QAResponse(SearchResponse, DanswerAnswer):
    """Complete (non-streaming) question-answering response.

    Combines the search results (via SearchResponse) with the generated
    answer (via DanswerAnswer) plus quote/context/flow metadata. Fields are
    filled incrementally from stream packets in ``get_search_answer``.
    """

    # NOTE(review): downstream this is assigned a DanswerQuotes packet
    # (``qa_response.quotes = packet``) — the list[DanswerQuote] annotation
    # may be stale; confirm against callers before changing.
    quotes: list[DanswerQuote] | None
    # Fix: DanswerContexts already wraps list[DanswerContext]; this field
    # holds ONE container, not a list of containers. It is assigned a single
    # DanswerContexts packet downstream, and OneShotQAResponse types the
    # equivalent field as ``DanswerContexts | None``.
    contexts: DanswerContexts | None
    predicted_flow: QueryFlow
    predicted_search: SearchType
    # Reflexion validity check result; None if Reflexion was not run.
    eval_res_valid: bool | None = None
    # Indices of the chunks actually passed to the LLM.
    llm_chunks_indices: list[int] | None = None
    error_msg: str | None = None


AnswerQuestionReturn = tuple[DanswerAnswer, DanswerQuotes]
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can this be removed? Seems like it's not used anywhere.



# Streaming QA yields answer fragments, then quotes, optionally the raw
# retrieved contexts (when return_contexts is set), or an error packet.
# (The scrape retained both the pre- and post-diff union lines back to back,
# which is a syntax error inside the subscript; the post-diff union — the
# one including DanswerContexts — is the intended line per this PR.)
AnswerQuestionStreamReturn = Iterator[
    DanswerAnswerPiece | DanswerQuotes | DanswerContexts | StreamingError
]


Expand Down
21 changes: 21 additions & 0 deletions backend/danswer/one_shot_answer/answer_question.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import itertools
from collections.abc import Callable
from collections.abc import Iterator
from typing import cast
Expand All @@ -6,6 +7,8 @@

from danswer.chat.chat_utils import get_chunks_for_qa
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import DanswerContext
from danswer.chat.models import DanswerContexts
from danswer.chat.models import DanswerQuotes
from danswer.chat.models import LLMMetricsContainer
from danswer.chat.models import LLMRelevanceFilterResponse
Expand Down Expand Up @@ -67,6 +70,7 @@ def stream_answer_objects(
| LLMRelevanceFilterResponse
| DanswerAnswerPiece
| DanswerQuotes
| DanswerContexts
| StreamingError
| ChatMessageDetail
]:
Expand Down Expand Up @@ -229,6 +233,21 @@ def stream_answer_objects(
else no_gen_ai_response()
)

# Fragment of stream_answer_objects (enclosing def not in view): when the
# caller set return_contexts, package the chunks given to the LLM as a
# DanswerContexts packet and append it to the response stream.
if qa_model is not None and query_req.return_contexts:
    contexts = DanswerContexts(
        contexts=[
            DanswerContext(
                content=context_doc.content,
                document_id=context_doc.document_id,
                semantic_identifier=context_doc.semantic_identifier,
                # NOTE(review): blurb is populated from semantic_identifier
                # rather than context_doc.blurb — looks like a copy/paste
                # slip; confirm intended value.
                blurb=context_doc.semantic_identifier,
            )
            for context_doc in llm_chunks
        ]
    )

    # Lazily append the contexts packet after the existing packets.
    response_packets = itertools.chain(response_packets, [contexts])

# Capture outputs and errors
llm_output = ""
error: str | None = None
Expand Down Expand Up @@ -316,6 +335,8 @@ def get_search_answer(
qa_response.llm_chunks_indices = packet.relevant_chunk_indices
elif isinstance(packet, DanswerQuotes):
qa_response.quotes = packet
elif isinstance(packet, DanswerContexts):
qa_response.contexts = packet
elif isinstance(packet, StreamingError):
qa_response.error_msg = packet.error
elif isinstance(packet, ChatMessageDetail):
Expand Down
3 changes: 3 additions & 0 deletions backend/danswer/one_shot_answer/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from pydantic import BaseModel
from pydantic import root_validator

from danswer.chat.models import DanswerContexts
from danswer.chat.models import DanswerQuotes
from danswer.chat.models import QADocsResponse
from danswer.configs.constants import MessageType
Expand All @@ -25,6 +26,7 @@ class DirectQARequest(BaseModel):
persona_id: int
retrieval_options: RetrievalDetails
chain_of_thought: bool = False
return_contexts: bool = False

@root_validator
def check_chain_of_thought_and_prompt_id(
Expand Down Expand Up @@ -53,3 +55,4 @@ class OneShotQAResponse(BaseModel):
error_msg: str | None = None
answer_valid: bool = True # Reflexion result, default True if Reflexion not run
chat_message_id: int | None = None
contexts: DanswerContexts | None = None
Loading