diff --git a/.env.example b/.env.example
index 2f77ac6..c413461 100644
--- a/.env.example
+++ b/.env.example
@@ -1,10 +1,8 @@
 # required.
 NEUROAGENT_TOOLS__LITERATURE__URL=
 NEUROAGENT_KNOWLEDGE_GRAPH__BASE_URL=
-NEUROAGENT_GENERATIVE__OPENAI__TOKEN=
+NEUROAGENT_OPENAI__TOKEN=

-# Important but not required
-NEUROAGENT_AGENT__MODEL=
 NEUROAGENT_KNOWLEDGE_GRAPH__DOWNLOAD_HIERARCHY=
diff --git a/src/neuroagent/agent_routine.py b/src/neuroagent/agent_routine.py
index c123201..bcef856 100644
--- a/src/neuroagent/agent_routine.py
+++ b/src/neuroagent/agent_routine.py
@@ -56,7 +56,7 @@ async def get_chat_completion(
             "stream": stream,
         }
         if stream:
-            create_params["stream_options"] = {"include_usage": True}
+            create_params["stream_options"] = {"include_usage": True}  # type: ignore
         if tools:
             create_params["parallel_tool_calls"] = agent.parallel_tool_calls
diff --git a/src/neuroagent/app/config.py b/src/neuroagent/app/config.py
index f0334e9..0328268 100644
--- a/src/neuroagent/app/config.py
+++ b/src/neuroagent/app/config.py
@@ -5,7 +5,7 @@
 from typing import Literal, Optional

 from dotenv import dotenv_values
-from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
+from pydantic import BaseModel, ConfigDict, SecretStr
 from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -228,21 +228,21 @@ class Settings(BaseSettings):
         frozen=True,
     )

-    @model_validator(mode="after")
-    def check_consistency(self) -> "Settings":
-        """Check if consistent.
+    # @model_validator(mode="after")
+    # def check_consistency(self) -> "Settings":
+    #     """Check if consistent.

-        ATTENTION: Do not put model validators into the child settings. The
-        model validator is run during instantiation.
+    #     ATTENTION: Do not put model validators into the child settings. The
+    #     model validator is run during instantiation.

-        """
-        # If you don't enforce keycloak auth, you need a way to communicate with the APIs the tools leverage
-        if not self.keycloak.password and not self.keycloak.validate_token:
-            raise ValueError(
-                "Need an auth method for subsequent APIs called by the tools."
-            )
+    #     """
+    #     # If you don't enforce keycloak auth, you need a way to communicate with the APIs the tools leverage
+    #     if not self.keycloak.password and not self.keycloak.validate_token:
+    #         raise ValueError(
+    #             "Need an auth method for subsequent APIs called by the tools."
+    #         )

-        return self
+    #     return self


 # Load the remaining variables into the environment
diff --git a/src/neuroagent/app/routers/qa.py b/src/neuroagent/app/routers/qa.py
index 0ab4624..437d2ae 100644
--- a/src/neuroagent/app/routers/qa.py
+++ b/src/neuroagent/app/routers/qa.py
@@ -27,6 +27,7 @@
     AgentRequest,
     AgentResponse,
     HILResponse,
+    VercelRequest,
 )
 from neuroagent.stream import stream_agent_response
@@ -95,7 +96,7 @@ async def run_chat_agent(

 @router.post("/chat_streamed/{thread_id}")
 async def stream_chat_agent(
-    user_request: AgentRequest,
+    user_request: VercelRequest,
     request: Request,
     agents_routine: Annotated[AgentsRoutine, Depends(get_agents_routine)],
     agent: Annotated[Agent, Depends(get_starting_agent)],
@@ -114,7 +115,9 @@
             order=len(messages),
             thread_id=thread.thread_id,
             entity=Entity.USER,
-            content=json.dumps({"role": "user", "content": user_request.query}),
+            content=json.dumps(
+                {"role": "user", "content": user_request.messages[0].content}
+            ),
         )
     )
     stream_generator = stream_agent_response(
@@ -125,4 +128,8 @@
         thread,
         request,
     )
-    return StreamingResponse(stream_generator, media_type="text/event-stream")
+    return StreamingResponse(
+        stream_generator,
+        media_type="text/event-stream",
+        headers={"x-vercel-ai-data-stream": "v1"},
+    )
diff --git a/src/neuroagent/new_types.py b/src/neuroagent/new_types.py
index 2a22a46..212c229 100644
--- a/src/neuroagent/new_types.py
+++ b/src/neuroagent/new_types.py
@@ -56,6 +56,38 @@ class AgentResponse(BaseModel):
     message: str = ""


+class ClientAttachment(BaseModel):
+    """Vercel class."""
+
+    name: str
+    contentType: str
+    url: str
+
+
+class ToolInvocation(BaseModel):
+    """Vercel class."""
+
+    toolCallId: str
+    toolName: str
+    args: dict[str, Any]
+    result: dict[str, Any]
+
+
+class ClientMessage(BaseModel):
+    """Vercel class."""
+
+    role: str
+    content: str
+    experimental_attachments: list[ClientAttachment] | None = None
+    toolInvocations: list[ToolInvocation] | None = None
+
+
+class VercelRequest(BaseModel):
+    """Vercel class."""
+
+    messages: list[ClientMessage]
+
+
 class Result(BaseModel):
     """
     Encapsulates the possible return values for an agent function.
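
Note on the new request schema: a minimal sketch of how a Vercel AI SDK style chat payload validates against the VercelRequest model added in new_types.py. The example question below is made up for illustration and is not taken from this diff.

    from neuroagent.new_types import VercelRequest

    # Shape sent by a useChat-style client: a list of role/content messages.
    payload = {
        "messages": [
            {"role": "user", "content": "Which brain regions are available?"}
        ]
    }

    # Pydantic v2 validation; attachments and tool invocations are optional
    # on ClientMessage and can be omitted.
    request = VercelRequest.model_validate(payload)
    assert request.messages[0].content.startswith("Which brain regions")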
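
Note on calling the updated streaming endpoint: a sketch of a client request against stream_chat_agent. The base URL, thread id, and absence of authentication are assumptions for illustration, as is the router being mounted without an extra path prefix; none of this is part of the diff.

    import httpx

    payload = {"messages": [{"role": "user", "content": "Hello"}]}

    with httpx.stream(
        "POST",
        "http://localhost:8000/chat_streamed/my-thread-id",
        json=payload,
        timeout=None,
    ) as response:
        # The endpoint now advertises the Vercel AI data stream protocol.
        assert response.headers.get("x-vercel-ai-data-stream") == "v1"
        for chunk in response.iter_text():
            print(chunk, end="")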