Commit

test: add PromptList
peppescg committed Jan 3, 2025
1 parent daa73fb commit afa5e11
Showing 4 changed files with 393 additions and 7 deletions.
50 changes: 50 additions & 0 deletions src/components/__tests__/PromptList.test.tsx
@@ -0,0 +1,50 @@
import { describe, expect, it } from "vitest";
import { render, screen } from "@testing-library/react";
import { PromptList } from "../PromptList";
import mockedPrompts from "@/mocks/msw/fixtures/GetMessages.json";
import { MemoryRouter } from "react-router-dom";

describe("PromptList", () => {
it("should render correct prompt", () => {
render(
<MemoryRouter>
<PromptList prompts={[mockedPrompts[0]]} />
</MemoryRouter>,
);
expect(
screen.getByRole("link", {
name: /server\.py do you see any security issue\?/i,
}),
).toBeVisible();
});

it("should render default prompt value when missing question", async () => {
const conversationTimestamp = "2025-01-02T14:19:58.024100Z";
render(
<MemoryRouter>
<PromptList
prompts={[
{
question_answers: [
{
question: null,
answer: null,
},
],
provider: "vllm",
type: "fim",
chat_id: "b97fbe59-0e34-4b98-8f2f-41332ebc059a",
conversation_timestamp: conversationTimestamp,
},
]}
/>
</MemoryRouter>,
);

expect(
screen.getByRole("link", {
name: `Prompt ${conversationTimestamp}`,
}),
).toBeVisible();
});
});
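
The second test expects PromptList to fall back to a timestamp-based link label when a conversation has no question text. A minimal sketch of that fallback, assuming a hypothetical getPromptLabel helper and a simplified Prompt shape (neither is part of this commit; the real PromptList implementation is not shown here):

// Hypothetical sketch only; names and types are assumptions, not the actual PromptList code.
type QuestionAnswer = {
  question: { message: string } | null;
  answer: { message: string } | null;
};

type Prompt = {
  question_answers: QuestionAnswer[];
  conversation_timestamp: string;
};

// Use the first question's message when present; otherwise build the
// "Prompt <conversation_timestamp>" label asserted in the second test.
function getPromptLabel(prompt: Prompt): string {
  const question = prompt.question_answers[0]?.question?.message;
  return question ?? `Prompt ${prompt.conversation_timestamp}`;
}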
30 changes: 30 additions & 0 deletions src/mocks/msw/fixtures/GetAlerts.json
@@ -0,0 +1,30 @@
[
{
"conversation": {
"question_answers": [
{
"question": {
"message": "```/Users/giuseppe/workspace/codegate/src/codegate/server.py\nimport traceback\n\nimport structlog\nfrom fastapi import APIRouter, FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom starlette.middleware.errors import ServerErrorMiddleware\n\nfrom codegate import __description__, __version__\nfrom codegate.dashboard.dashboard import dashboard_router\nfrom codegate.pipeline.factory import PipelineFactory\nfrom codegate.providers.anthropic.provider import AnthropicProvider\nfrom codegate.providers.llamacpp.provider import LlamaCppProvider\nfrom codegate.providers.ollama.provider import OllamaProvider\nfrom codegate.providers.openai.provider import OpenAIProvider\nfrom codegate.providers.registry import ProviderRegistry\nfrom codegate.providers.vllm.provider import VLLMProvider\n\nlogger = structlog.get_logger(\"codegate\")\n\n\nasync def custom_error_handler(request, exc: Exception):\n \"\"\"This is a Middleware to handle exceptions and log them.\"\"\"\n # Capture the stack trace\n extracted_traceback = traceback.extract_tb(exc.__traceback__)\n # Log only the last 3 items of the stack trace. 3 is an arbitrary number.\n logger.error(traceback.print_list(extracted_traceback[-3:]))\n return JSONResponse({\"error\": str(exc)}, status_code=500)\n\n\ndef init_app(pipeline_factory: PipelineFactory) -> FastAPI:\n \"\"\"Create the FastAPI application.\"\"\"\n app = FastAPI(\n title=\"CodeGate\",\n description=__description__,\n version=__version__,\n )\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n # Apply error handling middleware\n app.add_middleware(ServerErrorMiddleware, handler=custom_error_handler)\n\n # Create provider registry\n registry = ProviderRegistry(app)\n\n # Register all known providers\n registry.add_provider(\n \"openai\",\n OpenAIProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"anthropic\",\n AnthropicProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"llamacpp\",\n LlamaCppProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"vllm\",\n VLLMProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"ollama\",\n OllamaProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n 
fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n\n # Create and add system routes\n system_router = APIRouter(tags=[\"System\"])\n\n @system_router.get(\"/health\")\n async def health_check():\n return {\"status\": \"healthy\"}\n\n app.include_router(system_router)\n app.include_router(dashboard_router)\n\n return app\n\n```\n\nserver.py do you see any security issue?",
"timestamp": "2025-01-03T09:10:00.689177Z",
"message_id": "0445530c-9288-4591-b48a-4fdc2f0aec6c"
},
"answer": {
"message": "Yes, there are several security issues in the provided `server.py` code. Here are the main ones:\n\n1. **CORS Configuration**:\n ```python\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n ```\n - Allowing all origins (`allow_origins=[\"*\"]`) and methods (`allow_methods=[\"*\"]`) can expose your server to Cross-Site Request Forgery (CSRF) attacks and other security vulnerabilities. It's recommended to restrict these settings to only the origins and methods that your application needs to support.\n\n2. **Error Handling Middleware**:\n ```python\n async def custom_error_handler(request, exc: Exception):\n \"\"\"This is a Middleware to handle exceptions and log them.\"\"\"\n # Capture the stack trace\n extracted_traceback = traceback.extract_tb(exc.__traceback__)\n # Log only the last 3 items of the stack trace. 3 is an arbitrary number.\n logger.error(traceback.print_list(extracted_traceback[-3:]))\n return JSONResponse({\"error\": str(exc)}, status_code=500)\n ```\n - Exposing detailed error messages in the response can be dangerous, as it can provide attackers with information that can be used to exploit vulnerabilities. It's better to log the error details and return a generic error message to the client.\n\n3. **Handling Exceptions**:\n - The `custom_error_handler` function logs the traceback and returns the exception message directly to the client. This can expose sensitive information about the server's internal state. It's better to log the error and return a generic message.\n\n4. **Provider Configuration**:\n - The providers are configured with the same pipeline processors. This might not be a security issue in itself, but it's important to ensure that the pipeline processors are secure and correctly configured.\n\n5. **Logging**:\n - The `logger.error` function is used to log the error traceback. Ensure that the logging configuration is set up correctly to prevent logging sensitive information to insecure destinations.\n\nHere is a revised version of the code with some of these issues addressed:\n\n```python\nimport traceback\n\nimport structlog\nfrom fastapi import APIRouter, FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom starlette.middleware.errors import ServerErrorMiddleware\n\nfrom codegate import __description__, __version__\nfrom codegate.dashboard.dashboard import dashboard_router\nfrom codegate.pipeline.factory import PipelineFactory\nfrom codegate.providers.anthropic.provider import AnthropicProvider\nfrom codegate.providers.llamacpp.provider import LlamaCppProvider\nfrom codegate.providers.ollama.provider import OllamaProvider\nfrom codegate.providers.openai.provider import OpenAIProvider\nfrom codegate.providers.registry import ProviderRegistry\nfrom codegate.providers.vllm.provider import VLLMProvider\n\nlogger = structlog.get_logger(\"codegate\")\n\n\nasync def custom_error_handler(request, exc: Exception):\n \"\"\"This is a Middleware to handle exceptions and log them.\"\"\"\n # Capture the stack trace\n extracted_traceback = traceback.extract_tb(exc.__traceback__)\n # Log only the last 3 items of the stack trace. 
3 is an arbitrary number.\n logger.error(traceback.print_list(extracted_traceback[-3:]))\n return JSONResponse({\"error\": \"An internal server error occurred\"}, status_code=500)\n\n\ndef init_app(pipeline_factory: PipelineFactory) -> FastAPI:\n \"\"\"Create the FastAPI application.\"\"\"\n app = FastAPI(\n title=\"CodeGate\",\n description=__description__,\n version=__version__,\n )\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"https://example.com\"], # Restrict to specific origins\n allow_credentials=True,\n allow_methods=[\"GET\", \"POST\"], # Restrict to specific methods\n allow_headers=[\"*\"],\n )\n # Apply error handling middleware\n app.add_middleware(ServerErrorMiddleware, handler=custom_error_handler)\n\n # Create provider registry\n registry = ProviderRegistry(app)\n\n # Register all known providers\n registry.add_provider(\n \"openai\",\n OpenAIProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"anthropic\",\n AnthropicProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"llamacpp\",\n LlamaCppProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"vllm\",\n VLLMProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n registry.add_provider(\n \"ollama\",\n OllamaProvider(\n pipeline_processor=pipeline_factory.create_input_pipeline(),\n fim_pipeline_processor=pipeline_factory.create_fim_pipeline(),\n output_pipeline_processor=pipeline_factory.create_output_pipeline(),\n fim_output_pipeline_processor=pipeline_factory.create_fim_output_pipeline(),\n ),\n )\n\n # Create and add system routes\n system_router = APIRouter(tags=[\"System\"])\n\n @system_router.get(\"/health\")\n async def health_check():\n return {\"status\": \"healthy\"}\n\n app.include_router(system_router)\n app.include_router(dashboard_router)\n\n return app\n```\n\nIn this revised version, the CORS middleware is restricted to specific origins and methods, and the error handler returns a generic error message to the client.",
"timestamp": "2025-01-03T09:09:34.359269Z",
"message_id": "68e7c9bc-9afa-4240-8eba-621eaae4a2a0"
}
}
],
"provider": "vllm",
"type": "chat",
"chat_id": "chatcmpl-eebad2bd68434c7c947d10754a204ea4",
"conversation_timestamp": "2025-01-03T09:10:00.689177Z"
},
"alert_id": "0445530c-9288-4591-b48a-4fdc2f0aec6c",
"code_snippet": null,
"trigger_string": "`\n\n",
"trigger_type": "code-comment",
"trigger_category": "info",
"timestamp": "2025-01-03T09:10:00.689177Z"
}
]
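
The GetAlerts.json fixture above mirrors an alert payload from the backend. A minimal sketch of how it might be wired into an MSW handler, assuming msw v2; the handler name and endpoint path are hypothetical and not part of this commit:

import { http, HttpResponse } from "msw";
import mockedAlerts from "@/mocks/msw/fixtures/GetAlerts.json";

// Hypothetical handler; the real endpoint path and handler registration
// used by the dashboard are not shown in this commit.
export const getAlertsHandler = http.get("*/dashboard/alerts", () =>
  HttpResponse.json(mockedAlerts),
);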
