Commit

multiModels support and add cluade handling
Stan370 committed Sep 12, 2024
1 parent e0ce0fc commit ad3d1c7
Showing 9 changed files with 496 additions and 374 deletions.
52 changes: 52 additions & 0 deletions app/api/chat/cluade/route.ts
@@ -0,0 +1,52 @@
import { getServerConfig } from "@/config/server";
import { ServerRuntime } from "next";
import Anthropic from '@anthropic-ai/sdk';

export const runtime: ServerRuntime = "edge";

export async function POST(request: Request) {
  const encoder = new TextEncoder();
  const stream = new TransformStream();
  const writer = stream.writable.getWriter();

  // Run the generation in the background so the Response below can start
  // flushing immediately; awaiting the write loop before returning would
  // stall once the TransformStream's internal queue fills, because nothing
  // reads the stream until the Response is returned.
  (async () => {
    try {
      const config = await getServerConfig();
      const { message, model } = await request.json();

      if (!config.anthropicApiKey) {
        throw new Error('ANTHROPIC_API_KEY is not set');
      }

      const anthropic = new Anthropic({
        apiKey: config.anthropicApiKey,
      });

      // Legacy Text Completions call; renamed from `stream` so it no longer
      // shadows the TransformStream declared above.
      const completionStream = await anthropic.completions.create({
        model: model || "claude-2",
        max_tokens_to_sample: 300,
        prompt: `Human: ${message}\n\nAssistant:`,
        stream: true,
      });

      for await (const completion of completionStream) {
        await writer.write(
          encoder.encode(`data: ${JSON.stringify({ text: completion.completion })}\n\n`)
        );
      }
    } catch (error: any) {
      console.error('Error in Claude chat:', error);
      await writer.write(
        encoder.encode(`data: ${JSON.stringify({ error: error.message })}\n\n`)
      );
    } finally {
      await writer.close();
    }
  })();

  return new Response(stream.readable, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
    },
  });
}
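
For reference, a minimal client-side consumer for this route's SSE framing might look like the sketch below. The /api/chat/cluade path is assumed from the file location, the parser only understands the data: {json} events this handler emits, and error handling is kept minimal; the same event shape applies to the Google route that follows.

// Sketch: read the route's SSE stream from a browser or edge environment.
// The /api/chat/cluade path is assumed from the file location above.
async function streamClaude(message: string, onText: (text: string) => void) {
  const res = await fetch('/api/chat/cluade', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message, model: 'claude-2' }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // SSE events end with a blank line; keep any trailing partial event.
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? '';
    for (const event of events) {
      if (!event.startsWith('data: ')) continue;
      const payload = JSON.parse(event.slice('data: '.length));
      if (payload.error) throw new Error(payload.error);
      if (payload.text) onText(payload.text);
    }
  }
}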
110 changes: 49 additions & 61 deletions app/api/chat/google/route.ts
@@ -1,67 +1,55 @@
import { getServerConfig } from "@/config/server";
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";

export const runtime = 'edge';

export async function POST(req: Request) {
try {
// Fetch server configuration
const config = await getServerConfig();

// Initialize Google Generative AI with the provided credentials
const genAI = new GoogleGenerativeAI(config.geminiKey);
const modelG = genAI.getGenerativeModel({ model: "gemini-pro" });

// Define the generation configuration
const generationConfig = {
temperature: 0.8,
topK: 0.9,
topP: 1,
maxOutputTokens: 2048,
};

// Define the safety settings for content filtering
const safetySettings = [
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
},
{
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
},
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
},
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
},
];

// Start a chat session with the generative AI model
const chat = modelG.startChat({
generationConfig,
safetySettings, // Pass safety settings if needed
});

// Extract messages from the request
const { messages } = await req.json();

// Send the message to the model and await the response
const result = await chat.sendMessage(messages);
const response = await result.response;

return new Response(JSON.stringify(response), {
status: 200,
});
} catch (error: any) {
const errorMessage = error.message || "An unexpected error occurred";
const errorCode = error.status || 500;
console.error(error);

return new Response(JSON.stringify({ message: errorMessage }), {
status: errorCode,
});
}
  const encoder = new TextEncoder();
  const stream = new TransformStream();
  const writer = stream.writable.getWriter();

  // Run the generation in the background so the Response below can begin
  // streaming immediately; awaiting the write loop here would stall once
  // the TransformStream's queue fills, because nothing reads the stream
  // until the Response is returned.
  (async () => {
    try {
      const config = await getServerConfig();
      const { message, model } = await req.json();

      const genAI = new GoogleGenerativeAI(config.geminiKey);
      const modelG = genAI.getGenerativeModel({ model: model || "gemini-1.5-flash" });

      const chat = modelG.startChat({
        generationConfig: {
          temperature: 0.8,
          topK: 40, // topK is an integer candidate count, not a probability
          topP: 1,
          maxOutputTokens: 2048,
        },
        safetySettings: [
          {
            category: HarmCategory.HARM_CATEGORY_HARASSMENT,
            threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
          },
          // ... other safety settings ...
        ],
      });

      // Stream real model chunks instead of splitting the finished text on
      // spaces, which only imitated streaming after the full response had
      // already arrived.
      const result = await chat.sendMessageStream(message);
      for await (const chunk of result.stream) {
        await writer.write(encoder.encode(`data: ${JSON.stringify({ text: chunk.text() })}\n\n`));
      }
    } catch (error: any) {
      console.error('Error in Google AI chat:', error);
      await writer.write(encoder.encode(`data: ${JSON.stringify({ error: error.message })}\n\n`));
    } finally {
      await writer.close();
    }
  })();

  return new Response(stream.readable, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
    },
  });
}
105 changes: 105 additions & 0 deletions app/api/chat/google/test.ts
@@ -0,0 +1,105 @@
/**
* @license
* This file is licensed under the Apache License 2.0.
* See the LICENSE file for more details.
*/

import { GoogleGenerativeAI } from "@google/generative-ai";
import fs from "fs";
import { dirname } from "path";
import { fileURLToPath } from "url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const mediaPath = __dirname + "/media";

async function chat() {
// [START chat]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
let result = await chat.sendMessage("I have 2 dogs in my house.");
console.log(result.response.text());
result = await chat.sendMessage("How many paws are in my house?");
console.log(result.response.text());
// [END chat]
}

async function chatStreaming() {
// [START chat_streaming]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
let result = await chat.sendMessageStream("I have 2 dogs in my house.");
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
result = await chat.sendMessageStream("How many paws are in my house?");
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
// [END chat_streaming]
}

async function chatStreamingWithImages() {
// [START chat_streaming_with_images]
// Make sure to include these imports:
// import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat();

let result = await chat.sendMessageStream("Hello, I'm designing inventions. Can I show you one?");
process.stdout.write('\n\nmodel:\n');
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
result = await chat.sendMessageStream(["What do you think about this design?", {
inlineData: {
data: Buffer.from(fs.readFileSync(`${mediaPath}/jetpack.jpg`)).toString("base64"),
mimeType: "image/jpeg",
},
}]);
process.stdout.write('\n\nmodel:\n');
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
// [END chat_streaming_with_images]
}

// async function runAll() {
// await chat();
// await chatStreaming();
// await chatStreamingWithImages();
// }

// runAll();
25 changes: 7 additions & 18 deletions app/api/chat/openai/route.ts
@@ -1,45 +1,34 @@
 import { getServerConfig } from "@/config/server";
 import { ServerRuntime } from "next";
 import OpenAI from "openai";
 
+import { OpenAIStream, StreamingTextResponse } from 'ai';
+
 export const runtime: ServerRuntime = "edge";
 
 export async function POST(request: Request) {
   try {
     const config = await getServerConfig();
     const { message, model } = await request.json();
 
     const openai = new OpenAI({
       apiKey: config.openaiApiKey,
       baseURL: config.openaiBaseUrl || config.openaiProxyUrl,
     });
     console.log(openai.baseURL);
     console.log(openai.apiKey);
 
     const response = await openai.chat.completions.create(
       {
-        model: "gpt-3.5-turbo",
-        messages: [{ role: "user", content: "Say this is a test" }],
+        model: model || "gpt-4o-mini",
+        messages: [{ role: "user", content: message }],
         stream: true,
       },
       {
         headers: {
           "Content-Type": "application/json",
           Authorization: `Bearer ${openai.apiKey}`,
         },
       }
     );
 
-    for await (const chunk of response) {
-      console.log(chunk.choices[0].delta);
-      process.stdout.write(chunk.choices[0]?.delta?.content || "");
-    }
-    return ;
+    const stream = OpenAIStream(response);
+    return new StreamingTextResponse(stream);
   } catch (error: any) {
     const errorMessage = error.error?.message || "An unexpected error occurred";
     const errorCode = error.status || 500;
-    console.log(error);
+    console.error(error);
 
     return new Response(JSON.stringify({ message: errorMessage }), {
       status: errorCode,
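
Unlike the SSE routes above, StreamingTextResponse from the Vercel AI SDK sends the tokens as a plain text stream, so a client reads the bytes directly with no event framing. A minimal consumer sketch, again assuming the endpoint path from the file location:

// Sketch: consume the plain text stream produced by StreamingTextResponse.
// The /api/chat/openai path is assumed from the file location above.
async function streamOpenAI(message: string, onText: (text: string) => void) {
  const res = await fetch('/api/chat/openai', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message, model: 'gpt-4o-mini' }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onText(decoder.decode(value, { stream: true })); // raw token text
  }
}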
34 changes: 34 additions & 0 deletions app/api/chat/route.ts
@@ -0,0 +1,34 @@
// app/api/chat/route.ts
import { NextRequest, NextResponse } from 'next/server';
import { AIServiceFactory } from '@/lib/ai/AIServiceFactory';
import { ModelProviderConfig } from '@/lib/ModelSetting';
import { verifyJWT } from '@/lib/auth'; // Assuming you have a JWT verification function

export const runtime = 'edge';

export async function POST(req: NextRequest) {
try {
const { message, model, token } = await req.json();
const payload = await verifyJWT(token); // Verify and decode JWT

const config: ModelProviderConfig = {
modelProvider: model.split('-')[0] as 'openai' | 'gemini' | 'claude', // extend this union as providers are added
model: model,
// ... other config options
};

const service = await AIServiceFactory.createService(config, payload);
const stream = await service.generateResponse(message);

return new NextResponse(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
},
});
} catch (error: any) {
console.error('Error in chat API:', error);
return NextResponse.json({ error: error.message }, { status: 500 });
}
}
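
The AIServiceFactory and ModelProviderConfig imports come from files in this commit that are not expanded in this view, so this route is the only evidence of their shape. (Note, too, that model.split('-')[0] yields "gpt" rather than "openai" for the default OpenAI models, so the cast papers over a mapping the factory would still need.) The sketch below is inferred from the route's usage alone — every name and signature in it is an assumption, not the actual implementation:

// Hypothetical shapes, inferred only from how app/api/chat/route.ts calls
// them; the real lib/ai/AIServiceFactory.ts and lib/ModelSetting.ts may differ.
export interface ModelProviderConfig {
  modelProvider: 'openai' | 'gemini' | 'claude';
  model: string;
}

export interface AIService {
  // The route hands the result straight to NextResponse with SSE headers,
  // so a byte stream is the natural return type.
  generateResponse(message: string): Promise<ReadableStream<Uint8Array>>;
}

// Placeholder constructors; each would wrap the matching provider SDK,
// much like the /api/chat/* handlers above.
declare const providers: Record<
  ModelProviderConfig['modelProvider'],
  new (config: ModelProviderConfig, jwtPayload: unknown) => AIService
>;

export class AIServiceFactory {
  static async createService(
    config: ModelProviderConfig,
    jwtPayload: unknown, // decoded JWT, e.g. for per-user keys or quotas
  ): Promise<AIService> {
    const Provider = providers[config.modelProvider];
    if (!Provider) {
      throw new Error(`Unsupported provider: ${config.modelProvider}`);
    }
    return new Provider(config, jwtPayload);
  }
}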
3 changes: 3 additions & 0 deletions config/server.ts
@@ -16,6 +16,7 @@ declare global {
NEXT_PUBLIC_AZURE_GPT_35_TURBO_ID: string;
NEXT_PUBLIC_AZURE_GPT_45_VISION_ID: string;
NEXT_PUBLIC_AZURE_GPT_45_TURBO_ID: string;
ANTHROPIC_API_KEY: string;
}
}
}
@@ -40,6 +41,7 @@
NEXT_PUBLIC_AZURE_GPT_35_TURBO_ID,
NEXT_PUBLIC_AZURE_GPT_45_VISION_ID,
NEXT_PUBLIC_AZURE_GPT_45_TURBO_ID,
ANTHROPIC_API_KEY,
} = process.env;

return {
@@ -57,6 +59,7 @@
azureGpt35TurboId: NEXT_PUBLIC_AZURE_GPT_35_TURBO_ID,
azureGpt45VisionId: NEXT_PUBLIC_AZURE_GPT_45_VISION_ID,
azureGpt45TurboId: NEXT_PUBLIC_AZURE_GPT_45_TURBO_ID,
anthropicApiKey: ANTHROPIC_API_KEY,
};
};
// const express = require("express");
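
With anthropicApiKey wired into the server config, deployments also need the new environment variable alongside the existing ones — a sketch of the .env.local entry, with a placeholder value:

# .env.local — the value is a placeholder, not a real key
ANTHROPIC_API_KEY=sk-ant-xxxxxxxx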