-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add multi-model support and Claude handling
- Loading branch information
Showing
9 changed files
with
496 additions
and
374 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
import { getServerConfig } from "@/config/server"; | ||
import { ServerRuntime } from "next"; | ||
import Anthropic from '@anthropic-ai/sdk'; | ||
|
||
export const runtime: ServerRuntime = "edge"; | ||
|
||
export async function POST(request: Request) { | ||
const encoder = new TextEncoder(); | ||
const stream = new TransformStream(); | ||
const writer = stream.writable.getWriter(); | ||
|
||
try { | ||
const config = await getServerConfig(); | ||
const { message, model } = await request.json(); | ||
|
||
if (!config.anthropicApiKey) { | ||
throw new Error('ANTHROPIC_API_KEY is not set'); | ||
} | ||
|
||
const anthropic = new Anthropic({ | ||
apiKey: config.anthropicApiKey, | ||
}); | ||
|
||
const stream = await anthropic.completions.create({ | ||
model: model || "claude-2", | ||
max_tokens_to_sample: 300, | ||
prompt: `Human: ${message}\n\nAssistant:`, | ||
stream: true, | ||
}); | ||
|
||
for await (const completion of stream) { | ||
await writer.write( | ||
encoder.encode(`data: ${JSON.stringify({ text: completion.completion })}\n\n`) | ||
); | ||
} | ||
} catch (error: any) { | ||
console.error('Error in Claude chat:', error); | ||
await writer.write( | ||
encoder.encode(`data: ${JSON.stringify({ error: error.message })}\n\n`) | ||
); | ||
} finally { | ||
await writer.close(); | ||
} | ||
|
||
return new Response(stream.readable, { | ||
headers: { | ||
'Content-Type': 'text/event-stream', | ||
'Cache-Control': 'no-cache', | ||
'Connection': 'keep-alive', | ||
}, | ||
}); | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,67 +1,55 @@ | ||
import { getServerConfig } from "@/config/server"; | ||
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai"; | ||
|
||
export const runtime = 'edge'; | ||
|
||
export async function POST(req: Request) { | ||
try { | ||
// Fetch server configuration | ||
const config = await getServerConfig(); | ||
|
||
// Initialize Google Generative AI with the provided credentials | ||
const genAI = new GoogleGenerativeAI(config.geminiKey); | ||
const modelG = genAI.getGenerativeModel({ model: "gemini-pro" }); | ||
|
||
// Define the generation configuration | ||
const generationConfig = { | ||
temperature: 0.8, | ||
topK: 0.9, | ||
topP: 1, | ||
maxOutputTokens: 2048, | ||
}; | ||
|
||
// Define the safety settings for content filtering | ||
const safetySettings = [ | ||
{ | ||
category: HarmCategory.HARM_CATEGORY_HARASSMENT, | ||
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, | ||
}, | ||
{ | ||
category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, | ||
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, | ||
}, | ||
{ | ||
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, | ||
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, | ||
}, | ||
{ | ||
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, | ||
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, | ||
}, | ||
]; | ||
|
||
// Start a chat session with the generative AI model | ||
const chat = modelG.startChat({ | ||
generationConfig, | ||
safetySettings, // Pass safety settings if needed | ||
}); | ||
|
||
// Extract messages from the request | ||
const { messages } = await req.json(); | ||
|
||
// Send the message to the model and await the response | ||
const result = await chat.sendMessage(messages); | ||
const response = await result.response; | ||
|
||
return new Response(JSON.stringify(response), { | ||
status: 200, | ||
}); | ||
} catch (error: any) { | ||
const errorMessage = error.message || "An unexpected error occurred"; | ||
const errorCode = error.status || 500; | ||
console.error(error); | ||
|
||
return new Response(JSON.stringify({ message: errorMessage }), { | ||
status: errorCode, | ||
}); | ||
} | ||
const encoder = new TextEncoder(); | ||
const stream = new TransformStream(); | ||
const writer = stream.writable.getWriter(); | ||
|
||
try { | ||
const config = await getServerConfig(); | ||
const { message, model } = await req.json(); | ||
|
||
const genAI = new GoogleGenerativeAI(config.geminiKey); | ||
const modelG = genAI.getGenerativeModel({ model: model || "gemini-1.5-flash" }); | ||
|
||
const chat = modelG.startChat({ | ||
generationConfig: { | ||
temperature: 0.8, | ||
topK: 0.9, | ||
topP: 1, | ||
maxOutputTokens: 2048, | ||
}, | ||
safetySettings: [ | ||
{ | ||
category: HarmCategory.HARM_CATEGORY_HARASSMENT, | ||
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, | ||
}, | ||
// ... other safety settings ... | ||
], | ||
}); | ||
|
||
const result = await chat.sendMessage(message); | ||
const response = await result.response; | ||
|
||
// Stream the response | ||
for (const chunk of response.text().split(' ')) { | ||
await writer.write(encoder.encode(`data: ${JSON.stringify({ text: chunk + ' ' })}\n\n`)); | ||
} | ||
} catch (error: any) { | ||
console.error('Error in Google AI chat:', error); | ||
await writer.write(encoder.encode(`data: ${JSON.stringify({ error: error.message })}\n\n`)); | ||
} finally { | ||
await writer.close(); | ||
} | ||
|
||
return new Response(stream.readable, { | ||
headers: { | ||
'Content-Type': 'text/event-stream', | ||
'Cache-Control': 'no-cache', | ||
'Connection': 'keep-alive', | ||
}, | ||
}); | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,105 @@ | ||
/** | ||
* @license | ||
* This file is licensed under the Apache License 2.0. | ||
* See the LICENSE file for more details. | ||
*/ | ||
|
||
import { GoogleGenerativeAI } from "@google/generative-ai"; | ||
import fs from "fs"; | ||
import { dirname } from "path"; | ||
import { fileURLToPath } from "url"; | ||
|
||
// ESM modules have no built-in __dirname; derive it from this module's URL.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Sample assets (e.g. jetpack.jpg) live in ./media next to this file.
const mediaPath = `${__dirname}/media`;
||
/**
 * Demonstrates a multi-turn, non-streaming chat with Gemini: the session is
 * seeded with a canned greeting exchange, then two follow-up user messages
 * are sent and each full reply is printed.
 */
async function chat() {
  // [START chat]
  // Make sure to include these imports:
  // import { GoogleGenerativeAI } from "@google/generative-ai";
  const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });

  // Prior turns the model should treat as already having happened.
  const seededHistory = [
    { role: "user", parts: [{ text: "Hello" }] },
    { role: "model", parts: [{ text: "Great to meet you. What would you like to know?" }] },
  ];
  const session = model.startChat({ history: seededHistory });

  const first = await session.sendMessage("I have 2 dogs in my house.");
  console.log(first.response.text());
  const second = await session.sendMessage("How many paws are in my house?");
  console.log(second.response.text());
  // [END chat]
}
|
||
/**
 * Same conversation as chat(), but using sendMessageStream: each reply is
 * written to stdout chunk-by-chunk as the model produces it.
 */
async function chatStreaming() {
  // [START chat_streaming]
  // Make sure to include these imports:
  // import { GoogleGenerativeAI } from "@google/generative-ai";
  const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
  const session = model.startChat({
    history: [
      { role: "user", parts: [{ text: "Hello" }] },
      { role: "model", parts: [{ text: "Great to meet you. What would you like to know?" }] },
    ],
  });

  // Print each streamed chunk the moment it arrives.
  const printStream = async (result) => {
    for await (const chunk of result.stream) {
      process.stdout.write(chunk.text());
    }
  };

  await printStream(await session.sendMessageStream("I have 2 dogs in my house."));
  await printStream(await session.sendMessageStream("How many paws are in my house?"));
  // [END chat_streaming]
}
|
||
/**
 * Streams a multimodal chat: a text opener, then a follow-up that attaches a
 * local JPEG (base64-encoded inline data) for the model to comment on.
 */
async function chatStreamingWithImages() {
  // [START chat_streaming_with_images]
  // Make sure to include these imports:
  // import { GoogleGenerativeAI } from "@google/generative-ai";
  const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
  const session = model.startChat();

  // Label each reply and print its chunks as they arrive.
  const printStream = async (result) => {
    process.stdout.write('\n\nmodel:\n');
    for await (const chunk of result.stream) {
      process.stdout.write(chunk.text());
    }
  };

  let result = await session.sendMessageStream("Hello, I'm designing inventions. Can I show you one?");
  await printStream(result);

  const jetpackImage = {
    inlineData: {
      data: Buffer.from(fs.readFileSync(`${mediaPath}/jetpack.jpg`)).toString("base64"),
      mimeType: "image/jpeg",
    },
  };
  result = await session.sendMessageStream(["What do you think about this design?", jetpackImage]);
  await printStream(result);
  // [END chat_streaming_with_images]
}
|
||
// async function runAll() { | ||
// await chat(); | ||
// await chatStreaming(); | ||
// await chatStreamingWithImages(); | ||
// } | ||
|
||
// runAll(); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,34 @@ | ||
// app/api/chat/route.ts | ||
import { NextRequest, NextResponse } from 'next/server'; | ||
import { AIServiceFactory } from '@/lib/ai/AIServiceFactory'; | ||
import { ModelProviderConfig } from '@/lib/ModelSetting'; | ||
import { verifyJWT } from '@/lib/auth'; // Assuming you have a JWT verification function | ||
|
||
export const runtime = 'edge'; | ||
|
||
export async function POST(req: NextRequest) { | ||
try { | ||
const { message, model, token } = await req.json(); | ||
const payload = await verifyJWT(token); // Verify and decode JWT | ||
|
||
const config: ModelProviderConfig = { | ||
modelProvider: model.split('-')[0] as 'openai' | 'gemini' | 'claude' | /* other providers */, | ||
model: model, | ||
// ... other config options | ||
}; | ||
|
||
const service = await AIServiceFactory.createService(config, payload); | ||
const stream = await service.generateResponse(message); | ||
|
||
return new NextResponse(stream, { | ||
headers: { | ||
'Content-Type': 'text/event-stream', | ||
'Cache-Control': 'no-cache', | ||
'Connection': 'keep-alive', | ||
}, | ||
}); | ||
} catch (error: any) { | ||
console.error('Error in chat API:', error); | ||
return NextResponse.json({ error: error.message }, { status: 500 }); | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.