Commit

Add designed front end (#32)
* Add designed index page

* fix some stuff for llama2 compat

Signed-off-by: Matteo Collina <[email protected]>

* fixup

Signed-off-by: Matteo Collina <[email protected]>

---------

Signed-off-by: Matteo Collina <[email protected]>
Co-authored-by: Matteo Collina <[email protected]>
flakey5 and mcollina authored May 9, 2024
1 parent 4f215b7 commit a2cbd1d
Showing 19 changed files with 973 additions and 79 deletions.
16 changes: 16 additions & 0 deletions CONTRIBUTING.md
@@ -44,6 +44,22 @@ Steps for downloading and setting up AI Warp for local development.
 node ../dist/cli/start.js
 ```

+### Testing a model with OpenAI
+
+To test a remote model with OpenAI, you can use the following configuration,
+which points at the model we used for testing:
+
+```json
+"aiProvider": {
+  "openai": {
+    "model": "gpt-3.5-turbo",
+    "apiKey": "{PLT_OPENAI_API_KEY}"
+  }
+}
+```
+
+Make sure to add your OpenAI API key as `PLT_OPENAI_API_KEY` in your `.env` file.
+
 ### Testing a local model with llama2
 To test a local model with llama2, you can use the following to
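
As an aside on the OpenAI section above: the `{PLT_OPENAI_API_KEY}` placeholder is resolved from the environment by Platformatic's config loader, so the `.env` entry is a single key/value line. A minimal example (the key value here is a placeholder, not a real credential):

```
PLT_OPENAI_API_KEY=sk-your-openai-api-key
```
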
43 changes: 34 additions & 9 deletions ai-providers/llama2.ts
@@ -1,4 +1,5 @@
 import { ReadableByteStreamController, ReadableStream, UnderlyingByteSource } from 'stream/web'
+import { FastifyLoggerInstance } from 'fastify'
 import {
   LLamaChatPromptOptions,
   LlamaChatSession,
@@ -60,13 +61,16 @@ class Llama2ByteSource implements UnderlyingByteSource {
   backloggedChunks: ChunkQueue = new ChunkQueue()
   finished: boolean = false
   controller?: ReadableByteStreamController
+  abortController: AbortController

-  constructor (session: LlamaChatSession, prompt: string, chunkCallback?: StreamChunkCallback) {
+  constructor (session: LlamaChatSession, prompt: string, logger: FastifyLoggerInstance, chunkCallback?: StreamChunkCallback) {
     this.session = session
     this.chunkCallback = chunkCallback
+    this.abortController = new AbortController()

     session.prompt(prompt, {
-      onToken: this.onToken
+      onToken: this.onToken,
+      signal: this.abortController.signal
     }).then(() => {
       this.finished = true
       // Don't close the stream if we still have chunks to send
@@ -75,22 +79,36 @@ class Llama2ByteSource implements UnderlyingByteSource {
       }
     }).catch((err: any) => {
       this.finished = true
-      if (this.controller !== undefined) {
-        this.controller.close()
+      logger.info({ err })
+      if (!this.abortController.signal.aborted && this.controller !== undefined) {
+        try {
+          this.controller.close()
+        } catch (err) {
+          logger.info({ err })
+        }
       }
       throw err
     })
   }

+  cancel (): void {
+    this.abortController.abort()
+  }
+
   onToken: LLamaChatPromptOptions['onToken'] = async (chunk) => {
     if (this.controller === undefined) {
       // Stream hasn't started yet, add it to the backlog queue
       this.backloggedChunks.push(chunk)
       return
     }

-    await this.clearBacklog()
-    await this.enqueueChunk(chunk)
+    try {
+      await this.clearBacklog()
+      await this.enqueueChunk(chunk)
+    } catch (err) {
+      // We can't do anything about errors here besides logging them
+      console.error(err)
+    }
   }

   private async enqueueChunk (chunk: number[]): Promise<void> {
@@ -103,6 +121,10 @@ class Llama2ByteSource implements UnderlyingByteSource {
       response = await this.chunkCallback(response)
     }

+    if (response === '') {
+      response = '\n' // It seems empty chunks are newlines
+    }
+
     const eventData: AiStreamEvent = {
       event: 'content',
       data: {
@@ -139,14 +161,17 @@

 interface Llama2ProviderCtorOptions {
   modelPath: string
+  logger: FastifyLoggerInstance
 }

 export class Llama2Provider implements AiProvider {
   context: LlamaContext
+  logger: FastifyLoggerInstance

-  constructor ({ modelPath }: Llama2ProviderCtorOptions) {
+  constructor ({ modelPath, logger }: Llama2ProviderCtorOptions) {
     const model = new LlamaModel({ modelPath })
     this.context = new LlamaContext({ model })
+    this.logger = logger
   }

   async ask (prompt: string): Promise<string> {
@@ -159,6 +184,6 @@ export class Llama2Provider implements AiProvider {
   async askStream (prompt: string, chunkCallback?: StreamChunkCallback): Promise<ReadableStream> {
     const session = new LlamaChatSession({ context: this.context })

-    return new ReadableStream(new Llama2ByteSource(session, prompt, chunkCallback))
+    return new ReadableStream(new Llama2ByteSource(session, prompt, this.logger, chunkCallback))
   }
 }
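
To see how the new cancellation path fits together: cancelling the stream's reader invokes `Llama2ByteSource.cancel()`, which aborts the in-flight `session.prompt()` through the `AbortController` signal, and the `signal.aborted` guard in the `catch` branch keeps the source from closing an already-cancelled stream. A minimal consumer-side sketch — the model path and prompt are invented, and a standalone Fastify instance stands in for the `FastifyLoggerInstance` parameter:

```ts
import Fastify from 'fastify'
import { Llama2Provider } from './ai-providers/llama2.js'

const app = Fastify()

// Model path is illustrative; point it at any local llama2-compatible model.
const provider = new Llama2Provider({
  modelPath: './models/llama-2-7b-chat.bin',
  logger: app.log
})

const stream = await provider.askStream('Tell me a very long story')
const reader = stream.getReader()
const decoder = new TextDecoder()

// Read a few chunks, then stop early.
for (let i = 0; i < 5; i++) {
  const { done, value } = await reader.read()
  if (done) break
  process.stdout.write(decoder.decode(value))
}

// Cancelling the reader calls Llama2ByteSource.cancel(), which aborts
// the pending session.prompt() via the AbortController signal.
await reader.cancel()
```
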
10 changes: 7 additions & 3 deletions plugins/warp.ts
@@ -1,5 +1,6 @@
 // eslint-disable-next-line
 /// <reference path="../index.d.ts" />
+import { FastifyLoggerInstance } from 'fastify'
 import fastifyPlugin from 'fastify-plugin'
 import { OpenAiProvider } from '../ai-providers/open-ai.js'
 import { MistralProvider } from '../ai-providers/mistral.js'
@@ -12,7 +13,7 @@ import { Llama2Provider } from '../ai-providers/llama2.js'

 const UnknownAiProviderError = createError('UNKNOWN_AI_PROVIDER', 'Unknown AI Provider')

-function build (aiProvider: AiWarpConfig['aiProvider']): AiProvider {
+function build (aiProvider: AiWarpConfig['aiProvider'], logger: FastifyLoggerInstance): AiProvider {
   if ('openai' in aiProvider) {
     return new OpenAiProvider(aiProvider.openai)
   } else if ('mistral' in aiProvider) {
@@ -22,15 +23,18 @@ function build (aiProvider: AiWarpConfig['aiProvider']): AiProvider {
   } else if ('azure' in aiProvider) {
     return new AzureProvider(aiProvider.azure)
   } else if ('llama2' in aiProvider) {
-    return new Llama2Provider(aiProvider.llama2)
+    return new Llama2Provider({
+      ...aiProvider.llama2,
+      logger
+    })
   } else {
     throw new UnknownAiProviderError()
   }
 }

 export default fastifyPlugin(async (fastify) => {
   const { config } = fastify.platformatic
-  const provider = build(config.aiProvider)
+  const provider = build(config.aiProvider, fastify.log)

   fastify.decorate('ai', {
     warp: async (request, prompt) => {
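
For context, `build()` selects the provider from the `aiProvider` key of the Platformatic config, and the spread above means any `llama2` options flow straight into the constructor with the Fastify logger injected alongside them. A config along these lines (the model path is illustrative) now yields a `Llama2Provider` that logs through `fastify.log`:

```json
"aiProvider": {
  "llama2": {
    "modelPath": "./models/llama-2-7b-chat.bin"
  }
}
```
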
34 changes: 34 additions & 0 deletions static/chat.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>Chat - AI Warp</title>
+
+  <link rel="stylesheet" href="/styles/common.css">
+  <link rel="stylesheet" href="/styles/chat.css">
+</head>
+<body>
+  <div id="navbar">
+    <a href="https://platformatic.dev">
+      <img id="navbar-logo" src="/images/platformatic-logo.svg" alt="Platformatic">
+    </a>
+  </div>
+
+  <div id="messages"></div>
+
+  <div id="prompt">
+    <input id="prompt-input" type="text" placeholder="Enter your prompt to Platformatic Ai-Warp" />
+    <button id="prompt-button" type="button">
+      <img src="/images/icons/arrow-long-right.svg" />
+    </button>
+  </div>
+
+  <div id="bottom-links">
+    <a href="/chat.html">Start a new chat</a>
+    <a href="/documentation">View OpenAPI Documentation</a>
+  </div>
+
+  <script src="/scripts/chat.js"></script>
+</body>
+</html>
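
`static/scripts/chat.js` itself is not shown in this diff; the following is only a sketch of the kind of wiring the page implies, using the element IDs above. The endpoint path, the SSE payload shape (`{ response }`, inferred from `AiStreamEvent` in llama2.ts), and the chunk handling are assumptions, not the actual implementation:

```ts
const input = document.getElementById('prompt-input') as HTMLInputElement
const button = document.getElementById('prompt-button') as HTMLButtonElement
const messages = document.getElementById('messages') as HTMLDivElement

button.addEventListener('click', async () => {
  const prompt = input.value.trim()
  if (prompt === '') return

  // One message bubble per response, appended to as chunks arrive.
  const bubble = document.createElement('div')
  messages.appendChild(bubble)

  // Assumed streaming endpoint exposed by the AI Warp service.
  const res = await fetch('/api/v1/stream', {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({ prompt })
  })

  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    // The body is SSE-framed; a real client would buffer partial events
    // instead of naively splitting each chunk on newlines.
    for (const line of decoder.decode(value).split('\n')) {
      if (line.startsWith('data: ')) {
        bubble.textContent += JSON.parse(line.slice(6)).response ?? ''
      }
    }
  }
})
```
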
9 changes: 9 additions & 0 deletions static/images/avatars/platformatic.svg
5 changes: 5 additions & 0 deletions static/images/avatars/you.svg
3 changes: 3 additions & 0 deletions static/images/icons/arrow-long-right.svg
4 changes: 4 additions & 0 deletions static/images/icons/checkmark.svg
5 changes: 5 additions & 0 deletions static/images/icons/copy.svg
5 changes: 5 additions & 0 deletions static/images/icons/edit.svg
5 changes: 5 additions & 0 deletions static/images/icons/error.svg
4 changes: 4 additions & 0 deletions static/images/icons/regenerate.svg