From 173f1d55ac43f4e520756266a031f812a919b49b Mon Sep 17 00:00:00 2001 From: Ryan Lamb <4955475+kinyoklion@users.noreply.github.com> Date: Mon, 9 Dec 2024 10:09:53 -0800 Subject: [PATCH] refactor!: Rename model and providerid to name. --- .../__tests__/LDAIClientImpl.test.ts | 22 +++++++++---------- .../server-ai/examples/bedrock/src/index.ts | 2 +- .../server-ai/examples/openai/src/index.ts | 2 +- .../server-ai/src/api/config/LDAIConfig.ts | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/packages/sdk/server-ai/__tests__/LDAIClientImpl.test.ts b/packages/sdk/server-ai/__tests__/LDAIClientImpl.test.ts index 83ecb3aa8..7a4819633 100644 --- a/packages/sdk/server-ai/__tests__/LDAIClientImpl.test.ts +++ b/packages/sdk/server-ai/__tests__/LDAIClientImpl.test.ts @@ -15,18 +15,18 @@ it('returns config with interpolated messagess', async () => { const client = new LDAIClientImpl(mockLdClient); const key = 'test-flag'; const defaultValue: LDAIDefaults = { - model: { id: 'test', parameters: { name: 'test-model' } }, + model: { name: 'test', parameters: { name: 'test-model' } }, messages: [], enabled: true, }; const mockVariation = { model: { - id: 'example-model', + name: 'example-model', parameters: { name: 'imagination', temperature: 0.7, maxTokens: 4096 }, }, provider: { - id: 'example-provider', + name: 'example-provider', }, messages: [ { role: 'system', content: 'Hello {{name}}' }, @@ -45,11 +45,11 @@ it('returns config with interpolated messagess', async () => { expect(result).toEqual({ model: { - id: 'example-model', + name: 'example-model', parameters: { name: 'imagination', temperature: 0.7, maxTokens: 4096 }, }, provider: { - id: 'example-provider', + name: 'example-provider', }, messages: [ { role: 'system', content: 'Hello John' }, @@ -64,7 +64,7 @@ it('includes context in variables for messages interpolation', async () => { const client = new LDAIClientImpl(mockLdClient); const key = 'test-flag'; const defaultValue: LDAIDefaults = { - 
model: { id: 'test', parameters: { name: 'test-model' } }, + model: { name: 'test', parameters: { name: 'test-model' } }, messages: [], }; @@ -84,12 +84,12 @@ it('handles missing metadata in variation', async () => { const client = new LDAIClientImpl(mockLdClient); const key = 'test-flag'; const defaultValue: LDAIDefaults = { - model: { id: 'test', parameters: { name: 'test-model' } }, + model: { name: 'test', parameters: { name: 'test-model' } }, messages: [], }; const mockVariation = { - model: { id: 'example-provider', parameters: { name: 'imagination' } }, + model: { name: 'example-provider', parameters: { name: 'imagination' } }, messages: [{ role: 'system', content: 'Hello' }], }; @@ -98,7 +98,7 @@ it('handles missing metadata in variation', async () => { const result = await client.config(key, testContext, defaultValue); expect(result).toEqual({ - model: { id: 'example-provider', parameters: { name: 'imagination' } }, + model: { name: 'example-provider', parameters: { name: 'imagination' } }, messages: [{ role: 'system', content: 'Hello' }], tracker: expect.any(Object), enabled: false, @@ -109,8 +109,8 @@ it('passes the default value to the underlying client', async () => { const client = new LDAIClientImpl(mockLdClient); const key = 'non-existent-flag'; const defaultValue: LDAIDefaults = { - model: { id: 'default-model', parameters: { name: 'default' } }, - provider: { id: 'default-provider' }, + model: { name: 'default-model', parameters: { name: 'default' } }, + provider: { name: 'default-provider' }, messages: [{ role: 'system', content: 'Default messages' }], enabled: true, }; diff --git a/packages/sdk/server-ai/examples/bedrock/src/index.ts b/packages/sdk/server-ai/examples/bedrock/src/index.ts index e01fc0afb..d15ccf4b9 100644 --- a/packages/sdk/server-ai/examples/bedrock/src/index.ts +++ b/packages/sdk/server-ai/examples/bedrock/src/index.ts @@ -53,7 +53,7 @@ async function main() { context, { model: { - id: 'my-default-model', + name: 
'my-default-model', }, enabled: true, }, diff --git a/packages/sdk/server-ai/examples/openai/src/index.ts b/packages/sdk/server-ai/examples/openai/src/index.ts index 1ab9d629e..b54f303db 100644 --- a/packages/sdk/server-ai/examples/openai/src/index.ts +++ b/packages/sdk/server-ai/examples/openai/src/index.ts @@ -61,7 +61,7 @@ async function main(): Promise<void> { const completion = await tracker.trackOpenAIMetrics(async () => client.chat.completions.create({ messages: aiConfig.messages || [], - model: aiConfig.model?.id || 'gpt-4', + model: aiConfig.model?.name || 'gpt-4', temperature: (aiConfig.model?.parameters?.temperature as number) ?? 0.5, max_tokens: (aiConfig.model?.parameters?.maxTokens as number) ?? 4096, }), diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts index 580f8b97b..21949f711 100644 --- a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts +++ b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts @@ -7,7 +7,7 @@ export interface LDModelConfig { /** - * The ID of the model. + * The name of the model. */ - id: string; + name: string; /** * Model specific parameters. @@ -24,7 +24,7 @@ export interface LDProviderConfig { /** - * The ID of the provider. + * The name of the provider. */ - id: string; + name: string; } /**