diff --git a/README.md b/README.md index 0d2e327..c50b88c 100644 --- a/README.md +++ b/README.md @@ -235,9 +235,10 @@ const tokenLimit = 10 const withinTokenLimit = isWithinTokenLimit(text, tokenLimit) ``` -### `countTokens(text: string | Iterable<ChatMessage>): number` +### `countTokens(text: string | Iterable<ChatMessage>, encodeOptions?: EncodeOptions): number` Counts the number of tokens in the input text or chat. Use this method when you need to determine the number of tokens without checking against a limit. +The optional `encodeOptions` parameter allows you to specify custom sets of allowed or disallowed special tokens. Example: @@ -370,6 +371,28 @@ const encoded = encode(inputText, undefined, disallowedSpecial) In this example, an Error is thrown, because the input text contains a disallowed special token. +## Performance Optimization + +### LRU Merge Cache + +The tokenizer uses an LRU (Least Recently Used) cache to improve encoding performance for similar strings. By default, it stores up to 100,000 merged token pairs. You can adjust this value to optimize for your specific use case: + +- Increasing the cache size will make encoding similar strings faster but consume more memory +- Setting it to 0 will disable caching completely +- For applications processing many unique strings, a smaller cache might be more efficient + +You can modify the cache size using the `setMergeCacheSize` function: + +```ts +import { setMergeCacheSize } from 'gpt-tokenizer' + +// Set to 5000 entries +setMergeCacheSize(5000) + +// Disable caching completely +setMergeCacheSize(0) +``` + ## Testing and Validation `gpt-tokenizer` includes a set of test cases in the [TestPlans.txt](./data/TestPlans.txt) file to ensure its compatibility with OpenAI's Python `tiktoken` library. These test cases validate the functionality and behavior of `gpt-tokenizer`, providing a reliable reference for developers.
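Reviewer note: a minimal usage sketch for the new `countTokens` options parameter documented above. The `allowedSpecial: 'all'` form is taken verbatim from the benchmark change later in this diff; passing an explicit `Set` of special tokens is an assumption, mirroring how `encode` handles allowed special tokens.

```ts
import { countTokens } from 'gpt-tokenizer'

// Allow every special token while counting
// (this exact option is used by the benchmark's countTokens wrapper below)
const total = countTokens('<|endoftext|> ends the document', {
  allowedSpecial: 'all',
})

// Assumed: an explicit allow-list works the same way as with encode()
const scoped = countTokens('<|endoftext|> ends the document', {
  allowedSpecial: new Set(['<|endoftext|>']),
})

console.log(total, scoped)
```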
diff --git a/benchmark/src/benchmarkRunner.ts b/benchmark/src/benchmarkRunner.ts index 7b5bb01..e691380 100644 --- a/benchmark/src/benchmarkRunner.ts +++ b/benchmark/src/benchmarkRunner.ts @@ -15,16 +15,16 @@ const calculateAverage = (numbers: number[]): number => { return numbers.reduce((a, b) => a + b, 0) / numbers.length } -// Define the number of executions for performance testing -const EXECUTIONS = 10000 - // Define the number of iterations for averaging -const ITERATIONS = 3 +const ITERATIONS = 1 + +// Define the execution multiplier for performance testing +const EXECUTIONS_MULTIPLIER = 1 // Function to run a single benchmark iteration in a child process const runSingleBenchmark = ( tokenizerIndex: number, - executions: number, + executionsMultiplier: number, ): Promise<BenchmarkResult> => { return new Promise((resolve, reject) => { const workerPath = path.resolve(__dirname, 'benchmarkWorker.js') @@ -36,7 +36,10 @@ const runSingleBenchmark = ( reject(new Error('Failed to spawn child process')) return } - const message: WorkerInput = { tokenizerIndex, executions } + const message: WorkerInput = { + tokenizerIndex, + executionsMultiplier, + } child.send(message) child.on('message', (msg: any) => { // Changed to any to avoid TypeScript issues @@ -117,12 +120,17 @@ const displayUnifiedResults = (results: BenchmarkResult[]) => { label: 'Encode Avg (ms)', better: 'lower' as const, precision: 4, - }, // Increased precision + }, decodeTimeAvg: { label: 'Decode Avg (ms)', better: 'lower' as const, precision: 4, - }, // Increased precision + }, + countTokensTimeAvg: { + label: 'Count Tokens Avg (ms)', + better: 'lower' as const, + precision: 4, + }, memoryIncrease: { label: 'Memory Increase (MB)', better: 'lower' as const, @@ -146,6 +154,8 @@ const displayUnifiedResults = (results: BenchmarkResult[]) => { return r.datasetsAverage?.encodeTimeMs || 0 case 'decodeTimeAvg': return r.datasetsAverage?.decodeTimeMs || 0 + case 'countTokensTimeAvg': + return r.datasetsAverage?.countTimeMs || 0 case 'memoryIncrease': return r.memoryChangeAfterRunMb default: @@ -166,6 +176,7 @@ const displayUnifiedResults = (results: BenchmarkResult[]) => { chalk.green('Init\nMem RSS'), chalk.yellow('Encode\nAvg (ms)'), chalk.yellow('Decode\nAvg (ms)'), + chalk.yellow('Count\nAvg (ms)'), chalk.red('Memory\nIncrease'), chalk.red('Mem\nLeak?'), ], @@ -238,6 +249,13 @@ const displayUnifiedResults = (results: BenchmarkResult[]) => { changes.decodeTimeMs, ), ) + row.push( + applyHighlight( + res.datasetsAverage?.countTimeMs || 0, + 'countTokensTimeAvg', + changes.countTimeMs, + ), + ) row.push( applyHighlight( res.memoryChangeAfterRunMb, @@ -277,7 +295,10 @@ const runBenchmarks = async ( for (let i = 0; i < ITERATIONS; i++) { console.log(`  ${chalk.yellow(`Iteration ${i + 1}/${ITERATIONS}`)}`) try { - const result = await runSingleBenchmark(tokenizerIndex, EXECUTIONS) + const result = await runSingleBenchmark( + tokenizerIndex, + EXECUTIONS_MULTIPLIER, + ) tokenizerResults.push(result) } catch (error) { console.error( @@ -317,6 +338,9 @@ const runBenchmarks = async ( const decodeTimes = tokenizerResults.map( (r) => r.datasets[dataset].decode.averageTimeMs, ) + const countTimes = tokenizerResults.map( + (r) => r.datasets[dataset].countTokens.averageTimeMs, + ) const memoryChanges = tokenizerResults.map( (r) => r.datasets[dataset].memoryChangeAfterExecutionsMb, ) @@ -327,6 +351,9 @@ const runBenchmarks = async ( decode: { averageTimeMs: calculateAverage(decodeTimes), }, + countTokens: { + averageTimeMs: calculateAverage(countTimes), + },
memoryChangeAfterExecutionsMb: calculateAverage(memoryChanges), } } @@ -344,6 +371,13 @@ const runBenchmarks = async ( Object.values(r.datasets).map((d) => d.decode.averageTimeMs), ), ), + countTimeMs: calculateAverage( + tokenizerResults.flatMap((r) => + Object.values(r.datasets).map( + (d) => d.countTokens?.averageTimeMs || 0, + ), + ), + ), }, }) } @@ -406,7 +440,10 @@ const watchMode = async (previousResults: BenchmarkResult[] | null) => { for (let i = 0; i < ITERATIONS; i++) { console.log(` ${chalk.yellow(`Iteration ${i + 1}/${ITERATIONS}`)}`) try { - const result = await runSingleBenchmark(tokenizerIndex, EXECUTIONS) + const result = await runSingleBenchmark( + tokenizerIndex, + EXECUTIONS_MULTIPLIER, + ) tokenizerResults.push(result) } catch (error) { console.error( @@ -447,6 +484,13 @@ const watchMode = async (previousResults: BenchmarkResult[] | null) => { Object.values(r.datasets).map((d) => d.decode.averageTimeMs), ), ), + countTimeMs: calculateAverage( + tokenizerResults.flatMap((r) => + Object.values(r.datasets).map( + (d) => d.countTokens.averageTimeMs, + ), + ), + ), }, } // Aggregate per-dataset results @@ -458,6 +502,9 @@ const watchMode = async (previousResults: BenchmarkResult[] | null) => { const decodeTimes = tokenizerResults.map( (r) => r.datasets[dataset].decode.averageTimeMs, ) + const countTimes = tokenizerResults.map( + (r) => r.datasets[dataset].countTokens.averageTimeMs, + ) const memoryChanges = tokenizerResults.map( (r) => r.datasets[dataset].memoryChangeAfterExecutionsMb, ) @@ -468,6 +515,9 @@ const watchMode = async (previousResults: BenchmarkResult[] | null) => { decode: { averageTimeMs: calculateAverage(decodeTimes), }, + countTokens: { + averageTimeMs: calculateAverage(countTimes), + }, memoryChangeAfterExecutionsMb: calculateAverage(memoryChanges), } } @@ -506,6 +556,11 @@ const watchMode = async (previousResults: BenchmarkResult[] | null) => { (lastResult.datasetsAverage?.decodeTimeMs || 0)) / (lastResult.datasetsAverage?.decodeTimeMs || 1)) * 100, + countTimeMs: + (((newAggregated.datasetsAverage?.countTimeMs || 0) - + (lastResult.datasetsAverage?.countTimeMs || 0)) / + (lastResult.datasetsAverage?.countTimeMs || 1)) * + 100, memoryChangeAfterRunMb: ((newAggregated.memoryChangeAfterRunMb - lastResult.memoryChangeAfterRunMb) / diff --git a/benchmark/src/benchmarkWorker.ts b/benchmark/src/benchmarkWorker.ts index 479e8e1..a771776 100644 --- a/benchmark/src/benchmarkWorker.ts +++ b/benchmark/src/benchmarkWorker.ts @@ -10,7 +10,7 @@ import { memoryUsage } from 'process' import { tokenizers } from './tokenizers.js' const runWorker = async (message: WorkerInput) => { - const { tokenizerIndex, executions } = message + const { tokenizerIndex, executionsMultiplier } = message const tokenizer = tokenizers[tokenizerIndex] const result: BenchmarkResult = { packageName: tokenizer.packageName, @@ -19,10 +19,10 @@ const runWorker = async (message: WorkerInput) => { datasets: {}, memoryChangeAfterRunMb: 0, memoryLeakWarning: false, - datasetsAverage: { encodeTimeMs: 0, decodeTimeMs: 0 }, + datasetsAverage: { encodeTimeMs: 0, decodeTimeMs: 0, countTimeMs: 0 }, } - const encodeTimes: number[] = new Array(executions) - const decodeTimes: number[] = new Array(executions) + const testData = Object.entries(datasets) + try { const initMemoryUsageBefore = memoryUsage() const initStart = performance.now() @@ -46,38 +46,66 @@ const runWorker = async (message: WorkerInput) => { } // Prepare datasets - const testData = Object.entries(datasets) - for (const [name, text] of 
testData) { - // Warm-up encode and decode + for (const [name, data] of testData) { + // Calculate actual execution counts + const encodeExecs = Math.max( + 1, + Math.round(data.encodeExecutionsCount * executionsMultiplier), + ) + const decodeExecs = Math.max( + 1, + Math.round(data.decodeExecutionsCount * executionsMultiplier), + ) + const countExecs = Math.max( + 1, + Math.round(data.countTokensExecutionsCount * executionsMultiplier), + ) + + // Warm-up encode, decode and countTokens (using 5% of execution count) let encodedTokens: number[] | Uint8Array = [] - for (let i = 0; i < 50; i++) { - encodedTokens = tokenizerModule.encode(text) + const warmUpCount = Math.max(1, Math.round(encodeExecs * 0.05)) + for (let i = 0; i < warmUpCount; i++) { + encodedTokens = tokenizerModule.encode(data.text) tokenizerModule.decode(encodedTokens) + tokenizerModule.countTokens(data.text) } // Encode benchmark - for (let i = 0; i < executions; i++) { + const encodeTimes: number[] = new Array(encodeExecs) + for (let i = 0; i < encodeExecs; i++) { const start = performance.now() - encodedTokens = tokenizerModule.encode(text) + encodedTokens = tokenizerModule.encode(data.text) const end = performance.now() encodeTimes[i] = end - start } - const avgEncodeTime = encodeTimes.reduce((a, b) => a + b, 0) / executions + const avgEncodeTime = encodeTimes.reduce((a, b) => a + b, 0) / encodeExecs // Decode benchmark + const decodeTimes: number[] = new Array(decodeExecs) let decodedText: string = '' - for (let i = 0; i < executions; i++) { + for (let i = 0; i < decodeExecs; i++) { const start = performance.now() decodedText = tokenizerModule.decode(encodedTokens) const end = performance.now() decodeTimes[i] = end - start } - const avgDecodeTime = decodeTimes.reduce((a, b) => a + b, 0) / executions + const avgDecodeTime = decodeTimes.reduce((a, b) => a + b, 0) / decodeExecs + + // Count tokens benchmark + const countTokensTimes: number[] = new Array(countExecs) + for (let i = 0; i < countExecs; i++) { + const start = performance.now() + tokenizerModule.countTokens(data.text) + const end = performance.now() + countTokensTimes[i] = end - start + } + const avgCountTokensTime = + countTokensTimes.reduce((a, b) => a + b, 0) / countExecs // Verify correctness - if (decodedText !== text) { + if (decodedText !== data.text) { console.warn( - `Warning: Decoded text does not match original for dataset ${name}. \nExpected:\n${text}\nGot:\n${decodedText}`, + `Warning: Decoded text does not match original for dataset ${name}. 
\nExpected:\n${data.text}\nGot:\n${decodedText}`, ) } @@ -97,12 +125,36 @@ const runWorker = async (message: WorkerInput) => { decode: { averageTimeMs: parseFloat(avgDecodeTime.toFixed(4)), }, + countTokens: { + averageTimeMs: parseFloat(avgCountTokensTime.toFixed(4)), + }, memoryChangeAfterExecutionsMb: parseFloat( (memoryUsed / 1024 / 1024).toFixed(2), ), } } + // Calculate dataset averages + const datasetCount = Object.keys(result.datasets).length + const encodeTimeSum = Object.values(result.datasets).reduce( + (sum, dataset) => sum + dataset.encode.averageTimeMs, + 0, + ) + const decodeTimeSum = Object.values(result.datasets).reduce( + (sum, dataset) => sum + dataset.decode.averageTimeMs, + 0, + ) + const countTimeSum = Object.values(result.datasets).reduce( + (sum, dataset) => sum + dataset.countTokens.averageTimeMs, + 0, + ) + + result.datasetsAverage = { + encodeTimeMs: parseFloat((encodeTimeSum / datasetCount).toFixed(4)), + decodeTimeMs: parseFloat((decodeTimeSum / datasetCount).toFixed(4)), + countTimeMs: parseFloat((countTimeSum / datasetCount).toFixed(4)), + } + // Overall memory leak detection const finalMemoryUsage = memoryUsage() const totalMemoryIncrease = @@ -110,7 +162,7 @@ const runWorker = async (message: WorkerInput) => { result.memoryChangeAfterRunMb = parseFloat( (totalMemoryIncrease / 1024 / 1024).toFixed(2), ) - result.memoryLeakWarning = totalMemoryIncrease > 1 * 1024 * 1024 // 1 MB threshold + result.memoryLeakWarning = totalMemoryIncrease > 10 * 1024 * 1024 // 10 MB threshold // Send the result back to the parent process const output: WorkerOutput = { @@ -130,7 +182,10 @@ const runWorker = async (message: WorkerInput) => { } if (process.argv.length > 2) { - runWorker({ executions: 100000, tokenizerIndex: tokenizers.length - 1 }) + runWorker({ + executionsMultiplier: 1, + tokenizerIndex: tokenizers.length - 1, + }) } else { process.on('message', runWorker) } diff --git a/benchmark/src/datasets.ts b/benchmark/src/datasets.ts index 13774be..32cbd6a 100644 --- a/benchmark/src/datasets.ts +++ b/benchmark/src/datasets.ts @@ -1,8 +1,79 @@ -export const datasets: Record<string, string> = { - English: `The quick brown fox jumps over the lazy dog.`, - Chinese: `快速的棕色狐狸跳过懒狗。`, - French: `Le renard brun rapide saute par-dessus le chien paresseux.`, - Code: `function greet(name: string): string { +import type { BenchData } from './interfaces.js' + +const EXECUTIONS_COUNT = 1000 +const LONG_MSG_REPEATS = 2000 +// LatinExpectedTokens: 86 +const Latin = `Occaecat est tempor incididunt voluptate exercitation irure quis aliqua sunt dolor. Anim nostrud incididunt eu aliquip quis culpa do incididunt eu. Magna qui dolor deserunt sit velit. Dolor anim laborum ut ad in et occaecat enim elit culpa commodo. Sit ut sit mollit adipisicing. Labore culpa do cillum proident incididunt et. 
Reprehenderit nisi excepteur culpa consectetur mollit consectetur laborum` + +// Generate a random Unicode string with various character ranges +function generateRandomUnicode(length: number): string { + const ranges = [ + [0x0020, 0x007f], // Basic Latin + [0x00a0, 0x00ff], // Latin-1 Supplement + [0x0100, 0x017f], // Latin Extended-A + [0x0400, 0x04ff], // Cyrillic + [0x0980, 0x09ff], // Bengali + [0x0f00, 0x0fff], // Tibetan + [0x1200, 0x137f], // Ethiopic + [0x3040, 0x309f], // Hiragana + [0x30a0, 0x30ff], // Katakana + [0x4e00, 0x4fff], // CJK Unified Ideographs (partial) + [0x1f300, 0x1f6ff], // Emoji & Pictographs + ] + + let result = '' + for (let i = 0; i < length; i++) { + const range = ranges[Math.floor(Math.random() * ranges.length)] + const codePoint = + Math.floor(Math.random() * (range[1] - range[0])) + range[0] + result += String.fromCodePoint(codePoint) + } + return result +} + +export const datasets: Record<string, BenchData> = { + English: { + text: `The quick brown fox jumps over the lazy dog.`, + encodeExecutionsCount: EXECUTIONS_COUNT, + decodeExecutionsCount: EXECUTIONS_COUNT * 10, + countTokensExecutionsCount: EXECUTIONS_COUNT, + }, + Chinese: { + text: `快速的棕色狐狸跳过懒狗。`, + encodeExecutionsCount: EXECUTIONS_COUNT, + decodeExecutionsCount: EXECUTIONS_COUNT * 10, + countTokensExecutionsCount: EXECUTIONS_COUNT, + }, + French: { + text: `Le renard brun rapide saute par-dessus le chien paresseux.`, + encodeExecutionsCount: EXECUTIONS_COUNT, + decodeExecutionsCount: EXECUTIONS_COUNT * 10, + countTokensExecutionsCount: EXECUTIONS_COUNT, + }, + Code: { + text: `function greet(name: string): string { return \`Hello, \${name}!\`; }`, + encodeExecutionsCount: EXECUTIONS_COUNT, + decodeExecutionsCount: EXECUTIONS_COUNT * 10, + countTokensExecutionsCount: EXECUTIONS_COUNT, + }, + Latin: { + text: Latin, + encodeExecutionsCount: EXECUTIONS_COUNT, + decodeExecutionsCount: EXECUTIONS_COUNT * 10, + countTokensExecutionsCount: EXECUTIONS_COUNT, + }, + LatinRepeat: { + text: Latin.repeat(LONG_MSG_REPEATS), + encodeExecutionsCount: 1, + decodeExecutionsCount: 1, + countTokensExecutionsCount: 1, + }, + UnicodeRandom: { + text: generateRandomUnicode(120000), + encodeExecutionsCount: 2, + decodeExecutionsCount: 2, + countTokensExecutionsCount: 2, + }, } diff --git a/benchmark/src/interfaces.ts index b8dc6ac..767f353 100644 --- a/benchmark/src/interfaces.ts +++ b/benchmark/src/interfaces.ts @@ -5,12 +5,20 @@ export interface TokenizerBenchmark<T = any> { load: () => Promise<{ encode: (input: string) => T decode: (tokens: T) => string + countTokens: (input: string) => number }> } export interface WorkerInput { tokenizerIndex: number - executions: number + executionsMultiplier: number +} + +export interface BenchData { + text: string + encodeExecutionsCount: number + decodeExecutionsCount: number + countTokensExecutionsCount: number } export interface WorkerOutput { @@ -33,6 +41,7 @@ export interface BenchmarkResult { datasetsAverage: { encodeTimeMs: number decodeTimeMs: number + countTimeMs: number } // For re-run rows, keep track of the change change?: { @@ -41,6 +50,7 @@ export interface BenchmarkResult { initMemoryRssMb?: number encodeTimeMs?: number decodeTimeMs?: number + countTimeMs?: number memoryChangeAfterRunMb?: number } } @@ -52,5 +62,8 @@ interface DatasetResult { decode: { averageTimeMs: number } + countTokens: { + averageTimeMs: number + } memoryChangeAfterExecutionsMb: number } diff --git a/benchmark/src/tokenizers.ts b/benchmark/src/tokenizers.ts index 9180511..e950371 100644 
--- a/benchmark/src/tokenizers.ts +++ b/benchmark/src/tokenizers.ts @@ -15,6 +15,7 @@ export const tokenizers: TokenizerBenchmark[] = [ return { encode: (t: string) => tiktokenWasm.encode(t), decode: (i: Uint32Array) => decoder.decode(tiktokenWasm.decode(i)), + countTokens: (t: string) => tiktokenWasm.encode(t).length, } }, }, @@ -27,20 +28,36 @@ export const tokenizers: TokenizerBenchmark[] = [ return { encode: (i: string) => encoder.encode(i), decode: (i: number[]) => encoder.decode(i), + countTokens: (i: string) => encoder.encode(i).length, } }, }, - { - packageName: 'gpt-3-encoder', - version: require('../node_modules/gpt-3-encoder/package.json').version, - load: async () => { - const gpt3Encoder = await import('gpt-3-encoder') - return { - encode: (i: string) => gpt3Encoder.encode(i), - decode: (i: number[]) => gpt3Encoder.decode(i), - } - }, - }, + // { + // packageName: 'gpt-3-encoder', + // version: require('../node_modules/gpt-3-encoder/package.json').version, + // load: async () => { + // const gpt3Encoder = await import('gpt-3-encoder') + // return { + // encode: (i: string) => gpt3Encoder.encode(i), + // decode: (i: number[]) => gpt3Encoder.decode(i), + // countTokens: (i: string) => gpt3Encoder.encode(i).length, + // } + // }, + // }, + // { + // packageName: 'gpt3-tokenizer', + // version: require('../node_modules/gpt3-tokenizer/package.json').version, + // load: async () => { + // const { default: gpt3tokenizer } = await import('gpt3-tokenizer') + // const TokenizerClass = gpt3tokenizer.default + // const tokenizer = new TokenizerClass({ type: 'gpt3' }) + // return { + // encode: (i: string) => tokenizer.encode(i).bpe, + // decode: (i: number[]) => tokenizer.decode(i), + // countTokens: (i: string) => tokenizer.encode(i).bpe.length, + // } + // }, + // }, { packageName: 'js-tiktoken', version: require('../node_modules/js-tiktoken/package.json').version, @@ -52,19 +69,7 @@ export const tokenizers: TokenizerBenchmark[] = [ return { encode: (i: string) => jsTiktokenTokenizer.encode(i), decode: (i: number[]) => jsTiktokenTokenizer.decode(i), - } - }, - }, - { - packageName: 'gpt3-tokenizer', - version: require('../node_modules/gpt3-tokenizer/package.json').version, - load: async () => { - const { default: gpt3tokenizer } = await import('gpt3-tokenizer') - const TokenizerClass = gpt3tokenizer.default - const tokenizer = new TokenizerClass({ type: 'gpt3' }) - return { - encode: (i: string) => tokenizer.encode(i).bpe, - decode: (i: number[]) => tokenizer.decode(i), + countTokens: (i: string) => jsTiktokenTokenizer.encode(i).length, } }, }, @@ -78,6 +83,7 @@ export const tokenizers: TokenizerBenchmark[] = [ return { encode: tokenizer.encode, decode: tokenizer.decode, + countTokens: (i: string) => tokenizer.encode(i).length, } }, }, @@ -91,6 +97,7 @@ export const tokenizers: TokenizerBenchmark[] = [ return { encode: tokenizer.encode, decode: tokenizer.decode, + countTokens: (i) => tokenizer.countTokens(i, { allowedSpecial: 'all' }), } }, }, diff --git a/src/BytePairEncodingCore.ts b/src/BytePairEncodingCore.ts index a869887..12908b8 100644 --- a/src/BytePairEncodingCore.ts +++ b/src/BytePairEncodingCore.ts @@ -1,5 +1,6 @@ /* eslint-disable no-continue */ +import { DEFAULT_MERGE_CACHE_SIZE } from './constants.js' import { compareUint8Arrays, isAscii, tryConvertToString } from './utfUtil.js' import { escapeRegExp } from './util.js' @@ -9,6 +10,13 @@ export interface BytePairEncodingConfig { bytePairRankDecoder: RawBytePairRanks specialTokensEncoder?: Map<string, number> tokenSplitRegex: RegExp + /** 
* LRU cache for merged token pairs. + * Increasing this value should make encoding similar strings faster, + * but will consume more memory. + * @default 100000 + */ + mergeCacheSize?: number } const emptyBuffer = new Uint8Array(0) @@ -36,14 +44,21 @@ export class BytePairEncodingCore { private specialTokensDecoder: Map<number, Uint8Array> private specialTokenPatternRegex: RegExp private textEncoder = new TextEncoder() + private mergeCache?: Map<string, number[]> + private mergeCacheSize: number constructor({ bytePairRankDecoder, specialTokensEncoder, tokenSplitRegex, + mergeCacheSize = DEFAULT_MERGE_CACHE_SIZE, }: BytePairEncodingConfig) { this.bytePairRankDecoder = bytePairRankDecoder this.bytePairStringRankEncoder = new Map() + this.mergeCacheSize = mergeCacheSize + if (mergeCacheSize > 0) { + this.mergeCache = new Map() + } // size without array holes (which may be present in the encoder) this.mergeableBytePairRankCount = Object.keys(bytePairRankDecoder).length @@ -79,6 +94,16 @@ export class BytePairEncodingCore { } } + setMergeCacheSize(newSize: number): void { + if (this.mergeCacheSize === 0 && newSize > 0) { + this.mergeCache = new Map() + } + this.mergeCacheSize = newSize + if (newSize === 0) { + this.mergeCache = undefined + } + } + *encodeNativeGenerator( text: string, allowedSpecial?: Set<string>, @@ -396,14 +421,34 @@ export class BytePairEncodingCore { return this.specialTokensDecoder.get(tokenRank) } + private addToMergeCache(key: string, value: number[]): void { + if (!this.mergeCache) return + + if (this.mergeCache.size >= this.mergeCacheSize) { + // Remove least recently used item (first item) + const firstKey = this.mergeCache.keys().next().value! + this.mergeCache.delete(firstKey) + } + this.mergeCache.set(key, value) + } + private bytePairEncode(input: string): number[] { if (input.length === 1 && isAscii(input.codePointAt(0)!)) { return [this.getBpeRankFromStringOrThrow(input)] } - const inputBytes = this.textEncoder.encode(input) + if (this.mergeCache?.has(input)) { + const result = this.mergeCache.get(input)! 
+ // Move to end to mark as recently used + this.mergeCache.delete(input) + this.mergeCache.set(input, result) + return result + } - return this.bytePairMerge(inputBytes) + const inputBytes = this.textEncoder.encode(input) + const result = this.bytePairMerge(inputBytes) + this.addToMergeCache(input, result) + return result } private bytePairMerge( diff --git a/src/GptEncoding.ts b/src/GptEncoding.ts index e15626e..e2b12a1 100644 --- a/src/GptEncoding.ts +++ b/src/GptEncoding.ts @@ -127,6 +127,7 @@ export class GptEncoding { this.encodeChat = this.encodeChat.bind(this) this.encodeChatGenerator = this.encodeChatGenerator.bind(this) this.countTokens = this.countTokens.bind(this) + this.setMergeCacheSize = this.setMergeCacheSize.bind(this) this.modelName = modelName } @@ -361,6 +362,10 @@ export class GptEncoding { return count } + setMergeCacheSize(size: number): void { + this.bytePairEncodingCoreProcessor.setMergeCacheSize(size) + } + decode(inputTokensToDecode: Iterable<number>): string { return this.bytePairEncodingCoreProcessor.decodeNative(inputTokensToDecode) } diff --git a/src/constants.ts b/src/constants.ts index f76c54d..85bb693 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -1 +1,2 @@ export const ALL_SPECIAL_TOKENS = 'all' +export const DEFAULT_MERGE_CACHE_SIZE = 100_000 diff --git a/src/encoding/cl100k_base.ts b/src/encoding/cl100k_base.ts index 8d9350d..307a389 100644 --- a/src/encoding/cl100k_base.ts +++ b/src/encoding/cl100k_base.ts @@ -17,6 +17,7 @@ const { encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { countTokens, @@ -28,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/encoding/o200k_base.ts b/src/encoding/o200k_base.ts index 617392f..b976eb8 100644 --- a/src/encoding/o200k_base.ts +++ b/src/encoding/o200k_base.ts @@ -17,6 +17,7 @@ const { encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { countTokens, @@ -28,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/encoding/p50k_base.ts b/src/encoding/p50k_base.ts index ce84aa6..38cbab6 100644 --- a/src/encoding/p50k_base.ts +++ b/src/encoding/p50k_base.ts @@ -15,6 +15,7 @@ const { isWithinTokenLimit, countTokens, vocabularySize, + setMergeCacheSize, } = api export { countTokens, @@ -24,6 +25,7 @@ export { encode, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/encoding/p50k_edit.ts b/src/encoding/p50k_edit.ts index 8c03b3a..eb30538 100644 --- a/src/encoding/p50k_edit.ts +++ b/src/encoding/p50k_edit.ts @@ -15,6 +15,7 @@ const { isWithinTokenLimit, countTokens, vocabularySize, + setMergeCacheSize, } = api export { countTokens, @@ -24,6 +25,7 @@ export { encode, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/encoding/r50k_base.ts b/src/encoding/r50k_base.ts index 1d4b735..bae351d 100644 --- a/src/encoding/r50k_base.ts +++ b/src/encoding/r50k_base.ts @@ -15,6 +15,7 @@ const { isWithinTokenLimit, countTokens, vocabularySize, + setMergeCacheSize, } = api export { countTokens, @@ -24,6 +25,7 @@ export { encode, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // 
eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-0125.ts b/src/model/gpt-3.5-turbo-0125.ts index a86f51d..e40a00e 100644 --- a/src/model/gpt-3.5-turbo-0125.ts +++ b/src/model/gpt-3.5-turbo-0125.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-0301.ts b/src/model/gpt-3.5-turbo-0301.ts index d043f32..815ab14 100644 --- a/src/model/gpt-3.5-turbo-0301.ts +++ b/src/model/gpt-3.5-turbo-0301.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-0613.ts b/src/model/gpt-3.5-turbo-0613.ts index a59bdc9..7a0b979 100644 --- a/src/model/gpt-3.5-turbo-0613.ts +++ b/src/model/gpt-3.5-turbo-0613.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-1106.ts b/src/model/gpt-3.5-turbo-1106.ts index 2029214..aed84f1 100644 --- a/src/model/gpt-3.5-turbo-1106.ts +++ b/src/model/gpt-3.5-turbo-1106.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-16k-0613.ts b/src/model/gpt-3.5-turbo-16k-0613.ts index 3181eda..e880e94 100644 --- a/src/model/gpt-3.5-turbo-16k-0613.ts +++ b/src/model/gpt-3.5-turbo-16k-0613.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-16k.ts b/src/model/gpt-3.5-turbo-16k.ts index df3909e..d82fe1a 100644 --- a/src/model/gpt-3.5-turbo-16k.ts +++ b/src/model/gpt-3.5-turbo-16k.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, 
isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo-finetune.ts b/src/model/gpt-3.5-turbo-finetune.ts index 613e333..624f395 100644 --- a/src/model/gpt-3.5-turbo-finetune.ts +++ b/src/model/gpt-3.5-turbo-finetune.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-3.5-turbo.ts b/src/model/gpt-3.5-turbo.ts index 8196c90..021bea3 100644 --- a/src/model/gpt-3.5-turbo.ts +++ b/src/model/gpt-3.5-turbo.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-0125-preview.ts b/src/model/gpt-4-0125-preview.ts index 5236867..1bb37dd 100644 --- a/src/model/gpt-4-0125-preview.ts +++ b/src/model/gpt-4-0125-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-0314.ts b/src/model/gpt-4-0314.ts index 69e2c02..26089b2 100644 --- a/src/model/gpt-4-0314.ts +++ b/src/model/gpt-4-0314.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-0613.ts b/src/model/gpt-4-0613.ts index d13568d..253fc4a 100644 --- a/src/model/gpt-4-0613.ts +++ b/src/model/gpt-4-0613.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-1106-preview.ts b/src/model/gpt-4-1106-preview.ts index e0cee8b..ea1322e 100644 --- a/src/model/gpt-4-1106-preview.ts +++ b/src/model/gpt-4-1106-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + 
setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-1106-vision-preview.ts b/src/model/gpt-4-1106-vision-preview.ts index be2bf18..c27fc17 100644 --- a/src/model/gpt-4-1106-vision-preview.ts +++ b/src/model/gpt-4-1106-vision-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-32k-0314.ts b/src/model/gpt-4-32k-0314.ts index e03610a..a1ab5dd 100644 --- a/src/model/gpt-4-32k-0314.ts +++ b/src/model/gpt-4-32k-0314.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-32k-0613.ts b/src/model/gpt-4-32k-0613.ts index c956011..965f8fd 100644 --- a/src/model/gpt-4-32k-0613.ts +++ b/src/model/gpt-4-32k-0613.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-32k.ts b/src/model/gpt-4-32k.ts index 0cbe12f..aa906a2 100644 --- a/src/model/gpt-4-32k.ts +++ b/src/model/gpt-4-32k.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-turbo-2024-04-09.ts b/src/model/gpt-4-turbo-2024-04-09.ts index 2e3100e..2ef5d41 100644 --- a/src/model/gpt-4-turbo-2024-04-09.ts +++ b/src/model/gpt-4-turbo-2024-04-09.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-turbo-preview.ts b/src/model/gpt-4-turbo-preview.ts index 269d460..3ab1b9a 100644 --- a/src/model/gpt-4-turbo-preview.ts +++ b/src/model/gpt-4-turbo-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, 
isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-turbo.ts b/src/model/gpt-4-turbo.ts index bd9f847..8c07fa1 100644 --- a/src/model/gpt-4-turbo.ts +++ b/src/model/gpt-4-turbo.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4-vision-preview.ts b/src/model/gpt-4-vision-preview.ts index 1ee55ab..06e7d1b 100644 --- a/src/model/gpt-4-vision-preview.ts +++ b/src/model/gpt-4-vision-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4.ts b/src/model/gpt-4.ts index db7979c..072e4bb 100644 --- a/src/model/gpt-4.ts +++ b/src/model/gpt-4.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-2024-05-13.ts b/src/model/gpt-4o-2024-05-13.ts index e89df76..293fa1d 100644 --- a/src/model/gpt-4o-2024-05-13.ts +++ b/src/model/gpt-4o-2024-05-13.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-2024-08-06-finetune.ts b/src/model/gpt-4o-2024-08-06-finetune.ts index 268c4c9..050e7f2 100644 --- a/src/model/gpt-4o-2024-08-06-finetune.ts +++ b/src/model/gpt-4o-2024-08-06-finetune.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-2024-08-06.ts b/src/model/gpt-4o-2024-08-06.ts index 8967207..70a3302 100644 --- a/src/model/gpt-4o-2024-08-06.ts +++ b/src/model/gpt-4o-2024-08-06.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, 
isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-audio-preview-2024-10-01.ts b/src/model/gpt-4o-audio-preview-2024-10-01.ts index df60423..0e3eb36 100644 --- a/src/model/gpt-4o-audio-preview-2024-10-01.ts +++ b/src/model/gpt-4o-audio-preview-2024-10-01.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-audio-preview.ts b/src/model/gpt-4o-audio-preview.ts index 3acfc78..3445e3c 100644 --- a/src/model/gpt-4o-audio-preview.ts +++ b/src/model/gpt-4o-audio-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-mini-2024-07-18-finetune.ts b/src/model/gpt-4o-mini-2024-07-18-finetune.ts index c46c7a1..f9514fc 100644 --- a/src/model/gpt-4o-mini-2024-07-18-finetune.ts +++ b/src/model/gpt-4o-mini-2024-07-18-finetune.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-mini-2024-07-18.ts b/src/model/gpt-4o-mini-2024-07-18.ts index 7de1f98..7d55493 100644 --- a/src/model/gpt-4o-mini-2024-07-18.ts +++ b/src/model/gpt-4o-mini-2024-07-18.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-mini-training-2024-07-18.ts b/src/model/gpt-4o-mini-training-2024-07-18.ts index 85851c7..1e57fad 100644 --- a/src/model/gpt-4o-mini-training-2024-07-18.ts +++ b/src/model/gpt-4o-mini-training-2024-07-18.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-mini-training.ts b/src/model/gpt-4o-mini-training.ts index 9eb2da3..29e5297 100644 --- a/src/model/gpt-4o-mini-training.ts +++ b/src/model/gpt-4o-mini-training.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + 
countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-mini.ts b/src/model/gpt-4o-mini.ts index b7b9e04..db06be6 100644 --- a/src/model/gpt-4o-mini.ts +++ b/src/model/gpt-4o-mini.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-realtime-preview-2024-10-01.ts b/src/model/gpt-4o-realtime-preview-2024-10-01.ts index 0cc2c6e..beaf482 100644 --- a/src/model/gpt-4o-realtime-preview-2024-10-01.ts +++ b/src/model/gpt-4o-realtime-preview-2024-10-01.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o-realtime-preview.ts b/src/model/gpt-4o-realtime-preview.ts index 9ee7212..d49f792 100644 --- a/src/model/gpt-4o-realtime-preview.ts +++ b/src/model/gpt-4o-realtime-preview.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export diff --git a/src/model/gpt-4o.ts b/src/model/gpt-4o.ts index 916896f..9d77cd2 100644 --- a/src/model/gpt-4o.ts +++ b/src/model/gpt-4o.ts @@ -13,11 +13,14 @@ const { encode, encodeGenerator, isWithinTokenLimit, + countTokens, encodeChat, encodeChatGenerator, vocabularySize, + setMergeCacheSize, } = api export { + countTokens, decode, decodeAsyncGenerator, decodeGenerator, @@ -26,6 +29,7 @@ export { encodeChatGenerator, encodeGenerator, isWithinTokenLimit, + setMergeCacheSize, vocabularySize, } // eslint-disable-next-line import/no-default-export
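Reviewer note: for readers skimming the `BytePairEncodingCore` changes, here is a self-contained sketch of the Map-based LRU idiom the merge cache relies on (illustrative only, not part of the diff; the `LruCache` class name is hypothetical). JavaScript `Map`s iterate in insertion order, so the first key is the least recently used entry, and deleting plus re-inserting a key refreshes its recency, which is exactly what `bytePairEncode` and `addToMergeCache` do above.

```ts
// Minimal Map-based LRU cache in the style used by BytePairEncodingCore.
class LruCache<K, V> {
  private map = new Map<K, V>()

  constructor(private maxSize: number) {}

  get(key: K): V | undefined {
    const value = this.map.get(key)
    if (value !== undefined) {
      // Delete and re-insert to move the key to the end (most recently used)
      this.map.delete(key)
      this.map.set(key, value)
    }
    return value
  }

  set(key: K, value: V): void {
    if (this.maxSize <= 0) return // cache disabled
    if (this.map.size >= this.maxSize) {
      // Evict the least recently used entry: the first key in insertion order
      const oldest = this.map.keys().next().value as K
      this.map.delete(oldest)
    }
    this.map.set(key, value)
  }
}

// Usage mirroring the merge cache: strings map to merged token-rank arrays
const cache = new LruCache<string, number[]>(100_000)
cache.set('hello', [15339]) // the token rank here is a placeholder
console.log(cache.get('hello')) // [15339]
```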