Skip to content

Commit

Permalink
Removed old handling for requests that exceed the maximum number of tokens.
Browse files Browse the repository at this point in the history
  • Loading branch information
JusticeRage committed Dec 18, 2023
1 parent 260d3a6 commit 6660610
Showing 1 changed file with 4 additions and 13 deletions.
17 changes: 4 additions & 13 deletions gepetto/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,20 +71,11 @@ def query_model(self, query, cb, additional_model_options=None):
ida_kernwin.MFF_WRITE)
except openai.BadRequestError as e:
# Context length exceeded. Determine the max number of tokens we can ask for and retry.
m = re.search(r'maximum context length is (\d+) tokens, however you requested \d+ tokens \((\d+) in your '
r'prompt;', str(e))
if not m:
print(_("{model} could not complete the request: {error}").format(model=self.model, error=str(e)))
return
(hard_limit, prompt_tokens) = (int(m.group(1)), int(m.group(2)))
max_tokens = hard_limit - prompt_tokens
if max_tokens >= 750:
print(_("Context length exceeded! Reducing the completion tokens to "
"{max_tokens}...").format(max_tokens=max_tokens))
self.query_model(query, cb, max_tokens)
m = re.search(r'maximum context length is \d+ tokens, however you requested \d+ tokens', str(e))
if m:
print(_("Unfortunately, this function is too big to be analyzed with the model's current API limits."))
else:
print("Unfortunately, this function is too big to be analyzed with the model's current API limits.")

print(_("General exception encountered while running the query: {error}").format(error=str(e)))
except openai.OpenAIError as e:
print(_("{model} could not complete the request: {error}").format(model=self.model, error=str(e)))
except Exception as e:
Expand Down

0 comments on commit 6660610

Please sign in to comment.