Skip to content
This repository has been archived by the owner on Dec 18, 2023. It is now read-only.

Commit

Permalink
Merge pull request #48 from QAInsights/feature/gpt-4
Browse files Browse the repository at this point in the history
Add: GPT-4 model
  • Loading branch information
QAInsights authored Mar 17, 2023
2 parents fb9dd5d + 2d61fd4 commit 322100f
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 14 deletions.
30 changes: 18 additions & 12 deletions application.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from flask import send_from_directory
from flask_dance.contrib.github import make_github_blueprint

import constants
from integrations.slack import slack
import version
from utils import *
Expand Down Expand Up @@ -222,24 +223,29 @@ def fetch_performance_results(contents, filename, username):
# Below prompts dict has the results title and the prompt for GPT to process
prompts = {
"High level Summary": "Act like a performance engineer. Please analyse this performance test results and give "
-                          "me a high level summary.",
+                          "me a high level summary. Beautify the response in a HTML format.",
"Detailed Summary": "Act like a performance engineer and write a detailed summary from this raw performance "
-                        "results."
+                        "results without a title. You need to identify the anomalies, standard deviations, "
+                        "minimum and maximum response"
+                        "time, number of errors, and number of transactions. Help me identifying potential "
+                        "bottlenecks as well. Beautify the response in a HTML list format."
}

results = {}
for title, prompt in prompts.items():
-        response = openai.Completion.create(
-            model=constants.model,
-            prompt=f"""
-            {prompt}: \n {contents}
-            """,
+        response = openai.ChatCompletion.create(
+            model=constants.openai_model,
+            messages=[
+                {"role": "system", "content": f"{prompt}"},
+                {"role": "user", "content": f"{contents}"},
+            ],
temperature=constants.temperature,
-            max_tokens=constants.max_tokens,
top_p=constants.top_p,
+            max_tokens=constants.max_tokens,
+            presence_penalty=constants.presence_penalty,
frequency_penalty=constants.frequency_penalty,
-            presence_penalty=constants.presence_penalty
)

log_db(username=username, openai_id=response['id'],
openai_prompt_tokens=response['usage']['prompt_tokens'],
openai_completion_tokens=response['usage']['completion_tokens'],
Expand All @@ -249,17 +255,17 @@ def fetch_performance_results(contents, filename, username):
# Send Slack Notifications if enabled
if get_slack_notification_status() == 'true':
try:
-                slack.send_slack_notifications(msg=response['choices'][0]['text'],
+                slack.send_slack_notifications(msg=response['choices'][0]['message']['content'],
filename=filename,
title=title,
webhook=get_webhook())
except Exception as e:
capture_exception(e)
pass

-        response = beautify_response(response['choices'][0]['text'])
+        # response = beautify_response(response['choices'][0]['message']['content'])

-        results[title] = response
+        results[title] = response['choices'][0]['message']['content']

return results

Expand Down
4 changes: 2 additions & 2 deletions constants.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
FILE_SIZE = 100_000_00
MONTHLY_UPLOAD_LIMIT = 10
AWS_DEFAULT_REGION = "us-east-2"
-model = "text-davinci-003"
+openai_model = "gpt-4"
temperature = 0
-max_tokens = 500
+max_tokens = 4000
top_p = 1.0
frequency_penalty = 0.0
presence_penalty = 0.0
Expand Down
Binary file modified requirements.txt
Binary file not shown.

0 comments on commit 322100f

Please sign in to comment.