-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathsimple.py
40 lines (32 loc) · 1.01 KB
/
simple.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
"""Build a simple LLM application on top of the Groq chat-completions API."""
import os
import groq
from dotenv import load_dotenv
# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()
# NOTE(review): may be None when the variable is unset; groq.Groq will then
# fail when the client is constructed/used rather than here.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
groq_client = groq.Groq(api_key = GROQ_API_KEY)
# System message sent with every request issued through generate() below.
sys_prompt ="""You are a helpful virtual assistant. \
Your goal is to provide useful and relevant \
responses to my request"""
# Groq-hosted model identifiers; generate() expects one of these strings.
models = [
    "llama-3.1-405b-reasoning",
    "llama-3.1-70b-versatile",
    "llama-3.1-8b-instant",
    "mixtral-8x7b-32768"
]
def generate(model: str, query: str, temperature: float,
             system_prompt: str = sys_prompt) -> str:
    """Send a single-turn chat request to Groq and return the reply text.

    Args:
        model: Groq model identifier (e.g. an entry of the module-level
            ``models`` list).
        query: The user's message.
        temperature: Sampling temperature, passed through to the API.
        system_prompt: System message for the request. Defaults to the
            module-level ``sys_prompt`` (backward compatible with the
            previous hard-coded behavior).

    Returns:
        The assistant's reply as plain text.
    """
    response = groq_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": query},
        ],
        response_format={"type": "text"},
        temperature=temperature,
    )
    # Only one choice is requested, so the first entry holds the reply.
    return response.choices[0].message.content
if __name__ == "__main__":
    # Demo run: ask one question using the second configured model.
    chosen_model = models[1]
    question = "Which is bigger, 9.11 or 9.9"
    answer = generate(chosen_model, question, temperature=0.5)
    print(answer)