main.py
# Standard library imports
import time

# Third-party imports
import openai
import streamlit as st

# Project modules for setup and functionality
from setup_st import *
from helper_functions import *
from index_functions import *
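# The wildcard imports above are expected to supply the helpers used below:
# initialize_session_state, set_design, sidebar, get_user_config, clear_button,
# download_button, load_data, generate_response, and generate_response_index.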
# Initialize session state variables if they don't exist
initialize_session_state()
# Setup Streamlit UI/UX elements
set_design()
sidebar()
get_user_config()
clear_button()
download_button()
# Set the OpenAI API key from session state, if one has been provided
if 'api_key' in st.session_state and st.session_state['api_key']:
    openai.api_key = st.session_state['api_key']
# Setting up indexing functionality
try:
    index = load_data()
    chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
except Exception as e:
    st.sidebar.error(f"An error occurred while loading indexed data: {e}")
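# Note: if load_data() fails, chat_engine is never assigned, so the indexed
# response path below would raise a NameError; the non-indexed path still works.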
# Warning to show that index is not currently being used if checkbox is unchecked
if not st.session_state['use_index']:
    st.sidebar.warning("Index is not currently being used. Toggle box above if you'd like to enable it.")
# Displaying the existing chat messages from the user and the chatbot
for message in st.session_state.messages:  # For every message in the chat history
    with st.chat_message(message["role"]):  # Create a chat message box
        st.markdown(message["content"])  # Display the content of the message
# Accept user input and generate a response
if prompt := st.chat_input("How would you like to reply?"):
    # Add the user's message to the chat history
    if prompt != "":
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})  # Add user's message to chat history

    # Increment total message count
    st.session_state['message_count'] += 1

    # Call either generate_response or generate_response_index based on st.session_state['use_index']
    if st.session_state['use_index']:
        response_generated = generate_response_index(
            "You are an expert who is great at assisting users with whatever query they have",
            st.session_state.messages,
            st.session_state['model_name'],
            st.session_state['temperature'],
            chat_engine
        )
    else:
        response_generated = generate_response(
            "You are an expert who is great at assisting users with whatever query they have",
            st.session_state.messages,
            st.session_state['model_name'],
            st.session_state['temperature']
        )
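    # Both helpers are assumed to return an iterable of responses exposing a
    # "content" key; that is how the streaming loop below consumes them.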
    # Create spinner to indicate to the user that the assistant is generating a response
    with st.spinner('CoPilot is thinking...'):
        # Create a chat message box for displaying the assistant's response
        with st.chat_message("assistant"):
            # Initialize an empty string to construct the full response incrementally
            full_response = ""
            # Create an empty placeholder to stream the assistant's response
            message_placeholder = st.empty()
            # Loop through the response generator
            for response in response_generated:
                # If full_response is not empty, display it and save it to the message history
                if full_response:
                    message_placeholder.markdown(full_response)
                    st.session_state.messages.append({"role": "assistant", "content": full_response})
                    # Reset full_response and create a new empty placeholder
                    full_response = ""
                    message_placeholder = st.empty()
                # Break the content into chunks of 10 words each
                chunks = response["content"].split(' ')
                full_response = ""
                # Loop through the chunks to simulate a 'typing' effect
                for i in range(0, len(chunks), 10):
                    # Join the next 10 words to form a chunk
                    chunk = ' '.join(chunks[i:i+10])
                    # Add the chunk to the full response string
                    full_response += chunk + " "  # Add a space at the end of each chunk
                    # Display the currently generated text followed by a 'typing' cursor
                    message_placeholder.markdown(full_response + "▌")
                    # Wait for a small amount of time to simulate the typing effect
                    time.sleep(0.2)
            # Remove the 'typing' cursor and display the final full response
            message_placeholder.markdown(full_response)
            # Add the assistant's final full response to the session state message history
            st.session_state.messages.append({"role": "assistant", "content": full_response})
    # Update the progress bar; this assumes a cap of 10 messages, but the cap can be made dynamic to fit your implementation
    current_progress = st.progress(st.session_state['message_count'] / 10)
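    # Possible refinement (sketch, not part of the original project): st.progress expects a
    # float between 0.0 and 1.0, so clamping keeps it valid once the count passes the cap.
    # 'message_cap' is a hypothetical name used only for illustration:
    #   message_cap = 10
    #   current_progress = st.progress(min(st.session_state['message_count'] / message_cap, 1.0))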