Move input down, change color, customize texts
Mins0o committed Dec 22, 2023
1 parent 50904b8 commit 6489ce1
Showing 5 changed files with 42 additions and 41 deletions.
Dockerfile (2 changes: 0 additions & 2 deletions)
@@ -26,9 +26,7 @@ ARG gid=1000

RUN groupadd -g ${gid} ${group} && useradd -u ${uid} -g ${group} -s /bin/bash ${user}

- EXPOSE 8888
EXPOSE 7860
- EXPOSE 5000

USER h2ogpt

docs/linux_install.sh (1 change: 1 addition & 0 deletions)
@@ -20,6 +20,7 @@ fi
# broad support, but no training-time or data creation dependencies

pip install -r requirements.txt
+ pip install -U duckduckgo-search
# ```
#* Optional: Install document question-answer dependencies:
# ```bash
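The added `duckduckgo-search` line pulls in an extra dependency on top of the base requirements, presumably to back a web-search feature. A minimal, standalone usage sketch, not repo code, assuming the `duckduckgo_search` package's `DDGS` interface as published around the time of this commit:

```python
# Standalone sketch of the duckduckgo_search dependency added above;
# the query string is arbitrary and only illustrates the call shape.
from duckduckgo_search import DDGS

with DDGS() as ddgs:
    # text() yields result dicts with "title", "href", and "body" keys
    for result in ddgs.text("h2oGPT", max_results=3):
        print(result["title"], result["href"])
```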
gradio_utils/prompt_form.py (4 changes: 2 additions & 2 deletions)
@@ -13,9 +13,9 @@ def get_chatbot_name(base_model, model_path_llama, inference_server='', debug=Fa
model_path_llama = os.path.basename(model_path_llama)
if model_path_llama.endswith('?download=true'):
model_path_llama = model_path_llama.replace('?download=true', '')
- return f'h2oGPT [Model: {model_path_llama}{inference_server}]'
+ return f'memesooGPT [Model: {model_path_llama}{inference_server}]'
else:
- return f'h2oGPT [Model: {base_model}{inference_server}]'
+ return f'memesooGPT [Model: {base_model}{inference_server}]'


def get_avatars(base_model, model_path_llama, inference_server=''):
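The two replacements above only rebrand the chat label from h2oGPT to memesooGPT. A trimmed-down sketch of how the function reads after this commit, with the signature and guarding condition simplified and a hypothetical model URL as input:

```python
# Simplified sketch of get_chatbot_name after the rename; the real function
# takes extra arguments and a different guarding condition.
import os

def get_chatbot_name(base_model, model_path_llama, inference_server=''):
    if model_path_llama:  # assumed guard; the actual check sits above the shown hunk
        model_path_llama = os.path.basename(model_path_llama)
        if model_path_llama.endswith('?download=true'):
            model_path_llama = model_path_llama.replace('?download=true', '')
        return f'memesooGPT [Model: {model_path_llama}{inference_server}]'
    return f'memesooGPT [Model: {base_model}{inference_server}]'

print(get_chatbot_name('', 'https://example.com/llama-2-7b.Q4_K_M.gguf?download=true'))
# -> memesooGPT [Model: llama-2-7b.Q4_K_M.gguf]
```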
src/gradio_runner.py (72 changes: 37 additions & 35 deletions)
@@ -998,6 +998,43 @@ def get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=None):
# CHAT
col_chat = gr.Column(visible=kwargs['chat'])
with col_chat:
+ text_output, text_output2, text_outputs = make_chatbots(output_label0, output_label0_model2,
+ **kwargs)
+
+ with gr.Row():
+ with gr.Column(visible=kwargs['score_model']):
+ score_text = gr.Textbox(res_value,
+ show_label=False,
+ visible=True)
+ score_text2 = gr.Textbox("Response Score2: NA", show_label=False,
+ visible=False and not kwargs['model_lock'])
+
+ visible_model_choice = bool(kwargs['model_lock']) and \
+ len(model_states) > 1 and \
+ kwargs['visible_visible_models']
+ with gr.Row(visible=not kwargs['actions_in_sidebar'] or visible_model_choice):
+ visible_models = gr.Dropdown(kwargs['all_possible_visible_models'],
+ label="Visible Models",
+ value=visible_models_state0,
+ interactive=True,
+ multiselect=True,
+ visible=visible_model_choice,
+ elem_id="multi-selection",
+ filterable=False,
+ )
+ mw0 = 100
+ with gr.Column(min_width=mw0):
+ if not kwargs['actions_in_sidebar']:
+ langchain_action = gr.Radio(
+ allowed_actions,
+ value=default_action,
+ label='Action',
+ show_label=visible_model_choice,
+ visible=True,
+ min_width=mw0)
+
+
+
with gr.Row():
with gr.Column(scale=50):
with gr.Row(elem_id="prompt-form-row"):
@@ -1141,41 +1178,6 @@ def clear_audio_state():
stop_text.change(fn=clear_audio_state, outputs=audio_state) \
.then(fn=lambda: None, **stop_kwargs)

- visible_model_choice = bool(kwargs['model_lock']) and \
- len(model_states) > 1 and \
- kwargs['visible_visible_models']
- with gr.Row(visible=not kwargs['actions_in_sidebar'] or visible_model_choice):
- visible_models = gr.Dropdown(kwargs['all_possible_visible_models'],
- label="Visible Models",
- value=visible_models_state0,
- interactive=True,
- multiselect=True,
- visible=visible_model_choice,
- elem_id="multi-selection",
- filterable=False,
- )
- mw0 = 100
- with gr.Column(min_width=mw0):
- if not kwargs['actions_in_sidebar']:
- langchain_action = gr.Radio(
- allowed_actions,
- value=default_action,
- label='Action',
- show_label=visible_model_choice,
- visible=True,
- min_width=mw0)
-
- text_output, text_output2, text_outputs = make_chatbots(output_label0, output_label0_model2,
- **kwargs)
-
- with gr.Row():
- with gr.Column(visible=kwargs['score_model']):
- score_text = gr.Textbox(res_value,
- show_label=False,
- visible=True)
- score_text2 = gr.Textbox("Response Score2: NA", show_label=False,
- visible=False and not kwargs['model_lock'])
-
doc_selection_tab = gr.TabItem("Document Selection") \
if kwargs['visible_doc_selection_tab'] else gr.Row(visible=False)
with doc_selection_tab:
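The net effect of the two hunks above is a relocation: the chatbot, score boxes, and model/action selectors that used to be built after the prompt form are now built before it, which in Gradio's top-to-bottom layout pushes the input box below the chat output. A minimal, self-contained sketch of that ordering; the component names and choices here are illustrative, not the project's:

```python
# Sketch of "move input down": components created earlier in a Blocks context
# render above components created later.
import gradio as gr

with gr.Blocks() as demo:
    # built first, so it renders on top: chat output plus score / model controls
    chatbot = gr.Chatbot(label="memesooGPT")
    score_text = gr.Textbox("Response Score: NA", show_label=False)
    visible_models = gr.Dropdown(["model-a", "model-b"],  # placeholder model names
                                 label="Visible Models", multiselect=True)
    # built last, so it renders at the bottom: the prompt form
    with gr.Row():
        instruction = gr.Textbox(placeholder="Ask anything", show_label=False)

demo.launch()
```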
src/gradio_themes.py (4 changes: 2 additions & 2 deletions)
@@ -152,8 +152,8 @@ class SoftTheme(Soft):
def __init__(
self,
*,
- primary_hue: colors.Color | str = colors.indigo,
- secondary_hue: colors.Color | str = colors.indigo,
+ primary_hue: colors.Color | str = colors.teal,
+ secondary_hue: colors.Color | str = colors.sky,
neutral_hue: colors.Color | str = colors.gray,
spacing_size: sizes.Size | str = sizes.spacing_md,
radius_size: sizes.Size | str = sizes.radius_md,
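The color change swaps the Soft theme's default indigo hues for teal and sky. A standalone sketch of a theme built the same way; the button below is only there to make the hue visible and is not from the repo:

```python
# Sketch of a Gradio Soft-based theme using the teal/sky hues from the diff above.
import gradio as gr
from gradio.themes import Soft
from gradio.themes.utils import colors

custom_theme = Soft(
    primary_hue=colors.teal,     # previously colors.indigo
    secondary_hue=colors.sky,    # previously colors.indigo
    neutral_hue=colors.gray,
)

with gr.Blocks(theme=custom_theme) as demo:
    gr.Button("Primary action", variant="primary")  # picks up the teal primary hue

demo.launch()
```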
