From 6489ce1de6c3258a67f9b585506ce8ee7b8cc1df Mon Sep 17 00:00:00 2001 From: Minsoo Date: Sat, 23 Dec 2023 04:04:04 +0900 Subject: [PATCH] Move input down, change color, customize texts --- Dockerfile | 2 -- docs/linux_install.sh | 1 + gradio_utils/prompt_form.py | 4 +-- src/gradio_runner.py | 72 +++++++++++++++++++------------------ src/gradio_themes.py | 4 +-- 5 files changed, 42 insertions(+), 41 deletions(-) diff --git a/Dockerfile b/Dockerfile index fbd7ba5e7..053796339 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,9 +26,7 @@ ARG gid=1000 RUN groupadd -g ${gid} ${group} && useradd -u ${uid} -g ${group} -s /bin/bash ${user} -EXPOSE 8888 EXPOSE 7860 -EXPOSE 5000 USER h2ogpt diff --git a/docs/linux_install.sh b/docs/linux_install.sh index 1fab48210..7e7b0b771 100644 --- a/docs/linux_install.sh +++ b/docs/linux_install.sh @@ -20,6 +20,7 @@ fi # broad support, but no training-time or data creation dependencies pip install -r requirements.txt + pip install -U duckduckgo-search # ``` #* Optional: Install document question-answer dependencies: # ```bash diff --git a/gradio_utils/prompt_form.py b/gradio_utils/prompt_form.py index 3b94a2935..04f0fc1c4 100644 --- a/gradio_utils/prompt_form.py +++ b/gradio_utils/prompt_form.py @@ -13,9 +13,9 @@ def get_chatbot_name(base_model, model_path_llama, inference_server='', debug=Fa model_path_llama = os.path.basename(model_path_llama) if model_path_llama.endswith('?download=true'): model_path_llama = model_path_llama.replace('?download=true', '') - return f'h2oGPT [Model: {model_path_llama}{inference_server}]' + return f'memesooGPT [Model: {model_path_llama}{inference_server}]' else: - return f'h2oGPT [Model: {base_model}{inference_server}]' + return f'memesooGPT [Model: {base_model}{inference_server}]' def get_avatars(base_model, model_path_llama, inference_server=''): diff --git a/src/gradio_runner.py b/src/gradio_runner.py index 1c385f953..5424b27b3 100644 --- a/src/gradio_runner.py +++ b/src/gradio_runner.py @@ -998,6 +998,43 @@ def get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=None): # CHAT col_chat = gr.Column(visible=kwargs['chat']) with col_chat: + text_output, text_output2, text_outputs = make_chatbots(output_label0, output_label0_model2, + **kwargs) + + with gr.Row(): + with gr.Column(visible=kwargs['score_model']): + score_text = gr.Textbox(res_value, + show_label=False, + visible=True) + score_text2 = gr.Textbox("Response Score2: NA", show_label=False, + visible=False and not kwargs['model_lock']) + + visible_model_choice = bool(kwargs['model_lock']) and \ + len(model_states) > 1 and \ + kwargs['visible_visible_models'] + with gr.Row(visible=not kwargs['actions_in_sidebar'] or visible_model_choice): + visible_models = gr.Dropdown(kwargs['all_possible_visible_models'], + label="Visible Models", + value=visible_models_state0, + interactive=True, + multiselect=True, + visible=visible_model_choice, + elem_id="multi-selection", + filterable=False, + ) + mw0 = 100 + with gr.Column(min_width=mw0): + if not kwargs['actions_in_sidebar']: + langchain_action = gr.Radio( + allowed_actions, + value=default_action, + label='Action', + show_label=visible_model_choice, + visible=True, + min_width=mw0) + + + with gr.Row(): with gr.Column(scale=50): with gr.Row(elem_id="prompt-form-row"): @@ -1141,41 +1178,6 @@ def clear_audio_state(): stop_text.change(fn=clear_audio_state, outputs=audio_state) \ .then(fn=lambda: None, **stop_kwargs) - visible_model_choice = bool(kwargs['model_lock']) and \ - len(model_states) > 1 and \ - 
kwargs['visible_visible_models'] - with gr.Row(visible=not kwargs['actions_in_sidebar'] or visible_model_choice): - visible_models = gr.Dropdown(kwargs['all_possible_visible_models'], - label="Visible Models", - value=visible_models_state0, - interactive=True, - multiselect=True, - visible=visible_model_choice, - elem_id="multi-selection", - filterable=False, - ) - mw0 = 100 - with gr.Column(min_width=mw0): - if not kwargs['actions_in_sidebar']: - langchain_action = gr.Radio( - allowed_actions, - value=default_action, - label='Action', - show_label=visible_model_choice, - visible=True, - min_width=mw0) - - text_output, text_output2, text_outputs = make_chatbots(output_label0, output_label0_model2, - **kwargs) - - with gr.Row(): - with gr.Column(visible=kwargs['score_model']): - score_text = gr.Textbox(res_value, - show_label=False, - visible=True) - score_text2 = gr.Textbox("Response Score2: NA", show_label=False, - visible=False and not kwargs['model_lock']) - doc_selection_tab = gr.TabItem("Document Selection") \ if kwargs['visible_doc_selection_tab'] else gr.Row(visible=False) with doc_selection_tab: diff --git a/src/gradio_themes.py b/src/gradio_themes.py index 24ab05551..38b3bdcce 100644 --- a/src/gradio_themes.py +++ b/src/gradio_themes.py @@ -152,8 +152,8 @@ class SoftTheme(Soft): def __init__( self, *, - primary_hue: colors.Color | str = colors.indigo, - secondary_hue: colors.Color | str = colors.indigo, + primary_hue: colors.Color | str = colors.teal, + secondary_hue: colors.Color | str = colors.sky, neutral_hue: colors.Color | str = colors.gray, spacing_size: sizes.Size | str = sizes.spacing_md, radius_size: sizes.Size | str = sizes.radius_md,
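
Note (not part of the patch): below is a minimal standalone sketch of the two UI-facing changes, the chat output placed above the prompt row ("move input down") and the Soft theme switched from indigo to teal and sky hues, written against plain Gradio. The component names, the echo handler, and the labels are illustrative stand-ins, not the app's actual wiring in gradio_runner.py.

import gradio as gr

# Theme equivalent of the SoftTheme change in src/gradio_themes.py:
# primary hue indigo -> teal, secondary hue indigo -> sky.
theme = gr.themes.Soft(
    primary_hue=gr.themes.colors.teal,
    secondary_hue=gr.themes.colors.sky,
)

with gr.Blocks(theme=theme, title="memesooGPT") as demo:
    # Creating the chatbot before the prompt row keeps the output above the
    # input, mirroring the block the patch moves up in gradio_runner.py.
    chatbot = gr.Chatbot(label="memesooGPT [Model: example]")
    with gr.Row():
        prompt = gr.Textbox(show_label=False, placeholder="Ask anything", scale=50)
        submit = gr.Button("Submit", scale=1)

    def respond(message, history):
        # Placeholder echo handler; the real app routes this through the model.
        history = (history or []) + [(message, f"(echo) {message}")]
        return "", history

    submit.click(respond, inputs=[prompt, chatbot], outputs=[prompt, chatbot])
    prompt.submit(respond, inputs=[prompt, chatbot], outputs=[prompt, chatbot])

if __name__ == "__main__":
    demo.launch()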