Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add UI unit tests and merge UI constants with AGA constants #168

Open
wants to merge 16 commits into
base: main
Choose a base branch
from
15 changes: 15 additions & 0 deletions .github/workflow_scripts/env_setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,18 @@ function install_all_pip {
python -m pip install --upgrade pip
python -m pip install -e ".[dev]"
}

# Install the package in editable dev mode plus the pytest toolchain
# needed to run the UI unit tests with coverage reporting.
function install_ui_test {
    python3 -m pip install --upgrade pip
    python3 -m pip install --upgrade -e ".[dev]"
    # One pip invocation so both test dependencies are resolved together.
    python3 -m pip install pytest pytest-cov
}

# Install everything install_ui_test installs, plus coverage-threshold
# for enforcing the minimum line-coverage gate in CI.
# Reuses install_ui_test instead of repeating its pip commands, so the
# two environments cannot drift apart.
function install_coverage_test {
    install_ui_test
    python3 -m pip install coverage-threshold
}
7 changes: 7 additions & 0 deletions .github/workflow_scripts/test_ui.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Run the AutoGluon-Assistant UI unit tests in CI.
# - Emits a JUnit report (ui_results.xml) for test-result publishing.
# - Emits a JSON coverage report (ui_coverage.json) that is consumed by
#   the coverage-threshold gate in test_ui_coverage.sh.
set -ex

source $(dirname "$0")/env_setup.sh

install_ui_test
python3 -m pytest --junitxml=ui_results.xml tests/unittests/ui --cov=src/autogluon/assistant/ui --cov-report json:ui_coverage.json
9 changes: 9 additions & 0 deletions .github/workflow_scripts/test_ui_coverage.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/bin/bash
# Enforce the minimum line-coverage threshold for the UI unit tests.
# Expects ui_coverage.json (produced by test_ui.sh and handed between
# CI jobs as an artifact) to be present in the working directory.
set -ex

source $(dirname "$0")/env_setup.sh

install_coverage_test

# Fail the build if line coverage is below 70%.
coverage-threshold --line-coverage-min 70 --coverage-json ui_coverage.json

42 changes: 42 additions & 0 deletions .github/workflows/test_ui.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# CI workflow for the AutoGluon-Assistant UI: runs the unit tests, then
# gates the build on a minimum coverage threshold in a second job.
# (Indentation reconstructed to standard GitHub Actions layout.)
name: UI Test

on:
  push:
  pull_request:
    types: [labeled, opened, synchronize]

jobs:
  run_tests:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - name: Run UI Tests
        run: |
          chmod +x ./.github/workflow_scripts/test_ui.sh && ./.github/workflow_scripts/test_ui.sh
      # Publish the coverage JSON so the downstream job can fetch it.
      - name: Archive code coverage results
        uses: actions/upload-artifact@v4
        with:
          name: ui-coverage-report
          path: ./ui_coverage.json
  run_coverage_check:
    needs: run_tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - name: Download code coverage results
        uses: actions/download-artifact@v4
        with:
          name: ui-coverage-report
      - name: Run UI Unittests Coverage Check
        run: |
          chmod +x ./.github/workflow_scripts/test_ui_coverage.sh && ./.github/workflow_scripts/test_ui_coverage.sh
146 changes: 146 additions & 0 deletions src/autogluon/assistant/constants.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from copy import deepcopy

# Task Inference
NO_FILE_IDENTIFIED = "NO_FILE_IDENTIFIED"
NO_ID_COLUMN_IDENTIFIED = "NO_ID_COLUMN_IDENTIFIED"
Expand Down Expand Up @@ -79,3 +81,147 @@
"gpt-4o-2024-08-06",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
]

# The below constants are for Autogluon-Assistant UI
BASE_DATA_DIR = "./user_data"  # root directory for user-uploaded datasets


# Stage Names (display labels; also used as keys of the stage containers below)
STAGE_TASK_UNDERSTANDING = "Task Understanding"
STAGE_FEATURE_GENERATION = "Feature Generation"
STAGE_MODEL_TRAINING = "Model Training"
STAGE_PREDICTION = "Prediction"

# Log Messages emitted at the start of each pipeline stage
MSG_TASK_UNDERSTANDING = "Task understanding starts"
MSG_FEATURE_GENERATION = "Automatic feature generation starts"
MSG_MODEL_TRAINING = "Model training starts"
MSG_PREDICTION = "Prediction starts"

# Mapping from a start-of-stage log message to the stage it begins
STAGE_MESSAGES = {
    MSG_TASK_UNDERSTANDING: STAGE_TASK_UNDERSTANDING,
    MSG_FEATURE_GENERATION: STAGE_FEATURE_GENERATION,
    MSG_MODEL_TRAINING: STAGE_MODEL_TRAINING,
    MSG_PREDICTION: STAGE_PREDICTION,
}

# Preset configurations (per-preset UI defaults)
PRESET_DEFAULT_CONFIG = {
    "Best Quality": {"time_limit": "4 hrs", "feature_generation": False},
    "High Quality": {"time_limit": "1 hr", "feature_generation": False},
    "Medium Quality": {"time_limit": "10 mins", "feature_generation": False},
}
DEFAULT_PRESET = "Medium Quality"

PRESET_MAPPING = {
    "Best Quality": "best_quality",
    "High Quality": "high_quality",
    "Medium Quality": "medium_quality",
}
# Derived from the mapping so the option list can never drift out of sync.
PRESET_OPTIONS = list(PRESET_MAPPING)

# Time limit configurations (in seconds)
TIME_LIMIT_MAPPING = {
    "3 mins": 180,
    "10 mins": 600,
    "30 mins": 1800,
    "1 hr": 3600,
    "2 hrs": 7200,
    "4 hrs": 14400,
}

DEFAULT_TIME_LIMIT = "10 mins"

TIME_LIMIT_OPTIONS = list(TIME_LIMIT_MAPPING)

# LLM configurations (display label -> model identifier)
LLM_MAPPING = {
    "Claude 3.5 with Amazon Bedrock": "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "GPT 4o": "gpt-4o-2024-08-06",
}

LLM_OPTIONS = list(LLM_MAPPING)

# Provider configuration (display label -> backend provider name)
PROVIDER_MAPPING = {"Claude 3.5 with Amazon Bedrock": "bedrock", "GPT 4o": "openai"}

# Per-stage log containers; keys reuse the stage-name constants above
INITIAL_STAGE = {
    STAGE_TASK_UNDERSTANDING: [],
    STAGE_FEATURE_GENERATION: [],
    STAGE_MODEL_TRAINING: [],
    STAGE_PREDICTION: [],
}

# Initial Session state.
# NOTE(review): deepcopy here runs once, at import time, so every consumer of
# DEFAULT_SESSION_VALUES shares this single dict — callers should deepcopy it
# per session before mutating; TODO confirm against the UI session setup.
DEFAULT_SESSION_VALUES = {
    "config_overrides": [],
    "preset": DEFAULT_PRESET,
    "time_limit": DEFAULT_TIME_LIMIT,
    "llm": None,
    "pid": None,
    "logs": "",
    "process": None,
    "clicked": False,
    "task_running": False,
    "output_file": None,
    "output_filename": None,
    "task_description": "",
    "sample_description": "",
    "return_code": None,
    "task_canceled": False,
    "uploaded_files": {},
    "sample_files": {},
    "selected_dataset": None,
    "sample_dataset_dir": None,
    "description_uploader_key": 0,
    "sample_dataset_selector": None,
    "current_stage": None,
    "feature_generation": False,
    "stage_status": {},
    "show_remaining_time": False,
    "model_path": None,
    "elapsed_time": 0,
    "progress_bar": None,
    "increment": 2,
    "zip_path": None,
    "stage_container": deepcopy(INITIAL_STAGE),
    "start_time": None,
    "remaining_time": 0,
    "start_model_train_time": 0,
}

# Message to display different logging stage (log snippet -> progress percent)
STATUS_BAR_STAGE = {
    "Task loaded!": 10,
    "Model training starts": 25,
    "Fitting model": 50,
    "AutoGluon training complete": 80,
    "Prediction starts": 90,
}

# Log lines that signal a stage has finished
STAGE_COMPLETE_SIGNAL = [
    "Task understanding complete",
    "Automatic feature generation complete",
    "Model training complete",
    "Prediction complete",
]

# DataSet Options
DATASET_OPTIONS = ["Sample Dataset", "Upload Dataset"]

# Captions under DataSet Options
CAPTIONS = ["Run with sample dataset", "Upload Train (Required), Test (Required) and Output (Optional) Dataset"]

DEMO_URL = "https://automl-mm-bench.s3.amazonaws.com/autogluon-assistant/aga-kaggle-demo.mp4"

LOGO_PATH = "static/page_icon.png"
SUCCESS_MESSAGE = """
🎉🎉Task completed successfully! If you found this useful, please consider:
⭐ [Starring our repository](https://github.com/autogluon/autogluon-assistant)
"""
# Sample dataset download location and local extraction paths
S3_URL = "https://automl-mm-bench.s3.us-east-1.amazonaws.com/autogluon-assistant/sample_dataset.zip"
LOCAL_ZIP_PATH = "sample_data.zip"
EXTRACT_DIR = "sample_dataset"
# Log lines suppressed from the UI log view
IGNORED_MESSAGES = [
    "Failed to identify the sample_submission_data of the task, it is set to None.",
    "Too many requests, please wait before trying again",
]
Empty file.
9 changes: 7 additions & 2 deletions src/autogluon/assistant/ui/app.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import os
from copy import deepcopy

import streamlit as st
import streamlit.components.v1 as components

from autogluon.assistant.ui.constants import DEFAULT_SESSION_VALUES, LOGO_PATH
from autogluon.assistant.constants import DEFAULT_SESSION_VALUES, LOGO_PATH
from autogluon.assistant.ui.pages.demo import main as demo
from autogluon.assistant.ui.pages.feature import main as feature
from autogluon.assistant.ui.pages.nav_bar import nav_bar
Expand Down Expand Up @@ -33,7 +34,11 @@
""",
unsafe_allow_html=True,
)
with open("style.css") as f:
current_dir = os.path.dirname(os.path.abspath(__file__))

css_file_path = os.path.join(current_dir, "style.css")

with open(css_file_path) as f:
st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)


Expand Down
Loading
Loading