Skip to content

Commit

Permalink
feat: create xblocks
Browse files Browse the repository at this point in the history
feat: add HTML/CSS support

fix: minor changes

fix: remove the extra space around the code editor

* extra space on right was caused by the 100vh for
  width instead of 100vw
* on the left, the default lineNumbersMinChars value
  of 5 was causing a huge margin, along with the
  code folding gutter.
  See: suren-atoyan/monaco-react#303

fix: add the border and the box shadow to match mockup styling

fix: scroll just the tab content instead, not parent container

fix: make the tab switches smaller to match mockups

fix: submission button styling and border

fix: added multiple fixes and tweaks

fix: added multiple fixes and tweaks

fix: styling adjustments for the prog language badge
  • Loading branch information
CefBoud committed Aug 2, 2024
1 parent 7979775 commit 01b6280
Show file tree
Hide file tree
Showing 46 changed files with 3,934 additions and 29 deletions.
29 changes: 0 additions & 29 deletions .github/workflows/upgrade-python-requirements.yml

This file was deleted.

98 changes: 98 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# Declare every command-style target as phony so a file with the same name
# can never mask it. Fixed: piptools and requirements_python were missing.
.PHONY: clean help compile_translations dummy_translations extract_translations detect_changed_source_translations \
        build_dummy_translations validate_translations check_translations_up_to_date \
        piptools requirements requirements_python selfcheck test test.python test.unit test.quality upgrade

.DEFAULT_GOAL := help

# Package directory holding the XBlock sources and locale files.
WORKING_DIR := ai_eval
# Output directory for the compiled JavaScript translation catalog.
JS_TARGET := $(WORKING_DIR)/public/js/translations
# Where i18n_tool drops the extracted English .po files.
EXTRACT_DIR := $(WORKING_DIR)/conf/locale/en/LC_MESSAGES
EXTRACTED_DJANGO_PARTIAL := $(EXTRACT_DIR)/django-partial.po
EXTRACTED_DJANGOJS_PARTIAL := $(EXTRACT_DIR)/djangojs-partial.po
EXTRACTED_DJANGO := $(EXTRACT_DIR)/django.po

help: ## display this help message
	@echo "Please use \`make <target>' where <target> is one of"
	# Fixed: the character class now includes '.' so dotted targets such as
	# test.python / test.unit / test.quality appear in the help output.
	@perl -nle'print $& if m{^[a-zA-Z_.-]+:.*?## .*$$}' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-25s\033[0m %s\n", $$1, $$2}'

clean: ## remove generated byte code, coverage reports, and build artifacts
	# Bytecode caches and stray editor backups, anywhere in the tree.
	find . -name '__pycache__' -exec rm -rf {} +
	find . \( -name '*.pyc' -o -name '*.pyo' -o -name '*~' \) -exec rm -f {} +
	# Packaging artifacts at the repository root.
	rm -fr build/ dist/ *.egg-info

## Localization targets

extract_translations: ## extract strings to be translated, outputting .po files
	cd $(WORKING_DIR) && i18n_tool extract
	mv $(EXTRACTED_DJANGO_PARTIAL) $(EXTRACTED_DJANGO)
	# Merge the djangojs catalog into django.po when it exists. The file will
	# exist in this repo, but we follow the shared pattern used by all
	# repositories that rely on i18n_tool.
	if [ -f $(EXTRACTED_DJANGOJS_PARTIAL) ]; then \
		msgcat $(EXTRACTED_DJANGO) $(EXTRACTED_DJANGOJS_PARTIAL) -o $(EXTRACTED_DJANGO) && \
		rm $(EXTRACTED_DJANGOJS_PARTIAL); \
	fi
	# Replace the msginit placeholders with concrete English plural rules.
	sed -i'' -e 's/nplurals=INTEGER/nplurals=2/' -e 's/plural=EXPRESSION/plural=\(n != 1\)/' $(EXTRACTED_DJANGO)

# Compile .mo catalogs for each supported language, then build the JavaScript
# catalog served to the XBlock frontend.
# NOTE(review): the "DragAndDropI18N" namespace looks copy-pasted from the
# drag-and-drop-v2 XBlock — confirm the JS bundle in this repo actually reads
# that namespace before shipping.
compile_translations: ## compile translation files, outputting .mo files for each supported language
cd $(WORKING_DIR) && i18n_tool generate -v
python manage.py compilejsi18n --namespace DragAndDropI18N --output $(JS_TARGET)

# Fixed consistency: this was the only target without a '##' annotation, so it
# never appeared in `make help`.
detect_changed_source_translations: ## check if translation source files changed since extraction
	cd $(WORKING_DIR) && i18n_tool changed

# Generate machine-made "dummy" catalogs used to spot untranslated strings.
dummy_translations: ## generate dummy translation (.po) files
cd $(WORKING_DIR) && i18n_tool dummy

# The targets below are recipe-less aggregates: they only chain prerequisite
# steps. NOTE(review): the steps appear order-sensitive (extract before
# compile), so these are presumably not safe under `make -j` — confirm.
build_dummy_translations: dummy_translations compile_translations ## generate and compile dummy translation files

validate_translations: build_dummy_translations detect_changed_source_translations ## validate translations

check_translations_up_to_date: extract_translations compile_translations dummy_translations detect_changed_source_translations ## extract, compile, and check if translation files are up-to-date

# Install the pinned pip/pip-tools versions before touching any other
# requirements, so pip-sync/pip-compile themselves are reproducible.
piptools: ## install pinned version of pip-compile and pip-sync
pip install -r requirements/pip.txt
pip install -r requirements/pip-tools.txt

# CI/test dependencies only; pip-sync removes anything not listed.
requirements: piptools ## install test requirements locally
pip-sync requirements/ci.txt

# Full development environment, including any private requirements files.
requirements_python: piptools ## install all requirements locally
pip-sync requirements/dev.txt requirements/private.*

# Lint/quality checks run through tox; selfcheck first sanity-checks make.
test.quality: selfcheck ## run quality checkers on the codebase
tox -e quality

# Direct pytest run in the current virtualenv; pass TEST=<args> to narrow it.
test.python: ## run python unit tests in the local virtualenv
pytest --cov ai_eval $(TEST)

# Unit tests across all tox environments.
test.unit: ## run all unit tests
tox $(TEST)

# Everything: unit tests, quality checks, then the translations check.
test: test.unit test.quality ## Run all tests
tox -e translations

# Define PIP_COMPILE_OPTS=-v to get more information during make upgrade.
# Recursive '=' on purpose: PIP_COMPILE_OPTS may be set on the command line.
PIP_COMPILE = pip-compile --upgrade $(PIP_COMPILE_OPTS)

upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade
upgrade: ## update the requirements/*.txt files with the latest packages satisfying requirements/*.in
	pip install -qr requirements/pip-tools.txt
	# Make sure to compile files after any other files they include!
	$(PIP_COMPILE) --allow-unsafe -o requirements/pip.txt requirements/pip.in
	$(PIP_COMPILE) -o requirements/pip-tools.txt requirements/pip-tools.in
	pip install -qr requirements/pip.txt
	pip install -qr requirements/pip-tools.txt
	$(PIP_COMPILE) -o requirements/base.txt requirements/base.in
	$(PIP_COMPILE) -o requirements/test.txt requirements/test.in
	$(PIP_COMPILE) -o requirements/quality.txt requirements/quality.in
	$(PIP_COMPILE) -o requirements/ci.txt requirements/ci.in
	$(PIP_COMPILE) -o requirements/dev.txt requirements/dev.in
	# Let tox pick the Django version under test. Fixed: use the same
	# -i'' -e form as extract_translations for GNU/BSD sed compatibility.
	sed -i'' -e '/^[dD]jango==/d' requirements/test.txt

# Sanity placeholder: if make parsed this file far enough to run the recipe,
# the Makefile is syntactically valid.
selfcheck: ## check that the Makefile is well-formed
@echo "The Makefile is well-formed."
38 changes: 38 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
## Introduction

This repository hosts two Open edX XBlocks:

1. **Short Answer with AI Evaluation**: This XBlock allows students to submit short answers, which are then evaluated with the help of a large language model (LLM).
2. **Coding with AI Evaluation**: This XBlock allows students to submit code in a text editor. The code is executed via a third-party API (currently using [Judge0](https://judge0.com/)), and both the code and its output are sent to an LLM for feedback.

## Screenshots

| ![Short Answer with AI evaluation Xblock](docs/shortanswer-xblock.png) | ![Coding with AI evaluation Xblock](docs/coding-xblock.png) |
|-----------------------------------------------------------------------|----------------------------------------------------------------|
| ![Coding with AI evaluation Xblock AI feedback](docs/coding-xblock-ai-feedback.png) | ![Coding with AI evaluation Xblock HTML](docs/coding-xblock-html.png) |



## Setup

### Using Tutor

1. Add the following line to the `OPENEDX_EXTRA_PIP_REQUIREMENTS` in your Tutor `config.yml` file:
```yaml
OPENEDX_EXTRA_PIP_REQUIREMENTS:
- git+https://github.com/open-craft/xblock-ai-evaluation
```
You can append `@vX.Y.Z` to the URL to specify your desired version.

2. Launch Tutor.

3. In the Open edX platform, navigate to `Settings > Advanced Settings` and add `shortanswer_ai_eval` and `coding_ai_eval` to the `Advanced Module List`.

4. Add either XBlock using the `Advanced` button in the `Add New Component` section of Studio.

5. Configure the added XBlock and make sure to add the correct API keys. You can format your question and prompts using [Markdown](https://marked.js.org/demo/).

## Dependencies
- [Judge0 API](https://judge0.com/)
- [Monaco editor](https://github.com/microsoft/monaco-editor)
- [LiteLLM](https://github.com/BerriAI/litellm)
6 changes: 6 additions & 0 deletions ai_eval/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
"""
Xblock to have short text and code entries with AI-driven evaluation.
"""

from .shortanswer import ShortAnswerAIEvalXBlock
from .coding_ai_eval import CodingAIEvalXBlock
146 changes: 146 additions & 0 deletions ai_eval/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
"""Base Xblock with AI evaluation."""

import pkg_resources

from django.utils.translation import gettext_noop as _
from xblock.core import XBlock
from xblock.fields import String, Scope, Dict
from xblock.validation import ValidationMessage


from .llm import SupportedModels

try:
from xblock.utils.studio_editable import StudioEditableXBlockMixin
except ModuleNotFoundError: # For compatibility with Palm and earlier
from xblockutils.studio_editable import StudioEditableXBlockMixin

try:
from xblock.utils.resources import ResourceLoader
except (
ModuleNotFoundError
): # For backward compatibility with releases older than Quince.
from xblockutils.resources import ResourceLoader


class AIEvalXBlock(StudioEditableXBlockMixin, XBlock):
    """
    Base class for XBlocks with AI evaluation.

    Bundles the model-selection and API-credential settings shared by the
    short-answer and coding XBlocks, plus the Studio field validation that
    keeps those settings consistent.
    """

    # Keys of the ``messages`` dict: one chat-message list per side.
    USER_KEY = "USER"
    LLM_KEY = "LLM"

    loader = ResourceLoader(__name__)

    icon_class = "problem"
    model_api_key = String(
        display_name=_("Chosen model API Key"),
        # Fixed typo: "Enter your the API Key" -> "Enter the API Key".
        help=_("Enter the API Key of your chosen model."),
        default="",
        scope=Scope.settings,
    )
    model_api_url = String(
        display_name=_("Set your API URL"),
        help=_(
            "Fill this only for LLama. This is required for models that don't have an official provider."
            " Example URL: https://model-provider-example/llama3_70b"
        ),
        default=None,
        scope=Scope.settings,
    )
    model = String(
        display_name=_("AI model"),
        help=_("Select the AI language model to use."),
        values=[
            {"display_name": model, "value": model} for model in SupportedModels.list()
        ],
        # BUG FIX: this was ``Scope=Scope.settings`` (capital S) — an unknown
        # keyword rather than the field's scope, so the field was not stored
        # with the intended settings scope.
        scope=Scope.settings,
        default=SupportedModels.GPT4O.value,
    )

    evaluation_prompt = String(
        display_name=_("Evaluation prompt"),
        help=_(
            "Enter the evaluation prompt given to the model."
            " The question will be inserted right after it."
            " The student's answer would then follow the question. Markdown format can be used."
        ),
        default="You are a teacher. Evaluate the student's answer for the following question:",
        multiline_editor=True,
        scope=Scope.settings,
    )
    question = String(
        display_name=_("Question"),
        help=_(
            "Enter the question you would like the students to answer."
            " Markdown format can be used."
        ),
        default="",
        multiline_editor=True,
        scope=Scope.settings,
    )

    # Per-learner chat history, keyed by USER_KEY / LLM_KEY.
    messages = Dict(
        help=_("Dictionary with chat messages"),
        scope=Scope.user_state,
        default={USER_KEY: [], LLM_KEY: []},
    )
    editable_fields = (
        "display_name",
        "evaluation_prompt",
        "question",
        "model",
        "model_api_key",
        "model_api_url",
    )

    def resource_string(self, path):
        """Return the UTF-8 decoded content of a resource packaged with this XBlock."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def validate_field_data(self, validation, data):
        """
        Validate the Studio-editable fields.

        Adds a ``ValidationMessage`` for each violated constraint: the model
        must be one of the supported ones, an API key is mandatory, the API
        URL must be set exactly when the Llama model is selected, and the
        question must not be empty.
        """

        if not data.model or data.model not in SupportedModels.list():
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    _(  # pylint: disable=translation-of-non-string
                        f"Model field is mandatory and must be one of {', '.join(SupportedModels.list())}"
                    ),
                )
            )

        if not data.model_api_key:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR, _("Model API key is mandatory")
                )
            )

        # BUG FIX: ``data.model`` stores the *value* string (cf. the field's
        # default and ``values`` above), so compare against the enum member's
        # ``.value`` — comparing to the member itself only works for str-enums.
        if data.model == SupportedModels.LLAMA.value and not data.model_api_url:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    _("API URL field is mandatory when using ollama/llama2."),
                )
            )

        if data.model != SupportedModels.LLAMA.value and data.model_api_url:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    _("API URL field can be set only when using ollama/llama2."),
                )
            )

        if not data.question:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR, _("Question field is mandatory")
                )
            )
Loading

0 comments on commit 01b6280

Please sign in to comment.