# GitHub Actions workflow: automated preset Docker image building (introduced in PR #98).
name: Build and Push Preset Models

on:
  # For testing: also run on PRs into main that touch preset sources.
  pull_request:
    branches:
      - main
    paths:
      - 'pkg/presets/falcon/**'
      - 'pkg/presets/llama-2/**'
      - 'pkg/presets/llama-2-chat/**'
  push:
    branches:
      - main
    paths:
      - 'pkg/presets/falcon/**'
      - 'pkg/presets/llama-2/**'
      - 'pkg/presets/llama-2-chat/**'
  # Manual trigger: optionally override the image tag.
  workflow_dispatch:
    inputs:
      release:
        description: 'Release (yes/no)'
        required: true
        default: 'no'
      image_tag:
        description: 'Image Tag'
        required: false

permissions:
  id-token: write  # required for azure/login OIDC federated credentials
  contents: read
jobs:
  # Determines which preset families were modified and what image tag to use.
  # Exposes the results as job outputs for the build job.
  setup:
    runs-on: ubuntu-20.04
    outputs:
      image_tag: ${{ steps.set_tag.outputs.image_tag }}
      FALCON_MODIFIED: ${{ steps.check_modified_paths.outputs.FALCON_MODIFIED }}
      LLAMA2_MODIFIED: ${{ steps.check_modified_paths.outputs.LLAMA2_MODIFIED }}
      LLAMA2_CHAT_MODIFIED: ${{ steps.check_modified_paths.outputs.LLAMA2_CHAT_MODIFIED }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true
          fetch-depth: 0  # full history so HEAD^ exists for the diff below

      - name: Check Available Disk Space
        run: df -h

      - name: Get Modified files
        # NOTE(review): `HEAD^ HEAD` covers the full PR diff on pull_request
        # events (HEAD is the merge commit), but on push events it only covers
        # the LAST commit of a multi-commit push — earlier commits' changes are
        # missed. Consider `git diff ${{ github.event.before }} HEAD` for push.
        run: |
          files=$(git diff --name-only HEAD^ HEAD)
          echo "Modified files: $files"
          FILES_MODIFIED=""
          while IFS= read -r file; do
            # Strip any stray whitespace so path-prefix matching is reliable.
            trimmed_file=$(echo "$file" | tr -d '[:space:]')
            echo "Trimmed file: $trimmed_file"
            FILES_MODIFIED="${FILES_MODIFIED}${trimmed_file};"
          done <<< "$files"
          # Semicolon-joined list; consumed by the next step via the env context.
          echo "FILES_MODIFIED=${FILES_MODIFIED}" >> "$GITHUB_ENV"

      - name: Check Modified Paths
        id: check_modified_paths
        run: |
          FALCON_MODIFIED=false
          LLAMA2_MODIFIED=false
          LLAMA2_CHAT_MODIFIED=false
          IFS=';' read -ra ADDR <<< "$FILES_MODIFIED"
          for file in "${ADDR[@]}"; do
            echo "Checking file: $file"
            if [[ "$file" == pkg/presets/falcon/* ]] && [[ "$FALCON_MODIFIED" == false ]]; then
              echo "File matches falcon path: $file"
              FALCON_MODIFIED=true
            elif [[ "$file" == pkg/presets/llama-2/* ]] && [[ "$LLAMA2_MODIFIED" == false ]]; then
              echo "File matches llama-2 path: $file"
              LLAMA2_MODIFIED=true
            elif [[ "$file" == pkg/presets/llama-2-chat/* ]] && [[ "$LLAMA2_CHAT_MODIFIED" == false ]]; then
              echo "File matches llama-2-chat path: $file"
              LLAMA2_CHAT_MODIFIED=true
            else
              echo "File does not match any paths: $file"
            fi
          done
          echo "FALCON_MODIFIED=$FALCON_MODIFIED" >> "$GITHUB_OUTPUT"
          echo "LLAMA2_MODIFIED=$LLAMA2_MODIFIED" >> "$GITHUB_OUTPUT"
          echo "LLAMA2_CHAT_MODIFIED=$LLAMA2_CHAT_MODIFIED" >> "$GITHUB_OUTPUT"

      - name: Images to Build
        run: |
          echo "FALCON_MODIFIED for this job: ${{ steps.check_modified_paths.outputs.FALCON_MODIFIED }}"
          echo "LLAMA2_MODIFIED for this job: ${{ steps.check_modified_paths.outputs.LLAMA2_MODIFIED }}"
          echo "LLAMA2_CHAT_MODIFIED for this job: ${{ steps.check_modified_paths.outputs.LLAMA2_CHAT_MODIFIED }}"

      - name: Set Image Tag
        id: set_tag
        # Manual dispatch may override the tag; otherwise default to "latest".
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.image_tag }}" ]]; then
            echo "Using workflow dispatch to set image tag"
            echo "image_tag=${{ github.event.inputs.image_tag }}" >> "$GITHUB_OUTPUT"
          else
            echo "Setting image tag based on latest commit"
            echo "image_tag=latest" >> "$GITHUB_OUTPUT"
          fi

      - name: Print Image Tag
        run: |
          echo "image_tag for this job: ${{ steps.set_tag.outputs.image_tag }}"
build: | |
needs: setup | |
runs-on: ubuntu-20.04 | |
strategy: | |
fail-fast: false | |
matrix: | |
model: | |
- name: falcon-7b | |
dockerfile: docker/presets/falcon/Dockerfile | |
build_args: "--build-arg FALCON_MODEL_NAME=tiiuae/falcon-7b" | |
- name: falcon-7b-instruct | |
dockerfile: docker/presets/falcon/Dockerfile | |
build_args: "--build-arg FALCON_MODEL_NAME=tiiuae/falcon-7b-instruct" | |
# - name: llama-2-7b | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-7b --build-arg SRC_DIR=pkg/presets/llama-2 --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
# - name: llama-2-7b-chat | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-7b-chat --build-arg SRC_DIR=pkg/presets/llama-2-chat --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
# - name: llama-2-13b | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-13b --build-arg SRC_DIR=pkg/presets/llama-2 --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
# - name: llama-2-13b-chat | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-13b-chat --build-arg SRC_DIR=pkg/presets/llama-2-chat --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
# TODO: Support large models | |
# - name: falcon-40b | |
# dockerfile: docker/presets/falcon/Dockerfile | |
# build_args: "--build-arg FALCON_MODEL_NAME=tiiuae/falcon-40b" | |
# - name: falcon-40b-instruct | |
# dockerfile: docker/presets/falcon/Dockerfile | |
# build_args: "--build-arg FALCON_MODEL_NAME=tiiuae/falcon-40b-instruct" | |
# - name: llama-2-70b | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-70b --build-arg SRC_DIR=pkg/presets/llama-2 --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
# - name: llama-2-70b-chat | |
# dockerfile: docker/presets/llama-2/Dockerfile | |
# build_args: "--build-arg EXTERNAL_IP=__EXTERNAL_IP__ --build-arg EXTERNAL_PORT=__EXTERNAL_PORT__ --build-arg LLAMA_VERSION=llama-2-70b-chat --build-arg SRC_DIR=pkg/presets/llama-2-chat --build-arg WEB_SERVER_AUTH_TOKEN=__WEB_SERVER_AUTH_TOKEN__" | |
include: | |
- name: falcon-7b | |
if: ${{ needs.setup.outputs.FALCON_MODIFIED == 'true' }} | |
- name: falcon-7b-instruct | |
if: ${{ needs.setup.outputs.FALCON_MODIFIED == 'true' }} | |
- name: llama-2-7b | |
if: ${{ needs.setup.outputs.LLAMA2_MODIFIED == 'true' }} | |
- name: llama-2-7b-chat | |
if: ${{ needs.setup.outputs.LLAMA2_CHAT_MODIFIED == 'true' }} | |
- name: llama-2-13b | |
if: ${{ needs.setup.outputs.LLAMA2_MODIFIED == 'true' }} | |
- name: llama-2-13b-chat | |
if: ${{ needs.setup.outputs.LLAMA2_CHAT_MODIFIED == 'true' }} | |
# TODO: Support large models | |
# - name: falcon-40b | |
# if: ${{ needs.setup.outputs.FALCON_MODIFIED == 'true' }} | |
# - name: falcon-40b-instruct | |
# if: ${{ needs.setup.outputs.FALCON_MODIFIED == 'true' }} | |
# - name: llama-2-70b | |
# if: ${{ needs.setup.outputs.LLAMA2_MODIFIED == 'true' }} | |
# - name: llama-2-70b-chat | |
# if: ${{ needs.setup.outputs.LLAMA2_CHAT_MODIFIED == 'true' }} | |
steps: | |
- name: Checkout | |
uses: actions/checkout@v4 | |
with: | |
submodules: true | |
fetch-depth: 0 | |
- name: Az CLI login | |
uses: azure/login@v1 | |
with: | |
client-id: ${{ secrets.AZURE_KDM_PRESET_CLIENT_ID }} | |
tenant-id: ${{ secrets.AZURE_TENANT_ID }} | |
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} | |
- name: Install Azure CLI latest | |
run: | | |
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash | |
- name: 'Login to ACR' | |
run: az acr login --name aimodelsregistry | |
- name: Build and push model (with retries) | |
run: | | |
retries=3 | |
while [ $retries -gt 0 ]; do | |
BUILD_ARGS="${{ matrix.model.build_args }}" | |
BUILD_ARGS=${BUILD_ARGS/__EXTERNAL_IP__/${{ secrets.AZURE_WEB_SERVER_EXTERNAL_IP }}} | |
BUILD_ARGS=${BUILD_ARGS/__EXTERNAL_PORT__/${{ secrets.AZURE_WEB_SERVER_EXTERNAL_PORT }}} | |
BUILD_ARGS=${BUILD_ARGS/__WEB_SERVER_AUTH_TOKEN__/${{ secrets.WEB_SERVER_AUTH_TOKEN }}} | |
echo "Docker BUILD_ARGS: $BUILD_ARGS" | |
az acr build \ | |
$BUILD_ARGS \ | |
-t aimodelsregistry.azurecr.io/${{ matrix.model.name }}:${{ needs.setup.outputs.image_tag }} \ | |
-r aimodelsregistry \ | |
-f ${{ matrix.model.dockerfile }} \ | |
. && break | |
retries=$((retries-1)) | |
sleep 15 | |
done |