Showing 3 changed files with 223 additions and 1 deletion.
# Specify multi-platform base images
FROM --platform=$TARGETPLATFORM ubuntu:22.04 AS base

# Set architecture-specific variables
ARG TARGETPLATFORM
ARG BUILDPLATFORM

# Set up Python and basic dependencies
FROM base AS python-deps
# Re-declare the platform arg: ARG values do not carry across build stages
ARG TARGETPLATFORM
ENV PYTHONUNBUFFERED=1 \
    DEBIAN_FRONTEND=noninteractive

# Install Python and basic dependencies
RUN apt-get update && apt-get install -y \
    python3.10 \
    python3.10-venv \
    python3-pip \
    git \
    && rm -rf /var/lib/apt/lists/*

# Create venv and install dependencies
RUN python3.10 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

WORKDIR /app
COPY requirements.txt .

# Install PyTorch based on architecture
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    # Install the default CPU build for ARM64 (e.g. Apple Silicon hosts)
    pip install --no-cache-dir torch torchvision torchaudio; \
    else \
    # Install the CUDA 11.8 build for NVIDIA GPUs
    pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118; \
    fi

RUN pip install --no-cache-dir -r requirements.txt

# Final stage
FROM base
# The copied venv needs a matching system interpreter in the runtime image
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.10 \
    && rm -rf /var/lib/apt/lists/*
COPY --from=python-deps /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

WORKDIR /app
COPY . .

# Install the package
RUN pip install -e ".[all]"

# Create volume mount points
VOLUME ["/app/outputs", "/app/models"]

# Set environment variables
ENV TORCH_HOME=/app/models
ENV HF_HOME=/app/models

# Default command that can be overridden
ENTRYPOINT ["python3", "-m", "flux.api"]
#!/bin/bash

# Enhanced CLI wrapper for FLUX Docker implementation
# Supports both local model inference and API access

set -e

# Default values
USE_LOCAL=false
API_KEY="${BFL_API_KEY:-}"
MODEL="flux.1-pro"
PROMPT=""
OUTPUT="flux-output.jpg"
OUTPUT_FORMAT="save"
GPU_SUPPORT=""

usage() {
    cat << EOF
Usage: $0 [options]
Options:
  --local          Use local model instead of API
  --api-key KEY    API key for remote usage
  --model NAME     Model name to use (default: flux.1-pro)
  --prompt TEXT    Prompt for image generation
  --output PATH    Output path (default: flux-output.jpg)
  --format FORMAT  Output format: save|url|image (default: save)
  --gpu            Enable GPU support
  -h, --help       Show this help message
Examples:
  $0 --prompt "A beautiful sunset" --output sunset.jpg
  $0 --local --model flux.1-schnell --prompt "A forest" --gpu
EOF
}

# Parse arguments
while [[ "$#" -gt 0 ]]; do
    case $1 in
        --local) USE_LOCAL=true ;;
        --api-key) API_KEY="$2"; shift ;;
        --model) MODEL="$2"; shift ;;
        --prompt) PROMPT="$2"; shift ;;
        --output) OUTPUT="$2"; shift ;;
        --format) OUTPUT_FORMAT="$2"; shift ;;
        --gpu) GPU_SUPPORT="--gpus all" ;;
        -h|--help) usage; exit 0 ;;
        *) echo "Unknown parameter: $1"; usage; exit 1 ;;
    esac
    shift
done

# Validate required arguments
if [ -z "$PROMPT" ]; then
    echo "Error: --prompt is required"
    usage
    exit 1
fi

if [ "$USE_LOCAL" = true ] && [ -z "$MODEL" ]; then
    echo "Error: --model is required when using local mode"
    usage
    exit 1
fi

if [ "$USE_LOCAL" = false ] && [ -z "$API_KEY" ]; then
    echo "Error: --api-key is required when using API mode"
    usage
    exit 1
fi

# Ensure output directory exists
mkdir -p "$(dirname "$OUTPUT")"

# Build Docker command (relies on word splitting; paths containing spaces
# in the current directory are not supported)
DOCKER_CMD="docker run --rm ${GPU_SUPPORT} \
    -v $(pwd)/outputs:/app/outputs \
    -v $(pwd)/models:/app/models"

if [ "$USE_LOCAL" = false ]; then
    DOCKER_CMD="$DOCKER_CMD -e BFL_API_KEY=$API_KEY"
fi

# Execute Docker command
if [ "$USE_LOCAL" = true ]; then
    $DOCKER_CMD flux-project \
        --model "$MODEL" \
        --prompt "$PROMPT" \
        "$OUTPUT_FORMAT" "$OUTPUT"
else
    $DOCKER_CMD flux-project \
        --prompt "$PROMPT" \
        "$OUTPUT_FORMAT" "$OUTPUT"
fi
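For reference, a couple of hedged example invocations of the wrapper (the file name flux-cli.sh is an assumption; the script also falls back to the BFL_API_KEY environment variable when --api-key is omitted):

# API mode: the key is read from the environment if --api-key is not passed
chmod +x flux-cli.sh
export BFL_API_KEY="<your-api-key>"   # placeholder, not a real key
./flux-cli.sh --prompt "A beautiful sunset" --output outputs/sunset.jpg

# Local mode with GPU passthrough (assumes the flux-project image has been built)
./flux-cli.sh --local --model flux.1-schnell --prompt "A forest" --gpu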