#!/bin/bash
set -e

mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1

echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo

# Containers to manage
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")

# Handle update option
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating running containers..."
  for name in "${CONTAINERS[@]}"; do
    if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
      echo "🧼 Updating $name..."
      docker pull "$(docker inspect --format='{{.Config.Image}}' "$name")"
      docker stop "$name"
      docker rm "$name"
    else
      echo "⚠️ Container $name is not running, skipping."
    fi
  done
  echo "✅ Update complete. Rerun the script without --update to restart containers."
  exit 0
fi

check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "🟢 Container '$name' is already running."
    return 0
  fi
  return 1
}

# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}

# 🧠 Open WebUI + Ollama
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
  -v ollama:/root/.ollama \
  -v open-webui:/app/backend/data \
  --name open-webui \
  --restart always \
  ghcr.io/open-webui/open-webui:ollama

# ❓ JupyterLab
read -p "❓ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
  echo "📔 Installing JupyterLab..."
  clean_container jupyterlab
  docker run -d \
    --name jupyterlab \
    --gpus all \
    -p 8888:8888 \
    -v jupyter_data:/home/jovyan/work \
    jupyter/scipy-notebook:latest
  echo "🔑 Jupyter token:"
  # Give the container a moment to start so the token shows up in its logs
  sleep 5
  docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
  echo "🔐 Token saved to ~/ai-stack/jupyter_token.txt"
fi

# ❓ Whisper.cpp with GPU Support (Build Locally)
read -p "❓ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
  echo "📥 Cloning and building Whisper.cpp with GPU support..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  mkdir -p ~/ai-stack
  git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

  cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
# The devel image is required: the base image has no CUDA compiler (nvcc),
# so a cuBLAS-enabled build would fail.
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04

RUN apt-get update && \\
    apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
    apt-get clean

WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp

# Makefile build with cuBLAS enabled; this produces ./main in the repo root.
# The flag name varies across whisper.cpp releases (newer ones use GGML_CUDA).
RUN WHISPER_CUBLAS=1 make -j

CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

  echo "🐳 Building Docker image 'whispercpp-gpu'..."
  docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"

  echo "📁 Setting up input folder at ~/ai-stack/audio"
  mkdir -p ~/ai-stack/audio

  echo "✅ To run whisper on a file (mount the host models folder so the container can see your downloaded model):"
  echo "docker run --rm --gpus all -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"

  echo "📢 Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
  echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
fi

if [[ "$1" == "--update" ]]; then
  # ...existing container update loop
  echo "🔄 Updating whisper.cpp..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ whispercpp-gpu rebuilt with latest source."
  else
    echo "⚠️ whisper.cpp not found at $WHISPER_DIR, skipping."
  fi
fi
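# Optional, not run automatically: instead of downloading a model by hand,
# whisper.cpp ships a helper script that fetches ggml models into models/.
# "large-v3" is only an example name; available models depend on the
# checked-out whisper.cpp version.
#
#   bash ~/ai-stack/whisper.cpp/models/download-ggml-model.sh large-v3
#
# The run command echoed above then works as printed, with -m pointing at
# models/ggml-large-v3.bin (or whichever model was downloaded).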
# ❓ Stable Diffusion (AUTOMATIC1111 custom GPU build)
read -p "❓ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
  echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  mkdir -p ~/ai-stack
  git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
  cd "$SD_DIR"

  cat > ~/ai-stack/stable-diffusion/Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

# Install dependencies
RUN apt-get update && \
    apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

# Create venv and install deps
RUN python3 -m venv venv && \
    . venv/bin/activate && \
    pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt

EXPOSE 7860

CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]
EOF

  echo "🐳 Building Docker image 'a1111-webui'..."
  docker stop stable-diffusion 2>/dev/null || true
  docker rm stable-diffusion 2>/dev/null || true
  docker build -t a1111-webui .

  echo "📁 Mounting model directory from ~/ai-stack/sd-models"
  mkdir -p ~/ai-stack/sd-models

  echo "🚀 Starting Stable Diffusion WebUI..."
  # The app lives at /app inside the image (see WORKDIR above), so the model
  # and output mounts must target /app, not /stable-diffusion-webui.
  docker run -d --gpus all \
    --name stable-diffusion \
    -p 7860:7860 \
    -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
    -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
    a1111-webui

  echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
fi

if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker build -t a1111-webui .
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker run -d --gpus all \
      --name stable-diffusion \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found, skipping update."
  fi
fi
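# Optional smoke test (not run automatically): because launch.py is started
# with --api, AUTOMATIC1111 exposes a REST API once the UI has finished
# loading. The prompt text and step count below are arbitrary examples; the
# exact request/response schema depends on the WebUI version (see
# http://localhost:7860/docs).
#
#   curl -s http://localhost:7860/sdapi/v1/txt2img \
#     -H 'Content-Type: application/json' \
#     -d '{"prompt": "a lighthouse at sunset", "steps": 20}'
#
# The response is JSON with base64-encoded images in the "images" field.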
# ❓ Text-generation-webui (custom Docker build with GPU + API)
read -p "❓ Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
  echo "🧪 Cloning and building Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
  cd "$TG_DIR"

  echo "📝 Creating Dockerfile for build..."
  cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip build-essential && \
    apt-get clean

WORKDIR /app
COPY . /app

# Install dependencies
RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)

# Gradio listens on 7860 inside the container by default
EXPOSE 7860

CMD ["/bin/bash", "-c", "python3 server.py --listen --api"]
EOF

  echo "🐳 Building Docker image 'tg-webui'..."
  docker stop tg-webui 2>/dev/null || true
  docker rm tg-webui 2>/dev/null || true
  docker build -t tg-webui .

  echo "📁 Mounting model directory from ~/ai-stack/tg-models"
  mkdir -p ~/ai-stack/tg-models

  echo "🚀 Running text-generation-webui on port 7861..."
  # Map host port 7861 to the container's default Gradio port 7860
  docker run -d --gpus all \
    --name tg-webui \
    -p 7861:7860 \
    -v ~/ai-stack/tg-models:/app/models \
    tg-webui

  echo "🌐 Access at http://localhost:7861"
fi

if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  if [ -d "$TG_DIR" ]; then
    cd "$TG_DIR"
    git pull
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
      -p 7861:7860 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
  else
    echo "⚠️ Text-generation-webui directory not found, skipping update."
  fi
fi

# ❓ ComfyUI (Docker - graph-based Stable Diffusion)
read -p "❓ Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
  echo "🧩 Installing ComfyUI (Docker)..."
  clean_container comfyui
  docker run -d --gpus all \
    --name comfyui \
    -p 8188:8188 \
    -v comfyui_data:/workspace \
    ghcr.io/cmdr2/comfyui:latest
  echo "🌐 ComfyUI running at http://localhost:8188"
fi

echo "✅ AI stack setup complete."
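# Optional post-setup check (not run automatically): list the containers
# started by this script together with their status and published ports.
# Only the components you chose to install above will appear.
#
#   docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}' | \
#     grep -E 'NAMES|open-webui|jupyterlab|stable-diffusion|tg-webui|comfyui'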