#!/bin/bash
set -e

mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config

LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1

echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo

docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net

check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "🟢 Container '$name' is already running."
    return 0
  fi
  return 1
}

# Handle UPDATE (--update option passed) -----------------------------------------------------------------UPDATE SECTION BELOW
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating containers..."

  # jupyterlab
  if check_container_running "jupyterlab"; then
    echo "🧼 Updating JupyterLab..."
    docker stop jupyterlab
    docker rm jupyterlab
    docker pull jupyter/scipy-notebook:latest
    docker run -d \
      --name jupyterlab \
      --network ai-stack-net \
      --gpus all \
      -p 8888:8888 \
      -v jupyter_data:/home/jovyan/work \
      jupyter/scipy-notebook:latest
    echo "✅ JupyterLab updated."
  else
    echo "⚠️ JupyterLab is not running, skipping update."
  fi

  # pipelines
  if check_container_running "pipelines"; then
    echo "🧼 Updating Pipelines..."
    docker stop pipelines
    docker rm pipelines
    docker pull ghcr.io/open-webui/pipelines:main
    docker run -d \
      --name pipelines \
      --network ai-stack-net \
      -p 9099:9099 \
      -v ~/ai-stack/pipelines:/app/pipelines \
      --restart unless-stopped \
      ghcr.io/open-webui/pipelines:main
    echo "✅ Pipelines running at http://localhost:9099"
  else
    echo "⚠️ Pipelines is not running, skipping update."
  fi

  # open-webui
  if check_container_running "open-webui"; then
    echo "🧼 Updating Open WebUI..."
    docker stop open-webui
    docker rm open-webui
    docker pull ghcr.io/open-webui/open-webui:ollama
    docker run -d -p 3000:8080 --gpus all \
      -e OPENAI_API_BASE_URL=http://pipelines:9099 \
      -e OPENAI_API_KEY='0p3n-w3bu!' \
      -e OLLAMA_MAX_LOADED_MODELS=2 \
      -v ollama:/root/.ollama \
      -v open-webui:/app/backend/data \
      --name open-webui \
      --network ai-stack-net \
      --restart always \
      ghcr.io/open-webui/open-webui:ollama
    echo "✅ Open WebUI running at http://localhost:3000"
  else
    echo "⚠️ Open WebUI is not running, skipping update."
  fi

  # localai (GPU support)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
    docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
    docker run -d \
      --network ai-stack-net \
      --name localai \
      --gpus all \
      -p 8080:8080 \
      -v ~/ai-stack/localai/models:/build/models \
      -v ~/ai-stack/localai/config:/config \
      -e ENABLE_BACKENDS=llama-cuda,ollama \
      -e INCLUDE_DEFAULT_MODELS=true \
      -e AUTOLOAD_MODELS=true \
      -e MODEL_IDLE_TIMEOUT=900 \
      --restart unless-stopped \
      localai/localai:latest-aio-gpu-nvidia-cuda-12
    echo "✅ LocalAI running at http://localhost:8080"
  else
    echo "➕ LocalAI isn't running, must install first."
  fi

  # whisper-cpp
  echo "🔁 Updating Whisper.cpp (custom GPU build)..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker stop whispercpp-gpu 2>/dev/null || true
    docker rm whispercpp-gpu 2>/dev/null || true
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ Whisper.cpp GPU image rebuilt."
  else
    echo "⚠️ Whisper.cpp directory not found, skipping update."
  fi
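  # Optional sanity check: if LocalAI was restarted above, poll its
  # OpenAI-compatible /v1/models endpoint until it answers. Assumes curl is
  # installed on the host; the ~60s budget is an arbitrary choice, not a
  # LocalAI default.
  if check_container_running "localai" >/dev/null && command -v curl >/dev/null 2>&1; then
    for _ in $(seq 1 12); do
      if curl -fsS http://localhost:8080/v1/models >/dev/null 2>&1; then
        echo "🟢 LocalAI is answering on http://localhost:8080"
        break
      fi
      sleep 5
    done
  fi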
  # stable-diffusion
  echo "🔁 Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker build -t a1111-webui .
    docker run -d --gpus all \
      --name stable-diffusion \
      --network ai-stack-net \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion WebUI updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found, skipping update."
  fi

  # tg-webui
  echo "🔁 Updating Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  if [ -d "$TG_DIR" ]; then
    cd "$TG_DIR"
    git pull origin main
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
      --network ai-stack-net \
      -p 7861:7861 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
  else
    echo "⚠️ Text-generation-webui directory not found, skipping update."
  fi

  # comfyui
  echo "🔁 Updating ComfyUI (custom build)..."
  COMFYUI_DIR=~/ai-stack/comfyui
  if [ -d "$COMFYUI_DIR" ]; then
    cd "$COMFYUI_DIR"
    git pull origin master
    docker stop comfyui 2>/dev/null || true
    docker rm comfyui 2>/dev/null || true
    docker build -t comfyui-custom .
    docker run -d --gpus all \
      --name comfyui \
      --network ai-stack-net \
      -p 8188:8188 \
      -v ~/ai-stack/comfyui/workspace:/app/output \
      comfyui-custom
    echo "✅ ComfyUI updated and restarted."
  else
    echo "⚠️ ComfyUI directory not found, skipping update."
  fi

  echo "✅ Update complete!"
  exit 0
fi

# Function to clean up an existing container by name (kept for ad-hoc use)
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}

# Handle INSTALL (no --update passed) ------------------------------------------------------------------------INSTALL SECTION BELOW

# open-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "open-webui"; then
    echo "✅ Skipping Open WebUI + Ollama install (already running)."
  else
    read -p "➕ Install Open WebUI + Ollama? [y/N]: " openui
    if [[ "$openui" =~ ^[Yy]$ ]]; then
      echo "🧠 Setting up Open WebUI + Ollama..."
      docker stop open-webui 2>/dev/null || true
      docker rm open-webui 2>/dev/null || true
      docker run -d -p 3000:8080 --gpus all \
        -e OPENAI_API_BASE_URL=http://pipelines:9099 \
        -e OPENAI_API_KEY='0p3n-w3bu!' \
        -e OLLAMA_MAX_LOADED_MODELS=2 \
        -v ollama:/root/.ollama \
        -v open-webui:/app/backend/data \
        --name open-webui \
        --network ai-stack-net \
        --restart always \
        ghcr.io/open-webui/open-webui:ollama
      echo "🌐 Open WebUI running at http://localhost:3000"
    fi
  fi
fi
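# Example: pre-pull a model into the bundled Ollama so it's available in the UI
# right away. "llama3" is only an illustrative tag; browse https://ollama.com/library
# for real options. Left commented out so the script never downloads models unasked:
#   docker exec open-webui ollama pull llama3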
# jupyterlab INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "jupyterlab"; then
    echo "✅ Skipping JupyterLab install (already running)."
  else
    read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
    if [[ "$jupyter" =~ ^[Yy]$ ]]; then
      echo "📔 Installing JupyterLab..."
      docker stop jupyterlab 2>/dev/null || true
      docker rm jupyterlab 2>/dev/null || true
      docker run -d \
        --name jupyterlab \
        --network ai-stack-net \
        --gpus all \
        -p 8888:8888 \
        -v jupyter_data:/home/jovyan/work \
        jupyter/scipy-notebook:latest
      echo "🔐 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
      mkdir -p ~/ai-stack
      sleep 5  # give the server a moment to log its token
      docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
    fi
  fi
fi

# whisper-cpp-gpu custom INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "whispercpp-gpu"; then
    echo "✅ Skipping Whisper.cpp install (already running)."
  else
    read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
    if [[ "$whisper" =~ ^[Yy]$ ]]; then
      echo "🔊 Cloning and building Whisper.cpp with GPU support..."
      WHISPER_DIR=~/ai-stack/whisper.cpp
      mkdir -p "$WHISPER_DIR"
      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
      # Minimal CUDA build sketch: assumes the cmake-based whisper.cpp build with
      # GGML_CUDA (older revisions used `make WHISPER_CUBLAS=1`); adjust to your checkout.
      cat > "$WHISPER_DIR/Dockerfile.gpu" <<'EOF'
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y git build-essential cmake ffmpeg && \
    apt-get clean
WORKDIR /app
COPY . /app
RUN cmake -B build -DGGML_CUDA=1 && cmake --build build --config Release -j
CMD ["/bin/bash"]
EOF
      cd "$WHISPER_DIR"
      docker build -t whispercpp-gpu -f Dockerfile.gpu .
      echo "✅ Whisper.cpp GPU image built (run it ad hoc for transcriptions)."
    fi
  fi
fi

# stable-diffusion INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "✅ Skipping Stable Diffusion install (already running)."
  else
    read -p "➕ Install Stable Diffusion WebUI (AUTOMATIC1111, custom Docker build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "🎨 Cloning and building Stable Diffusion WebUI..."
      SD_DIR=~/ai-stack/stable-diffusion
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"
      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", "python3 launch.py --listen --xformers --api"]
EOF
      echo "🐳 Building Docker image 'a1111-webui'..."
      docker stop stable-diffusion 2>/dev/null || true
      docker rm stable-diffusion 2>/dev/null || true
      docker build -t a1111-webui .
      echo "📂 Mounting model directory from ~/ai-stack/sd-models"
      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs
      echo "🚀 Running Stable Diffusion WebUI on port 7860..."
      docker run -d --gpus all \
        --name stable-diffusion \
        --network ai-stack-net \
        -p 7860:7860 \
        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
        a1111-webui
      echo "🌐 Access at http://localhost:7860"
    fi
  fi
fi
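# Example: drop a checkpoint into the mounted model directory so the WebUI has
# something to load on first start. The URL below is a placeholder; substitute
# whichever .safetensors checkpoint you actually want:
#   wget -O ~/ai-stack/sd-models/model.safetensors \
#     "https://example.com/path/to/checkpoint.safetensors"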
# tg-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "✅ Skipping Text-generation-webui install (already running)."
  else
    read -p "➕ Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "🧠 Cloning and building Text-generation-webui..."
      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"
      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y git python3 python3-pip build-essential && \
    apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python3 server.py --listen --listen-port 7861 --api"]
EOF
      echo "🐳 Building Docker image 'tg-webui'..."
      docker stop tg-webui 2>/dev/null || true
      docker rm tg-webui 2>/dev/null || true
      docker build -t tg-webui .
      echo "📂 Mounting model directory from ~/ai-stack/tg-models"
      mkdir -p ~/ai-stack/tg-models
      echo "🚀 Running text-generation-webui on port 7861..."
      docker run -d --gpus all \
        --name tg-webui \
        --network ai-stack-net \
        -p 7861:7861 \
        -v ~/ai-stack/tg-models:/app/models \
        tg-webui
      echo "🌐 Access at http://localhost:7861"
    fi
  fi
fi

# comfyui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "comfyui"; then
    echo "✅ Skipping ComfyUI install (already running)."
  else
    read -p "➕ Install ComfyUI (custom Docker build)? [y/N]: " comfy
    if [[ "$comfy" =~ ^[Yy]$ ]]; then
      echo "🧩 Cloning and building ComfyUI..."
      COMFYUI_DIR=~/ai-stack/comfyui
      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
      cd "$COMFYUI_DIR"
      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && pip install -r requirements.txt
EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF
      echo "🐳 Building Docker image 'comfyui-custom'..."
      docker stop comfyui 2>/dev/null || true
      docker rm comfyui 2>/dev/null || true
      docker build -t comfyui-custom .
      echo "📂 Preparing workspace volume..."
      mkdir -p ~/ai-stack/comfyui/workspace
      echo "🚀 Running ComfyUI on port 8188..."
      docker run -d --gpus all \
        --name comfyui \
        --network ai-stack-net \
        -p 8188:8188 \
        -v ~/ai-stack/comfyui/workspace:/app/output \
        comfyui-custom
      echo "🌐 ComfyUI running at http://localhost:8188"
    fi
  fi
fi

# localai INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
  else
    read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config
      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
      docker run -d \
        --name localai \
        --gpus all \
        --network ai-stack-net \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/build/models \
        -v ~/ai-stack/localai/config:/config \
        -e ENABLE_BACKENDS=llama-cuda,ollama \
        -e INCLUDE_DEFAULT_MODELS=true \
        -e AUTOLOAD_MODELS=true \
        -e MODEL_IDLE_TIMEOUT=600 \
        --restart unless-stopped \
        localai/localai:latest-aio-gpu-nvidia-cuda-12
      echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi

# pipelines INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "pipelines"; then
    echo "✅ Skipping Pipelines install (already running)."
  else
    read -p "➕ Install Pipelines for Open WebUI? [y/N]: " pipelines_prompt
    if [[ "$pipelines_prompt" =~ ^[Yy]$ ]]; then
      mkdir -p ~/ai-stack/pipelines
      docker run -d \
        --name pipelines \
        --network ai-stack-net \
        -p 9099:9099 \
        -v ~/ai-stack/pipelines:/app/pipelines \
        --restart unless-stopped \
        ghcr.io/open-webui/pipelines:main
      echo "🌐 Pipelines running at http://localhost:9099"
    fi
  fi
fi

echo "✅ AI stack setup complete."
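# Quick post-install status summary (read-only): reuses check_container_running
# from above. Services you skipped show as "not running"; whisper.cpp only
# builds an image, so it normally shows as not running too.
for svc in open-webui jupyterlab whispercpp-gpu stable-diffusion tg-webui comfyui localai pipelines; do
  if check_container_running "$svc" >/dev/null; then
    echo "🟢 $svc: running"
  else
    echo "⚪ $svc: not running"
  fi
done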