commit c4fe4a522faa63c5342abfbf3852aa54c638e261
Author: crowetic
Date:   Sat May 17 13:54:15 2025 -0700

    Initial commit

diff --git a/backups/setup-ai-stack.sh b/backups/setup-ai-stack.sh
new file mode 100644
index 0000000..5e0c9c8
--- /dev/null
+++ b/backups/setup-ai-stack.sh
@@ -0,0 +1,286 @@
+#!/bin/bash
+set -e
+
+mkdir -p ~/ai-stack
+LOG_FILE=~/ai-stack/setup.log
+exec > >(tee -a "$LOG_FILE") 2>&1
+
+echo "📦 AI Stack Setup Script"
+echo "Log: $LOG_FILE"
+echo
+
+# Containers to manage
+CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")
+
+# Handle update option
+if [[ "$1" == "--update" ]]; then
+  echo "🔄 Updating running containers..."
+  for name in "${CONTAINERS[@]}"; do
+    if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
+      echo "🧼 Updating $name..."
+      docker pull "$(docker inspect --format='{{.Config.Image}}' $name)"
+      docker stop $name
+      docker rm $name
+    else
+      echo "⚠️ Container $name is not running, skipping."
+    fi
+  done
+  echo "✅ Update complete. Rerun the script without --update to restart containers."
+  exit 0
+fi
+
+check_container_running() {
+  local name="$1"
+  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
+    echo "🟢 Container '$name' is already running."
+    return 0
+  fi
+  return 1
+}
+
+# Function to clean up existing container
+clean_container() {
+  local name=$1
+  docker rm -f "$name" 2>/dev/null || true
+}
+
+# 🧠 Open WebUI + Ollama
+echo "🧠 Setting up Open WebUI + Ollama..."
+clean_container open-webui
+docker run -d -p 3000:8080 --gpus=all \
+  -v ollama:/root/.ollama \
+  -v open-webui:/app/backend/data \
+  --name open-webui \
+  --restart always \
+  ghcr.io/open-webui/open-webui:ollama
+
+# ➕ JupyterLab
+read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
+if [[ "$jupyter" =~ ^[Yy]$ ]]; then
+  echo "📔 Installing JupyterLab..."
+  clean_container jupyterlab
+  docker run -d \
+    --name jupyterlab \
+    --gpus all \
+    -p 8888:8888 \
+    -v jupyter_data:/home/jovyan/work \
+    jupyter/scipy-notebook:latest
+
+  echo "🔐 Jupyter token:"
+  docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
+  echo "📝 Token saved to ~/ai-stack/jupyter_token.txt"
+fi
+
+# ➕ Whisper.cpp with GPU Support (Build Locally)
+read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
+if [[ "$whisper" =~ ^[Yy]$ ]]; then
+  echo "🔊 Cloning and building Whisper.cpp with GPU support..."
+
+  WHISPER_DIR=~/ai-stack/whisper.cpp
+  mkdir -p ~/ai-stack
+  git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
+
+  cat > "$WHISPER_DIR/Dockerfile.gpu" < ~/ai-stack/stable-diffusion/Dockerfile <<'EOF'
+FROM nvidia/cuda:12.2.0-base-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
+    apt-get clean
+
+WORKDIR /app
+COPY . /app
+
+# Create venv and install deps
+RUN python3 -m venv venv && \
+    . venv/bin/activate && \
+    pip install --upgrade pip && \
+    pip install -r requirements_versions.txt && \
+    pip install -r requirements.txt
+
+EXPOSE 7860
+
+CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]
+
+EOF
+  echo "🐳 Building Docker image 'a1111-webui'..."
+  docker stop stable-diffusion 2>/dev/null || true
+  docker rm stable-diffusion 2>/dev/null || true
+  docker build -t a1111-webui .
+
+  echo "📂 Mounting model directory from ~/ai-stack/sd-models"
+  mkdir -p ~/ai-stack/sd-models
+
+  echo "🚀 Starting Stable Diffusion WebUI..."
+  docker run -d --gpus all \
+    --name stable-diffusion \
+    -p 7860:7860 \
+    -v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
+    -v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
+    a1111-webui
+
+  echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
+fi
+
+if [[ "$1" == "--update" ]]; then
+  echo "🔁 Updating Stable Diffusion WebUI..."
+  SD_DIR=~/ai-stack/stable-diffusion
+  if [ -d "$SD_DIR" ]; then
+    cd "$SD_DIR"
+    git pull origin master
+    docker build -t a1111-webui .
+    docker stop stable-diffusion 2>/dev/null || true
+    docker rm stable-diffusion 2>/dev/null || true
+    docker run -d --gpus all \
+      --name stable-diffusion \
+      -p 7860:7860 \
+      -v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
+      -v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
+      a1111-webui
+    echo "✅ Stable Diffusion updated and restarted."
+  else
+    echo "⚠️ Stable Diffusion directory not found — skipping update."
+  fi
+fi
+
+# ➕ Text-generation-webui (custom Docker build with GPU + API)
+read -p "➕ Install Text-generation-webui? [y/N]: " tg
+if [[ "$tg" =~ ^[Yy]$ ]]; then
+  echo "🧪 Cloning and building Text-generation-webui..."
+
+  TG_DIR=~/ai-stack/tg-webui
+  git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
+  cd "$TG_DIR"
+
+  echo "📝 Creating Dockerfile for build..."
+  cat > Dockerfile <<'EOF'
+FROM nvidia/cuda:12.2.0-base-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y git python3 python3-pip build-essential && \
+    apt-get clean
+
+WORKDIR /app
+COPY . /app
+
+# Install dependencies
+RUN pip install --upgrade pip && \
+    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
+
+EXPOSE 7861
+
+CMD ["/bin/bash", "-c", "python server.py --listen --api"]
+EOF
+
+  echo "🐳 Building Docker image 'tg-webui'..."
+  docker stop tg-webui 2>/dev/null || true
+  docker rm tg-webui 2>/dev/null || true
+  docker build -t tg-webui .
+
+  echo "📂 Mounting model directory from ~/ai-stack/tg-models"
+  mkdir -p ~/ai-stack/tg-models
+
+  echo "🚀 Running text-generation-webui on port 7861..."
+  docker run -d --gpus all \
+    --name tg-webui \
+    -p 7861:7861 \
+    -v ~/ai-stack/tg-models:/app/models \
+    tg-webui
+
+  echo "🌐 Access at http://localhost:7861"
+fi
+
+echo "🔁 Updating Text-generation-webui..."
+TG_DIR=~/ai-stack/tg-webui
+if [ -d "$TG_DIR" ]; then
+  cd "$TG_DIR"
+  git pull origin master
+  docker stop tg-webui 2>/dev/null || true
+  docker rm tg-webui 2>/dev/null || true
+  docker build -t tg-webui .
+  docker run -d --gpus all \
+    --name tg-webui \
+    -p 7861:7861 \
+    -v ~/ai-stack/tg-models:/app/models \
+    tg-webui
+  echo "✅ Text-generation-webui updated and restarted."
+else
+  echo "⚠️ Text-generation-webui directory not found — skipping update."
+fi
+
+# ➕ ComfyUI (Docker - graph-based Stable Diffusion)
+read -p "➕ Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
+if [[ "$comfy" =~ ^[Yy]$ ]]; then
+  echo "🧩 Installing ComfyUI (Docker)..."
+  clean_container comfyui
+  docker run -d --gpus all \
+    --name comfyui \
+    -p 8188:8188 \
+    -v comfyui_data:/workspace \
+    ghcr.io/cmdr2/comfyui:latest
+  echo "🌐 ComfyUI running at http://localhost:8188"
+fi
+
+echo "✅ AI stack setup complete."
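
Both copies of setup-ai-stack.sh are invoked the same way; a typical run of the current script (further below), assuming Docker and the NVIDIA container toolkit from setup-nvidia-toolkit.sh are already installed, would look like:

  chmod +x setup-ai-stack.sh
  ./setup-ai-stack.sh            # first-time install, prompts y/N per component
  ./setup-ai-stack.sh --update   # pull/rebuild images and restart components already present
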
diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
new file mode 100644
index 0000000..3c2906a
--- /dev/null
+++ b/setup-ai-stack.sh
@@ -0,0 +1,420 @@
+#!/bin/bash
+set -e
+
+mkdir -p ~/ai-stack
+LOG_FILE=~/ai-stack/setup.log
+exec > >(tee -a "$LOG_FILE") 2>&1
+
+echo "📦 AI Stack Setup Script"
+echo "Log: $LOG_FILE"
+echo
+
+check_container_running() {
+  local name="$1"
+  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
+    echo "🟢 Container '$name' is already running."
+    return 0
+  fi
+  return 1
+}
+
+# Handle update option
+if [[ "$1" == "--update" ]]; then
+  echo "🔄 Updating containers..."
+
+  # JupyterLab
+  if check_container_running "jupyterlab"; then
+    echo "🧼 Updating JupyterLab..."
+    docker stop jupyterlab
+    docker rm jupyterlab
+    docker pull jupyter/scipy-notebook:latest
+    docker run -d \
+      --name jupyterlab \
+      --gpus all \
+      -p 8888:8888 \
+      -v jupyter_data:/home/jovyan/work \
+      jupyter/scipy-notebook:latest
+    echo "✅ JupyterLab updated."
+  else
+    echo "⚠️ JupyterLab is not running, skipping update."
+  fi
+
+  # Whisper (whisper.cpp)
+  if check_container_running "whisper"; then
+    echo "🧼 Updating Whisper..."
+    docker stop whisper
+    docker rm whisper
+    docker pull ggerganov/whisper.cpp:latest
+    docker run -d \
+      --name whisper \
+      --gpus all \
+      -v whisper_data:/app/data \
+      ggerganov/whisper.cpp:latest
+    echo "✅ Whisper updated."
+  else
+    echo "⚠️ Whisper is not running, skipping update."
+  fi
+
+  # Stable Diffusion (A1111)
+  if check_container_running "stable-diffusion"; then
+    echo "🧼 Updating Stable Diffusion..."
+    docker stop stable-diffusion
+    docker rm stable-diffusion
+    docker pull abraham-ai/automatic1111-webui:latest
+    docker run -d \
+      --name stable-diffusion \
+      --gpus all \
+      -p 7860:7860 \
+      -v sd_models:/data \
+      abraham-ai/automatic1111-webui:latest
+    echo "✅ Stable Diffusion updated."
+  else
+    echo "⚠️ Stable Diffusion is not running, skipping update."
+  fi
+
+  echo "🔁 Updating ComfyUI..."
+  docker stop comfyui 2>/dev/null || true
+  docker rm comfyui 2>/dev/null || true
+  docker pull ghcr.io/cmdr2/comfyui:latest
+  docker run -d --gpus all \
+    --name comfyui \
+    -p 8188:8188 \
+    -v comfyui_data:/workspace \
+    ghcr.io/cmdr2/comfyui:latest
+  echo "✅ ComfyUI updated and restarted."
+
+  echo "🔁 Updating Whisper.cpp (custom GPU build)..."
+  WHISPER_DIR=~/ai-stack/whisper.cpp
+  if [ -d "$WHISPER_DIR" ]; then
+    cd "$WHISPER_DIR"
+    git pull origin master
+    docker stop whispercpp-gpu 2>/dev/null || true
+    docker rm whispercpp-gpu 2>/dev/null || true
+    docker build -t whispercpp-gpu -f Dockerfile.gpu .
+    echo "✅ Whisper.cpp GPU image rebuilt."
+  else
+    echo "⚠️ Whisper.cpp directory not found — skipping update."
+  fi
+
+  echo "🔁 Updating Stable Diffusion WebUI..."
+  SD_DIR=~/ai-stack/stable-diffusion
+  if [ -d "$SD_DIR" ]; then
+    cd "$SD_DIR"
+    git pull origin master
+    docker stop stable-diffusion 2>/dev/null || true
+    docker rm stable-diffusion 2>/dev/null || true
+    docker build -t a1111-webui .
+    docker run -d --gpus all \
+      --name stable-diffusion \
+      -p 7860:7860 \
+      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
+      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
+      a1111-webui
+    echo "✅ Stable Diffusion WebUI updated and restarted."
+  else
+    echo "⚠️ Stable Diffusion directory not found — skipping update."
+  fi
+
+  echo "🔁 Updating Text-generation-webui..."
+  TG_DIR=~/ai-stack/tg-webui
+  if [ -d "$TG_DIR" ]; then
+    cd "$TG_DIR"
+    git pull origin master
+    docker stop tg-webui 2>/dev/null || true
+    docker rm tg-webui 2>/dev/null || true
+    docker build -t tg-webui .
+    docker run -d --gpus all \
+      --name tg-webui \
+      -p 7861:7861 \
+      -v ~/ai-stack/tg-models:/app/models \
+      tg-webui
+    echo "✅ Text-generation-webui updated and restarted."
+  else
+    echo "⚠️ Text-generation-webui directory not found — skipping update."
+  fi
+
+  echo "🔁 Updating ComfyUI (custom build)..."
+  COMFYUI_DIR=~/ai-stack/comfyui
+  if [ -d "$COMFYUI_DIR" ]; then
+    cd "$COMFYUI_DIR"
+    git pull origin master
+    docker stop comfyui 2>/dev/null || true
+    docker rm comfyui 2>/dev/null || true
+    docker build -t comfyui-custom .
+    docker run -d --gpus all \
+      --name comfyui \
+      -p 8188:8188 \
+      -v ~/ai-stack/comfyui/workspace:/app/output \
+      comfyui-custom
+    echo "✅ ComfyUI updated and restarted."
+  else
+    echo "⚠️ ComfyUI directory not found — skipping update."
+  fi
+
+  echo "✅ Update complete!"
+  exit 0
+fi
+
+# Function to clean up existing container
+clean_container() {
+  local name=$1
+  docker rm -f "$name" 2>/dev/null || true
+}
+
+if [[ "$1" != "--update" ]]; then
+  if check_container_running "open-webui"; then
+    echo "✅ Skipping Open WebUI + Ollama install (already running)."
+  else
+    read -p "➕ Install Open WebUI + Ollama? [y/N]: " openui
+    if [[ "$openui" =~ ^[Yy]$ ]]; then
+      echo "🧠 Setting up Open WebUI + Ollama..."
+      docker stop open-webui 2>/dev/null || true
+      docker rm open-webui 2>/dev/null || true
+      docker run -d -p 3000:8080 --gpus all \
+        -v ollama:/root/.ollama \
+        -v open-webui:/app/backend/data \
+        --name open-webui \
+        --restart always \
+        ghcr.io/open-webui/open-webui:ollama
+      echo "🌐 Open WebUI running at http://localhost:3000"
+    fi
+  fi
+fi
+
+if [[ "$1" != "--update" ]]; then
+  if check_container_running "jupyterlab"; then
+    echo "✅ Skipping JupyterLab install (already running)."
+  else
+    read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
+    if [[ "$jupyter" =~ ^[Yy]$ ]]; then
+      echo "📔 Installing JupyterLab..."
+      docker stop jupyterlab 2>/dev/null || true
+      docker rm jupyterlab 2>/dev/null || true
+      docker run -d \
+        --name jupyterlab \
+        --gpus all \
+        -p 8888:8888 \
+        -v jupyter_data:/home/jovyan/work \
+        jupyter/scipy-notebook:latest
+      echo "🔐 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
+      mkdir -p ~/ai-stack
+      docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
+    fi
+  fi
+fi
+
+if [[ "$1" != "--update" ]]; then
+  if check_container_running "whispercpp-gpu"; then
+    echo "✅ Skipping Whisper.cpp install (already running)."
+  else
+    read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
+    if [[ "$whisper" =~ ^[Yy]$ ]]; then
+      echo "🔊 Cloning and building Whisper.cpp with GPU support..."
+
+      WHISPER_DIR=~/ai-stack/whisper.cpp
+      mkdir -p "$WHISPER_DIR"
+      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
+
+      cat > "$WHISPER_DIR/Dockerfile.gpu" < Dockerfile <<'EOF'
+FROM nvidia/cuda:12.2.0-base-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
+    apt-get clean
+
+WORKDIR /app
+COPY . /app
+
+RUN pip install --upgrade pip && \
+    pip install -r requirements_versions.txt && \
+    pip install -r requirements.txt
+
+EXPOSE 7860
+CMD ["/bin/bash", "-c", "python3 launch.py --listen --xformers --api"]
+EOF
+
+      echo "🐳 Building Docker image 'a1111-webui'..."
+      docker stop stable-diffusion 2>/dev/null || true
+      docker rm stable-diffusion 2>/dev/null || true
+      docker build -t a1111-webui .
+
+      echo "📂 Mounting model directory from ~/ai-stack/sd-models"
+      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs
+
+      echo "🚀 Running Stable Diffusion WebUI on port 7860..."
+      docker run -d --gpus all \
+        --name stable-diffusion \
+        -p 7860:7860 \
+        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
+        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
+        a1111-webui
+
+      echo "🌐 Access at http://localhost:7860"
+    fi
+  fi
+fi
+
+if [[ "$1" != "--update" ]]; then
+  if check_container_running "tg-webui"; then
+    echo "✅ Skipping Text-generation-webui install (already running)."
+  else
+    read -p "➕ Install Text-generation-webui? [y/N]: " tg
+    if [[ "$tg" =~ ^[Yy]$ ]]; then
+      echo "🧠 Cloning and building Text-generation-webui..."
+
+      TG_DIR=~/ai-stack/tg-webui
+      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
+      cd "$TG_DIR"
+
+      echo "📄 Writing Dockerfile..."
+      cat > Dockerfile <<'EOF'
+FROM nvidia/cuda:12.2.0-base-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y git python3 python3-pip build-essential && \
+    apt-get clean
+
+WORKDIR /app
+COPY . /app
+
+RUN pip install --upgrade pip && \
+    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
+
+EXPOSE 7861
+CMD ["/bin/bash", "-c", "python3 server.py --listen --listen-port 7861 --api"]
+EOF
+
+      echo "🐳 Building Docker image 'tg-webui'..."
+      docker stop tg-webui 2>/dev/null || true
+      docker rm tg-webui 2>/dev/null || true
+      docker build -t tg-webui .
+
+      echo "📂 Mounting model directory from ~/ai-stack/tg-models"
+      mkdir -p ~/ai-stack/tg-models
+
+      echo "🚀 Running text-generation-webui on port 7861..."
+      docker run -d --gpus all \
+        --name tg-webui \
+        -p 7861:7861 \
+        -v ~/ai-stack/tg-models:/app/models \
+        tg-webui
+
+      echo "🌐 Access at http://localhost:7861"
+    fi
+  fi
+fi
+
+if [[ "$1" != "--update" ]]; then
+  if check_container_running "comfyui"; then
+    echo "✅ Skipping ComfyUI install (already running)."
+  else
+    read -p "➕ Install ComfyUI (custom Docker build)? [y/N]: " comfy
+    if [[ "$comfy" =~ ^[Yy]$ ]]; then
+      echo "🧩 Cloning and building ComfyUI..."
+
+      COMFYUI_DIR=~/ai-stack/comfyui
+      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
+      cd "$COMFYUI_DIR"
+
+      echo "📄 Writing Dockerfile..."
+      cat > Dockerfile <<'EOF'
+FROM nvidia/cuda:12.2.0-base-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
+    apt-get clean
+
+WORKDIR /app
+COPY . /app
+
+RUN pip install --upgrade pip && pip install -r requirements.txt
+
+EXPOSE 8188
+CMD ["python3", "main.py", "--listen", "--port", "8188"]
+EOF
+
+      echo "🐳 Building Docker image 'comfyui-custom'..."
+      docker stop comfyui 2>/dev/null || true
+      docker rm comfyui 2>/dev/null || true
+      docker build -t comfyui-custom .
+
+      echo "📂 Preparing workspace volume..."
+      mkdir -p ~/ai-stack/comfyui/workspace
+
+      echo "🚀 Running ComfyUI on port 8188..."
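+      # Note: the image copies the ComfyUI checkout to /app, so generated images are
+      # written to /app/output, which the bind mount below maps to ~/ai-stack/comfyui/workspace.
+      # Model checkpoints are not mounted by this script; an additional bind such as
+      # -v ~/ai-stack/comfyui/models:/app/models/checkpoints could be added if needed (assumption).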
+      docker run -d --gpus all \
+        --name comfyui \
+        -p 8188:8188 \
+        -v ~/ai-stack/comfyui/workspace:/app/output \
+        comfyui-custom
+
+      echo "🌐 ComfyUI running at http://localhost:8188"
+    fi
+  fi
+fi
+
+echo "✅ AI stack setup complete."

diff --git a/setup-nvidia-toolkit.sh b/setup-nvidia-toolkit.sh
new file mode 100644
index 0000000..2611e6c
--- /dev/null
+++ b/setup-nvidia-toolkit.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Set up the repository
+curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
+  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
+    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
+    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+
+# Install the toolkit
+sudo apt update
+sudo apt install -y nvidia-container-toolkit
+
+# Configure Docker to use the nvidia runtime
+sudo nvidia-ctk runtime configure --runtime=docker
+
+# Restart Docker
+sudo systemctl restart docker

diff --git a/start-ai-stack.sh b/start-ai-stack.sh
new file mode 100644
index 0000000..b99924c
--- /dev/null
+++ b/start-ai-stack.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -e
+
+echo "🚀 Starting AI stack containers..."
+
+start_container() {
+  local name=$1
+
+  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
+    echo "✅ $name already running."
+  elif docker container ls -a --format '{{.Names}}' | grep -q "^$name$"; then
+    echo "🔁 Starting $name..."
+    docker start "$name"
+  else
+    echo "❌ $name not found — skipping."
+  fi
+}
+
+# Core tools (prebuilt images; includes containers built from the custom
+# a1111-webui and tg-webui images, which run under the names below)
+start_container open-webui
+start_container jupyterlab
+start_container comfyui
+start_container whisper
+start_container stable-diffusion
+start_container tg-webui
+
+# Custom-built (may need rebuilding if deleted)
+start_container whispercpp-gpu
+
+echo "✅ All available containers started."
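
As a quick sanity check after running setup-nvidia-toolkit.sh and setup-ai-stack.sh, something along these lines (a minimal sketch; the image tag and container names are the ones used by the scripts above) confirms that containers can see the GPU and shows which stack services are up:

  # GPU passthrough check, using the same CUDA base image as the custom builds
  docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi

  # Status and published ports of the stack's containers
  docker ps --filter "name=open-webui" --filter "name=jupyterlab" \
            --filter "name=stable-diffusion" --filter "name=tg-webui" \
            --filter "name=comfyui" --filter "name=whisper" \
            --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"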