#!/bin/bash
set -e
mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
# Containers to manage
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")
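
# To have --update manage another container, append its name to the list, e.g.:
#   CONTAINERS+=("my-extra-container")  # hypothetical name, for illustration only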
# Handle update option
if [[ "$1" == "--update" ]]; then
echo "🔄 Updating running containers..."
for name in "${CONTAINERS[@]}"; do
if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
echo "🧼 Updating $name..."
docker pull "$(docker inspect --format='{{.Config.Image}}' $name)"
docker stop $name
docker rm $name
else
echo "⚠️ Container $name is not running, skipping."
fi
done
echo "✅ Update complete. Rerun the script without --update to restart containers."
exit 0
fi
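
# Example usage (illustrative): a plain run walks through the install prompts;
# --update pulls fresh images and rebuilds the locally built components first.
#   ./setup-ai-stack.sh
#   ./setup-ai-stack.sh --update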
# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}
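
# Usage example: clean_container open-webui  (a no-op if no such container exists)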
# 🧠 Open WebUI + Ollama
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
  -v ollama:/root/.ollama \
  -v open-webui:/app/backend/data \
  --name open-webui \
  --restart always \
  ghcr.io/open-webui/open-webui:ollama
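
# Example (illustrative): once the container is up, pull a model into the bundled Ollama.
# Assumes the ollama CLI is on PATH inside this image; the model name is just an example.
#   docker exec -it open-webui ollama pull llama3
# Open WebUI is then available at http://localhost:3000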
# JupyterLab
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
clean_container jupyterlab
docker run -d \
--name jupyterlab \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "🔍 Jupyter token:"
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
echo "📁 Token saved to ~/ai-stack/jupyter_token.txt"
fi
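
# Example (illustrative): add extra Python packages to the running notebook container.
# Package names are placeholders; pick whatever your notebooks need.
#   docker exec -it jupyterlab pip install --quiet transformers datasets
# JupyterLab is reachable at http://localhost:8888 (use the token saved above).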
# Whisper.cpp with GPU Support (Build Locally)
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p ~/ai-stack
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"
echo "📂 Setting up input folder at ~/ai-stack/audio"
mkdir -p ~/ai-stack/audio
echo "✅ To run whisper on a file:"
echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"
echo "📢 Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
fi
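
# Example (illustrative): fetch a Whisper model with the helper script shipped in the
# upstream repo; "large-v3" is just one option, smaller models also work.
#   bash ~/ai-stack/whisper.cpp/models/download-ggml-model.sh large-v3
# The file lands in ~/ai-stack/whisper.cpp/models/ and can be mounted as shown above.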
if [[ "$1" == "--update" ]]; then
# ...existing container update loop
echo "🔁 Updating whisper.cpp..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ whispercpp-gpu rebuilt with latest source."
else
echo "⚠️ whisper.cpp not found at $WHISPER_DIR — skipping."
fi
fi
# Stable Diffusion (AUTOMATIC1111 custom GPU build)
read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
mkdir -p ~/ai-stack
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
cd "$SD_DIR"
cat > ~/ai-stack/stable-diffusion/Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
# Install dependencies
RUN apt-get update && \
apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
# Create venv and install deps
RUN python3 -m venv venv && \
. venv/bin/activate && \
pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]
EOF
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models
echo "🚀 Starting Stable Diffusion WebUI..."
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
a1111-webui
echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
fi
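
# Example (illustrative): drop a checkpoint into the mounted models folder, then confirm
# the API responds. The filename is a placeholder; use whatever checkpoint you downloaded.
#   cp ~/Downloads/some-model.safetensors ~/ai-stack/sd-models/
#   curl -s http://localhost:7860/sdapi/v1/sd-models
# The /sdapi/v1/* endpoints are exposed because the container starts with --api.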
if [[ "$1" == "--update" ]]; then
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker build -t a1111-webui .
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
a1111-webui
echo "✅ Stable Diffusion updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
fi
# Text-generation-webui (custom Docker build with GPU + API)
read -p " Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
echo "🧪 Cloning and building Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
cd "$TG_DIR"
echo "📝 Creating Dockerfile for build..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
# Install dependencies
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF
echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models
echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
--name tg-webui \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "🌐 Access at http://localhost:7861"
fi
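
# Example (illustrative): clone a model repo from Hugging Face into the mounted models
# folder; the repo name below is a placeholder for whichever model you want to serve.
#   git clone https://huggingface.co/<user>/<model-repo> ~/ai-stack/tg-models/<model-repo>
# Then load it from the Model tab at http://localhost:7861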
echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
cd "$TG_DIR"
git pull origin master
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
docker run -d --gpus all \
--name tg-webui \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "✅ Text-generation-webui updated and restarted."
else
echo "⚠️ Text-generation-webui directory not found — skipping update."
fi
# ComfyUI (Docker - graph-based Stable Diffusion)
read -p " Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
echo "🧩 Installing ComfyUI (Docker)..."
clean_container comfyui
docker run -d --gpus all \
--name comfyui \
-p 8188:8188 \
-v comfyui_data:/workspace \
ghcr.io/cmdr2/comfyui:latest
echo "🌐 ComfyUI running at http://localhost:8188"
fi
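
# Example (illustrative): copy a checkpoint into the comfyui_data volume. The models path
# inside the container depends on this image's layout, so adjust the target as needed.
#   docker cp ~/ai-stack/sd-models/some-model.safetensors comfyui:/workspace/ComfyUI/models/checkpoints/
#   docker restart comfyui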
echo "✅ AI stack setup complete."