# AI-Dev/setup-ai-stack.sh
#!/bin/bash
# setup-ai-stack.sh — interactive installer / updater for a local Dockerized
# AI stack (Open WebUI + Ollama, JupyterLab, Whisper.cpp, Stable Diffusion
# WebUI, text-generation-webui, ComfyUI, LocalAI).
#
# Usage:
#   ./setup-ai-stack.sh            # interactive install (one y/N prompt per tool)
#   ./setup-ai-stack.sh --update   # non-interactive refresh of installed tools
#
# All output is also appended to ~/ai-stack/setup.log.
set -e
mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
# Mirror stdout+stderr of everything below into the log file.
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
# Shared bridge network so the containers can reach each other by name.
docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net
# check_container_running NAME
# Returns 0 (and prints a notice) if a container named exactly NAME is
# currently running; returns 1 otherwise.
check_container_running() {
  local name="$1"
  # -F: fixed-string match so regex metacharacters in a name (e.g. '.')
  #     cannot false-positive ("web.app" must not match "web-app");
  # -x: whole-line match, replacing the hand-rolled ^...$ anchors;
  # --: protect against names beginning with '-'.
  if docker ps --format '{{.Names}}' | grep -Fxq -- "$name"; then
    echo "🟢 Container '$name' is already running."
    return 0
  fi
  return 1
}
# Handle update option
# --update: non-interactively refresh every known component, then exit 0.
# The interactive install prompts further below are never reached in this mode.
if [[ "$1" == "--update" ]]; then
echo "🔄 Updating containers..."
# JupyterLab
# Refresh only if currently running: stop/remove, pull latest, recreate.
if check_container_running "jupyterlab"; then
echo "🧼 Updating JupyterLab..."
docker stop jupyterlab
docker rm jupyterlab
docker pull jupyter/scipy-notebook:latest
docker run -d \
--name jupyterlab \
--network ai-stack-net \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "✅ JupyterLab updated."
else
echo "⚠️ JupyterLab is not running, skipping update."
fi
# Whisper (whisper.cpp)
# NOTE(review): this refreshes a container named "whisper" from the upstream
# image, but the installer below only ever builds the image "whispercpp-gpu"
# and never starts a "whisper" container — confirm this branch isn't dead.
if check_container_running "whisper"; then
echo "🧼 Updating Whisper..."
docker stop whisper
docker rm whisper
docker pull ggerganov/whisper.cpp:latest
docker run -d \
--name whisper \
--network ai-stack-net \
--gpus all \
-v whisper_data:/app/data \
ggerganov/whisper.cpp:latest
echo "✅ Whisper updated."
else
echo "⚠️ Whisper is not running, skipping update."
fi
# # Stable Diffusion (A1111)
# (Disabled: superseded by the custom-built Stable Diffusion update below.)
# if check_container_running "stable-diffusion"; then
# echo "🧼 Updating Stable Diffusion..."
# docker stop stable-diffusion
# docker rm stable-diffusion
# docker pull abraham-ai/automatic1111-webui:latest
# docker run -d \
# --name stable-diffusion \
# --gpus all \
# -p 7860:7860 \
# -v sd_models:/data \
# abraham-ai/automatic1111-webui:latest
# echo "✅ Stable Diffusion updated."
# else
# echo "⚠️ Stable Diffusion is not running, skipping update."
# fi
# NOTE(review): ComfyUI is updated twice in this mode — first here from the
# prebuilt ghcr.io image, then again below from the custom local build. The
# second run replaces this container, so this step is effectively wasted work.
echo "🔁 Updating ComfyUI..."
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker pull ghcr.io/cmdr2/comfyui:latest
docker run -d --gpus all \
--name comfyui \
--network ai-stack-net \
-p 8188:8188 \
-v comfyui_data:/workspace \
ghcr.io/cmdr2/comfyui:latest
echo "✅ ComfyUI updated and restarted."
# LocalAI: recreated unconditionally from the latest image.
echo "🔁 Updating LocalAI..."
docker stop localai 2>/dev/null || true
docker rm localai 2>/dev/null || true
mkdir -p ~/ai-stack/localai
docker pull quay.io/go-skynet/local-ai:latest
# NOTE(review): "ENABLE_OOLLAMA_BACKEND" looks like a typo for
# "ENABLE_OLLAMA_BACKEND" — confirm against LocalAI's documentation.
# Mounting /var/run/docker.sock gives this container access to the host
# Docker daemon.
docker run -d \
--name localai \
--network ai-stack-net \
-p 8080:8080 \
-v ~/ai-stack/localai:/models \
-e MODELS_PATH=/models \
-e ENABLE_OOLLAMA_BACKEND=true \
-v /var/run/docker.sock:/var/run/docker.sock \
quay.io/go-skynet/local-ai:latest
echo "✅ LocalAI updated and restarted."
# Whisper.cpp: pull the checkout and rebuild the local GPU image.
# NOTE(review): only the image is rebuilt here — the container is stopped and
# removed but never restarted, unlike the other update steps.
echo "🔁 Updating Whisper.cpp (custom GPU build)..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker stop whispercpp-gpu 2>/dev/null || true
docker rm whispercpp-gpu 2>/dev/null || true
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ Whisper.cpp GPU image rebuilt."
else
echo "⚠️ Whisper.cpp directory not found — skipping update."
fi
# Stable Diffusion WebUI: pull repo, rebuild image, recreate container.
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
docker run -d --gpus all \
--name stable-diffusion \
--network ai-stack-net \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
a1111-webui
echo "✅ Stable Diffusion WebUI updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
# Text-generation-webui: same pull / rebuild / recreate pattern.
# NOTE(review): `git pull origin master` — confirm the checkout's default
# branch; upstream text-generation-webui uses "main".
echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
cd "$TG_DIR"
git pull origin master
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
docker run -d --gpus all \
--name tg-webui \
--network ai-stack-net \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "✅ Text-generation-webui updated and restarted."
else
echo "⚠️ Text-generation-webui directory not found — skipping update."
fi
# ComfyUI (custom build): replaces the container created from the ghcr image
# earlier in this update pass.
echo "🔁 Updating ComfyUI (custom build)..."
COMFYUI_DIR=~/ai-stack/comfyui
if [ -d "$COMFYUI_DIR" ]; then
cd "$COMFYUI_DIR"
git pull origin master
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
docker run -d --gpus all \
--name comfyui \
--network ai-stack-net \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "✅ ComfyUI updated and restarted."
else
echo "⚠️ ComfyUI directory not found — skipping update."
fi
echo "✅ Update complete!"
# Update mode ends here; the install prompts below never run with --update.
exit 0
fi
# clean_container NAME
# Force-remove the named container, swallowing any error (e.g. the container
# does not exist) so the script can keep going under `set -e`.
# NOTE(review): not called anywhere visible in this script — confirm before
# removing; the install sections inline `docker stop`/`docker rm` instead.
clean_container() {
  local target="$1"
  docker rm -f "$target" 2>/dev/null || true
}
# Open WebUI + Ollama (bundled image), opt-in via prompt.
# NOTE(review): the "$1" != "--update" guard is redundant — update mode
# already exits before reaching this point.
if [[ "$1" != "--update" ]]; then
if check_container_running "open-webui"; then
echo "✅ Skipping Open WebUI + Ollama install (already running)."
else
read -p " Install Open WebUI + Ollama? [y/N]: " openui
if [[ "$openui" =~ ^[Yy]$ ]]; then
echo "🧠 Setting up Open WebUI + Ollama..."
# Remove any stopped leftover container before recreating it.
docker stop open-webui 2>/dev/null || true
docker rm open-webui 2>/dev/null || true
# The :ollama tag bundles Ollama in the same container.
# Host port 3000 -> container port 8080.
docker run -d -p 3000:8080 --gpus all \
-v ollama:/root/.ollama \
-v open-webui:/app/backend/data \
--name open-webui \
--network ai-stack-net \
--restart always \
ghcr.io/open-webui/open-webui:ollama
echo "🌐 Open WebUI running at http://localhost:3000"
fi
fi
fi
# JupyterLab (jupyter/scipy-notebook), opt-in via prompt.
if [[ "$1" != "--update" ]]; then
if check_container_running "jupyterlab"; then
echo "✅ Skipping JupyterLab install (already running)."
else
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
docker stop jupyterlab 2>/dev/null || true
docker rm jupyterlab 2>/dev/null || true
docker run -d \
--name jupyterlab \
--network ai-stack-net \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "🔍 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
mkdir -p ~/ai-stack
# Extract the access-token line from the container logs.
# NOTE(review): logs are read immediately after `docker run`; the server may
# not have printed the token yet — consider a short sleep/retry loop.
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
fi
fi
fi
# Whisper.cpp custom GPU image, opt-in via prompt. Builds an image only;
# running the container is left to the user (command printed at the end).
if [[ "$1" != "--update" ]]; then
if check_container_running "whispercpp-gpu"; then
echo "✅ Skipping Whisper.cpp install (already running)."
else
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p "$WHISPER_DIR"
# "|| true" keeps set -e from aborting when the checkout already exists;
# NOTE(review): it also masks real clone failures (e.g. network errors).
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
# Unquoted EOF heredoc: $-expansion would apply, but the body has no $vars
# (the \\ lines become single backslashes, i.e. Dockerfile line continuations).
# NOTE(review): the CUDA "-base" image ships no compiler toolchain (nvcc);
# building with cuBLAS likely requires a "-devel" image — confirm.
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
cd "$WHISPER_DIR"
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "📂 Creating ~/ai-stack/audio and models directories"
mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models
echo "✅ Whisper.cpp built successfully."
echo "To run it:"
echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
fi
fi
fi
# Stable Diffusion WebUI (AUTOMATIC1111) custom GPU build, opt-in via prompt.
# Clones the repo, writes a Dockerfile, builds the image, and runs it on 7860.
if [[ "$1" != "--update" ]]; then
if check_container_running "stable-diffusion"; then
echo "✅ Skipping Stable Diffusion install (already running)."
else
read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
mkdir -p ~/ai-stack
# "|| true": tolerate an already-existing checkout (masks clone errors too).
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
cd "$SD_DIR"
echo "📄 Writing Dockerfile..."
# Quoted 'EOF': heredoc body is written literally, no shell expansion.
# FIX: CMD previously invoked `python`, but Ubuntu 22.04 only provides a
# `python3` binary (the image installs python3/python3-pip and nothing
# creates a `python` alias), so the container died at startup.
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", "python3 launch.py --listen --xformers --api"]
EOF
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs
echo "🚀 Running Stable Diffusion WebUI on port 7860..."
docker run -d --gpus all \
--name stable-diffusion \
--network ai-stack-net \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
a1111-webui
echo "🌐 Access at http://localhost:7860"
fi
fi
fi
# Text-generation-webui custom build, opt-in via prompt.
if [[ "$1" != "--update" ]]; then
if check_container_running "tg-webui"; then
echo "✅ Skipping Text-generation-webui install (already running)."
else
read -p " Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
echo "🧠 Cloning and building Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
# "|| true": tolerate an already-existing checkout (masks clone errors too).
git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
cd "$TG_DIR"
echo "📄 Writing Dockerfile..."
# Quoted 'EOF': heredoc body is written literally, no shell expansion.
# FIX: CMD previously invoked `python`, but Ubuntu 22.04 only provides a
# `python3` binary, so the container died at startup.
# NOTE(review): EXPOSE/port mapping use 7861, but nothing tells server.py to
# listen on 7861 (its default is 7860) — likely needs `--listen-port 7861`;
# confirm against the project's CLI flags.
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python3 server.py --listen --api"]
EOF
echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models
echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
--name tg-webui \
--network ai-stack-net \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "🌐 Access at http://localhost:7861"
fi
fi
fi
# ComfyUI custom build, opt-in via prompt. Builds from a fresh clone and
# serves on port 8188.
if [[ "$1" != "--update" ]]; then
if check_container_running "comfyui"; then
echo "✅ Skipping ComfyUI install (already running)."
else
read -p " Install ComfyUI (custom Docker build)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
echo "🧩 Cloning and building ComfyUI..."
COMFYUI_DIR=~/ai-stack/comfyui
# "|| true": tolerate an already-existing checkout (masks clone errors too).
git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
cd "$COMFYUI_DIR"
echo "📄 Writing Dockerfile..."
# Quoted 'EOF': heredoc body is written literally, no shell expansion.
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && pip install -r requirements.txt
EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF
echo "🐳 Building Docker image 'comfyui-custom'..."
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
echo "📂 Preparing workspace volume..."
mkdir -p ~/ai-stack/comfyui/workspace
echo "🚀 Running ComfyUI on port 8188..."
# Host workspace dir is mounted over the container's /app/output only.
docker run -d --gpus all \
--name comfyui \
--network ai-stack-net \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "🌐 ComfyUI running at http://localhost:8188"
fi
fi
fi
# LocalAI (OpenAI-compatible API server), opt-in via prompt. Serves on 8080
# with models read from ~/ai-stack/localai.
if [[ "$1" != "--update" ]]; then
if check_container_running "localai"; then
echo "✅ Skipping LocalAI install (already running)."
else
read -p " Install LocalAI (OpenAI-compatible API proxy)? [y/N]: " lai
if [[ "$lai" =~ ^[Yy]$ ]]; then
echo "🧠 Setting up LocalAI..."
docker stop localai 2>/dev/null || true
docker rm localai 2>/dev/null || true
mkdir -p ~/ai-stack/localai
# NOTE(review): "ENABLE_OOLLAMA_BACKEND" looks like a typo for
# "ENABLE_OLLAMA_BACKEND" — confirm against LocalAI's documentation.
# Mounting /var/run/docker.sock gives this container access to the host
# Docker daemon.
docker run -d \
--name localai \
--network ai-stack-net \
-p 8080:8080 \
-v ~/ai-stack/localai:/models \
-e MODELS_PATH=/models \
-e ENABLE_OOLLAMA_BACKEND=true \
-v /var/run/docker.sock:/var/run/docker.sock \
quay.io/go-skynet/local-ai:latest
echo "🌐 LocalAI running at http://localhost:8080"
fi
fi
fi
echo "✅ AI stack setup complete."