#!/bin/bash
set -e
mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
LOG_FILE=~/ai-stack/setup.log
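# Mirror all script output to the console and append it to the log file.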
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net
check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^${name}$"; then
    echo "🟢 Container '$name' is already running."
    return 0
  fi
  return 1
}
# Handle update option
if [[ "$1" == "--update" ]]; then
echo "🔄 Updating containers..."
# JupyterLab
if check_container_running "jupyterlab"; then
echo "🧼 Updating JupyterLab..."
docker stop jupyterlab
docker rm jupyterlab
docker pull jupyter/scipy-notebook:latest
docker run -d \
--name jupyterlab \
--network ai-stack-net \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "✅ JupyterLab updated."
else
echo "⚠️ JupyterLab is not running, skipping update."
fi
  # --- LocalAI custom build and update ---
  LOCALAI_REPO=~/git-repos/LocalAI
  DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile"
  echo "🔁 Rebuilding LocalAI (custom)..."

  # Back up and remove the existing repo
  if [ -d "$LOCALAI_REPO" ]; then
    echo "📦 Backing up existing LocalAI repo..."
    mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)"
  fi

  # Clone the latest LocalAI
  git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO"

  # Remove default Docker-related files
  cd "$LOCALAI_REPO"
  rm -f Dockerfile Dockerfile.aio docker-compose.yaml

  # Prepare and build the LocalAI binary
  # NOTE: this portion required quite a few custom additions to build
  # correctly. If you are running this on a machine that has not built it
  # before, it is probably better to wait for the Docker Compose method
  # (coming soon); a rough sketch of that service follows below.
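  # Untested sketch only; the image, ports, and volume paths assume the
  # same layout used elsewhere in this script:
  #
  #   services:
  #     localai:
  #       image: localai:custom
  #       ports:
  #         - "8080:8080"
  #       volumes:
  #         - ~/ai-stack/localai/models:/models
  #         - ~/ai-stack/localai/config:/config
  #       deploy:
  #         resources:
  #           reservations:
  #             devices:
  #               - driver: nvidia
  #                 count: all
  #                 capabilities: [gpu]
  #       restart: unless-stopped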
echo "🔧 Running make prepare..."
make prepare
echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
make build \
TAGS="localrecall,localagi,gpu" \
BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
GPU_SUPPORT="cuda"
  # Download the custom Dockerfile
  echo "📥 Downloading custom Dockerfile..."
  curl -fsSL "$DOCKERFILE_URL" -o Dockerfile

  # Build the container image
  echo "🔨 Building custom LocalAI Docker image..."
  docker build -t localai:custom .

  # Remove any existing container
  docker stop localai 2>/dev/null || true
  docker rm localai 2>/dev/null || true

  # Create model and config directories if needed
  mkdir -p ~/ai-stack/localai/models
  mkdir -p ~/ai-stack/localai/config

  # Run the new container (joined to ai-stack-net, matching the install path)
  docker run -d \
    --name localai \
    --gpus all \
    --network ai-stack-net \
    -p 8080:8080 \
    -v ~/ai-stack/localai/models:/models \
    -v ~/ai-stack/localai/config:/config \
    -e ENABLE_BACKENDS=llama-cuda,ollama \
    -e INCLUDE_DEFAULT_MODELS=true \
    -e AUTOLOAD_MODELS=true \
    -e MODEL_PATH=/models \
    --restart unless-stopped \
    localai:custom

  echo "✅ LocalAI (custom) is now running at http://localhost:8080"
# # 🔁 Updating LocalAI (GPU build)
# if check_container_running "localai"; then
# echo "🧠 Updating LocalAI..."
# docker stop localai
# docker rm localai
# else
# echo " Installing LocalAI..."
# fi
# docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
# # Ensure default models.yaml exists
# MODELS_YAML=~/ai-stack/localai/config/models.yaml
# if [ ! -f "$MODELS_YAML" ]; then
# echo "📄 Creating default models.yaml..."
# cat > "$MODELS_YAML" <<EOF
# - name: phi
# backend: llama-cuda
# parameters:
# model: /models/phi-2.gguf
# EOF
# fi
# docker run -d \
# --name localai \
# --gpus all \
# -p 8080:8080 \
# -v ~/ai-stack/localai/models:/models \
# -v ~/ai-stack/localai/config:/config \
# -e ENABLE_BACKENDS=llama-cuda,ollama \
# -e INCLUDE_DEFAULT_MODELS=true \
# -e AUTOLOAD_MODELS=true \
# -e MODEL_PATH=/models \
# --restart unless-stopped \
# localai/localai:latest-aio-gpu-nvidia-cuda-12
# echo "✅ LocalAI running at http://localhost:8080"
echo "🔁 Updating Whisper.cpp (custom GPU build)..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker stop whispercpp-gpu 2>/dev/null || true
docker rm whispercpp-gpu 2>/dev/null || true
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ Whisper.cpp GPU image rebuilt."
else
echo "⚠️ Whisper.cpp directory not found — skipping update."
fi
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
docker run -d --gpus all \
--name stable-diffusion \
--network ai-stack-net \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
a1111-webui
echo "✅ Stable Diffusion WebUI updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
cd "$TG_DIR"
git pull origin master
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
docker run -d --gpus all \
--name tg-webui \
--network ai-stack-net \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "✅ Text-generation-webui updated and restarted."
else
echo "⚠️ Text-generation-webui directory not found — skipping update."
fi
echo "🔁 Updating ComfyUI (custom build)..."
COMFYUI_DIR=~/ai-stack/comfyui
if [ -d "$COMFYUI_DIR" ]; then
cd "$COMFYUI_DIR"
git pull origin master
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
docker run -d --gpus all \
--name comfyui \
--network ai-stack-net \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "✅ ComfyUI updated and restarted."
else
echo "⚠️ ComfyUI directory not found — skipping update."
fi
echo "✅ Update complete!"
exit 0
fi
# Remove an existing container by name (ignores errors if it does not exist).
clean_container() {
  local name="$1"
  docker rm -f "$name" 2>/dev/null || true
}
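# Currently unused helper; hypothetical usage:
#   clean_container open-webui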
if [[ "$1" != "--update" ]]; then
if check_container_running "open-webui"; then
echo "✅ Skipping Open WebUI + Ollama install (already running)."
else
read -p " Install Open WebUI + Ollama? [y/N]: " openui
if [[ "$openui" =~ ^[Yy]$ ]]; then
echo "🧠 Setting up Open WebUI + Ollama..."
docker stop open-webui 2>/dev/null || true
docker rm open-webui 2>/dev/null || true
docker run -d -p 3000:8080 --gpus all \
-v ollama:/root/.ollama \
-v open-webui:/app/backend/data \
--name open-webui \
--network ai-stack-net \
--restart always \
ghcr.io/open-webui/open-webui:ollama
echo "🌐 Open WebUI running at http://localhost:3000"
    fi
  fi
fi
if [[ "$1" != "--update" ]]; then
if check_container_running "jupyterlab"; then
echo "✅ Skipping JupyterLab install (already running)."
else
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
docker stop jupyterlab 2>/dev/null || true
docker rm jupyterlab 2>/dev/null || true
docker run -d \
--name jupyterlab \
--network ai-stack-net \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "🔍 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
mkdir -p ~/ai-stack
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
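      # Fallback if the token has not been logged yet:
      echo "If no token appeared above, try:"
      echo "docker exec jupyterlab jupyter server list"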
    fi
  fi
fi
if [[ "$1" != "--update" ]]; then
if check_container_running "whispercpp-gpu"; then
echo "✅ Skipping Whisper.cpp install (already running)."
else
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p "$WHISPER_DIR"
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
cd "$WHISPER_DIR"
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "📂 Creating ~/ai-stack/audio and models directories"
mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models
echo "✅ Whisper.cpp built successfully."
echo "To run it:"
echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
    fi
  fi
fi
if [[ "$1" != "--update" ]]; then
if check_container_running "stable-diffusion"; then
echo "✅ Skipping Stable Diffusion install (already running)."
else
read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
mkdir -p ~/ai-stack
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
cd "$SD_DIR"
echo "📄 Writing Dockerfile..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", "python launch.py --listen --xformers --api"]
EOF
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs
echo "🚀 Running Stable Diffusion WebUI on port 7860..."
docker run -d --gpus all \
--name stable-diffusion \
--network ai-stack-net \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
a1111-webui
echo "🌐 Access at http://localhost:7860"
    fi
  fi
fi
if [[ "$1" != "--update" ]]; then
if check_container_running "tg-webui"; then
echo "✅ Skipping Text-generation-webui install (already running)."
else
read -p " Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
echo "🧠 Cloning and building Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
cd "$TG_DIR"
echo "📄 Writing Dockerfile..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF
echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models
echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
--name tg-webui \
--network ai-stack-net \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "🌐 Access at http://localhost:7861"
    fi
  fi
fi
if [[ "$1" != "--update" ]]; then
if check_container_running "comfyui"; then
echo "✅ Skipping ComfyUI install (already running)."
else
read -p " Install ComfyUI (custom Docker build)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
echo "🧩 Cloning and building ComfyUI..."
COMFYUI_DIR=~/ai-stack/comfyui
git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
cd "$COMFYUI_DIR"
echo "📄 Writing Dockerfile..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && pip install -r requirements.txt
EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF
echo "🐳 Building Docker image 'comfyui-custom'..."
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
echo "📂 Preparing workspace volume..."
mkdir -p ~/ai-stack/comfyui/workspace
echo "🚀 Running ComfyUI on port 8188..."
docker run -d --gpus all \
--name comfyui \
--network ai-stack-net \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "🌐 ComfyUI running at http://localhost:8188"
    fi
  fi
fi
# if [[ "$1" != "--update" ]]; then
# if check_container_running "localai"; then
# echo "✅ Skipping LocalAI install (already running)."
# else
# read -p " Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
# if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
# echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
# mkdir -p ~/ai-stack/localai/models
# mkdir -p ~/ai-stack/localai/config
# echo "📄 Creating default models.yaml..."
# cat > ~/ai-stack/localai/config/models.yaml <<EOF
# - name: phi
# backend: llama-cuda
# parameters:
# model: /models/phi-2.gguf
# EOF
# docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
# docker run -d \
# --name localai \
# --gpus all \
# -p 8080:8080 \
# -v ~/ai-stack/localai/models:/models \
# -v ~/ai-stack/localai/config:/config \
# -e ENABLE_BACKENDS=llama-cuda,ollama \
# -e INCLUDE_DEFAULT_MODELS=true \
# -e AUTOLOAD_MODELS=true \
# -e MODEL_PATH=/models \
# --restart unless-stopped \
# localai/localai:latest-aio-gpu-nvidia-cuda-12
# echo "🌐 LocalAI is now running at http://localhost:8080"
# fi
# fi
# fi
if [[ "$1" != "--update" ]]; then
if check_container_running "localai"; then
echo "✅ Skipping LocalAI install (already running)."
else
read -p " Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
echo "🧠 Installing LocalAI (Custom created localAI container)..."
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
echo "📦 Using custom built LocalAI image (localai:custom)"
docker run -d \
--name localai \
--gpus all \
--network ai-stack-net \
-p 8080:8080 \
-v ~/ai-stack/localai/models:/models \
-v ~/ai-stack/localai/config:/config \
-e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
-e INCLUDE_DEFAULT_MODELS=true \
-e AUTOLOAD_MODELS=true \
-e MODEL_PATH=/models \
-e CONFIG_PATH=/config \
-e LOG_LEVEL=debug \
--restart unless-stopped \
localai:custom
echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi
echo "✅ AI stack setup complete."