Initial commit

This commit is contained in:
crowetic 2025-05-17 13:54:15 -07:00
commit c4fe4a522f
5 changed files with 1036 additions and 0 deletions

286
backups/setup-ai-stack.sh Normal file
View File

@ -0,0 +1,286 @@
#!/bin/bash
# setup-ai-stack.sh — interactive installer for a local, Docker-based AI stack
# (Open WebUI + Ollama, JupyterLab, Whisper.cpp, Stable Diffusion, tg-webui,
# ComfyUI). Usage: ./setup-ai-stack.sh [--update]
set -e
mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
# Mirror all stdout and stderr into the log file while still printing to the
# console (process substitution through tee).
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
# Containers to manage — the --update path below iterates over these names.
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")
# Handle update option: pull a fresh image for each running managed container,
# then remove the container and exit; rerunning without --update recreates it.
# NOTE(review): this early 'exit 0' makes the later per-service
# '--update' blocks further down this script unreachable.
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating running containers..."
  for name in "${CONTAINERS[@]}"; do
    if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
      echo "🧼 Updating $name..."
      # Quote "$name" everywhere (SC2086) so names with unusual characters
      # cannot word-split or glob.
      docker pull "$(docker inspect --format='{{.Config.Image}}' "$name")"
      docker stop "$name"
      docker rm "$name"
    else
      echo "⚠️ Container $name is not running, skipping."
    fi
  done
  echo "✅ Update complete. Rerun the script without --update to restart containers."
  exit 0
fi
# Report whether a container with the given name is currently running.
# Prints a status line and returns 0 when running; returns 1 otherwise.
check_container_running() {
  local name="$1"
  # Guard clause: bail out early when no running container matches exactly.
  docker ps --format '{{.Names}}' | grep -q "^${name}$" || return 1
  echo "🟢 Container '$name' is already running."
  return 0
}
# Remove a container by name whether or not it is running; silently succeed
# when it does not exist (stderr suppressed, failure ignored).
clean_container() {
  local target="$1"
  docker rm -f "$target" 2>/dev/null || true
}
# 🧠 Open WebUI + Ollama
# Installed unconditionally (no prompt, unlike the other services). The
# ':ollama' tag presumably bundles an Ollama runtime in the same container —
# confirm against the upstream image docs. Model state persists in the
# 'ollama' volume, app data in 'open-webui'.
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
-v ollama:/root/.ollama \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
ghcr.io/open-webui/open-webui:ollama
# JupyterLab
# Optional: anything other than a single y/Y skips this section.
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
clean_container jupyterlab
docker run -d \
--name jupyterlab \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
# Capture the login token from the container logs. NOTE(review): the token may
# not have been logged yet this soon after 'docker run' — a short retry loop
# would be more reliable. (grep failing mid-pipeline does not trip 'set -e'
# because the pipeline's status is tee's, pipefail is not set.)
echo "🔍 Jupyter token:"
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
echo "📁 Token saved to ~/ai-stack/jupyter_token.txt"
fi
# Whisper.cpp with GPU Support (Build Locally)
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p ~/ai-stack
# '|| true' tolerates an already-existing clone, but also masks real clone
# failures (e.g. network errors) — the build then uses whatever is present.
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
# Unquoted EOF delimiter: shell expansion applies, so the '\\' sequences below
# are written to the Dockerfile as single backslashes (line continuations).
# NOTE(review): recent whisper.cpp uses GGML_CUDA (formerly WHISPER_CUBLAS)
# as the CMake switch; -DWITH_CUBLAS may be silently ignored — confirm against
# the upstream README. The image also re-clones the repo rather than COPYing
# the host checkout, so the git clone above is only used for models/ paths.
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"
echo "📂 Setting up input folder at ~/ai-stack/audio"
mkdir -p ~/ai-stack/audio
echo "✅ To run whisper on a file:"
echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"
echo "📢 Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
fi
# NOTE(review): dead code — when '--update' is passed, the handler at the top
# of this script 'exit 0's before reaching here, so this block never executes.
# Fold it into the top-level --update handler or delete it.
if [[ "$1" == "--update" ]]; then
# ...existing container update loop
echo "🔁 Updating whisper.cpp..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ whispercpp-gpu rebuilt with latest source."
else
echo "⚠️ whisper.cpp not found at $WHISPER_DIR — skipping."
fi
fi
# Stable Diffusion (AUTOMATIC1111 custom GPU build)
read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
  echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  mkdir -p ~/ai-stack
  # '|| true' keeps the script going when the repo is already cloned.
  git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
  cd "$SD_DIR"
  # Quoted 'EOF': the Dockerfile is written literally, no shell expansion.
  cat > "$SD_DIR/Dockerfile" <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
# Install dependencies
RUN apt-get update && \
apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
# Create venv and install deps
RUN python3 -m venv venv && \
. venv/bin/activate && \
pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]
EOF
  echo "🐳 Building Docker image 'a1111-webui'..."
  docker stop stable-diffusion 2>/dev/null || true
  docker rm stable-diffusion 2>/dev/null || true
  docker build -t a1111-webui .
  echo "📂 Mounting model directory from ~/ai-stack/sd-models"
  mkdir -p ~/ai-stack/sd-models
  echo "🚀 Starting Stable Diffusion WebUI..."
  # BUG FIX: the Dockerfile copies the app into /app (WORKDIR /app), so the
  # mounts must target /app/... — the previous /stable-diffusion-webui/...
  # targets were never seen by the app (models invisible, outputs lost).
  docker run -d --gpus all \
    --name stable-diffusion \
    -p 7860:7860 \
    -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
    -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
    a1111-webui
  echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
fi
# NOTE(review): dead code — the top-level --update handler exits before this
# block can run. It also mounts /stable-diffusion-webui/... even though the
# built image installs the app under /app (WORKDIR /app, COPY . /app), so the
# mounted models/outputs would not be visible to the app anyway.
if [[ "$1" == "--update" ]]; then
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker build -t a1111-webui .
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
a1111-webui
echo "✅ Stable Diffusion updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
fi
# Text-generation-webui (custom Docker build with GPU + API)
read -p " Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
  echo "🧪 Cloning and building Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
  cd "$TG_DIR"
  echo "📝 Creating Dockerfile for build..."
  # BUG FIX in the CMD below: the CUDA/Ubuntu 22.04 base image installs only
  # 'python3' (no 'python' shim), so the original "python server.py" CMD
  # failed at container start; use python3 explicitly.
  cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
# Install dependencies
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python3 server.py --listen --api"]
EOF
  echo "🐳 Building Docker image 'tg-webui'..."
  docker stop tg-webui 2>/dev/null || true
  docker rm tg-webui 2>/dev/null || true
  docker build -t tg-webui .
  echo "📂 Mounting model directory from ~/ai-stack/tg-models"
  mkdir -p ~/ai-stack/tg-models
  echo "🚀 Running text-generation-webui on port 7861..."
  docker run -d --gpus all \
    --name tg-webui \
    -p 7861:7861 \
    -v ~/ai-stack/tg-models:/app/models \
    tg-webui
  echo "🌐 Access at http://localhost:7861"
fi
# Text-generation-webui update path.
# BUG FIX: this section previously ran unconditionally, so a plain install run
# would also git-pull and rebuild tg-webui. Guard it with --update like the
# sibling update sections. NOTE(review): the top-level --update handler exits
# early, so this remains unreachable until that handler is consolidated.
if [[ "$1" == "--update" ]]; then
  echo "🔁 Updating Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  if [ -d "$TG_DIR" ]; then
    cd "$TG_DIR"
    git pull origin master
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
      -p 7861:7861 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
  else
    echo "⚠️ Text-generation-webui directory not found — skipping update."
  fi
fi
# ComfyUI (Docker - graph-based Stable Diffusion)
# Optional install of the prebuilt ComfyUI image; a single y/Y accepts.
read -p " Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
case "$comfy" in
  [Yy])
    echo "🧩 Installing ComfyUI (Docker)..."
    clean_container comfyui
    docker run -d --gpus all \
      --name comfyui \
      -p 8188:8188 \
      -v comfyui_data:/workspace \
      ghcr.io/cmdr2/comfyui:latest
    echo "🌐 ComfyUI running at http://localhost:8188"
    ;;
esac
echo "✅ AI stack setup complete."

278
backups/setup-ai-stack.sh~ Normal file
View File

@ -0,0 +1,278 @@
#!/bin/bash
# NOTE(review): this file looks like a stale editor backup ('~' suffix) of
# backups/setup-ai-stack.sh. Prefer deleting it from version control — git
# already preserves history. Comments below document it as-is.
set -e
mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
# Mirror all script output (stdout + stderr) into the log while still echoing.
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
# Containers to manage
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")
# Handle update option
# Pull fresh images for running containers, remove them, and exit; rerunning
# without --update recreates them. The exit here makes the later per-service
# '--update' blocks in this file unreachable.
if [[ "$1" == "--update" ]]; then
echo "🔄 Updating running containers..."
for name in "${CONTAINERS[@]}"; do
if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
echo "🧼 Updating $name..."
docker pull "$(docker inspect --format='{{.Config.Image}}' $name)"
docker stop $name
docker rm $name
else
echo "⚠️ Container $name is not running, skipping."
fi
done
echo "✅ Update complete. Rerun the script without --update to restart containers."
exit 0
fi
# Function to clean up existing container
# Removes a container by name; errors (e.g. not found) are suppressed.
clean_container() {
local name=$1
docker rm -f "$name" 2>/dev/null || true
}
# 🧠 Open WebUI + Ollama
# Installed unconditionally; state persists in the 'ollama'/'open-webui' volumes.
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
-v ollama:/root/.ollama \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
ghcr.io/open-webui/open-webui:ollama
# JupyterLab
# Optional; only a single y/Y accepts.
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
clean_container jupyterlab
docker run -d \
--name jupyterlab \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
# NOTE(review): the token may not be logged yet this soon after startup.
echo "🔍 Jupyter token:"
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
echo "📁 Token saved to ~/ai-stack/jupyter_token.txt"
fi
# Whisper.cpp with GPU Support (Build Locally)
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p ~/ai-stack
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
# Unquoted EOF: '\\' collapses to '\' in the written Dockerfile.
# NOTE(review): -DWITH_CUBLAS may be ignored by current whisper.cpp (uses
# GGML_CUDA / formerly WHISPER_CUBLAS) — confirm upstream.
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"
echo "📂 Setting up input folder at ~/ai-stack/audio"
mkdir -p ~/ai-stack/audio
echo "✅ To run whisper on a file:"
echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"
echo "📢 Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
fi
# NOTE(review): dead code — the top-level --update handler exits first.
if [[ "$1" == "--update" ]]; then
# ...existing container update loop
echo "🔁 Updating whisper.cpp..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ whispercpp-gpu rebuilt with latest source."
else
echo "⚠️ whisper.cpp not found at $WHISPER_DIR — skipping."
fi
fi
# Stable Diffusion (AUTOMATIC1111 custom GPU build)
# NOTE(review): the mounts below target /stable-diffusion-webui/... but the
# Dockerfile installs the app under /app (WORKDIR /app, COPY . /app), so the
# mounted models/outputs are not visible to the app.
read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
mkdir -p ~/ai-stack
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
cd "$SD_DIR"
cat > ~/ai-stack/stable-diffusion/Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
# Install dependencies
RUN apt-get update && \
apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
# Create venv and install deps
RUN python3 -m venv venv && \
. venv/bin/activate && \
pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]
EOF
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models
echo "🚀 Starting Stable Diffusion WebUI..."
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
a1111-webui
echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
fi
# NOTE(review): dead code — the top-level --update handler exits first.
if [[ "$1" == "--update" ]]; then
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker build -t a1111-webui .
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
a1111-webui
echo "✅ Stable Diffusion updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
fi
# Text-generation-webui (custom Docker build with GPU + API)
# NOTE(review): the CMD below runs 'python', but the CUDA/Ubuntu 22.04 base
# image only provides 'python3' — the container would fail at start.
read -p " Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
echo "🧪 Cloning and building Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
cd "$TG_DIR"
echo "📝 Creating Dockerfile for build..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
# Install dependencies
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF
echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models
echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
--name tg-webui \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "🌐 Access at http://localhost:7861"
fi
# NOTE(review): unguarded — this update section is missing the '--update'
# wrapper its siblings have, so it runs on every invocation, including fresh
# installs.
echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
cd "$TG_DIR"
git pull origin master
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
docker run -d --gpus all \
--name tg-webui \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "✅ Text-generation-webui updated and restarted."
else
echo "⚠️ Text-generation-webui directory not found — skipping update."
fi
# ComfyUI (Docker - graph-based Stable Diffusion)
# Optional install of the prebuilt ComfyUI image.
read -p " Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
echo "🧩 Installing ComfyUI (Docker)..."
clean_container comfyui
docker run -d --gpus all \
--name comfyui \
-p 8188:8188 \
-v comfyui_data:/workspace \
ghcr.io/cmdr2/comfyui:latest
echo "🌐 ComfyUI running at http://localhost:8188"
fi
echo "✅ AI stack setup complete."

420
setup-ai-stack.sh Normal file
View File

@ -0,0 +1,420 @@
#!/bin/bash
# setup-ai-stack.sh — interactive installer/updater for a local Docker-based
# AI stack. Usage: ./setup-ai-stack.sh [--update]
set -e
mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
# Mirror all stdout and stderr into the log while still printing to console.
exec > >(tee -a "$LOG_FILE") 2>&1
echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
# Report whether a container with the given name is currently running.
# Prints a status line and returns 0 when running; returns 1 otherwise.
check_container_running() {
  local name="$1"
  # Guard clause: bail out early when no running container matches exactly.
  docker ps --format '{{.Names}}' | grep -q "^${name}$" || return 1
  echo "🟢 Container '$name' is already running."
  return 0
}
# Handle update option
# --update: refresh every known service, then exit before the install prompts.
if [[ "$1" == "--update" ]]; then
echo "🔄 Updating containers..."
# JupyterLab: replace the running container with a freshly pulled image.
if check_container_running "jupyterlab"; then
echo "🧼 Updating JupyterLab..."
docker stop jupyterlab
docker rm jupyterlab
docker pull jupyter/scipy-notebook:latest
docker run -d \
--name jupyterlab \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
echo "✅ JupyterLab updated."
else
echo "⚠️ JupyterLab is not running, skipping update."
fi
# Whisper (whisper.cpp)
# NOTE(review): 'ggerganov/whisper.cpp:latest' on Docker Hub does not appear
# to be an official published image — verify it exists; the custom
# 'whispercpp-gpu' rebuild further down is what the install path creates.
if check_container_running "whisper"; then
echo "🧼 Updating Whisper..."
docker stop whisper
docker rm whisper
docker pull ggerganov/whisper.cpp:latest
docker run -d \
--name whisper \
--gpus all \
-v whisper_data:/app/data \
ggerganov/whisper.cpp:latest
echo "✅ Whisper updated."
else
echo "⚠️ Whisper is not running, skipping update."
fi
# Stable Diffusion (A1111)
# NOTE(review): 'abraham-ai/automatic1111-webui' is a third-party image and
# differs from the locally built 'a1111-webui' used elsewhere — confirm
# intent; the source-rebuild section below replaces this container anyway.
if check_container_running "stable-diffusion"; then
echo "🧼 Updating Stable Diffusion..."
docker stop stable-diffusion
docker rm stable-diffusion
docker pull abraham-ai/automatic1111-webui:latest
docker run -d \
--name stable-diffusion \
--gpus all \
-p 7860:7860 \
-v sd_models:/data \
abraham-ai/automatic1111-webui:latest
echo "✅ Stable Diffusion updated."
else
echo "⚠️ Stable Diffusion is not running, skipping update."
fi
# NOTE(review): ComfyUI is updated twice in this handler — first from the
# prebuilt ghcr image here, then rebuilt from source at the bottom. Both use
# the container name 'comfyui', so the custom build wins; keep one of the two.
echo "🔁 Updating ComfyUI..."
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker pull ghcr.io/cmdr2/comfyui:latest
docker run -d --gpus all \
--name comfyui \
-p 8188:8188 \
-v comfyui_data:/workspace \
ghcr.io/cmdr2/comfyui:latest
echo "✅ ComfyUI updated and restarted."
# Rebuild the locally built images from source where a checkout exists.
echo "🔁 Updating Whisper.cpp (custom GPU build)..."
WHISPER_DIR=~/ai-stack/whisper.cpp
if [ -d "$WHISPER_DIR" ]; then
cd "$WHISPER_DIR"
git pull origin master
docker stop whispercpp-gpu 2>/dev/null || true
docker rm whispercpp-gpu 2>/dev/null || true
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "✅ Whisper.cpp GPU image rebuilt."
else
echo "⚠️ Whisper.cpp directory not found — skipping update."
fi
echo "🔁 Updating Stable Diffusion WebUI..."
SD_DIR=~/ai-stack/stable-diffusion
if [ -d "$SD_DIR" ]; then
cd "$SD_DIR"
git pull origin master
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .
docker run -d --gpus all \
--name stable-diffusion \
-p 7860:7860 \
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
a1111-webui
echo "✅ Stable Diffusion WebUI updated and restarted."
else
echo "⚠️ Stable Diffusion directory not found — skipping update."
fi
echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
cd "$TG_DIR"
git pull origin master
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
docker run -d --gpus all \
--name tg-webui \
-p 7861:7861 \
-v ~/ai-stack/tg-models:/app/models \
tg-webui
echo "✅ Text-generation-webui updated and restarted."
else
echo "⚠️ Text-generation-webui directory not found — skipping update."
fi
echo "🔁 Updating ComfyUI (custom build)..."
COMFYUI_DIR=~/ai-stack/comfyui
if [ -d "$COMFYUI_DIR" ]; then
cd "$COMFYUI_DIR"
git pull origin master
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
docker run -d --gpus all \
--name comfyui \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "✅ ComfyUI updated and restarted."
else
echo "⚠️ ComfyUI directory not found — skipping update."
fi
echo "✅ Update complete!"
exit 0
fi
# Remove a container by name whether or not it is running; silently succeed
# when it does not exist. NOTE(review): appears to be unreferenced in this
# script (install sections call docker stop/rm directly) — candidate for use
# or removal.
clean_container() {
  local target="$1"
  docker rm -f "$target" 2>/dev/null || true
}
# Open WebUI + Ollama: skip when already running, otherwise prompt to install.
if [[ "$1" != "--update" ]]; then
if check_container_running "open-webui"; then
echo "✅ Skipping Open WebUI + Ollama install (already running)."
else
read -p " Install Open WebUI + Ollama? [y/N]: " openui
if [[ "$openui" =~ ^[Yy]$ ]]; then
echo "🧠 Setting up Open WebUI + Ollama..."
docker stop open-webui 2>/dev/null || true
docker rm open-webui 2>/dev/null || true
docker run -d -p 3000:8080 --gpus all \
-v ollama:/root/.ollama \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
ghcr.io/open-webui/open-webui:ollama
echo "🌐 Open WebUI running at http://localhost:3000"
fi
fi
fi
# JupyterLab: skip when already running, otherwise prompt to install.
if [[ "$1" != "--update" ]]; then
if check_container_running "jupyterlab"; then
echo "✅ Skipping JupyterLab install (already running)."
else
read -p " Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
echo "📔 Installing JupyterLab..."
docker stop jupyterlab 2>/dev/null || true
docker rm jupyterlab 2>/dev/null || true
docker run -d \
--name jupyterlab \
--gpus all \
-p 8888:8888 \
-v jupyter_data:/home/jovyan/work \
jupyter/scipy-notebook:latest
# NOTE(review): the token may not be in the logs yet this soon after startup;
# a short retry loop would be more reliable.
echo "🔍 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
mkdir -p ~/ai-stack
docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
fi
fi
fi
# Whisper.cpp (custom GPU build): skip when already running, else prompt.
if [[ "$1" != "--update" ]]; then
if check_container_running "whispercpp-gpu"; then
echo "✅ Skipping Whisper.cpp install (already running)."
else
read -p " Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
echo "🔊 Cloning and building Whisper.cpp with GPU support..."
WHISPER_DIR=~/ai-stack/whisper.cpp
mkdir -p "$WHISPER_DIR"
# '|| true' tolerates an existing clone but also masks real clone failures.
git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true
# Unquoted EOF: '\\' collapses to '\' in the written Dockerfile.
# NOTE(review): recent whisper.cpp uses GGML_CUDA (formerly WHISPER_CUBLAS)
# as the CMake switch; -DWITH_CUBLAS may be silently ignored — confirm
# against the upstream README.
cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
RUN apt-get update && \\
apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
apt-get clean
WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF
echo "🐳 Building Docker image 'whispercpp-gpu'..."
cd "$WHISPER_DIR"
docker build -t whispercpp-gpu -f Dockerfile.gpu .
echo "📂 Creating ~/ai-stack/audio and models directories"
mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models
echo "✅ Whisper.cpp built successfully."
echo "To run it:"
echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
fi
fi
fi
# Stable Diffusion (AUTOMATIC1111 custom GPU build): skip when already
# running, else prompt, build the image from source, and start the container.
if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "✅ Skipping Stable Diffusion install (already running)."
  else
    read -p " Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."
      SD_DIR=~/ai-stack/stable-diffusion
      mkdir -p ~/ai-stack
      # '|| true' keeps the script going when the repo is already cloned.
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"
      echo "📄 Writing Dockerfile..."
      # BUG FIX in the CMD below: this image installs only 'python3' (no
      # 'python' shim, no venv), so the original "python launch.py" CMD
      # failed at container start; use python3 explicitly.
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
pip install -r requirements_versions.txt && \
pip install -r requirements.txt
EXPOSE 7860
CMD ["/bin/bash", "-c", "python3 launch.py --listen --xformers --api"]
EOF
      echo "🐳 Building Docker image 'a1111-webui'..."
      docker stop stable-diffusion 2>/dev/null || true
      docker rm stable-diffusion 2>/dev/null || true
      docker build -t a1111-webui .
      echo "📂 Mounting model directory from ~/ai-stack/sd-models"
      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs
      echo "🚀 Running Stable Diffusion WebUI on port 7860..."
      # Mount targets live under /app, matching the image's WORKDIR/COPY.
      docker run -d --gpus all \
        --name stable-diffusion \
        -p 7860:7860 \
        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
        a1111-webui
      echo "🌐 Access at http://localhost:7860"
    fi
  fi
fi
# Text-generation-webui: skip when already running, else prompt, build the
# image from source, and start the container with the host model directory.
if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "✅ Skipping Text-generation-webui install (already running)."
  else
    read -p " Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "🧠 Cloning and building Text-generation-webui..."
      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"
      echo "📄 Writing Dockerfile..."
      # BUG FIX in the CMD below: the CUDA/Ubuntu 22.04 base image installs
      # only 'python3' (no 'python' shim), so the original "python server.py"
      # CMD failed at container start; use python3 explicitly.
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip build-essential && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && \
(pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)
EXPOSE 7861
CMD ["/bin/bash", "-c", "python3 server.py --listen --api"]
EOF
      echo "🐳 Building Docker image 'tg-webui'..."
      docker stop tg-webui 2>/dev/null || true
      docker rm tg-webui 2>/dev/null || true
      docker build -t tg-webui .
      echo "📂 Mounting model directory from ~/ai-stack/tg-models"
      mkdir -p ~/ai-stack/tg-models
      echo "🚀 Running text-generation-webui on port 7861..."
      docker run -d --gpus all \
        --name tg-webui \
        -p 7861:7861 \
        -v ~/ai-stack/tg-models:/app/models \
        tg-webui
      echo "🌐 Access at http://localhost:7861"
    fi
  fi
fi
# ComfyUI (custom build): skip when already running, else prompt, build from
# source, and start the container.
if [[ "$1" != "--update" ]]; then
if check_container_running "comfyui"; then
echo "✅ Skipping ComfyUI install (already running)."
else
read -p " Install ComfyUI (custom Docker build)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
echo "🧩 Cloning and building ComfyUI..."
COMFYUI_DIR=~/ai-stack/comfyui
git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
cd "$COMFYUI_DIR"
echo "📄 Writing Dockerfile..."
cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
apt-get clean
WORKDIR /app
COPY . /app
RUN pip install --upgrade pip && pip install -r requirements.txt
EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF
echo "🐳 Building Docker image 'comfyui-custom'..."
docker stop comfyui 2>/dev/null || true
docker rm comfyui 2>/dev/null || true
docker build -t comfyui-custom .
echo "📂 Preparing workspace volume..."
mkdir -p ~/ai-stack/comfyui/workspace
echo "🚀 Running ComfyUI on port 8188..."
# Host workspace is mapped onto the app's output directory.
docker run -d --gpus all \
--name comfyui \
-p 8188:8188 \
-v ~/ai-stack/comfyui/workspace:/app/output \
comfyui-custom
echo "🌐 ComfyUI running at http://localhost:8188"
fi
fi
fi
echo "✅ AI stack setup complete."

18
setup-nvidia-toolkit.sh Normal file
View File

@ -0,0 +1,18 @@
#!/bin/bash
# setup-nvidia-toolkit.sh — install the NVIDIA Container Toolkit and wire it
# into Docker so containers can be run with --gpus. Debian/Ubuntu (apt) only;
# requires sudo.
# Fail fast: abort on any command or pipeline stage failing, and on unset vars
# (the original had no strict mode, so a failed repo setup still ran apt).
set -euo pipefail

# Set up the repository (signed apt source for the container toolkit)
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

# Install the toolkit
sudo apt update
sudo apt install -y nvidia-container-toolkit
# Configure Docker to use the nvidia runtime
sudo nvidia-ctk runtime configure --runtime=docker
# Restart Docker so the new runtime takes effect
sudo systemctl restart docker

34
start-ai-stack.sh Normal file
View File

@ -0,0 +1,34 @@
#!/bin/bash
# start-ai-stack.sh — start any stopped AI-stack containers created by
# setup-ai-stack.sh; abort on unexpected errors.
set -e
echo "🚀 Starting AI stack containers..."
# Start a container by name if it exists but is not currently running.
#   $1 - container name
# Prints a status line in every case; a missing container is not an error.
# (Removed the unused 'local cmd=$2' — no caller passes a second argument.)
start_container() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "$name already running."
  elif docker container ls -a --format '{{.Names}}' | grep -q "^$name$"; then
    echo "🔁 Starting $name..."
    docker start "$name"
  else
    echo "$name not found — skipping."
  fi
}
# Core tools (prebuilt)
start_container open-webui
start_container jupyterlab
start_container comfyui
start_container whisper
start_container stable-diffusion
start_container tg-webui
# Custom-built (may need rebuilding if deleted)
# BUG FIX: removed a duplicate 'start_container tg-webui' — it was already
# started above.
# NOTE(review): 'whispercpp-gpu' and 'a1111-webui' are IMAGE names in the
# setup scripts, not container names (the a1111 container is named
# 'stable-diffusion'), so these normally print "not found — skipping".
# Confirm the intended container names.
start_container whispercpp-gpu
start_container a1111-webui
echo "✅ All available containers started."