#!/bin/bash
set -e

mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
LOG_FILE=~/ai-stack/setup.log
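# Mirror all script output to the terminal while appending it to the log file.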
exec > >(tee -a "$LOG_FILE") 2>&1

echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo
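
# Create the shared Docker network once (idempotent); every container below
# joins it so the services can reach each other by container name.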
docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net
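
# Helper: succeeds (returns 0) only if a container with the exact given name
# is currently running.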
check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "🟢 Container '$name' is already running."
    return 0
  fi
  return 1
}

# Handle UPDATE (--update option passed) -----------------------------------------------------------------UPDATE SECTION BELOW
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating containers..."

  # jupyterLab
  if check_container_running "jupyterlab"; then
    echo "🧼 Updating JupyterLab..."
    docker stop jupyterlab
    docker rm jupyterlab
    docker pull jupyter/scipy-notebook:latest
    docker run -d \
      --name jupyterlab \
      --network ai-stack-net \
      --gpus all \
      -p 8888:8888 \
      -v jupyter_data:/home/jovyan/work \
      jupyter/scipy-notebook:latest
    echo "✅ JupyterLab updated."
  else
    echo "⚠️ JupyterLab is not running, skipping update."
  fi

  # pipelines
  if check_container_running "pipelines"; then
    echo "🧼 Updating Pipelines..."
    docker stop pipelines
    docker rm pipelines
    docker pull ghcr.io/open-webui/pipelines:main
    docker run -d \
      --name pipelines \
      --network ai-stack-net \
      -p 9099:9099 \
      -v ~/ai-stack/pipelines:/app/pipelines \
      --restart unless-stopped \
      ghcr.io/open-webui/pipelines:main
    echo "✅ Pipelines running at http://localhost:9099"
  else
    echo "⚠️ Pipelines not running, skipping update."
  fi

  # open-webui
  if check_container_running "open-webui"; then
    echo "🧼 Updating open-webui..."
    docker stop open-webui
    docker rm open-webui
    docker pull ghcr.io/open-webui/open-webui:ollama
    docker run -d -p 3000:8080 --gpus all \
      -v ollama:/root/.ollama \
      -v open-webui:/app/backend/data \
      --name open-webui \
      --network ai-stack-net \
      --restart always \
      ghcr.io/open-webui/open-webui:ollama
    echo "✅ Open WebUI running at http://localhost:3000"
  else
    echo "⚠️ Open WebUI not running, skipping update."
  fi

  # localai (GPU-support)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
    docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
    docker run -d \
      --name localai \
      --network ai-stack-net \
      --gpus all \
      -p 8080:8080 \
      -v ~/ai-stack/localai/models:/models \
      -v ~/ai-stack/localai/config:/config \
      -e ENABLE_BACKENDS=llama-cuda,ollama \
      -e INCLUDE_DEFAULT_MODELS=true \
      -e AUTOLOAD_MODELS=true \
      -e MODEL_PATH=/models \
      --restart unless-stopped \
      localai/localai:latest-aio-gpu-nvidia-cuda-12
    echo "✅ LocalAI running at http://localhost:8080"
  else
    echo "⚠️ LocalAI not running, skipping update (run the installer first)."
  fi

  # whisper-cpp
  echo "🔁 Updating Whisper.cpp (custom GPU build)..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker stop whispercpp-gpu 2>/dev/null || true
    docker rm whispercpp-gpu 2>/dev/null || true
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ Whisper.cpp GPU image rebuilt."
  else
    echo "⚠️ Whisper.cpp directory not found — skipping update."
  fi

  # stable-diffusion
  echo "🔁 Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker build -t a1111-webui .
    docker run -d --gpus all \
      --name stable-diffusion \
      --network ai-stack-net \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion WebUI updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found — skipping update."
  fi

  # tg-webui
  echo "🔁 Updating Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  if [ -d "$TG_DIR" ]; then
    cd "$TG_DIR"
    git pull origin master
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
      --network ai-stack-net \
      -p 7861:7861 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
  else
    echo "⚠️ Text-generation-webui directory not found — skipping update."
  fi

  # comfyui
  echo "🔁 Updating ComfyUI (custom build)..."
  COMFYUI_DIR=~/ai-stack/comfyui
  if [ -d "$COMFYUI_DIR" ]; then
    cd "$COMFYUI_DIR"
    git pull origin master
    docker stop comfyui 2>/dev/null || true
    docker rm comfyui 2>/dev/null || true
    docker build -t comfyui-custom .
    docker run -d --gpus all \
      --name comfyui \
      --network ai-stack-net \
      -p 8188:8188 \
      -v ~/ai-stack/comfyui/workspace:/app/output \
      comfyui-custom
    echo "✅ ComfyUI updated and restarted."
  else
    echo "⚠️ ComfyUI directory not found — skipping update."
  fi

  echo "✅ Update complete!"
  exit 0
fi

# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}
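
# Note: clean_container is currently unused; the install steps below call
# docker stop / docker rm directly instead.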


# Handle INSTALL (no --update passed) ------------------------------------------------------------------------INSTALL SECTION BELOW


# open-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "open-webui"; then
    echo "✅ Skipping Open WebUI + Ollama install (already running)."
  else
    read -p "➕ Install Open WebUI + Ollama? [y/N]: " openui
    if [[ "$openui" =~ ^[Yy]$ ]]; then
      echo "🧠 Setting up Open WebUI + Ollama..."
      docker stop open-webui 2>/dev/null || true
      docker rm open-webui 2>/dev/null || true
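      # Host port 3000 maps to the WebUI's internal port 8080; the bundled
      # Ollama keeps its models in the named volume "ollama".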
      docker run -d -p 3000:8080 --gpus all \
        -v ollama:/root/.ollama \
        -v open-webui:/app/backend/data \
        --name open-webui \
        --network ai-stack-net \
        --restart always \
        ghcr.io/open-webui/open-webui:ollama
      echo "🌐 Open WebUI running at http://localhost:3000"
    fi
  fi
fi


# jupyterlab INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "jupyterlab"; then
    echo "✅ Skipping JupyterLab install (already running)."
  else
    read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
    if [[ "$jupyter" =~ ^[Yy]$ ]]; then
      echo "📔 Installing JupyterLab..."
      docker stop jupyterlab 2>/dev/null || true
      docker rm jupyterlab 2>/dev/null || true
      docker run -d \
        --name jupyterlab \
        --network ai-stack-net \
        --gpus all \
        -p 8888:8888 \
        -v jupyter_data:/home/jovyan/work \
        jupyter/scipy-notebook:latest
      echo "🔍 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
      mkdir -p ~/ai-stack
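      # Give the server a moment to start so the token line has appeared in
      # the container log before we grep for it.
      sleep 5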
      docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
    fi
  fi
fi


# whisper-cpp-gpu custom INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "whispercpp-gpu"; then
    echo "✅ Skipping Whisper.cpp install (already running)."
  else
    read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
    if [[ "$whisper" =~ ^[Yy]$ ]]; then
      echo "🔊 Cloning and building Whisper.cpp with GPU support..."

      WHISPER_DIR=~/ai-stack/whisper.cpp
      mkdir -p "$WHISPER_DIR"
      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

      cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
# The -devel image is needed here: the -base image ships no nvcc, so the CUDA
# kernels could not be compiled.
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04

RUN apt-get update && \\
    apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
    apt-get clean

WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
# WHISPER_CUBLAS enables the cuBLAS GPU backend (recent whisper.cpp revisions
# use -DGGML_CUDA=1 instead).
RUN cmake -DWHISPER_CUBLAS=ON . && make -j

CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

      echo "🐳 Building Docker image 'whispercpp-gpu'..."
      cd "$WHISPER_DIR"
      docker build -t whispercpp-gpu -f Dockerfile.gpu .

      echo "📂 Creating ~/ai-stack/audio and models directories"
      mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models

      echo "✅ Whisper.cpp built successfully."
      echo "To run it:"
      echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
    fi
  fi
fi


# stable-diffusion INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "✅ Skipping Stable Diffusion install (already running)."
  else
    read -p "➕ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."

      SD_DIR=~/ai-stack/stable-diffusion
      mkdir -p ~/ai-stack
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt

EXPOSE 7860
# Ubuntu 22.04 provides python3 only (no "python" alias), so call python3.
CMD ["/bin/bash", "-c", "python3 launch.py --listen --xformers --api"]
EOF
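
      # Note: on first start launch.py installs any remaining Python
      # dependencies (torch, xformers, etc.), so the first run can take a while.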
      echo "🐳 Building Docker image 'a1111-webui'..."
      docker stop stable-diffusion 2>/dev/null || true
      docker rm stable-diffusion 2>/dev/null || true
      docker build -t a1111-webui .

      echo "📂 Mounting model directory from ~/ai-stack/sd-models"
      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs

      echo "🚀 Running Stable Diffusion WebUI on port 7860..."
      docker run -d --gpus all \
        --name stable-diffusion \
        --network ai-stack-net \
        -p 7860:7860 \
        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
        a1111-webui

      echo "🌐 Access at http://localhost:7860"
    fi
  fi
fi


# tg-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "✅ Skipping Text-generation-webui install (already running)."
  else
    read -p "➕ Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "🧠 Cloning and building Text-generation-webui..."

      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip build-essential && \
    apt-get clean

WORKDIR /app
COPY . /app

# Fall back to a minimal package set if the pinned requirements fail to install.
RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)

EXPOSE 7861
# server.py listens on 7860 by default; pin 7861 to match the port mapping,
# and use python3 (Ubuntu 22.04 has no "python" alias).
CMD ["/bin/bash", "-c", "python3 server.py --listen --listen-port 7861 --api"]
EOF

      echo "🐳 Building Docker image 'tg-webui'..."
      docker stop tg-webui 2>/dev/null || true
      docker rm tg-webui 2>/dev/null || true
      docker build -t tg-webui .

      echo "📂 Mounting model directory from ~/ai-stack/tg-models"
      mkdir -p ~/ai-stack/tg-models

      echo "🚀 Running text-generation-webui on port 7861..."
      docker run -d --gpus all \
        --name tg-webui \
        --network ai-stack-net \
        -p 7861:7861 \
        -v ~/ai-stack/tg-models:/app/models \
        tg-webui

      echo "🌐 Access at http://localhost:7861"
    fi
  fi
fi


# comfy ui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "comfyui"; then
    echo "✅ Skipping ComfyUI install (already running)."
  else
    read -p "➕ Install ComfyUI (custom Docker build)? [y/N]: " comfy
    if [[ "$comfy" =~ ^[Yy]$ ]]; then
      echo "🧩 Cloning and building ComfyUI..."

      COMFYUI_DIR=~/ai-stack/comfyui
      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
      cd "$COMFYUI_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && pip install -r requirements.txt

EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF
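      # Only /app/output is persisted by the volume mount below; model
      # checkpoints live under /app/models inside the container unless you add
      # another -v mount for them.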
      echo "🐳 Building Docker image 'comfyui-custom'..."
      docker stop comfyui 2>/dev/null || true
      docker rm comfyui 2>/dev/null || true
      docker build -t comfyui-custom .

      echo "📂 Preparing workspace volume..."
      mkdir -p ~/ai-stack/comfyui/workspace

      echo "🚀 Running ComfyUI on port 8188..."
      docker run -d --gpus all \
        --name comfyui \
        --network ai-stack-net \
        -p 8188:8188 \
        -v ~/ai-stack/comfyui/workspace:/app/output \
        comfyui-custom

      echo "🌐 ComfyUI running at http://localhost:8188"
    fi
  fi
fi


# localai INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
  else
    read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."

      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
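
      # Assumption: the AIO image reads models from /build/models by default,
      # hence the mount target below (the update path above mounts /models and
      # sets MODEL_PATH=/models instead).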
      docker run -d \
        --name localai \
        --gpus all \
        --network ai-stack-net \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/build/models \
        -v ~/ai-stack/localai/config:/config \
        -e ENABLE_BACKENDS=llama-cuda,ollama \
        -e INCLUDE_DEFAULT_MODELS=true \
        -e AUTOLOAD_MODELS=true \
        --restart unless-stopped \
        localai/localai:latest-aio-gpu-nvidia-cuda-12

      echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi


# pipelines INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "pipelines"; then
    echo "✅ Skipping pipelines install (already running)."
  else
    read -p "➕ Install pipelines for open-webui? [y/N]: " pipelines_prompt
    if [[ "$pipelines_prompt" =~ ^[Yy]$ ]]; then
      mkdir -p ~/ai-stack/pipelines
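      # Pipelines shares ai-stack-net with Open WebUI, so the WebUI can reach
      # it at http://pipelines:9099 when added as an API connection there.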
      docker run -d \
        --name pipelines \
        --network ai-stack-net \
        -p 9099:9099 \
        -v ~/ai-stack/pipelines:/app/pipelines \
        --restart unless-stopped \
        ghcr.io/open-webui/pipelines:main
      echo "🌐 Pipelines running at http://localhost:9099"
    fi
  fi
fi


echo "✅ AI stack setup complete."