added pipelines for additional features for open-webui
This commit is contained in:
parent 63a1527b8f
commit 26a8c501ec
@@ -2,6 +2,8 @@
set -e

mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1

@@ -9,26 +11,7 @@ echo "📦 AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo

# Containers to manage
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui")

# Handle update option
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating running containers..."
  for name in "${CONTAINERS[@]}"; do
    if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
      echo "🧼 Updating $name..."
      docker pull "$(docker inspect --format='{{.Config.Image}}' $name)"
      docker stop $name
      docker rm $name
    else
      echo "⚠️ Container $name is not running, skipping."
    fi
  done
  echo "✅ Update complete. Rerun the script without --update to restart containers."
  exit 0
fi
docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net
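# The user-defined bridge network gives every container DNS by name (e.g. open-webui can reach
# http://pipelines:9099). An illustrative way to see which containers have joined it, not part
# of the setup flow itself:
#   docker network inspect -f '{{range .Containers}}{{.Name}} {{end}}' ai-stack-net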
check_container_running() {
  local name="$1"
@@ -39,49 +22,261 @@ check_container_running() {
  return 1
}

# Handle update option
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating containers..."

  # JupyterLab
  if check_container_running "jupyterlab"; then
    echo "🧼 Updating JupyterLab..."
    docker stop jupyterlab
    docker rm jupyterlab
    docker pull jupyter/scipy-notebook:latest
    docker run -d \
      --name jupyterlab \
      --network ai-stack-net \
      --gpus all \
      -p 8888:8888 \
      -v jupyter_data:/home/jovyan/work \
      jupyter/scipy-notebook:latest
    echo "✅ JupyterLab updated."
  else
    echo "⚠️ JupyterLab is not running, skipping update."
  fi

  # --- LocalAI custom build and update ---
  LOCALAI_REPO=~/git-repos/LocalAI
  DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile"

  echo "🔁 Rebuilding LocalAI (custom)..."

  # Backup and remove existing repo
  if [ -d "$LOCALAI_REPO" ]; then
    echo "📦 Backing up existing LocalAI repo..."
    mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)"
  fi

  # Clone latest LocalAI
  git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO"

  # Remove default Docker-related files
  cd "$LOCALAI_REPO"
  rm -f Dockerfile Dockerfile.aio docker-compose.yaml

  # Prepare and build LocalAI binary
  # NOTE: This portion required quite a few custom additions to build correctly. If you are
  # running this on a machine that hasn't built it before, it is probably better to wait for
  # the Docker Compose method.
  # Docker-compose methods will be coming soon.
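  # A rough sketch of what that Compose setup might look like (hypothetical; service names,
  # image tag and volumes are assumptions and this is not wired into the script yet):
  #
  #   services:
  #     localai:
  #       image: localai/localai:latest-aio-gpu-nvidia-cuda-12
  #       networks: [ai-stack-net]
  #       ports: ["8080:8080"]
  #       volumes:
  #         - ~/ai-stack/localai/models:/models
  #         - ~/ai-stack/localai/config:/config
  #   networks:
  #     ai-stack-net:
  #       external: true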
echo "🔧 Running make prepare..."
|
||||
make prepare
|
||||
|
||||
# echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
|
||||
# make build \
|
||||
# TAGS="localrecall,localagi,gpu" \
|
||||
# BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
|
||||
# GPU_SUPPORT="cuda"
|
||||
|
||||
|
||||
# # Download your custom Dockerfile
|
||||
# echo "📥 Downloading custom Dockerfile..."
|
||||
# curl -fsSL "$DOCKERFILE_URL" -o Dockerfile
|
||||
|
||||
# # Build the container
|
||||
# echo "🔨 Building custom LocalAI Docker image..."
|
||||
# docker build -t localai:custom .
|
||||
|
||||
# # Remove existing container
|
||||
# docker stop localai 2>/dev/null || true
|
||||
# docker rm localai 2>/dev/null || true
|
||||
|
||||
# # Create model and config directories if needed
|
||||
# mkdir -p ~/ai-stack/localai/models
|
||||
# mkdir -p ~/ai-stack/localai/config
|
||||
|
||||
# # Run new container
|
||||
# docker run -d \
|
||||
# --name localai \
|
||||
# --gpus all \
|
||||
# -p 8080:8080 \
|
||||
# -v ~/ai-stack/localai/models:/models \
|
||||
# -v ~/ai-stack/localai/config:/config \
|
||||
# -e ENABLE_BACKENDS=llama-cuda,ollama \
|
||||
# -e INCLUDE_DEFAULT_MODELS=true \
|
||||
# -e AUTOLOAD_MODELS=true \
|
||||
# -e MODEL_PATH=/models \
|
||||
# --restart unless-stopped \
|
||||
# localai:custom
|
||||
|
||||
# echo "✅ LocalAI (custom) is now running at http://localhost:8080"
|
||||
|
||||
|
||||
# 🔁 Updating LocalAI (GPU build)
|
||||
if check_container_running "localai"; then
|
||||
echo "🧠 Updating LocalAI..."
|
||||
docker stop localai
|
||||
docker rm localai
|
||||
else
|
||||
echo "➕ Installing LocalAI..."
|
||||
fi
|
||||
|
||||
docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
docker run -d \
|
||||
--name localai \
|
||||
--gpus all \
|
||||
-p 8080:8080 \
|
||||
-v ~/ai-stack/localai/models:/models \
|
||||
-v ~/ai-stack/localai/config:/config \
|
||||
-e ENABLE_BACKENDS=llama-cuda,ollama \
|
||||
-e INCLUDE_DEFAULT_MODELS=true \
|
||||
-e AUTOLOAD_MODELS=true \
|
||||
-e MODEL_PATH=/models \
|
||||
--restart unless-stopped \
|
||||
localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
echo "✅ LocalAI running at http://localhost:8080"
|
||||
|
||||
|
||||
|
||||
echo "🔁 Updating Whisper.cpp (custom GPU build)..."
|
||||
WHISPER_DIR=~/ai-stack/whisper.cpp
|
||||
if [ -d "$WHISPER_DIR" ]; then
|
||||
cd "$WHISPER_DIR"
|
||||
git pull origin master
|
||||
docker stop whispercpp-gpu 2>/dev/null || true
|
||||
docker rm whispercpp-gpu 2>/dev/null || true
|
||||
docker build -t whispercpp-gpu -f Dockerfile.gpu .
|
||||
echo "✅ Whisper.cpp GPU image rebuilt."
|
||||
else
|
||||
echo "⚠️ Whisper.cpp directory not found — skipping update."
|
||||
fi
|
||||
|
||||
echo "🔁 Updating Stable Diffusion WebUI..."
|
||||
SD_DIR=~/ai-stack/stable-diffusion
|
||||
if [ -d "$SD_DIR" ]; then
|
||||
cd "$SD_DIR"
|
||||
git pull origin master
|
||||
docker stop stable-diffusion 2>/dev/null || true
|
||||
docker rm stable-diffusion 2>/dev/null || true
|
||||
docker build -t a1111-webui .
|
||||
docker run -d --gpus all \
|
||||
--name stable-diffusion \
|
||||
--network ai-stack-net \
|
||||
-p 7860:7860 \
|
||||
-v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
|
||||
-v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
|
||||
a1111-webui
|
||||
echo "✅ Stable Diffusion WebUI updated and restarted."
|
||||
else
|
||||
echo "⚠️ Stable Diffusion directory not found — skipping update."
|
||||
fi
|
||||
|
||||
echo "🔁 Updating Text-generation-webui..."
|
||||
TG_DIR=~/ai-stack/tg-webui
|
||||
if [ -d "$TG_DIR" ]; then
|
||||
cd "$TG_DIR"
|
||||
git pull origin master
|
||||
docker stop tg-webui 2>/dev/null || true
|
||||
docker rm tg-webui 2>/dev/null || true
|
||||
docker build -t tg-webui .
|
||||
docker run -d --gpus all \
|
||||
--name tg-webui \
|
||||
--network ai-stack-net \
|
||||
-p 7861:7861 \
|
||||
-v ~/ai-stack/tg-models:/app/models \
|
||||
tg-webui
|
||||
echo "✅ Text-generation-webui updated and restarted."
|
||||
else
|
||||
echo "⚠️ Text-generation-webui directory not found — skipping update."
|
||||
fi
|
||||
|
||||
echo "🔁 Updating ComfyUI (custom build)..."
|
||||
COMFYUI_DIR=~/ai-stack/comfyui
|
||||
if [ -d "$COMFYUI_DIR" ]; then
|
||||
cd "$COMFYUI_DIR"
|
||||
git pull origin master
|
||||
docker stop comfyui 2>/dev/null || true
|
||||
docker rm comfyui 2>/dev/null || true
|
||||
docker build -t comfyui-custom .
|
||||
docker run -d --gpus all \
|
||||
--name comfyui \
|
||||
--network ai-stack-net \
|
||||
-p 8188:8188 \
|
||||
-v ~/ai-stack/comfyui/workspace:/app/output \
|
||||
comfyui-custom
|
||||
echo "✅ ComfyUI updated and restarted."
|
||||
else
|
||||
echo "⚠️ ComfyUI directory not found — skipping update."
|
||||
fi
|
||||
|
||||
echo "✅ Update complete!"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}

# 🧠 Open WebUI + Ollama
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
  -v ollama:/root/.ollama \
  -v open-webui:/app/backend/data \
  --name open-webui \
  --restart always \
  ghcr.io/open-webui/open-webui:ollama

# ➕ JupyterLab
read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
  echo "📔 Installing JupyterLab..."
  clean_container jupyterlab
  docker run -d \
    --name jupyterlab \
    --gpus all \
    -p 8888:8888 \
    -v jupyter_data:/home/jovyan/work \
    jupyter/scipy-notebook:latest

  echo "🔍 Jupyter token:"
  docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
  echo "📁 Token saved to ~/ai-stack/jupyter_token.txt"
if [[ "$1" != "--update" ]]; then
  if check_container_running "open-webui"; then
    echo "✅ Skipping Open WebUI + Ollama install (already running)."
  else
    read -p "➕ Install Open WebUI + Ollama? [y/N]: " openui
    if [[ "$openui" =~ ^[Yy]$ ]]; then
      echo "🧠 Setting up Open WebUI + Ollama..."
      docker stop open-webui 2>/dev/null || true
      docker rm open-webui 2>/dev/null || true
      docker run -d -p 3000:8080 --gpus all \
        -v ollama:/root/.ollama \
        -v open-webui:/app/backend/data \
        --name open-webui \
        --network ai-stack-net \
        --restart always \
        ghcr.io/open-webui/open-webui:ollama
      echo "🌐 Open WebUI running at http://localhost:3000"
    fi
  fi
fi

# ➕ Whisper.cpp with GPU Support (Build Locally)
read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
  echo "🔊 Cloning and building Whisper.cpp with GPU support..."

  WHISPER_DIR=~/ai-stack/whisper.cpp
  mkdir -p ~/ai-stack
  git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

  cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
if [[ "$1" != "--update" ]]; then
  if check_container_running "jupyterlab"; then
    echo "✅ Skipping JupyterLab install (already running)."
  else
    read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
    if [[ "$jupyter" =~ ^[Yy]$ ]]; then
      echo "📔 Installing JupyterLab..."
      docker stop jupyterlab 2>/dev/null || true
      docker rm jupyterlab 2>/dev/null || true
      docker run -d \
        --name jupyterlab \
        --network ai-stack-net \
        --gpus all \
        -p 8888:8888 \
        -v jupyter_data:/home/jovyan/work \
        jupyter/scipy-notebook:latest
      echo "🔍 Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
      mkdir -p ~/ai-stack
      docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
    fi
  fi
fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "whispercpp-gpu"; then
    echo "✅ Skipping Whisper.cpp install (already running)."
  else
    read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
    if [[ "$whisper" =~ ^[Yy]$ ]]; then
      echo "🔊 Cloning and building Whisper.cpp with GPU support..."

      WHISPER_DIR=~/ai-stack/whisper.cpp
      mkdir -p "$WHISPER_DIR"
      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

      cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

RUN apt-get update && \\
@@ -96,121 +291,91 @@ RUN cmake -DWITH_CUBLAS=ON . && make -j
CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

echo "🐳 Building Docker image 'whispercpp-gpu'..."
docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"
echo "🐳 Building Docker image 'whispercpp-gpu'..."
cd "$WHISPER_DIR"
docker build -t whispercpp-gpu -f Dockerfile.gpu .

echo "📂 Setting up input folder at ~/ai-stack/audio"
mkdir -p ~/ai-stack/audio
echo "📂 Creating ~/ai-stack/audio and models directories"
mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models

echo "✅ To run whisper on a file:"
echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"

echo "📢 Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
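# One illustrative way to fetch a model (the whisper.cpp repo ships a download helper, but the
# exact script name and model list can vary between checkouts — adjust as needed):
#   bash "$WHISPER_DIR/models/download-ggml-model.sh" large
# or manually place a ggml-*.bin file from the Hugging Face page above into
# ~/ai-stack/whisper.cpp/models/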
fi

if [[ "$1" == "--update" ]]; then
  # ...existing container update loop

  echo "🔁 Updating whisper.cpp..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ whispercpp-gpu rebuilt with latest source."
  else
    echo "⚠️ whisper.cpp not found at $WHISPER_DIR — skipping."
    echo "✅ Whisper.cpp built successfully."
    echo "To run it:"
    echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
  fi
fi
fi

# ➕ Stable Diffusion (AUTOMATIC1111 custom GPU build)
read -p "➕ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
  echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."

  SD_DIR=~/ai-stack/stable-diffusion
  mkdir -p ~/ai-stack
  git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
  cd "$SD_DIR"
  cat > ~/ai-stack/stable-diffusion/Dockerfile <<'EOF'
if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "✅ Skipping Stable Diffusion install (already running)."
  else
    read -p "➕ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."

      SD_DIR=~/ai-stack/stable-diffusion
      mkdir -p ~/ai-stack
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

# Install dependencies
RUN apt-get update && \
    apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

# Create venv and install deps
RUN python3 -m venv venv && \
    . venv/bin/activate && \
    pip install --upgrade pip && \
RUN pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt

EXPOSE 7860

CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]

CMD ["/bin/bash", "-c", "python launch.py --listen --xformers --api"]
EOF
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .

echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models
echo "🐳 Building Docker image 'a1111-webui'..."
docker stop stable-diffusion 2>/dev/null || true
docker rm stable-diffusion 2>/dev/null || true
docker build -t a1111-webui .

echo "🚀 Starting Stable Diffusion WebUI..."
docker run -d --gpus all \
  --name stable-diffusion \
  -p 7860:7860 \
  -v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
  -v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
  a1111-webui
echo "📂 Mounting model directory from ~/ai-stack/sd-models"
mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs

echo "🌐 Access Stable Diffusion WebUI at http://localhost:7860"
fi
echo "🚀 Running Stable Diffusion WebUI on port 7860..."
docker run -d --gpus all \
  --name stable-diffusion \
  --network ai-stack-net \
  -p 7860:7860 \
  -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
  -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
  a1111-webui

if [[ "$1" == "--update" ]]; then
  echo "🔁 Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker build -t a1111-webui .
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker run -d --gpus all \
      --name stable-diffusion \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
      a1111-webui
    echo "✅ Stable Diffusion updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found — skipping update."
    echo "🌐 Access at http://localhost:7860"
  fi
fi
fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "✅ Skipping Text-generation-webui install (already running)."
  else
    read -p "➕ Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "🧠 Cloning and building Text-generation-webui..."

      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"

# ➕ Text-generation-webui (custom Docker build with GPU + API)
read -p "➕ Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
  echo "🧪 Cloning and building Text-generation-webui..."

  TG_DIR=~/ai-stack/tg-webui
  git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
  cd "$TG_DIR"

  echo "📝 Creating Dockerfile for build..."
  cat > Dockerfile <<'EOF'
      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive
@@ -222,65 +387,161 @@ RUN apt-get update && \
WORKDIR /app
COPY . /app

# Install dependencies
RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)

EXPOSE 7861

CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF

echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .
echo "🐳 Building Docker image 'tg-webui'..."
docker stop tg-webui 2>/dev/null || true
docker rm tg-webui 2>/dev/null || true
docker build -t tg-webui .

echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models
echo "📂 Mounting model directory from ~/ai-stack/tg-models"
mkdir -p ~/ai-stack/tg-models

echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
  --name tg-webui \
  -p 7861:7861 \
  -v ~/ai-stack/tg-models:/app/models \
  tg-webui
echo "🚀 Running text-generation-webui on port 7861..."
docker run -d --gpus all \
  --name tg-webui \
  --network ai-stack-net \
  -p 7861:7861 \
  -v ~/ai-stack/tg-models:/app/models \
  tg-webui

echo "🌐 Access at http://localhost:7861"
echo "🌐 Access at http://localhost:7861"
fi
fi
fi

echo "🔁 Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
  cd "$TG_DIR"
  git pull origin master
  docker stop tg-webui 2>/dev/null || true
  docker rm tg-webui 2>/dev/null || true
  docker build -t tg-webui .
  docker run -d --gpus all \
    --name tg-webui \
    -p 7861:7861 \
    -v ~/ai-stack/tg-models:/app/models \
    tg-webui
  echo "✅ Text-generation-webui updated and restarted."
else
  echo "⚠️ Text-generation-webui directory not found — skipping update."

if [[ "$1" != "--update" ]]; then
  if check_container_running "comfyui"; then
    echo "✅ Skipping ComfyUI install (already running)."
  else
    read -p "➕ Install ComfyUI (custom Docker build)? [y/N]: " comfy
    if [[ "$comfy" =~ ^[Yy]$ ]]; then
      echo "🧩 Cloning and building ComfyUI..."

      COMFYUI_DIR=~/ai-stack/comfyui
      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
      cd "$COMFYUI_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && pip install -r requirements.txt

EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF

      echo "🐳 Building Docker image 'comfyui-custom'..."
      docker stop comfyui 2>/dev/null || true
      docker rm comfyui 2>/dev/null || true
      docker build -t comfyui-custom .

      echo "📂 Preparing workspace volume..."
      mkdir -p ~/ai-stack/comfyui/workspace

      echo "🚀 Running ComfyUI on port 8188..."
      docker run -d --gpus all \
        --name comfyui \
        --network ai-stack-net \
        -p 8188:8188 \
        -v ~/ai-stack/comfyui/workspace:/app/output \
        comfyui-custom

      echo "🌐 ComfyUI running at http://localhost:8188"
    fi
  fi
fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
  else
    read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."

      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

# ➕ ComfyUI (Docker - graph-based Stable Diffusion)
read -p "➕ Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
  echo "🧩 Installing ComfyUI (Docker)..."
  clean_container comfyui
  docker run -d --gpus all \
    --name comfyui \
    -p 8188:8188 \
    -v comfyui_data:/workspace \
    ghcr.io/cmdr2/comfyui:latest
  echo "🌐 ComfyUI running at http://localhost:8188"
      echo "📄 Creating default models.yaml..."
      # cat > ~/ai-stack/localai/config/models.yaml <<EOF
      # - name: phi
      #   backend: llama-cuda
      #   parameters:
      #     model: /models/phi-2.gguf
      # EOF

      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

      docker run -d \
        --name localai \
        --gpus all \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/build/models \
        -v ~/ai-stack/localai/config:/config \
        -e ENABLE_BACKENDS=llama-cuda,ollama \
        -e INCLUDE_DEFAULT_MODELS=true \
        -e AUTOLOAD_MODELS=true \
        --restart unless-stopped \
        localai/localai:latest-aio-gpu-nvidia-cuda-12

      echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi

# if [[ "$1" != "--update" ]]; then
#   if check_container_running "localai"; then
#     echo "✅ Skipping LocalAI install (already running)."
#   else
#     read -p "➕ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt
#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
#       echo "🧠 Installing LocalAI (Custom created localAI container)..."

#       mkdir -p ~/ai-stack/localai/models
#       mkdir -p ~/ai-stack/localai/config

#       echo "📦 Using custom built LocalAI image (localai:custom)"

#       docker run -d \
#         --name localai \
#         --gpus all \
#         --network ai-stack-net \
#         -p 8080:8080 \
#         -v ~/ai-stack/localai/models:/models \
#         -v ~/ai-stack/localai/models:/app/models \
#         -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
#         -e INCLUDE_DEFAULT_MODELS=true \
#         -e AUTOLOAD_MODELS=true \
#         -e MODEL_PATH=/models \
#         -e LOG_LEVEL=debug \
#         --restart unless-stopped \
#         localai:custom

#       echo "🌐 LocalAI is now running at http://localhost:8080"
#     fi
#   fi
# fi

echo "✅ AI stack setup complete."

@@ -22,11 +22,11 @@ check_container_running()
  return 1
}

# Handle update option
# Handle UPDATE (--update option passed) -----------------------------------------------------------------UPDATE SECTION BELOW
if [[ "$1" == "--update" ]]; then
  echo "🔄 Updating containers..."

  # JupyterLab
  # jupyterLab
  if check_container_running "jupyterlab"; then
    echo "🧼 Updating JupyterLab..."
    docker stop jupyterlab
@@ -43,171 +43,140 @@ if [[ "$1" == "--update" ]]; then
  else
    echo "⚠️ JupyterLab is not running, skipping update."
  fi

  # --- LocalAI custom build and update ---
  LOCALAI_REPO=~/git-repos/LocalAI
  DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile"

  echo "🔁 Rebuilding LocalAI (custom)..."

  # Backup and remove existing repo
  if [ -d "$LOCALAI_REPO" ]; then
    echo "📦 Backing up existing LocalAI repo..."
    mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)"

  # pipelines
  if check_container_running "pipelines"; then
    echo "🧼 Updating Pipelines..."
    docker stop pipelines
    docker rm pipelines
    docker pull ghcr.io/open-webui/pipelines:main
    docker run -d \
      --name pipelines \
      --network ai-stack-net \
      -p 9099:9099 \
      -v ~/ai-stack/pipelines:/app/pipelines \
      --restart unless-stopped \
      ghcr.io/open-webui/pipelines:main
    echo "✅ Pipelines running at http://localhost:9099"
  else
    echo "⚠️ Pipelines is not running, skipping update."
  fi
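  # Quick sanity check (illustrative, not part of the update flow): the Pipelines server
  # answers HTTP on 9099, so from the host something like this should respond once it is up:
  #   curl -s http://localhost:9099 || echo "pipelines not reachable yet"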
  # open-webui
  if check_container_running "open-webui"; then
    echo "🧼 Updating open-webui..."
    docker stop open-webui
    docker rm open-webui
    docker pull ghcr.io/open-webui/open-webui:ollama
    docker run -d -p 3000:8080 --gpus all \
      -v ollama:/root/.ollama \
      -v open-webui:/app/backend/data \
      --name open-webui \
      --network ai-stack-net \
      --restart always \
      ghcr.io/open-webui/open-webui:ollama
  else
    echo "⚠️ open-webui is not running, skipping update."
  fi

  # localai (GPU-support)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
    docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
    docker run -d \
      --name localai \
      --gpus all \
      -p 8080:8080 \
      -v ~/ai-stack/localai/models:/models \
      -v ~/ai-stack/localai/config:/config \
      -e ENABLE_BACKENDS=llama-cuda,ollama \
      -e INCLUDE_DEFAULT_MODELS=true \
      -e AUTOLOAD_MODELS=true \
      -e MODEL_PATH=/models \
      --restart unless-stopped \
      localai/localai:latest-aio-gpu-nvidia-cuda-12
    echo "✅ LocalAI running at http://localhost:8080"
  else
    echo "➕ LocalAI isn't running — install it first by rerunning without --update."
  fi
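  # Optional smoke test (illustrative): LocalAI exposes an OpenAI-compatible API, so listing
  # the loaded models is a reasonable way to confirm the container came back up:
  #   curl -s http://localhost:8080/v1/models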
  # Clone latest LocalAI
  git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO"

  # Remove default Docker-related files
  cd "$LOCALAI_REPO"
  rm -f Dockerfile Dockerfile.aio docker-compose.yaml

  # Prepare and build LocalAI binary
  # NOTE: This portion required quite a few custom additions to build correctly. If you are
  # running this on a machine that hasn't built it before, it is probably better to wait for
  # the Docker Compose method.
  # Docker-compose methods will be coming soon.
  echo "🔧 Running make prepare..."
  make prepare

  # echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
  # make build \
  #   TAGS="localrecall,localagi,gpu" \
  #   BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
  #   GPU_SUPPORT="cuda"

  # # Download your custom Dockerfile
  # echo "📥 Downloading custom Dockerfile..."
  # curl -fsSL "$DOCKERFILE_URL" -o Dockerfile

  # # Build the container
  # echo "🔨 Building custom LocalAI Docker image..."
  # docker build -t localai:custom .

  # # Remove existing container
  # docker stop localai 2>/dev/null || true
  # docker rm localai 2>/dev/null || true

  # # Create model and config directories if needed
  # mkdir -p ~/ai-stack/localai/models
  # mkdir -p ~/ai-stack/localai/config

  # # Run new container
  # docker run -d \
  #   --name localai \
  #   --gpus all \
  #   -p 8080:8080 \
  #   -v ~/ai-stack/localai/models:/models \
  #   -v ~/ai-stack/localai/config:/config \
  #   -e ENABLE_BACKENDS=llama-cuda,ollama \
  #   -e INCLUDE_DEFAULT_MODELS=true \
  #   -e AUTOLOAD_MODELS=true \
  #   -e MODEL_PATH=/models \
  #   --restart unless-stopped \
  #   localai:custom

  # echo "✅ LocalAI (custom) is now running at http://localhost:8080"

  # 🔁 Updating LocalAI (GPU build)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
  else
    echo "➕ Installing LocalAI..."
  fi

  docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

  docker run -d \
    --name localai \
    --gpus all \
    -p 8080:8080 \
    -v ~/ai-stack/localai/models:/models \
    -v ~/ai-stack/localai/config:/config \
    -e ENABLE_BACKENDS=llama-cuda,ollama \
    -e INCLUDE_DEFAULT_MODELS=true \
    -e AUTOLOAD_MODELS=true \
    -e MODEL_PATH=/models \
    --restart unless-stopped \
    localai/localai:latest-aio-gpu-nvidia-cuda-12

  echo "✅ LocalAI running at http://localhost:8080"

  echo "🔁 Updating Whisper.cpp (custom GPU build)..."
  # whisper-cpp
  echo "🔁 Updating Whisper.cpp (custom GPU build)..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker stop whispercpp-gpu 2>/dev/null || true
    docker rm whispercpp-gpu 2>/dev/null || true
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ Whisper.cpp GPU image rebuilt."
    cd "$WHISPER_DIR"
    git pull origin master
    docker stop whispercpp-gpu 2>/dev/null || true
    docker rm whispercpp-gpu 2>/dev/null || true
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "✅ Whisper.cpp GPU image rebuilt."
  else
    echo "⚠️ Whisper.cpp directory not found — skipping update."
    echo "⚠️ Whisper.cpp directory not found — skipping update."
  fi

  # stable-diffusion
  echo "🔁 Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker build -t a1111-webui .
    docker run -d --gpus all \
      --name stable-diffusion \
    cd "$SD_DIR"
    git pull origin master
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker build -t a1111-webui .
    docker run -d --gpus all \
      --name stable-diffusion \
      --network ai-stack-net \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion WebUI updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found — skipping update."
  fi

      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion WebUI updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found — skipping update."
  fi

  # tg-webui
  echo "🔁 Updating Text-generation-webui..."
  TG_DIR=~/ai-stack/tg-webui
  if [ -d "$TG_DIR" ]; then
    cd "$TG_DIR"
    git pull origin master
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
    cd "$TG_DIR"
    git pull origin master
    docker stop tg-webui 2>/dev/null || true
    docker rm tg-webui 2>/dev/null || true
    docker build -t tg-webui .
    docker run -d --gpus all \
      --name tg-webui \
      --network ai-stack-net \
      -p 7861:7861 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
      -p 7861:7861 \
      -v ~/ai-stack/tg-models:/app/models \
      tg-webui
    echo "✅ Text-generation-webui updated and restarted."
  else
    echo "⚠️ Text-generation-webui directory not found — skipping update."
    echo "⚠️ Text-generation-webui directory not found — skipping update."
  fi

  # comfyui
  echo "🔁 Updating ComfyUI (custom build)..."
  COMFYUI_DIR=~/ai-stack/comfyui
  if [ -d "$COMFYUI_DIR" ]; then
    cd "$COMFYUI_DIR"
    git pull origin master
    docker stop comfyui 2>/dev/null || true
    docker rm comfyui 2>/dev/null || true
    docker build -t comfyui-custom .
    docker run -d --gpus all \
      --name comfyui \
    cd "$COMFYUI_DIR"
    git pull origin master
    docker stop comfyui 2>/dev/null || true
    docker rm comfyui 2>/dev/null || true
    docker build -t comfyui-custom .
    docker run -d --gpus all \
      --name comfyui \
      --network ai-stack-net \
      -p 8188:8188 \
      -v ~/ai-stack/comfyui/workspace:/app/output \
      comfyui-custom
    echo "✅ ComfyUI updated and restarted."
      -p 8188:8188 \
      -v ~/ai-stack/comfyui/workspace:/app/output \
      comfyui-custom
    echo "✅ ComfyUI updated and restarted."
  else
    echo "⚠️ ComfyUI directory not found — skipping update."
    echo "⚠️ ComfyUI directory not found — skipping update."
  fi

  echo "✅ Update complete!"
  exit 0
fi
@@ -218,6 +187,10 @@ clean_container() {
  docker rm -f "$name" 2>/dev/null || true
}

# Handle INSTALL (no --update passed) ------------------------------------------------------------------------INSTALL SECTION BELOW

# open-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "open-webui"; then
    echo "✅ Skipping Open WebUI + Ollama install (already running)."
@@ -240,6 +213,7 @@ if [[ "$1" != "--update" ]]; then
fi

# jupyterlab INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "jupyterlab"; then
    echo "✅ Skipping JupyterLab install (already running)."
@@ -271,11 +245,11 @@ if [[ "$1" != "--update" ]]; then
    read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
    if [[ "$whisper" =~ ^[Yy]$ ]]; then
      echo "🔊 Cloning and building Whisper.cpp with GPU support..."

      WHISPER_DIR=~/ai-stack/whisper.cpp
      mkdir -p "$WHISPER_DIR"
      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

      cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

@@ -290,14 +264,14 @@ RUN cmake -DWITH_CUBLAS=ON . && make -j

CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

      echo "🐳 Building Docker image 'whispercpp-gpu'..."
      cd "$WHISPER_DIR"
      docker build -t whispercpp-gpu -f Dockerfile.gpu .

      echo "📂 Creating ~/ai-stack/audio and models directories"
      mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models

      echo "✅ Whisper.cpp built successfully."
      echo "To run it:"
      echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
@@ -306,6 +280,7 @@ EOF
fi

# stable-diffusion INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "✅ Skipping Stable Diffusion install (already running)."
@@ -313,12 +288,12 @@ if [[ "$1" != "--update" ]]; then
    read -p "➕ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."

      SD_DIR=~/ai-stack/stable-diffusion
      mkdir -p ~/ai-stack
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
@@ -339,15 +314,15 @@ RUN pip install --upgrade pip && \
EXPOSE 7860
CMD ["/bin/bash", "-c", "python launch.py --listen --xformers --api"]
EOF

      echo "🐳 Building Docker image 'a1111-webui'..."
      docker stop stable-diffusion 2>/dev/null || true
      docker rm stable-diffusion 2>/dev/null || true
      docker build -t a1111-webui .

      echo "📂 Mounting model directory from ~/ai-stack/sd-models"
      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs

      echo "🚀 Running Stable Diffusion WebUI on port 7860..."
      docker run -d --gpus all \
        --name stable-diffusion \
@@ -356,12 +331,14 @@ EOF
        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
        a1111-webui

      echo "🌐 Access at http://localhost:7860"
    fi
  fi
fi

# tg-webui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "✅ Skipping Text-generation-webui install (already running)."
@@ -369,11 +346,11 @@ if [[ "$1" != "--update" ]]; then
    read -p "➕ Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "🧠 Cloning and building Text-generation-webui..."

      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
@@ -393,15 +370,15 @@ RUN pip install --upgrade pip && \
EXPOSE 7861
CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF

      echo "🐳 Building Docker image 'tg-webui'..."
      docker stop tg-webui 2>/dev/null || true
      docker rm tg-webui 2>/dev/null || true
      docker build -t tg-webui .

      echo "📂 Mounting model directory from ~/ai-stack/tg-models"
      mkdir -p ~/ai-stack/tg-models

      echo "🚀 Running text-generation-webui on port 7861..."
      docker run -d --gpus all \
        --name tg-webui \
@@ -409,14 +386,14 @@ EOF
        -p 7861:7861 \
        -v ~/ai-stack/tg-models:/app/models \
        tg-webui

      echo "🌐 Access at http://localhost:7861"
    fi
  fi
fi

# comfy ui INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "comfyui"; then
    echo "✅ Skipping ComfyUI install (already running)."
@@ -424,11 +401,11 @@ if [[ "$1" != "--update" ]]; then
    read -p "➕ Install ComfyUI (custom Docker build)? [y/N]: " comfy
    if [[ "$comfy" =~ ^[Yy]$ ]]; then
      echo "🧩 Cloning and building ComfyUI..."

      COMFYUI_DIR=~/ai-stack/comfyui
      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
      cd "$COMFYUI_DIR"

      echo "📄 Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04
@@ -447,15 +424,14 @@ RUN pip install --upgrade pip && pip install -r requirements.txt
EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF

      echo "🐳 Building Docker image 'comfyui-custom'..."
      docker stop comfyui 2>/dev/null || true
      docker rm comfyui 2>/dev/null || true
      docker build -t comfyui-custom .

      echo "📂 Preparing workspace volume..."
      mkdir -p ~/ai-stack/comfyui/workspace

      echo "🚀 Running ComfyUI on port 8188..."
      docker run -d --gpus all \
        --name comfyui \
@@ -463,12 +439,14 @@ EOF
        -p 8188:8188 \
        -v ~/ai-stack/comfyui/workspace:/app/output \
        comfyui-custom

      echo "🌐 ComfyUI running at http://localhost:8188"
    fi
  fi
fi

# localai INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
@@ -479,20 +457,13 @@ if [[ "$1" != "--update" ]]; then

      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

      echo "📄 Creating default models.yaml..."
      # cat > ~/ai-stack/localai/config/models.yaml <<EOF
      # - name: phi
      #   backend: llama-cuda
      #   parameters:
      #     model: /models/phi-2.gguf
      # EOF

      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

      docker run -d \
        --name localai \
        --gpus all \
        --network ai-stack-net \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/build/models \
        -v ~/ai-stack/localai/config:/config \
@@ -501,46 +472,31 @@ if [[ "$1" != "--update" ]]; then
        -e AUTOLOAD_MODELS=true \
        --restart unless-stopped \
        localai/localai:latest-aio-gpu-nvidia-cuda-12

      echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi

# if [[ "$1" != "--update" ]]; then
#   if check_container_running "localai"; then
#     echo "✅ Skipping LocalAI install (already running)."
#   else
#     read -p "➕ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt
#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
#       echo "🧠 Installing LocalAI (Custom created localAI container)..."

#       mkdir -p ~/ai-stack/localai/models
#       mkdir -p ~/ai-stack/localai/config

#       echo "📦 Using custom built LocalAI image (localai:custom)"

#       docker run -d \
#         --name localai \
#         --gpus all \
#         --network ai-stack-net \
#         -p 8080:8080 \
#         -v ~/ai-stack/localai/models:/models \
#         -v ~/ai-stack/localai/models:/app/models \
#         -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
#         -e INCLUDE_DEFAULT_MODELS=true \
#         -e AUTOLOAD_MODELS=true \
#         -e MODEL_PATH=/models \
#         -e LOG_LEVEL=debug \
#         --restart unless-stopped \
#         localai:custom

#       echo "🌐 LocalAI is now running at http://localhost:8080"
#     fi
#   fi
# fi

# pipelines INSTALL
if [[ "$1" != "--update" ]]; then
  if check_container_running "pipelines"; then
    echo "✅ Skipping pipelines install (already running)."
  else
    read -p "➕ Install pipelines for open-webui? [y/N]: " pipelines_prompt
    if [[ "$pipelines_prompt" =~ ^[Yy]$ ]]; then
      # Joining ai-stack-net is the key bit: Open WebUI reaches this container as http://pipelines:9099
      docker run -d \
        --name pipelines \
        --network ai-stack-net \
        -p 9099:9099 \
        -v ~/ai-stack/pipelines:/app/pipelines \
        --restart unless-stopped \
        ghcr.io/open-webui/pipelines:main
      echo "✅ Pipelines is now running at http://localhost:9099"
    fi
  fi
fi
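# Hooking Open WebUI up to Pipelines is a manual step (container names assumed from the run
# commands above): in Open WebUI open Admin Settings -> Connections and add an OpenAI-style
# API connection pointing at http://pipelines:9099, the container-to-container address on
# ai-stack-net. Use whatever API key your Pipelines instance is configured with; see the
# open-webui/pipelines README for the default.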

echo "✅ AI stack setup complete."