#!/bin/bash set -e mkdir -p ~/ai-stack mkdir -p ~/ai-stack/localai/models mkdir -p ~/ai-stack/localai/config LOG_FILE=~/ai-stack/setup.log exec > >(tee -a "$LOG_FILE") 2>&1 echo "๐ฆ AI Stack Setup Script" echo "Log: $LOG_FILE" echo docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net check_container_running() { local name="$1" if docker ps --format '{{.Names}}' | grep -q "^$name$"; then echo "๐ข Container '$name' is already running." return 0 fi return 1 } # Handle update option if [[ "$1" == "--update" ]]; then echo "๐ Updating containers..." # JupyterLab if check_container_running "jupyterlab"; then echo "๐งผ Updating JupyterLab..." docker stop jupyterlab docker rm jupyterlab docker pull jupyter/scipy-notebook:latest docker run -d \ --name jupyterlab \ --network ai-stack-net \ --gpus all \ -p 8888:8888 \ -v jupyter_data:/home/jovyan/work \ jupyter/scipy-notebook:latest echo "โ JupyterLab updated." else echo "โ ๏ธ JupyterLab is not running, skipping update." fi # --- LocalAI custom build and update --- LOCALAI_REPO=~/git-repos/LocalAI DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile" echo "๐ Rebuilding LocalAI (custom)..." # Backup and remove existing repo if [ -d "$LOCALAI_REPO" ]; then echo "๐ฆ Backing up existing LocalAI repo..." mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)" fi # Clone latest LocalAI git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO" # Remove default Docker-related files cd "$LOCALAI_REPO" rm -f Dockerfile Dockerfile.aio docker-compose.yaml # Prepare and build LocalAI binary # NOTE - This portion required quite a few custom additions in order to build correctly... if you are running this from a machine that hasn't built it already, just better probably to await the implementation of the Docker-Compose methods # Docker-compose methods will be coming soon. echo "๐ง Running make prepare..." make prepare # echo "๐ง Building LocalAI binary with GPU, localrecall, and localagi..." # make build \ # TAGS="localrecall,localagi,gpu" \ # BACKENDS="llama-cuda whispercpp stablediffusion ollama" \ # GPU_SUPPORT="cuda" # # Download your custom Dockerfile # echo "๐ฅ Downloading custom Dockerfile..." # curl -fsSL "$DOCKERFILE_URL" -o Dockerfile # # Build the container # echo "๐จ Building custom LocalAI Docker image..." # docker build -t localai:custom . # # Remove existing container # docker stop localai 2>/dev/null || true # docker rm localai 2>/dev/null || true # # Create model and config directories if needed # mkdir -p ~/ai-stack/localai/models # mkdir -p ~/ai-stack/localai/config # # Run new container # docker run -d \ # --name localai \ # --gpus all \ # -p 8080:8080 \ # -v ~/ai-stack/localai/models:/models \ # -v ~/ai-stack/localai/config:/config \ # -e ENABLE_BACKENDS=llama-cuda,ollama \ # -e INCLUDE_DEFAULT_MODELS=true \ # -e AUTOLOAD_MODELS=true \ # -e MODEL_PATH=/models \ # --restart unless-stopped \ # localai:custom # echo "โ LocalAI (custom) is now running at http://localhost:8080" # ๐ Updating LocalAI (GPU build) if check_container_running "localai"; then echo "๐ง Updating LocalAI..." docker stop localai docker rm localai else echo "โ Installing LocalAI..." fi docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12 docker run -d \ --name localai \ --gpus all \ -p 8080:8080 \ -v ~/ai-stack/localai/models:/models \ -v ~/ai-stack/localai/config:/config \ -e ENABLE_BACKENDS=llama-cuda,ollama \ -e INCLUDE_DEFAULT_MODELS=true \ -e AUTOLOAD_MODELS=true \ -e MODEL_PATH=/models \ --restart unless-stopped \ localai/localai:latest-aio-gpu-nvidia-cuda-12 echo "โ LocalAI running at http://localhost:8080" echo "๐ Updating Whisper.cpp (custom GPU build)..." WHISPER_DIR=~/ai-stack/whisper.cpp if [ -d "$WHISPER_DIR" ]; then cd "$WHISPER_DIR" git pull origin master docker stop whispercpp-gpu 2>/dev/null || true docker rm whispercpp-gpu 2>/dev/null || true docker build -t whispercpp-gpu -f Dockerfile.gpu . echo "โ Whisper.cpp GPU image rebuilt." else echo "โ ๏ธ Whisper.cpp directory not found โ skipping update." fi echo "๐ Updating Stable Diffusion WebUI..." SD_DIR=~/ai-stack/stable-diffusion if [ -d "$SD_DIR" ]; then cd "$SD_DIR" git pull origin master docker stop stable-diffusion 2>/dev/null || true docker rm stable-diffusion 2>/dev/null || true docker build -t a1111-webui . docker run -d --gpus all \ --name stable-diffusion \ --network ai-stack-net \ -p 7860:7860 \ -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \ -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \ a1111-webui echo "โ Stable Diffusion WebUI updated and restarted." else echo "โ ๏ธ Stable Diffusion directory not found โ skipping update." fi echo "๐ Updating Text-generation-webui..." TG_DIR=~/ai-stack/tg-webui if [ -d "$TG_DIR" ]; then cd "$TG_DIR" git pull origin master docker stop tg-webui 2>/dev/null || true docker rm tg-webui 2>/dev/null || true docker build -t tg-webui . docker run -d --gpus all \ --name tg-webui \ --network ai-stack-net \ -p 7861:7861 \ -v ~/ai-stack/tg-models:/app/models \ tg-webui echo "โ Text-generation-webui updated and restarted." else echo "โ ๏ธ Text-generation-webui directory not found โ skipping update." fi echo "๐ Updating ComfyUI (custom build)..." COMFYUI_DIR=~/ai-stack/comfyui if [ -d "$COMFYUI_DIR" ]; then cd "$COMFYUI_DIR" git pull origin master docker stop comfyui 2>/dev/null || true docker rm comfyui 2>/dev/null || true docker build -t comfyui-custom . docker run -d --gpus all \ --name comfyui \ --network ai-stack-net \ -p 8188:8188 \ -v ~/ai-stack/comfyui/workspace:/app/output \ comfyui-custom echo "โ ComfyUI updated and restarted." else echo "โ ๏ธ ComfyUI directory not found โ skipping update." fi echo "โ Update complete!" exit 0 fi # Function to clean up existing container clean_container() { local name=$1 docker rm -f "$name" 2>/dev/null || true } if [[ "$1" != "--update" ]]; then if check_container_running "open-webui"; then echo "โ Skipping Open WebUI + Ollama install (already running)." else read -p "โ Install Open WebUI + Ollama? [y/N]: " openui if [[ "$openui" =~ ^[Yy]$ ]]; then echo "๐ง Setting up Open WebUI + Ollama..." docker stop open-webui 2>/dev/null || true docker rm open-webui 2>/dev/null || true docker run -d -p 3000:8080 --gpus all \ -v ollama:/root/.ollama \ -v open-webui:/app/backend/data \ --name open-webui \ --network ai-stack-net \ --restart always \ ghcr.io/open-webui/open-webui:ollama echo "๐ Open WebUI running at http://localhost:3000" fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "jupyterlab"; then echo "โ Skipping JupyterLab install (already running)." else read -p "โ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter if [[ "$jupyter" =~ ^[Yy]$ ]]; then echo "๐ Installing JupyterLab..." docker stop jupyterlab 2>/dev/null || true docker rm jupyterlab 2>/dev/null || true docker run -d \ --name jupyterlab \ --network ai-stack-net \ --gpus all \ -p 8888:8888 \ -v jupyter_data:/home/jovyan/work \ jupyter/scipy-notebook:latest echo "๐ Jupyter token (saved to ~/ai-stack/jupyter_token.txt):" mkdir -p ~/ai-stack docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "whispercpp-gpu"; then echo "โ Skipping Whisper.cpp install (already running)." else read -p "โ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper if [[ "$whisper" =~ ^[Yy]$ ]]; then echo "๐ Cloning and building Whisper.cpp with GPU support..." WHISPER_DIR=~/ai-stack/whisper.cpp mkdir -p "$WHISPER_DIR" git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF FROM nvidia/cuda:12.2.0-base-ubuntu22.04 RUN apt-get update && \\ apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\ apt-get clean WORKDIR /opt RUN git clone https://github.com/ggerganov/whisper.cpp.git WORKDIR /opt/whisper.cpp RUN cmake -DWITH_CUBLAS=ON . && make -j CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"] EOF echo "๐ณ Building Docker image 'whispercpp-gpu'..." cd "$WHISPER_DIR" docker build -t whispercpp-gpu -f Dockerfile.gpu . echo "๐ Creating ~/ai-stack/audio and models directories" mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models echo "โ Whisper.cpp built successfully." echo "To run it:" echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav" fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "stable-diffusion"; then echo "โ Skipping Stable Diffusion install (already running)." else read -p "โ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd if [[ "$sd" =~ ^[Yy]$ ]]; then echo "๐จ Cloning and building AUTOMATIC1111 WebUI..." SD_DIR=~/ai-stack/stable-diffusion mkdir -p ~/ai-stack git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true cd "$SD_DIR" echo "๐ Writing Dockerfile..." cat > Dockerfile <<'EOF' FROM nvidia/cuda:12.2.0-base-ubuntu22.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \ apt-get clean WORKDIR /app COPY . /app RUN pip install --upgrade pip && \ pip install -r requirements_versions.txt && \ pip install -r requirements.txt EXPOSE 7860 CMD ["/bin/bash", "-c", "python launch.py --listen --xformers --api"] EOF echo "๐ณ Building Docker image 'a1111-webui'..." docker stop stable-diffusion 2>/dev/null || true docker rm stable-diffusion 2>/dev/null || true docker build -t a1111-webui . echo "๐ Mounting model directory from ~/ai-stack/sd-models" mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs echo "๐ Running Stable Diffusion WebUI on port 7860..." docker run -d --gpus all \ --name stable-diffusion \ --network ai-stack-net \ -p 7860:7860 \ -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \ -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \ a1111-webui echo "๐ Access at http://localhost:7860" fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "tg-webui"; then echo "โ Skipping Text-generation-webui install (already running)." else read -p "โ Install Text-generation-webui? [y/N]: " tg if [[ "$tg" =~ ^[Yy]$ ]]; then echo "๐ง Cloning and building Text-generation-webui..." TG_DIR=~/ai-stack/tg-webui git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true cd "$TG_DIR" echo "๐ Writing Dockerfile..." cat > Dockerfile <<'EOF' FROM nvidia/cuda:12.2.0-base-ubuntu22.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install -y git python3 python3-pip build-essential && \ apt-get clean WORKDIR /app COPY . /app RUN pip install --upgrade pip && \ (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers) EXPOSE 7861 CMD ["/bin/bash", "-c", "python server.py --listen --api"] EOF echo "๐ณ Building Docker image 'tg-webui'..." docker stop tg-webui 2>/dev/null || true docker rm tg-webui 2>/dev/null || true docker build -t tg-webui . echo "๐ Mounting model directory from ~/ai-stack/tg-models" mkdir -p ~/ai-stack/tg-models echo "๐ Running text-generation-webui on port 7861..." docker run -d --gpus all \ --name tg-webui \ --network ai-stack-net \ -p 7861:7861 \ -v ~/ai-stack/tg-models:/app/models \ tg-webui echo "๐ Access at http://localhost:7861" fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "comfyui"; then echo "โ Skipping ComfyUI install (already running)." else read -p "โ Install ComfyUI (custom Docker build)? [y/N]: " comfy if [[ "$comfy" =~ ^[Yy]$ ]]; then echo "๐งฉ Cloning and building ComfyUI..." COMFYUI_DIR=~/ai-stack/comfyui git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true cd "$COMFYUI_DIR" echo "๐ Writing Dockerfile..." cat > Dockerfile <<'EOF' FROM nvidia/cuda:12.2.0-base-ubuntu22.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \ apt-get clean WORKDIR /app COPY . /app RUN pip install --upgrade pip && pip install -r requirements.txt EXPOSE 8188 CMD ["python3", "main.py", "--listen", "--port", "8188"] EOF echo "๐ณ Building Docker image 'comfyui-custom'..." docker stop comfyui 2>/dev/null || true docker rm comfyui 2>/dev/null || true docker build -t comfyui-custom . echo "๐ Preparing workspace volume..." mkdir -p ~/ai-stack/comfyui/workspace echo "๐ Running ComfyUI on port 8188..." docker run -d --gpus all \ --name comfyui \ --network ai-stack-net \ -p 8188:8188 \ -v ~/ai-stack/comfyui/workspace:/app/output \ comfyui-custom echo "๐ ComfyUI running at http://localhost:8188" fi fi fi if [[ "$1" != "--update" ]]; then if check_container_running "localai"; then echo "โ Skipping LocalAI install (already running)." else read -p "โ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then echo "๐ง Installing LocalAI (NVIDIA CUDA 12)..." mkdir -p ~/ai-stack/localai/models mkdir -p ~/ai-stack/localai/config echo "๐ Creating default models.yaml..." # cat > ~/ai-stack/localai/config/models.yaml <<EOF # - name: phi # backend: llama-cuda # parameters: # model: /models/phi-2.gguf # EOF docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12 docker run -d \ --name localai \ --gpus all \ -p 8080:8080 \ -v ~/ai-stack/localai/models:/build/models \ -v ~/ai-stack/localai/config:/config \ -e ENABLE_BACKENDS=llama-cuda,ollama \ -e INCLUDE_DEFAULT_MODELS=true \ -e AUTOLOAD_MODELS=true \ --restart unless-stopped \ localai/localai:latest-aio-gpu-nvidia-cuda-12 echo "๐ LocalAI is now running at http://localhost:8080" fi fi fi # if [[ "$1" != "--update" ]]; then # if check_container_running "localai"; then # echo "โ Skipping LocalAI install (already running)." # else # read -p "โ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt # if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then # echo "๐ง Installing LocalAI (Custom created localAI container)..." # mkdir -p ~/ai-stack/localai/models # mkdir -p ~/ai-stack/localai/config # echo "๐ฆ Using custom built LocalAI image (localai:custom)" # docker run -d \ # --name localai \ # --gpus all \ # --network ai-stack-net \ # -p 8080:8080 \ # -v ~/ai-stack/localai/models:/models \ # -v ~/ai-stack/localai/models:/app/models \ # -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \ # -e INCLUDE_DEFAULT_MODELS=true \ # -e AUTOLOAD_MODELS=true \ # -e MODEL_PATH=/models \ # -e LOG_LEVEL=debug \ # --restart unless-stopped \ # localai:custom # echo "๐ LocalAI is now running at http://localhost:8080" # fi # fi # fi echo "โ AI stack setup complete."