#!/bin/bash
set -e

mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1

echo "๐Ÿ“ฆ AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo

docker network inspect ai-stack-net >/dev/null 2>&1 || docker network create ai-stack-net
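# The shared user-defined bridge network lets the containers resolve each other by name
# (e.g. http://localai:8080 or http://open-webui:8080 from another container on ai-stack-net).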

check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "๐ŸŸข Container '$name' is already running."
    return 0
  fi
  return 1
}

# Handle update option
if [[ "$1" == "--update" ]]; then
  echo "๐Ÿ”„ Updating containers..."
  
  # JupyterLab
  if check_container_running "jupyterlab"; then
    echo "๐Ÿงผ Updating JupyterLab..."
    docker stop jupyterlab
    docker rm jupyterlab
    docker pull jupyter/scipy-notebook:latest
    docker run -d \
      --name jupyterlab \
      --network ai-stack-net \
      --gpus all \
      -p 8888:8888 \
      -v jupyter_data:/home/jovyan/work \
      jupyter/scipy-notebook:latest
    echo "โœ… JupyterLab updated."
  else
    echo "โš ๏ธ  JupyterLab is not running, skipping update."
  fi

  # --- LocalAI custom build and update ---
  LOCALAI_REPO=~/git-repos/LocalAI
  DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile"

  echo "๐Ÿ” Rebuilding LocalAI (custom)..."

  # Backup and remove existing repo
  if [ -d "$LOCALAI_REPO" ]; then
    echo "๐Ÿ“ฆ Backing up existing LocalAI repo..."
    mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)"
  fi

  # Clone latest LocalAI
  git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO"

  # Remove default Docker-related files
  cd "$LOCALAI_REPO"
  rm -f Dockerfile Dockerfile.aio docker-compose.yaml

  # Prepare and build LocalAI binary
  # NOTE: This portion required quite a few custom additions to build correctly. If you are
  # running this on a machine that has not built it before, it is probably better to wait for
  # the Docker Compose method, which is coming soon; a rough sketch is included below.
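  # For reference, a rough sketch of what a future docker-compose service for the custom LocalAI
  # build could look like. This is illustration only; the image tag, ports, and volume paths below
  # are assumptions, not the final layout:
  #
  #   services:
  #     localai:
  #       build: .
  #       image: localai:custom
  #       ports:
  #         - "8080:8080"
  #       volumes:
  #         - ~/ai-stack/localai/models:/models
  #         - ~/ai-stack/localai/config:/config
  #       environment:
  #         - ENABLE_BACKENDS=llama-cuda,ollama
  #         - MODEL_PATH=/models
  #       deploy:
  #         resources:
  #           reservations:
  #             devices:
  #               - driver: nvidia
  #                 count: all
  #                 capabilities: [gpu]
  #       restart: unless-stopped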
  echo "๐Ÿ”ง Running make prepare..."
  make prepare

  # echo "๐Ÿง  Building LocalAI binary with GPU, localrecall, and localagi..."
  # make build \
  #   TAGS="localrecall,localagi,gpu" \
  #   BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
  #   GPU_SUPPORT="cuda"


  # # Download your custom Dockerfile
  # echo "๐Ÿ“ฅ Downloading custom Dockerfile..."
  # curl -fsSL "$DOCKERFILE_URL" -o Dockerfile

  # # Build the container
  # echo "๐Ÿ”จ Building custom LocalAI Docker image..."
  # docker build -t localai:custom .

  # # Remove existing container
  # docker stop localai 2>/dev/null || true
  # docker rm localai 2>/dev/null || true

  # # Create model and config directories if needed
  # mkdir -p ~/ai-stack/localai/models
  # mkdir -p ~/ai-stack/localai/config

  # # Run new container
  # docker run -d \
  #   --name localai \
  #   --gpus all \
  #   -p 8080:8080 \
  #   -v ~/ai-stack/localai/models:/models \
  #   -v ~/ai-stack/localai/config:/config \
  #   -e ENABLE_BACKENDS=llama-cuda,ollama \
  #   -e INCLUDE_DEFAULT_MODELS=true \
  #   -e AUTOLOAD_MODELS=true \
  #   -e MODEL_PATH=/models \
  #   --restart unless-stopped \
  #   localai:custom

  # echo "โœ… LocalAI (custom) is now running at http://localhost:8080"


  # 🔁 Updating LocalAI (GPU build)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
  else
    echo "➕ Installing LocalAI..."
  fi

  docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

  docker run -d \
    --name localai \
    --gpus all \
    -p 8080:8080 \
    -v ~/ai-stack/localai/models:/models \
    -v ~/ai-stack/localai/config:/config \
    -e ENABLE_BACKENDS=llama-cuda,ollama \
    -e INCLUDE_DEFAULT_MODELS=true \
    -e AUTOLOAD_MODELS=true \
    -e MODEL_PATH=/models \
    --restart unless-stopped \
    localai/localai:latest-aio-gpu-nvidia-cuda-12

  echo "✅ LocalAI running at http://localhost:8080"


  
  	echo "๐Ÿ” Updating Whisper.cpp (custom GPU build)..."
	WHISPER_DIR=~/ai-stack/whisper.cpp
	if [ -d "$WHISPER_DIR" ]; then
	  cd "$WHISPER_DIR"
	  git pull origin master
	  docker stop whispercpp-gpu 2>/dev/null || true
	  docker rm whispercpp-gpu 2>/dev/null || true
	  docker build -t whispercpp-gpu -f Dockerfile.gpu .
	  echo "โœ… Whisper.cpp GPU image rebuilt."
	else
	  echo "โš ๏ธ Whisper.cpp directory not found โ€” skipping update."
	fi
	
	echo "๐Ÿ” Updating Stable Diffusion WebUI..."
	SD_DIR=~/ai-stack/stable-diffusion
	if [ -d "$SD_DIR" ]; then
	  cd "$SD_DIR"
	  git pull origin master
	  docker stop stable-diffusion 2>/dev/null || true
	  docker rm stable-diffusion 2>/dev/null || true
	  docker build -t a1111-webui .
	  docker run -d --gpus all \
	    --name stable-diffusion \
      --network ai-stack-net \
	    -p 7860:7860 \
	    -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
	    -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
	    a1111-webui
	  echo "โœ… Stable Diffusion WebUI updated and restarted."
	else
	  echo "โš ๏ธ Stable Diffusion directory not found โ€” skipping update."
	fi
	
	echo "๐Ÿ” Updating Text-generation-webui..."
	TG_DIR=~/ai-stack/tg-webui
	if [ -d "$TG_DIR" ]; then
	  cd "$TG_DIR"
	  git pull origin master
	  docker stop tg-webui 2>/dev/null || true
	  docker rm tg-webui 2>/dev/null || true
	  docker build -t tg-webui .
	  docker run -d --gpus all \
	    --name tg-webui \
      --network ai-stack-net \
	    -p 7861:7861 \
	    -v ~/ai-stack/tg-models:/app/models \
	    tg-webui
	  echo "โœ… Text-generation-webui updated and restarted."
	else
	  echo "โš ๏ธ Text-generation-webui directory not found โ€” skipping update."
	fi
	
	echo "๐Ÿ” Updating ComfyUI (custom build)..."
	COMFYUI_DIR=~/ai-stack/comfyui
	if [ -d "$COMFYUI_DIR" ]; then
	  cd "$COMFYUI_DIR"
	  git pull origin master
	  docker stop comfyui 2>/dev/null || true
	  docker rm comfyui 2>/dev/null || true
	  docker build -t comfyui-custom .
	  docker run -d --gpus all \
	    --name comfyui \
      --network ai-stack-net \
	    -p 8188:8188 \
	    -v ~/ai-stack/comfyui/workspace:/app/output \
	    comfyui-custom
	  echo "โœ… ComfyUI updated and restarted."
	else
	  echo "โš ๏ธ ComfyUI directory not found โ€” skipping update."
	fi
	  
  echo "โœ… Update complete!"
  exit 0
fi

# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}

if [[ "$1" != "--update" ]]; then
  if check_container_running "open-webui"; then
    echo "โœ… Skipping Open WebUI + Ollama install (already running)."
  else
    read -p "โž• Install Open WebUI + Ollama? [y/N]: " openui
    if [[ "$openui" =~ ^[Yy]$ ]]; then
      echo "๐Ÿง  Setting up Open WebUI + Ollama..."
      docker stop open-webui 2>/dev/null || true
      docker rm open-webui 2>/dev/null || true
      docker run -d -p 3000:8080 --gpus all \
        -v ollama:/root/.ollama \
        -v open-webui:/app/backend/data \
        --name open-webui \
        --network ai-stack-net \
        --restart always \
        ghcr.io/open-webui/open-webui:ollama
      echo "๐ŸŒ Open WebUI running at http://localhost:3000"
    fi
  fi
fi


if [[ "$1" != "--update" ]]; then
  if check_container_running "jupyterlab"; then
    echo "โœ… Skipping JupyterLab install (already running)."
  else
    read -p "โž• Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
    if [[ "$jupyter" =~ ^[Yy]$ ]]; then
      echo "๐Ÿ“” Installing JupyterLab..."
      docker stop jupyterlab 2>/dev/null || true
      docker rm jupyterlab 2>/dev/null || true
      docker run -d \
        --name jupyterlab \
        --network ai-stack-net \
        --gpus all \
        -p 8888:8888 \
        -v jupyter_data:/home/jovyan/work \
        jupyter/scipy-notebook:latest
      echo "๐Ÿ” Jupyter token (saved to ~/ai-stack/jupyter_token.txt):"
      mkdir -p ~/ai-stack
      docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
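      # If the token is missing from the logs (e.g. after a container restart), Jupyter's own CLI
      # inside the container can usually list it:
      echo "   If no token was captured, try: docker exec jupyterlab jupyter server list"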
    fi
  fi
fi


if [[ "$1" != "--update" ]]; then
  if check_container_running "whispercpp-gpu"; then
    echo "โœ… Skipping Whisper.cpp install (already running)."
  else
    read -p "โž• Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
    if [[ "$whisper" =~ ^[Yy]$ ]]; then
      echo "๐Ÿ”Š Cloning and building Whisper.cpp with GPU support..."

      WHISPER_DIR=~/ai-stack/whisper.cpp
      mkdir -p "$WHISPER_DIR"
      git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

      cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

RUN apt-get update && \\
    apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
    apt-get clean

WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j

CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

      echo "๐Ÿณ Building Docker image 'whispercpp-gpu'..."
      cd "$WHISPER_DIR"
      docker build -t whispercpp-gpu -f Dockerfile.gpu .

      echo "๐Ÿ“‚ Creating ~/ai-stack/audio and models directories"
      mkdir -p ~/ai-stack/audio ~/ai-stack/whisper.cpp/models

      echo "โœ… Whisper.cpp built successfully."
      echo "To run it:"
      echo "docker run --rm --gpus all --network ai-stack-net -v ~/ai-stack/audio:/audio -v ~/ai-stack/whisper.cpp/models:/opt/whisper.cpp/models whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/test.wav"
    fi
  fi
fi


if [[ "$1" != "--update" ]]; then
  if check_container_running "stable-diffusion"; then
    echo "โœ… Skipping Stable Diffusion install (already running)."
  else
    read -p "โž• Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
    if [[ "$sd" =~ ^[Yy]$ ]]; then
      echo "๐ŸŽจ Cloning and building AUTOMATIC1111 WebUI..."

      SD_DIR=~/ai-stack/stable-diffusion
      mkdir -p ~/ai-stack
      git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
      cd "$SD_DIR"

      echo "๐Ÿ“„ Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt

EXPOSE 7860
CMD ["/bin/bash", "-c", "python launch.py --listen --xformers --api"]
EOF

      echo "๐Ÿณ Building Docker image 'a1111-webui'..."
      docker stop stable-diffusion 2>/dev/null || true
      docker rm stable-diffusion 2>/dev/null || true
      docker build -t a1111-webui .

      echo "๐Ÿ“‚ Mounting model directory from ~/ai-stack/sd-models"
      mkdir -p ~/ai-stack/sd-models ~/ai-stack/stable-diffusion/outputs

      echo "๐Ÿš€ Running Stable Diffusion WebUI on port 7860..."
      docker run -d --gpus all \
        --name stable-diffusion \
        --network ai-stack-net \
        -p 7860:7860 \
        -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
        -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
        a1111-webui

      echo "๐ŸŒ Access at http://localhost:7860"
    fi
  fi
fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "tg-webui"; then
    echo "โœ… Skipping Text-generation-webui install (already running)."
  else
    read -p "โž• Install Text-generation-webui? [y/N]: " tg
    if [[ "$tg" =~ ^[Yy]$ ]]; then
      echo "๐Ÿง  Cloning and building Text-generation-webui..."

      TG_DIR=~/ai-stack/tg-webui
      git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
      cd "$TG_DIR"

      echo "๐Ÿ“„ Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip build-essential && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)

EXPOSE 7861
CMD ["/bin/bash", "-c", "python server.py --listen --api"]
EOF

      echo "๐Ÿณ Building Docker image 'tg-webui'..."
      docker stop tg-webui 2>/dev/null || true
      docker rm tg-webui 2>/dev/null || true
      docker build -t tg-webui .

      echo "๐Ÿ“‚ Mounting model directory from ~/ai-stack/tg-models"
      mkdir -p ~/ai-stack/tg-models

      echo "๐Ÿš€ Running text-generation-webui on port 7861..."
      docker run -d --gpus all \
        --name tg-webui \
        --network ai-stack-net \
        -p 7861:7861 \
        -v ~/ai-stack/tg-models:/app/models \
        tg-webui

      echo "๐ŸŒ Access at http://localhost:7861"
    fi
  fi
fi



if [[ "$1" != "--update" ]]; then
  if check_container_running "comfyui"; then
    echo "โœ… Skipping ComfyUI install (already running)."
  else
    read -p "โž• Install ComfyUI (custom Docker build)? [y/N]: " comfy
    if [[ "$comfy" =~ ^[Yy]$ ]]; then
      echo "๐Ÿงฉ Cloning and building ComfyUI..."

      COMFYUI_DIR=~/ai-stack/comfyui
      git clone https://github.com/comfyanonymous/ComfyUI "$COMFYUI_DIR" || true
      cd "$COMFYUI_DIR"

      echo "๐Ÿ“„ Writing Dockerfile..."
      cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

RUN pip install --upgrade pip && pip install -r requirements.txt

EXPOSE 8188
CMD ["python3", "main.py", "--listen", "--port", "8188"]
EOF

      echo "๐Ÿณ Building Docker image 'comfyui-custom'..."
      docker stop comfyui 2>/dev/null || true
      docker rm comfyui 2>/dev/null || true
      docker build -t comfyui-custom .

      echo "๐Ÿ“‚ Preparing workspace volume..."
      mkdir -p ~/ai-stack/comfyui/workspace

      echo "๐Ÿš€ Running ComfyUI on port 8188..."
      docker run -d --gpus all \
        --name comfyui \
        --network ai-stack-net \
        -p 8188:8188 \
        -v ~/ai-stack/comfyui/workspace:/app/output \
        comfyui-custom

      echo "๐ŸŒ ComfyUI running at http://localhost:8188"
    fi
  fi
fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "โœ… Skipping LocalAI install (already running)."
  else
    read -p "โž• Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "๐Ÿง  Installing LocalAI (NVIDIA CUDA 12)..."
      
      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

      echo "๐Ÿ“„ Creating default models.yaml..."
#       cat > ~/ai-stack/localai/config/models.yaml <<EOF
# - name: phi
#   backend: llama-cuda
#   parameters:
#     model: /models/phi-2.gguf
# EOF

      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

      docker run -d \
        --name localai \
        --gpus all \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/build/models \
        -v ~/ai-stack/localai/config:/config \
        -e ENABLE_BACKENDS=llama-cuda,ollama \
        -e INCLUDE_DEFAULT_MODELS=true \
        -e AUTOLOAD_MODELS=true \
        --restart unless-stopped \
        localai/localai:latest-aio-gpu-nvidia-cuda-12

      echo "๐ŸŒ LocalAI is now running at http://localhost:8080"
    fi
  fi
fi

# if [[ "$1" != "--update" ]]; then
#   if check_container_running "localai"; then
#     echo "✅ Skipping LocalAI install (already running)."
#   else
#     read -p "➕ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt
#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
#       echo "🧠 Installing LocalAI (Custom created localAI container)..."
      
#       mkdir -p ~/ai-stack/localai/models
#       mkdir -p ~/ai-stack/localai/config

#       echo "📦 Using custom built LocalAI image (localai:custom)"

#       docker run -d \
#         --name localai \
#         --gpus all \
#         --network ai-stack-net \
#         -p 8080:8080 \
#         -v ~/ai-stack/localai/models:/models \
#         -v ~/ai-stack/localai/models:/app/models \
#         -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
#         -e INCLUDE_DEFAULT_MODELS=true \
#         -e AUTOLOAD_MODELS=true \
#         -e MODEL_PATH=/models \
#         -e LOG_LEVEL=debug \
#         --restart unless-stopped \
#         localai:custom

#       echo "🌐 LocalAI is now running at http://localhost:8080"
#     fi
#   fi
# fi




echo "โœ… AI stack setup complete."