#!/bin/bash
set -e

mkdir -p ~/ai-stack
LOG_FILE=~/ai-stack/setup.log
exec > >(tee -a "$LOG_FILE") 2>&1

echo "๐Ÿ“ฆ AI Stack Setup Script"
echo "Log: $LOG_FILE"
echo

# Containers to manage
CONTAINERS=("open-webui" "jupyterlab" "tg-webui" "comfyui" "stable-diffusion")


# Handle update option
if [[ "$1" == "--update" ]]; then
  echo "๐Ÿ”„ Updating running containers..."
  for name in "${CONTAINERS[@]}"; do
    if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
      echo "๐Ÿงผ Updating $name..."
      docker pull "$(docker inspect --format='{{.Config.Image}}' $name)"
      docker stop $name
      docker rm $name
    else
      echo "โš ๏ธ  Container $name is not running, skipping."
    fi
  done
  echo "โœ… Update complete. Rerun the script without --update to restart containers."
  exit 0
fi
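# Usage (for reference):
#   ./setup.sh            interactively install/start the stack
#   ./setup.sh --update   pull newer images for running containers, then rerun without --update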

check_container_running() {
  local name="$1"
  if docker ps --format '{{.Names}}' | grep -q "^$name$"; then
    echo "๐ŸŸข Container '$name' is already running."
    return 0
  fi
  return 1
}
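# Example use of the helper above (it is not called by default below):
#   if check_container_running open-webui; then
#     echo "Skipping open-webui install."
#   fi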

# Function to clean up existing container
clean_container() {
  local name=$1
  docker rm -f "$name" 2>/dev/null || true
}
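# Note: clean_container uses -f to remove a container even if it is running, and || true so a
# missing container does not trip `set -e`.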

# 🧠 Open WebUI + Ollama
echo "🧠 Setting up Open WebUI + Ollama..."
clean_container open-webui
docker run -d -p 3000:8080 --gpus=all \
  -v ollama:/root/.ollama \
  -v open-webui:/app/backend/data \
  --name open-webui \
  --restart always \
  ghcr.io/open-webui/open-webui:ollama
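
# Open WebUI should be reachable at http://localhost:3000 once the container is up.
# Optional sketch, assuming the bundled Ollama CLI in this image; pull a model for chat:
#   docker exec -it open-webui ollama pull llama3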

# ➕ JupyterLab
read -p "➕ Install JupyterLab for Python/AI Dev? [y/N]: " jupyter
if [[ "$jupyter" =~ ^[Yy]$ ]]; then
  echo "📔 Installing JupyterLab..."
  clean_container jupyterlab
  docker run -d \
    --name jupyterlab \
    --gpus all \
    -p 8888:8888 \
    -v jupyter_data:/home/jovyan/work \
    jupyter/scipy-notebook:latest

  echo "๐Ÿ” Jupyter token:"
  docker logs jupyterlab 2>&1 | grep token | tee ~/ai-stack/jupyter_token.txt
  echo "๐Ÿ“ Token saved to ~/ai-stack/jupyter_token.txt"
fi
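# JupyterLab (if installed) listens on http://localhost:8888; log in with the token captured above.
# If the token had not appeared in the logs yet at install time, re-read it later with:
#   docker logs jupyterlab 2>&1 | grep token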

# ➕ Whisper.cpp with GPU Support (Build Locally)
read -p "➕ Install Whisper.cpp (GPU-accelerated, custom Docker build)? [y/N]: " whisper
if [[ "$whisper" =~ ^[Yy]$ ]]; then
  echo "🔊 Cloning and building Whisper.cpp with GPU support..."
  
  WHISPER_DIR=~/ai-stack/whisper.cpp
  mkdir -p ~/ai-stack
  git clone https://github.com/ggerganov/whisper.cpp "$WHISPER_DIR" || true

  cat > "$WHISPER_DIR/Dockerfile.gpu" <<EOF
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

RUN apt-get update && \\
    apt-get install -y git build-essential cmake curl ffmpeg libopenblas-dev && \\
    apt-get clean

WORKDIR /opt
RUN git clone https://github.com/ggerganov/whisper.cpp.git
WORKDIR /opt/whisper.cpp
RUN cmake -DWITH_CUBLAS=ON . && make -j

CMD ["./main", "-m", "models/ggml-large.bin", "-f", "samples/jfk.wav"]
EOF

  echo "๐Ÿณ Building Docker image 'whispercpp-gpu'..."
  docker build -t whispercpp-gpu -f "$WHISPER_DIR/Dockerfile.gpu" "$WHISPER_DIR"

  echo "๐Ÿ“‚ Setting up input folder at ~/ai-stack/audio"
  mkdir -p ~/ai-stack/audio

  echo "โœ… To run whisper on a file:"
  echo "docker run --rm --gpus all -v ~/ai-stack/audio:/audio whispercpp-gpu ./main -m models/ggml-large.bin -f /audio/yourfile.wav"

  echo "๐Ÿ“ข Reminder: Download a model like ggml-large.bin into ~/ai-stack/whisper.cpp/models/"
  echo "e.g. https://huggingface.co/ggerganov/whisper.cpp/tree/main/models"
fi

if [[ "$1" == "--update" ]]; then
  # ...existing container update loop

  echo "๐Ÿ” Updating whisper.cpp..."
  WHISPER_DIR=~/ai-stack/whisper.cpp
  if [ -d "$WHISPER_DIR" ]; then
    cd "$WHISPER_DIR"
    git pull origin master
    docker build -t whispercpp-gpu -f Dockerfile.gpu .
    echo "โœ… whispercpp-gpu rebuilt with latest source."
  else
    echo "โš ๏ธ whisper.cpp not found at $WHISPER_DIR โ€” skipping."
  fi
fi

# ➕ Stable Diffusion (AUTOMATIC1111 custom GPU build)
read -p "➕ Install Stable Diffusion UI (AUTOMATIC1111, custom GPU build)? [y/N]: " sd
if [[ "$sd" =~ ^[Yy]$ ]]; then
  echo "🎨 Cloning and building AUTOMATIC1111 WebUI..."

  SD_DIR=~/ai-stack/stable-diffusion
  mkdir -p ~/ai-stack
  git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui "$SD_DIR" || true
  cd "$SD_DIR"
  cat > "$SD_DIR/Dockerfile" <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

# Install dependencies
RUN apt-get update && \
    apt-get install -y git python3 python3-pip python3-venv ffmpeg libgl1 libglib2.0-0 && \
    apt-get clean

WORKDIR /app
COPY . /app

# Create venv and install deps
RUN python3 -m venv venv && \
    . venv/bin/activate && \
    pip install --upgrade pip && \
    pip install -r requirements_versions.txt && \
    pip install -r requirements.txt

EXPOSE 7860

CMD ["/bin/bash", "-c", ". venv/bin/activate && python launch.py --listen --xformers --api"]

EOF
  echo "๐Ÿณ Building Docker image 'a1111-webui'..."
  docker stop stable-diffusion 2>/dev/null || true
  docker rm stable-diffusion 2>/dev/null || true
  docker build -t a1111-webui .

  echo "๐Ÿ“‚ Mounting model directory from ~/ai-stack/sd-models"
  mkdir -p ~/ai-stack/sd-models

  echo "๐Ÿš€ Starting Stable Diffusion WebUI..."
  docker run -d --gpus all \
    --name stable-diffusion \
    -p 7860:7860 \
    -v ~/ai-stack/sd-models:/stable-diffusion-webui/models/Stable-diffusion \
    -v ~/ai-stack/stable-diffusion/outputs:/stable-diffusion-webui/outputs \
    a1111-webui

  echo "๐ŸŒ Access Stable Diffusion WebUI at http://localhost:7860"
fi

if [[ "$1" == "--update" ]]; then
  echo "๐Ÿ” Updating Stable Diffusion WebUI..."
  SD_DIR=~/ai-stack/stable-diffusion
  if [ -d "$SD_DIR" ]; then
    cd "$SD_DIR"
    git pull origin master
    docker build -t a1111-webui .
    docker stop stable-diffusion 2>/dev/null || true
    docker rm stable-diffusion 2>/dev/null || true
    docker run -d --gpus all \
      --name stable-diffusion \
      -p 7860:7860 \
      -v ~/ai-stack/sd-models:/app/models/Stable-diffusion \
      -v ~/ai-stack/stable-diffusion/outputs:/app/outputs \
      a1111-webui
    echo "✅ Stable Diffusion updated and restarted."
  else
    echo "⚠️ Stable Diffusion directory not found; skipping update."
  fi
fi



# ➕ Text-generation-webui (custom Docker build with GPU + API)
read -p "➕ Install Text-generation-webui? [y/N]: " tg
if [[ "$tg" =~ ^[Yy]$ ]]; then
  echo "🧪 Cloning and building Text-generation-webui..."

  TG_DIR=~/ai-stack/tg-webui
  git clone https://github.com/oobabooga/text-generation-webui "$TG_DIR" || true
  cd "$TG_DIR"

  echo "๐Ÿ“ Creating Dockerfile for build..."
  cat > Dockerfile <<'EOF'
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
    apt-get install -y git python3 python3-pip build-essential && \
    apt-get clean

WORKDIR /app
COPY . /app

# Install dependencies
RUN pip install --upgrade pip && \
    (pip install -r requirements.txt || pip install torch torchvision torchaudio sentencepiece transformers)

EXPOSE 7861

# The web UI defaults to port 7860; bind it to 7861 so it matches EXPOSE and the host port mapping
CMD ["/bin/bash", "-c", "python server.py --listen --listen-port 7861 --api"]
EOF

  echo "๐Ÿณ Building Docker image 'tg-webui'..."
  docker stop tg-webui 2>/dev/null || true
  docker rm tg-webui 2>/dev/null || true
  docker build -t tg-webui .

  echo "๐Ÿ“‚ Mounting model directory from ~/ai-stack/tg-models"
  mkdir -p ~/ai-stack/tg-models

  echo "๐Ÿš€ Running text-generation-webui on port 7861..."
  docker run -d --gpus all \
    --name tg-webui \
    -p 7861:7861 \
    -v ~/ai-stack/tg-models:/app/models \
    tg-webui

  echo "๐ŸŒ Access at http://localhost:7861"
fi


echo "๐Ÿ” Updating Text-generation-webui..."
TG_DIR=~/ai-stack/tg-webui
if [ -d "$TG_DIR" ]; then
  cd "$TG_DIR"
  git pull origin master
  docker stop tg-webui 2>/dev/null || true
  docker rm tg-webui 2>/dev/null || true
  docker build -t tg-webui .
  docker run -d --gpus all \
    --name tg-webui \
    -p 7861:7861 \
    -v ~/ai-stack/tg-models:/app/models \
    tg-webui
  echo "โœ… Text-generation-webui updated and restarted."
else
  echo "โš ๏ธ Text-generation-webui directory not found โ€” skipping update."
fi


# ➕ ComfyUI (Docker - graph-based Stable Diffusion)
read -p "➕ Install ComfyUI (graph-based Stable Diffusion UI)? [y/N]: " comfy
if [[ "$comfy" =~ ^[Yy]$ ]]; then
  echo "🧩 Installing ComfyUI (Docker)..."
  clean_container comfyui
  docker run -d --gpus all \
    --name comfyui \
    -p 8188:8188 \
    -v comfyui_data:/workspace \
    ghcr.io/cmdr2/comfyui:latest
  echo "๐ŸŒ ComfyUI running at http://localhost:8188"
fi

echo "โœ… AI stack setup complete."