Fixed incorrect localai setup

crowetic 2025-05-19 09:43:39 -07:00
parent 1a95585ebc
commit 5801e5f800


@@ -2,6 +2,8 @@
 set -e
 mkdir -p ~/ai-stack
+mkdir -p ~/ai-stack/localai/models
+mkdir -p ~/ai-stack/localai/config
 LOG_FILE=~/ai-stack/setup.log
 exec > >(tee -a "$LOG_FILE") 2>&1
@@ -42,70 +44,44 @@ if [[ "$1" == "--update" ]]; then
 echo "⚠️ JupyterLab is not running, skipping update."
 fi
-# # Whisper (whisper.cpp)
-# if check_container_running "whisper"; then
-# echo "🧼 Updating Whisper..."
-# docker stop whisper
-# docker rm whisper
-# docker pull ggerganov/whisper.cpp:latest
-# docker run -d \
-# --name whisper \
-# --network ai-stack-net \
-# --gpus all \
-# -v whisper_data:/app/data \
-# ggerganov/whisper.cpp:latest
-# echo "✅ Whisper updated."
-# else
-# echo "⚠️ Whisper is not running, skipping update."
-# fi
-# # Stable Diffusion (A1111)
-# if check_container_running "stable-diffusion"; then
-# echo "🧼 Updating Stable Diffusion..."
-# docker stop stable-diffusion
-# docker rm stable-diffusion
-# docker pull abraham-ai/automatic1111-webui:latest
-# docker run -d \
-# --name stable-diffusion \
-# --gpus all \
-# -p 7860:7860 \
-# -v sd_models:/data \
-# abraham-ai/automatic1111-webui:latest
-# echo "✅ Stable Diffusion updated."
-# else
-# echo "⚠️ Stable Diffusion is not running, skipping update."
-# fi
-# echo "🔁 Updating ComfyUI..."
-# docker stop comfyui 2>/dev/null || true
-# docker rm comfyui 2>/dev/null || true
-# docker pull ghcr.io/cmdr2/comfyui:latest
-# docker run -d --gpus all \
-# --name comfyui \
-# --network ai-stack-net \
-# -p 8188:8188 \
-# -v comfyui_data:/workspace \
-# ghcr.io/cmdr2/comfyui:latest
-# echo "✅ ComfyUI updated and restarted."
-# echo "🔁 Updating LocalAI..."
-# docker stop localai 2>/dev/null || true
-# docker rm localai 2>/dev/null || true
-mkdir -p ~/ai-stack/localai
-docker pull quay.io/go-skynet/local-ai:latest
-docker run -d \
-  --name localai \
-  --network ai-stack-net \
-  -p 8080:8080 \
-  -v ~/ai-stack/localai:/models \
-  -e MODELS_PATH=/models \
-  -e ENABLE_OOLLAMA_BACKEND=true \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  quay.io/go-skynet/local-ai:latest
-echo "✅ LocalAI updated and restarted."
+# 🔁 Updating LocalAI (GPU build)
+if check_container_running "localai"; then
+  echo "🧠 Updating LocalAI..."
+  docker stop localai
+  docker rm localai
+else
+  echo " Installing LocalAI..."
+fi
+docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
+# Ensure default models.yaml exists
+MODELS_YAML=~/ai-stack/localai/config/models.yaml
+if [ ! -f "$MODELS_YAML" ]; then
+  echo "📄 Creating default models.yaml..."
+  cat > "$MODELS_YAML" <<EOF
+- name: phi
+  backend: llama-cuda
+  parameters:
+    model: /models/phi-2.gguf
+EOF
+fi
+docker run -d \
+  --name localai \
+  --gpus all \
+  -p 8080:8080 \
+  -v ~/ai-stack/localai/models:/models \
+  -v ~/ai-stack/localai/config:/etc/local-ai \
+  --health-cmd="curl --fail http://localhost:8080/health || exit 1" \
+  --health-interval=30s \
+  --health-retries=3 \
+  --restart unless-stopped \
+  localai/localai:latest-aio-gpu-nvidia-cuda-12
+echo "✅ LocalAI running at http://localhost:8080"
 echo "🔁 Updating Whisper.cpp (custom GPU build)..."
@@ -447,26 +423,37 @@ if [[ "$1" != "--update" ]]; then
 if check_container_running "localai"; then
 echo "✅ Skipping LocalAI install (already running)."
 else
-read -p " Install LocalAI (OpenAI-compatible API proxy)? [y/N]: " lai
-if [[ "$lai" =~ ^[Yy]$ ]]; then
-  echo "🧠 Setting up LocalAI..."
-  docker stop localai 2>/dev/null || true
-  docker rm localai 2>/dev/null || true
-  mkdir -p ~/ai-stack/localai
-  docker run -d \
-    --name localai \
-    --network ai-stack-net \
-    -p 8080:8080 \
-    -v ~/ai-stack/localai:/models \
-    -e MODELS_PATH=/models \
-    -e ENABLE_OOLLAMA_BACKEND=true \
-    -v /var/run/docker.sock:/var/run/docker.sock \
-    quay.io/go-skynet/local-ai:latest
-  echo "🌐 LocalAI running at http://localhost:8080"
+read -p " Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
+if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
+  echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
+  mkdir -p ~/ai-stack/localai/models
+  mkdir -p ~/ai-stack/localai/config
+  echo "📄 Creating default models.yaml..."
+  cat > ~/ai-stack/localai/config/models.yaml <<EOF
+- name: phi
+  backend: llama-cuda
+  parameters:
+    model: /models/phi-2.gguf
+EOF
+  docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
+  docker run -d \
+    --name localai \
+    --gpus all \
+    -p 8080:8080 \
+    -v ~/ai-stack/localai/models:/models \
+    -v ~/ai-stack/localai/config:/etc/local-ai \
+    --env ENABLE_BACKENDS=llama-cuda,ollama \
+    --health-cmd="curl --fail http://localhost:8080/health || exit 1" \
+    --health-interval=30s \
+    --health-retries=3 \
+    --restart unless-stopped \
+    localai/localai:latest-aio-gpu-nvidia-cuda-12
+  echo "🌐 LocalAI is now running at http://localhost:8080"
 fi
 fi
 fi
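Once the container is up, the health check wired into docker run and LocalAI's OpenAI-compatible API give a quick way to confirm that the phi entry from models.yaml is actually being served. A minimal verification sketch (standard LocalAI/OpenAI-style endpoints; the model name matches the config above):

    # Sketch: confirm the container reports healthy and the "phi" model responds.
    docker inspect --format '{{.State.Health.Status}}' localai   # expect "healthy"
    curl -sf http://localhost:8080/v1/models                     # should list "phi"
    curl -s http://localhost:8080/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{"model":"phi","messages":[{"role":"user","content":"Say hello"}]}'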
@@ -474,5 +461,6 @@ fi
 echo "✅ AI stack setup complete."