Fixed incorrect localai setup
parent 1a95585ebc
commit 5801e5f800
@@ -2,6 +2,8 @@
set -e

mkdir -p ~/ai-stack
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config
LOG_FILE=~/ai-stack/setup.log
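# Mirror all script output (stdout and stderr) into the setup log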
exec > >(tee -a "$LOG_FILE") 2>&1

@@ -41,71 +43,45 @@ if [[ "$1" == "--update" ]]; then
else
echo "⚠️ JupyterLab is not running, skipping update."
fi

# # Whisper (whisper.cpp)
# if check_container_running "whisper"; then
# echo "🧼 Updating Whisper..."
# docker stop whisper
# docker rm whisper
# docker pull ggerganov/whisper.cpp:latest
# docker run -d \
# --name whisper \
# --network ai-stack-net \
# --gpus all \
# -v whisper_data:/app/data \
# ggerganov/whisper.cpp:latest
# echo "✅ Whisper updated."
# else
# echo "⚠️ Whisper is not running, skipping update."
# fi

# # Stable Diffusion (A1111)
# if check_container_running "stable-diffusion"; then
# echo "🧼 Updating Stable Diffusion..."
# docker stop stable-diffusion
# docker rm stable-diffusion
# docker pull abraham-ai/automatic1111-webui:latest
# docker run -d \
# --name stable-diffusion \
# --gpus all \
# -p 7860:7860 \
# -v sd_models:/data \
# abraham-ai/automatic1111-webui:latest
# echo "✅ Stable Diffusion updated."
# else
# echo "⚠️ Stable Diffusion is not running, skipping update."
# fi

# echo "🔁 Updating ComfyUI..."
# docker stop comfyui 2>/dev/null || true
# docker rm comfyui 2>/dev/null || true
# docker pull ghcr.io/cmdr2/comfyui:latest
# docker run -d --gpus all \
# --name comfyui \
# --network ai-stack-net \
# -p 8188:8188 \
# -v comfyui_data:/workspace \
# ghcr.io/cmdr2/comfyui:latest
# echo "✅ ComfyUI updated and restarted."

# echo "🔁 Updating LocalAI..."
# docker stop localai 2>/dev/null || true
# docker rm localai 2>/dev/null || true

mkdir -p ~/ai-stack/localai
# 🔁 Updating LocalAI (GPU build)
if check_container_running "localai"; then
echo "🧠 Updating LocalAI..."
docker stop localai
docker rm localai
else
echo "➕ Installing LocalAI..."
fi

docker pull quay.io/go-skynet/local-ai:latest
docker run -d \
--name localai \
--network ai-stack-net \
-p 8080:8080 \
-v ~/ai-stack/localai:/models \
-e MODELS_PATH=/models \
-e ENABLE_OOLLAMA_BACKEND=true \
-v /var/run/docker.sock:/var/run/docker.sock \
quay.io/go-skynet/local-ai:latest
docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

# Ensure default models.yaml exists
MODELS_YAML=~/ai-stack/localai/config/models.yaml
if [ ! -f "$MODELS_YAML" ]; then
echo "📄 Creating default models.yaml..."
cat > "$MODELS_YAML" <<EOF
- name: phi
  backend: llama-cuda
  parameters:
    model: /models/phi-2.gguf
EOF
fi
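# The entry above points at /models/phi-2.gguf inside the container, which maps to
# ~/ai-stack/localai/models/phi-2.gguf on the host via the volume mount below.
# Optional sanity check (illustrative sketch, not required by LocalAI):
# [ -f ~/ai-stack/localai/models/phi-2.gguf ] || echo "⚠️ phi-2.gguf not found in ~/ai-stack/localai/models"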

docker run -d \
--name localai \
--gpus all \
-p 8080:8080 \
-v ~/ai-stack/localai/models:/models \
-v ~/ai-stack/localai/config:/etc/local-ai \
--health-cmd="curl --fail http://localhost:8080/health || exit 1" \
--health-interval=30s \
--health-retries=3 \
--restart unless-stopped \
localai/localai:latest-aio-gpu-nvidia-cuda-12
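# The --health-cmd above makes Docker probe the /health endpoint; an illustrative way
# to watch the result once the container is up:
# docker inspect --format '{{.State.Health.Status}}' localai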

echo "✅ LocalAI running at http://localhost:8080"

echo "✅ LocalAI updated and restarted."


echo "🔁 Updating Whisper.cpp (custom GPU build)..."

@@ -447,26 +423,37 @@ if [[ "$1" != "--update" ]]; then
if check_container_running "localai"; then
echo "✅ Skipping LocalAI install (already running)."
else
read -p "➕ Install LocalAI (OpenAI-compatible API proxy)? [y/N]: " lai
if [[ "$lai" =~ ^[Yy]$ ]]; then
echo "🧠 Setting up LocalAI..."
read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."

mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config

docker stop localai 2>/dev/null || true
docker rm localai 2>/dev/null || true
echo "📄 Creating default models.yaml..."
cat > ~/ai-stack/localai/config/models.yaml <<EOF
- name: phi
  backend: llama-cuda
  parameters:
    model: /models/phi-2.gguf
EOF

mkdir -p ~/ai-stack/localai
docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

docker run -d \
--name localai \
--network ai-stack-net \
--gpus all \
-p 8080:8080 \
-v ~/ai-stack/localai:/models \
-e MODELS_PATH=/models \
-e ENABLE_OOLLAMA_BACKEND=true \
-v /var/run/docker.sock:/var/run/docker.sock \
quay.io/go-skynet/local-ai:latest
-v ~/ai-stack/localai/models:/models \
-v ~/ai-stack/localai/config:/etc/local-ai \
--env ENABLE_BACKENDS=llama-cuda,ollama \
--health-cmd="curl --fail http://localhost:8080/health || exit 1" \
--health-interval=30s \
--health-retries=3 \
--restart unless-stopped \
localai/localai:latest-aio-gpu-nvidia-cuda-12

echo "🌐 LocalAI running at http://localhost:8080"
echo "🌐 LocalAI is now running at http://localhost:8080"
fi
fi
fi

@@ -474,5 +461,6 @@ fi

echo "✅ AI stack setup complete."