commented out pre-built LocalAI AIO Docker image

crowetic 2025-05-19 17:43:26 -07:00
parent 6952e719ef
commit 6af23e4310


@@ -481,44 +481,44 @@ EOF
   fi
 fi
-if [[ "$1" != "--update" ]]; then
-  if check_container_running "localai"; then
-    echo "✅ Skipping LocalAI install (already running)."
-  else
-    read -p " Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
-    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
-      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
+# if [[ "$1" != "--update" ]]; then
+#   if check_container_running "localai"; then
+#     echo "✅ Skipping LocalAI install (already running)."
+#   else
+#     read -p " Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
+#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
+#       echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
-      mkdir -p ~/ai-stack/localai/models
-      mkdir -p ~/ai-stack/localai/config
+#       mkdir -p ~/ai-stack/localai/models
+#       mkdir -p ~/ai-stack/localai/config
-      echo "📄 Creating default models.yaml..."
-      cat > ~/ai-stack/localai/config/models.yaml <<EOF
-- name: phi
-  backend: llama-cuda
-  parameters:
-    model: /models/phi-2.gguf
-EOF
+#       echo "📄 Creating default models.yaml..."
+#       cat > ~/ai-stack/localai/config/models.yaml <<EOF
+# - name: phi
+#   backend: llama-cuda
+#   parameters:
+#     model: /models/phi-2.gguf
+# EOF
-      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
+#       docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
-      docker run -d \
-        --name localai \
-        --gpus all \
-        -p 8080:8080 \
-        -v ~/ai-stack/localai/models:/models \
-        -v ~/ai-stack/localai/config:/config \
-        -e ENABLE_BACKENDS=llama-cuda,ollama \
-        -e INCLUDE_DEFAULT_MODELS=true \
-        -e AUTOLOAD_MODELS=true \
-        -e MODEL_PATH=/models \
-        --restart unless-stopped \
-        localai/localai:latest-aio-gpu-nvidia-cuda-12
+#       docker run -d \
+#         --name localai \
+#         --gpus all \
+#         -p 8080:8080 \
+#         -v ~/ai-stack/localai/models:/models \
+#         -v ~/ai-stack/localai/config:/config \
+#         -e ENABLE_BACKENDS=llama-cuda,ollama \
+#         -e INCLUDE_DEFAULT_MODELS=true \
+#         -e AUTOLOAD_MODELS=true \
+#         -e MODEL_PATH=/models \
+#         --restart unless-stopped \
+#         localai/localai:latest-aio-gpu-nvidia-cuda-12
-      echo "🌐 LocalAI is now running at http://localhost:8080"
-    fi
-  fi
-fi
+#       echo "🌐 LocalAI is now running at http://localhost:8080"
+#     fi
+#   fi
+# fi
 if [[ "$1" != "--update" ]]; then
   if check_container_running "localai"; then
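
Both the disabled block and the context lines below it gate on a check_container_running helper that is defined elsewhere in the script and not shown in this hunk. For readers following along, a minimal sketch of what such a helper could look like — an assumption for illustration, not the script's actual definition:

# Hypothetical sketch — the real check_container_running lives elsewhere
# in the script and may differ.
check_container_running() {
  local name="$1"
  # List names of running containers, one per line, and require an
  # exact full-line match on the requested name.
  docker ps --format '{{.Names}}' | grep -qx "$name"
}

# Usage, mirroring the guard in the diff:
if check_container_running "localai"; then
  echo "✅ Skipping LocalAI install (already running)."
fi

Matching the full line with grep -qx avoids false positives from containers whose names merely contain the target string.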