commented out pre-built localai aio docker
parent 6952e719ef
commit 6af23e4310
@@ -481,44 +481,44 @@ EOF
 fi
 fi
 
-if [[ "$1" != "--update" ]]; then
-if check_container_running "localai"; then
-echo "✅ Skipping LocalAI install (already running)."
-else
-read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
-if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
-echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
+# if [[ "$1" != "--update" ]]; then
+# if check_container_running "localai"; then
+# echo "✅ Skipping LocalAI install (already running)."
+# else
+# read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
+# if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
+# echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
 
-mkdir -p ~/ai-stack/localai/models
-mkdir -p ~/ai-stack/localai/config
+# mkdir -p ~/ai-stack/localai/models
+# mkdir -p ~/ai-stack/localai/config
 
-echo "📄 Creating default models.yaml..."
-cat > ~/ai-stack/localai/config/models.yaml <<EOF
-- name: phi
-  backend: llama-cuda
-  parameters:
-    model: /models/phi-2.gguf
-EOF
+# echo "📄 Creating default models.yaml..."
+# cat > ~/ai-stack/localai/config/models.yaml <<EOF
+# - name: phi
+#   backend: llama-cuda
+#   parameters:
+#     model: /models/phi-2.gguf
+# EOF
 
-docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
+# docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
 
-docker run -d \
-  --name localai \
-  --gpus all \
-  -p 8080:8080 \
-  -v ~/ai-stack/localai/models:/models \
-  -v ~/ai-stack/localai/config:/config \
-  -e ENABLE_BACKENDS=llama-cuda,ollama \
-  -e INCLUDE_DEFAULT_MODELS=true \
-  -e AUTOLOAD_MODELS=true \
-  -e MODEL_PATH=/models \
-  --restart unless-stopped \
-  localai/localai:latest-aio-gpu-nvidia-cuda-12
+# docker run -d \
+#   --name localai \
+#   --gpus all \
+#   -p 8080:8080 \
+#   -v ~/ai-stack/localai/models:/models \
+#   -v ~/ai-stack/localai/config:/config \
+#   -e ENABLE_BACKENDS=llama-cuda,ollama \
+#   -e INCLUDE_DEFAULT_MODELS=true \
+#   -e AUTOLOAD_MODELS=true \
+#   -e MODEL_PATH=/models \
+#   --restart unless-stopped \
+#   localai/localai:latest-aio-gpu-nvidia-cuda-12
 
-echo "🌐 LocalAI is now running at http://localhost:8080"
-fi
-fi
-fi
+# echo "🌐 LocalAI is now running at http://localhost:8080"
+# fi
+# fi
+# fi
 
 if [[ "$1" != "--update" ]]; then
 if check_container_running "localai"; then
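The surviving branch still calls check_container_running, whose definition lies outside this hunk. A minimal sketch of what such a helper typically looks like, assuming it only needs to test whether a container with the given name is currently up; this is a hypothetical illustration, not the script's actual implementation:

# hypothetical helper: returns 0 if a container with the given name is running
check_container_running() {
  local name="$1"
  docker ps --format '{{.Names}}' | grep -qx "$name"
}

# usage
if check_container_running "localai"; then
  echo "localai is already running"
fi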
|
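If the commented-out pre-built AIO container were brought back up as written (localai/localai:latest-aio-gpu-nvidia-cuda-12 on port 8080), a quick smoke test could hit LocalAI's OpenAI-compatible API; the "phi" model name below is taken from the removed models.yaml and whether it actually loads is an assumption:

# list the models the server has registered
curl -s http://localhost:8080/v1/models

# one-shot chat completion against the phi entry from models.yaml
curl -s http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "phi", "messages": [{"role": "user", "content": "Say hello"}]}'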