diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
index f2a30e2..98937dc 100644
--- a/setup-ai-stack.sh
+++ b/setup-ai-stack.sh
@@ -73,10 +73,7 @@ docker run -d \
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  -v ~/ai-stack/localai/config:/etc/local-ai \
-  --health-cmd="curl --fail http://localhost:8080/health || exit 1" \
-  --health-interval=30s \
-  --health-retries=3 \
+  --env ENABLE_BACKENDS=llama-cuda,ollama \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12
 
@@ -253,9 +250,6 @@ EOF
 fi
 
 
-
-
-
 if [[ "$1" != "--update" ]]; then
   if check_container_running "stable-diffusion"; then
     echo "✅ Skipping Stable Diffusion install (already running)."
@@ -445,11 +439,7 @@ EOF
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  -v ~/ai-stack/localai/config:/etc/local-ai \
-  --env ENABLE_BACKENDS=llama-cuda,ollama
-  --health-cmd="curl --fail http://localhost:8080/health || exit 1" \
-  --health-interval=30s \
-  --health-retries=3 \
+  --env ENABLE_BACKENDS=llama-cuda,ollama \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12
 