diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
index 8a2b04d..ad23e00 100644
--- a/setup-ai-stack.sh
+++ b/setup-ai-stack.sh
@@ -206,7 +206,7 @@ if [[ "$1" != "--update" ]]; then
   docker run -d -p 3000:8080 --gpus all \
     -e OPENAI_API_BASE_URL=http://pipelines:9099 \
     -e OPENAI_API_KEY=0p3n-w3bu! \
-    -e OLLAMA_MAX_LOADED_MODELS=1
+    -e OLLAMA_MAX_LOADED_MODELS=1 \
     -v ollama:/root/.ollama \
     -v open-webui:/app/backend/data \
     --name open-webui \
@@ -477,7 +477,7 @@ if [[ "$1" != "--update" ]]; then
     -e ENABLE_BACKENDS=llama-cuda,ollama \
     -e INCLUDE_DEFAULT_MODELS=true \
     -e AUTOLOAD_MODELS=true \
-    -e MODEL_IDLE_TIMEOUT=600
+    -e MODEL_IDLE_TIMEOUT=600 \
     --restart unless-stopped \
     localai/localai:latest-aio-gpu-nvidia-cuda-12