diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
index f2d75f5..1026ae0 100644
--- a/setup-ai-stack.sh
+++ b/setup-ai-stack.sh
@@ -69,6 +69,8 @@ if [[ "$1" == "--update" ]]; then
   docker rm open-webui
   docker pull ghcr.io/open-webui/open-webui:ollama
   docker run -d -p 3000:8080 --gpus all \
+    -e OPENAI_API_BASE_URL=http://pipelines:9099 \
+    -e OPENAI_API_KEY=0p3n-w3bu! \
     -v ollama:/root/.ollama \
     -v open-webui:/app/backend/data \
     --name open-webui \
@@ -86,6 +88,7 @@ if [[ "$1" == "--update" ]]; then
   docker rm localai
   docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
   docker run -d \
+    --network ai-stack-net \
     --name localai \
     --gpus all \
     -p 8080:8080 \
@@ -201,6 +204,8 @@ if [[ "$1" != "--update" ]]; then
   docker stop open-webui 2>/dev/null || true
   docker rm open-webui 2>/dev/null || true
   docker run -d -p 3000:8080 --gpus all \
+    -e OPENAI_API_BASE_URL=http://pipelines:9099 \
+    -e OPENAI_API_KEY=0p3n-w3bu! \
     -v ollama:/root/.ollama \
     -v open-webui:/app/backend/data \
     --name open-webui \