added changes to localai config
This commit is contained in:
parent
5801e5f800
commit
716b23fabf
@ -73,10 +73,7 @@ docker run -d \
|
||||
--gpus all \
|
||||
-p 8080:8080 \
|
||||
-v ~/ai-stack/localai/models:/models \
|
||||
-v ~/ai-stack/localai/config:/etc/local-ai \
|
||||
--health-cmd="curl --fail http://localhost:8080/health || exit 1" \
|
||||
--health-interval=30s \
|
||||
--health-retries=3 \
|
||||
--env ENABLE_BACKENDS=llama-cuda,ollama \
|
||||
--restart unless-stopped \
|
||||
localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
@ -253,9 +250,6 @@ EOF
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if [[ "$1" != "--update" ]]; then
|
||||
if check_container_running "stable-diffusion"; then
|
||||
echo "✅ Skipping Stable Diffusion install (already running)."
|
||||
@ -445,11 +439,7 @@ EOF
|
||||
--gpus all \
|
||||
-p 8080:8080 \
|
||||
-v ~/ai-stack/localai/models:/models \
|
||||
-v ~/ai-stack/localai/config:/etc/local-ai \
|
||||
--env ENABLE_BACKENDS=llama-cuda,ollama \
|
||||
--health-cmd="curl --fail http://localhost:8080/health || exit 1" \
|
||||
--health-interval=30s \
|
||||
--health-retries=3 \
|
||||
--env ENABLE_BACKENDS=llama-cuda,ollama \
|
||||
--restart unless-stopped \
|
||||
localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user