diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
index b3d2db7..5c5823d 100644
--- a/setup-ai-stack.sh
+++ b/setup-ai-stack.sh
@@ -481,44 +481,44 @@ EOF
   fi
 fi
 
-if [[ "$1" != "--update" ]]; then
-  if check_container_running "localai"; then
-    echo "✅ Skipping LocalAI install (already running)."
-  else
-    read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
-    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
-      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
+# if [[ "$1" != "--update" ]]; then
+#   if check_container_running "localai"; then
+#     echo "✅ Skipping LocalAI install (already running)."
+#   else
+#     read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
+#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
+#       echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
 
-      mkdir -p ~/ai-stack/localai/models
-      mkdir -p ~/ai-stack/localai/config
+#       mkdir -p ~/ai-stack/localai/models
+#       mkdir -p ~/ai-stack/localai/config
 
-      echo "📄 Creating default models.yaml..."
-      cat > ~/ai-stack/localai/config/models.yaml <<EOF
-- name: phi
-  backend: llama-cuda
-  parameters:
-    model: /models/phi-2.gguf
-EOF
+#       echo "📄 Creating default models.yaml..."
+#       cat > ~/ai-stack/localai/config/models.yaml <<EOF
+# - name: phi
+#   backend: llama-cuda
+#   parameters:
+#     model: /models/phi-2.gguf
+# EOF
 
-      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
+#       docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12
 
-      docker run -d \
-        --name localai \
-        --gpus all \
-        -p 8080:8080 \
-        -v ~/ai-stack/localai/models:/models \
-        -v ~/ai-stack/localai/config:/config \
-        -e ENABLE_BACKENDS=llama-cuda,ollama \
-        -e INCLUDE_DEFAULT_MODELS=true \
-        -e AUTOLOAD_MODELS=true \
-        -e MODEL_PATH=/models \
-        --restart unless-stopped \
-        localai/localai:latest-aio-gpu-nvidia-cuda-12
+#       docker run -d \
+#         --name localai \
+#         --gpus all \
+#         -p 8080:8080 \
+#         -v ~/ai-stack/localai/models:/models \
+#         -v ~/ai-stack/localai/config:/config \
+#         -e ENABLE_BACKENDS=llama-cuda,ollama \
+#         -e INCLUDE_DEFAULT_MODELS=true \
+#         -e AUTOLOAD_MODELS=true \
+#         -e MODEL_PATH=/models \
+#         --restart unless-stopped \
+#         localai/localai:latest-aio-gpu-nvidia-cuda-12
 
-      echo "🌐 LocalAI is now running at http://localhost:8080"
-    fi
-  fi
-fi
+#       echo "🌐 LocalAI is now running at http://localhost:8080"
+#     fi
+#   fi
+# fi
 
 if [[ "$1" != "--update" ]]; then
   if check_container_running "localai"; then
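
Note: because the old install path is only commented out rather than removed, a `localai` container created by an earlier run of the script can linger and hold port 8080. A minimal cleanup and verification sketch, assuming the container name `localai` and the port mapping from the block above (the `/v1/models` check relies on LocalAI's OpenAI-compatible API):

# Check whether a container from the old docker-run path still exists
docker ps -a --filter name=localai --format '{{.Names}}\t{{.Status}}'

# Remove it so the replacement install path below can recreate it cleanly
docker rm -f localai 2>/dev/null || true

# After reinstalling, confirm the API answers on the expected port
curl -s http://localhost:8080/v1/models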