switching back to AIO build of localai
This commit is contained in:
parent dffcd2d318
commit 9cf8d1b07c
@@ -69,69 +69,30 @@ if [[ "$1" == "--update" ]]; then
  echo "🔧 Running make prepare..."
  make prepare

  echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
  make build \
    TAGS="localrecall,localagi,gpu" \
    BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
    GPU_SUPPORT="cuda"
  # echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
  # make build \
  #   TAGS="localrecall,localagi,gpu" \
  #   BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
  #   GPU_SUPPORT="cuda"


  # Download your custom Dockerfile
  echo "📥 Downloading custom Dockerfile..."
  curl -fsSL "$DOCKERFILE_URL" -o Dockerfile
  # # Download your custom Dockerfile
  # echo "📥 Downloading custom Dockerfile..."
  # curl -fsSL "$DOCKERFILE_URL" -o Dockerfile

  # Build the container
  echo "🔨 Building custom LocalAI Docker image..."
  docker build -t localai:custom .
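  # Optional: confirm the image was produced (a quick check, assuming a standard Docker CLI):
  # docker image ls localai:custom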
  # # Build the container
  # echo "🔨 Building custom LocalAI Docker image..."
  # docker build -t localai:custom .

  # Remove existing container
  docker stop localai 2>/dev/null || true
  docker rm localai 2>/dev/null || true
  # # Remove existing container
  # docker stop localai 2>/dev/null || true
  # docker rm localai 2>/dev/null || true

  # Create model and config directories if needed
  mkdir -p ~/ai-stack/localai/models
  mkdir -p ~/ai-stack/localai/config

  # Run new container
  docker run -d \
    --name localai \
    --gpus all \
    -p 8080:8080 \
    -v ~/ai-stack/localai/models:/models \
    -v ~/ai-stack/localai/config:/config \
    -e ENABLE_BACKENDS=llama-cuda,ollama \
    -e INCLUDE_DEFAULT_MODELS=true \
    -e AUTOLOAD_MODELS=true \
    -e MODEL_PATH=/models \
    --restart unless-stopped \
    localai:custom

  echo "✅ LocalAI (custom) is now running at http://localhost:8080"

  # # 🔁 Updating LocalAI (GPU build)
  # if check_container_running "localai"; then
  #   echo "🧠 Updating LocalAI..."
  #   docker stop localai
  #   docker rm localai
  # else
  #   echo "➕ Installing LocalAI..."
  # fi

  # docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

  # # Ensure default models.yaml exists
  # MODELS_YAML=~/ai-stack/localai/config/models.yaml
  # if [ ! -f "$MODELS_YAML" ]; then
  #   echo "📄 Creating default models.yaml..."
  #   cat > "$MODELS_YAML" <<EOF
  # - name: phi
  #   backend: llama-cuda
  #   parameters:
  #     model: /models/phi-2.gguf
  # EOF
  # fi
  # # Create model and config directories if needed
  # mkdir -p ~/ai-stack/localai/models
  # mkdir -p ~/ai-stack/localai/config

  # # Run new container
  # docker run -d \
  #   --name localai \
  #   --gpus all \
@@ -143,9 +104,36 @@ if [[ "$1" == "--update" ]]; then
  #   -e AUTOLOAD_MODELS=true \
  #   -e MODEL_PATH=/models \
  #   --restart unless-stopped \
  #   localai/localai:latest-aio-gpu-nvidia-cuda-12
  #   localai:custom

  # echo "✅ LocalAI running at http://localhost:8080"
  # echo "✅ LocalAI (custom) is now running at http://localhost:8080"


  # 🔁 Updating LocalAI (GPU build)
  if check_container_running "localai"; then
    echo "🧠 Updating LocalAI..."
    docker stop localai
    docker rm localai
  else
    echo "➕ Installing LocalAI..."
  fi

  docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

  docker run -d \
    --name localai \
    --gpus all \
    -p 8080:8080 \
    -v ~/ai-stack/localai/models:/models \
    -v ~/ai-stack/localai/config:/config \
    -e ENABLE_BACKENDS=llama-cuda,ollama \
    -e INCLUDE_DEFAULT_MODELS=true \
    -e AUTOLOAD_MODELS=true \
    -e MODEL_PATH=/models \
    --restart unless-stopped \
    localai/localai:latest-aio-gpu-nvidia-cuda-12

  echo "✅ LocalAI running at http://localhost:8080"
@@ -481,18 +469,18 @@ EOF
  fi
fi

# if [[ "$1" != "--update" ]]; then
#   if check_container_running "localai"; then
#     echo "✅ Skipping LocalAI install (already running)."
#   else
#     read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
#       echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."
if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
  else
    read -p "➕ Install LocalAI (GPU-accelerated)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "🧠 Installing LocalAI (NVIDIA CUDA 12)..."

      # mkdir -p ~/ai-stack/localai/models
      # mkdir -p ~/ai-stack/localai/config
      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

      # echo "📄 Creating default models.yaml..."
      echo "📄 Creating default models.yaml..."
      # cat > ~/ai-stack/localai/config/models.yaml <<EOF
      # - name: phi
      #   backend: llama-cuda
@@ -500,59 +488,59 @@ fi
      #     model: /models/phi-2.gguf
      # EOF

      # docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

      # docker run -d \
      #   --name localai \
      #   --gpus all \
      #   -p 8080:8080 \
      #   -v ~/ai-stack/localai/models:/models \
      #   -v ~/ai-stack/localai/config:/config \
      #   -e ENABLE_BACKENDS=llama-cuda,ollama \
      #   -e INCLUDE_DEFAULT_MODELS=true \
      #   -e AUTOLOAD_MODELS=true \
      #   -e MODEL_PATH=/models \
      #   --restart unless-stopped \
      #   localai/localai:latest-aio-gpu-nvidia-cuda-12

      # echo "🌐 LocalAI is now running at http://localhost:8080"
      # fi
      # fi
      # fi

if [[ "$1" != "--update" ]]; then
  if check_container_running "localai"; then
    echo "✅ Skipping LocalAI install (already running)."
  else
    read -p "➕ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt
    if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
      echo "🧠 Installing LocalAI (Custom created localAI container)..."

      mkdir -p ~/ai-stack/localai/models
      mkdir -p ~/ai-stack/localai/config

      echo "📦 Using custom built LocalAI image (localai:custom)"
      docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

      docker run -d \
        --name localai \
        --gpus all \
        --network ai-stack-net \
        -p 8080:8080 \
        -v ~/ai-stack/localai/models:/models \
        -v ~/ai-stack/localai/models:/app/models \
        -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
        -v ~/ai-stack/localai/config:/config \
        -e ENABLE_BACKENDS=llama-cuda,ollama \
        -e INCLUDE_DEFAULT_MODELS=true \
        -e AUTOLOAD_MODELS=true \
        -e MODEL_PATH=/models \
        -e LOG_LEVEL=debug \
        --restart unless-stopped \
        localai:custom
        localai/localai:latest-aio-gpu-nvidia-cuda-12

      echo "🌐 LocalAI is now running at http://localhost:8080"
    fi
  fi
fi
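# Example first request once the AIO container is up (a sketch; the AIO images ship
# with preconfigured OpenAI-style model names, so "gpt-4" here is illustrative):
# curl http://localhost:8080/v1/chat/completions \
#   -H "Content-Type: application/json" \
#   -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello"}]}'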

# if [[ "$1" != "--update" ]]; then
#   if check_container_running "localai"; then
#     echo "✅ Skipping LocalAI install (already running)."
#   else
#     read -p "➕ Install LocalAI (GPU-custom)? [y/N]: " localai_prompt
#     if [[ "$localai_prompt" =~ ^[Yy]$ ]]; then
#       echo "🧠 Installing LocalAI (Custom created localAI container)..."

#       mkdir -p ~/ai-stack/localai/models
#       mkdir -p ~/ai-stack/localai/config

#       echo "📦 Using custom built LocalAI image (localai:custom)"

#       docker run -d \
#         --name localai \
#         --gpus all \
#         --network ai-stack-net \
#         -p 8080:8080 \
#         -v ~/ai-stack/localai/models:/models \
#         -v ~/ai-stack/localai/models:/app/models \
#         -e ENABLE_BACKENDS=llama-cuda,whispercpp,stablediffusion,ollama \
#         -e INCLUDE_DEFAULT_MODELS=true \
#         -e AUTOLOAD_MODELS=true \
#         -e MODEL_PATH=/models \
#         -e LOG_LEVEL=debug \
#         --restart unless-stopped \
#         localai:custom

#       echo "🌐 LocalAI is now running at http://localhost:8080"
#     fi
#   fi
# fi