commit 70886e0ce4
parent 7d61556e82
fixed formatting
@@ -44,108 +44,108 @@ if [[ "$1" == "--update" ]]; then
echo "⚠️ JupyterLab is not running, skipping update."
fi

# --- LocalAI custom build and update ---
LOCALAI_REPO=~/git-repos/LocalAI
DOCKERFILE_URL="https://gitea.qortal.link/crowetic/AI-Dev/raw/branch/main/Dockerfile"

echo "🔁 Rebuilding LocalAI (custom)..."

# Backup and remove existing repo
if [ -d "$LOCALAI_REPO" ]; then
  echo "📦 Backing up existing LocalAI repo..."
  mv "$LOCALAI_REPO" "${LOCALAI_REPO}_backup_$(date +%s)"
fi

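# Optional housekeeping (an addition, not in the original script): timestamped
# backups pile up on every rebuild, so a prune like this keeps only the three
# newest. The keep-count of 3 is an arbitrary assumption.
# ls -dt "${LOCALAI_REPO}"_backup_* 2>/dev/null | tail -n +4 | xargs -r rm -rf
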
# Clone latest LocalAI
git clone https://github.com/mudler/LocalAI.git "$LOCALAI_REPO"

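# Note (added): the clone above tracks upstream HEAD, so every run builds
# whatever is current. Checking out a release tag would make rebuilds
# reproducible; the tag below is a placeholder, not a tested version.
# git -C "$LOCALAI_REPO" checkout vX.Y.Z
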
# Remove default Docker-related files
cd "$LOCALAI_REPO"
rm -f Dockerfile Dockerfile.aio docker-compose.yaml

# Prepare and build LocalAI binary
# NOTE: This portion required quite a few custom additions to build correctly.
# If you are running this on a machine that hasn't built it before, it is
# probably better to wait for the Docker Compose method, which is coming soon.
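# A rough sketch of what that Docker Compose method might look like
# (illustrative only; it mirrors the docker run command further down,
# and nothing here is final):
# cat > ~/ai-stack/localai/docker-compose.yaml <<'EOF'
# services:
#   localai:
#     image: localai:custom
#     ports:
#       - "8080:8080"
#     volumes:
#       - ~/ai-stack/localai/models:/models
#       - ~/ai-stack/localai/config:/config
#     environment:
#       - ENABLE_BACKENDS=llama-cuda,ollama
#       - MODEL_PATH=/models
#     deploy:
#       resources:
#         reservations:
#           devices:
#             - driver: nvidia
#               count: all
#               capabilities: [gpu]
#     restart: unless-stopped
# EOF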
echo "🔧 Running make prepare..."
|
echo "🔧 Running make prepare..."
|
||||||
make prepare
|
make prepare
|
||||||
|
|
||||||
echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
|
echo "🧠 Building LocalAI binary with GPU, localrecall, and localagi..."
|
||||||
make build \
|
make build \
|
||||||
TAGS="localrecall,localagi,gpu" \
|
TAGS="localrecall,localagi,gpu" \
|
||||||
BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
|
BACKENDS="llama-cuda whispercpp stablediffusion ollama" \
|
||||||
GPU_SUPPORT="cuda"
|
GPU_SUPPORT="cuda"
|
||||||
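
# Sanity check (added; not in the original script): upstream LocalAI's Makefile
# is expected to drop a local-ai binary in the repo root, but verify the output
# name against your checkout before relying on this.
if [ ! -x ./local-ai ]; then
  echo "❌ make build did not produce ./local-ai, aborting."
  exit 1
fi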

# Download your custom Dockerfile
echo "📥 Downloading custom Dockerfile..."
curl -fsSL "$DOCKERFILE_URL" -o Dockerfile
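# Guard (added): curl -f already exits non-zero on HTTP errors, but the script
# may not run under `set -e`, so fail loudly if the Dockerfile is missing or empty.
[ -s Dockerfile ] || { echo "❌ Dockerfile download failed."; exit 1; }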

# Build the container
echo "🔨 Building custom LocalAI Docker image..."
docker build -t localai:custom .

# Remove existing container
docker stop localai 2>/dev/null || true
docker rm localai 2>/dev/null || true

# Create model and config directories if needed
mkdir -p ~/ai-stack/localai/models
mkdir -p ~/ai-stack/localai/config

# Run new container
docker run -d \
  --name localai \
  --gpus all \
  -p 8080:8080 \
  -v ~/ai-stack/localai/models:/models \
  -v ~/ai-stack/localai/config:/config \
  -e ENABLE_BACKENDS=llama-cuda,ollama \
  -e INCLUDE_DEFAULT_MODELS=true \
  -e AUTOLOAD_MODELS=true \
  -e MODEL_PATH=/models \
  --restart unless-stopped \
  localai:custom

echo "✅ LocalAI (custom) is now running at http://localhost:8080"
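
# Readiness probe (added as a convenience; LocalAI exposes an OpenAI-compatible
# API, so /v1/models is a reasonable endpoint to poll, adjust if yours differs):
for _ in $(seq 1 30); do
  if curl -fsS http://localhost:8080/v1/models >/dev/null 2>&1; then
    echo "🩺 LocalAI API is responding."
    break
  fi
  sleep 2
done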

# # 🔁 Updating LocalAI (GPU build)
# if check_container_running "localai"; then
#   echo "🧠 Updating LocalAI..."
#   docker stop localai
#   docker rm localai
# else
#   echo "➕ Installing LocalAI..."
# fi

# docker pull localai/localai:latest-aio-gpu-nvidia-cuda-12

# # Ensure default models.yaml exists
# MODELS_YAML=~/ai-stack/localai/config/models.yaml
# if [ ! -f "$MODELS_YAML" ]; then
#   echo "📄 Creating default models.yaml..."
#   cat > "$MODELS_YAML" <<EOF
# - name: phi
#   backend: llama-cuda
#   parameters:
#     model: /models/phi-2.gguf
# EOF
# fi

# docker run -d \
#   --name localai \
#   --gpus all \
#   -p 8080:8080 \
#   -v ~/ai-stack/localai/models:/models \
#   -v ~/ai-stack/localai/config:/config \
#   -e ENABLE_BACKENDS=llama-cuda,ollama \
#   -e INCLUDE_DEFAULT_MODELS=true \
#   -e AUTOLOAD_MODELS=true \
#   -e MODEL_PATH=/models \
#   --restart unless-stopped \
#   localai/localai:latest-aio-gpu-nvidia-cuda-12

# echo "✅ LocalAI running at http://localhost:8080"