#!/bin/bash
#
# Local Swarm installer.
# Verifies Python 3, upgrades pip, installs base dependencies, then
# detects the local hardware and installs a matching inference backend
# (MLX on Apple Silicon, CUDA or CPU llama.cpp elsewhere).

# Strict mode: exit on error, error on unset variables, and fail a
# pipeline if any stage fails (stricter than the original bare `set -e`).
set -euo pipefail

echo "=========================================="
echo " Local Swarm - Installer"
echo "=========================================="
echo
# ANSI color codes used for status messages below.
# Marked readonly: they are constants and must not be reassigned later.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color
# Fail fast when no python3 interpreter is on PATH; nothing below works
# without it.
command -v python3 > /dev/null 2>&1 || {
    echo -e "${RED}[ERROR] Python 3 is not installed${NC}"
    echo "Please install Python 3.9+ and try again"
    exit 1
}
echo "[1/4] Checking Python version..."
# Second whitespace-separated field of e.g. "Python 3.11.4" -> "3.11.4".
PYTHON_VERSION=$(python3 --version | awk '{print $2}')
echo " Found Python $PYTHON_VERSION"
echo
echo "[2/4] Upgrading pip..."
python3 -m pip install --upgrade pip

echo
echo "[3/4] Installing base dependencies..."
# Use `python3 -m pip` (not bare pip3) so packages land in the same
# interpreter checked above, even when multiple Pythons are installed.
python3 -m pip install -r requirements.txt
# Detect platform and install appropriate backend
echo
echo "[4/4] Detecting hardware and installing backend..."

if [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS
    echo " Platform: macOS"

    # Check for Apple Silicon
    if [[ $(uname -m) == "arm64" ]]; then
        echo " Hardware: Apple Silicon detected!"
        echo " Installing MLX backend..."
        python3 -m pip install -r requirements-macos.txt
        # `-e` is required so the ANSI color escapes render instead of
        # printing literally (these lines originally used plain echo).
        echo -e " ${GREEN}MLX backend installed!${NC}"
    else
        echo " Hardware: Intel Mac"
        echo " Installing llama.cpp (CPU)..."
        python3 -m pip install llama-cpp-python
        echo -e " ${GREEN}llama.cpp installed (CPU mode)${NC}"
    fi

elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
    # Linux
    echo " Platform: Linux"

    # Check for NVIDIA GPU
    if command -v nvidia-smi &> /dev/null; then
        echo " Hardware: NVIDIA GPU detected!"
        echo " Installing CUDA-enabled llama.cpp..."
        # Remove any CPU-only build first; ignore failure if none installed.
        python3 -m pip uninstall -y llama-cpp-python 2>/dev/null || true
        python3 -m pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
        echo -e " ${GREEN}GPU support enabled!${NC}"
    else
        echo " Hardware: No NVIDIA GPU detected"
        echo " Installing llama.cpp (CPU)..."
        python3 -m pip install llama-cpp-python
        echo -e " ${GREEN}CPU backend installed${NC}"
    fi

    # Check for AMD GPU (ROCm) — warn only; the CPU backend stays in use.
    if command -v rocm-smi &> /dev/null; then
        echo -e "${YELLOW}[WARNING] AMD GPU detected but ROCm support is experimental${NC}"
        echo " Using CPU backend for now"
    fi

else
    echo -e "${YELLOW}[WARNING] Unknown platform: $OSTYPE${NC}"
    echo " Installing generic CPU backend..."
    python3 -m pip install llama-cpp-python
fi
echo
# Final summary and usage hints; the quoted delimiter keeps the body
# literal (no expansion is needed here).
cat <<'EOF'
==========================================
 Installation Complete!
==========================================

To start Local Swarm:
 python3 main.py

To check hardware detection:
 python3 main.py --detect

For more options:
 python3 main.py --help

EOF