#!/usr/bin/env bash
#
# Check GPU availability for running Ollama.
#
# Probes for an NVIDIA GPU via nvidia-smi, reports driver/memory/CUDA
# details when found, verifies that Docker can pass the GPU through,
# and prints a recommended startup command for either case.
#
# Exit status: 0 on success (regardless of whether a GPU was found).

set -euo pipefail

echo "GPU Availability Check"
echo "======================"
echo ""

# Check for NVIDIA GPU
if command -v nvidia-smi &> /dev/null; then
  echo "✓ NVIDIA GPU detected"
  echo ""
  echo "GPU Information:"
  nvidia-smi --query-gpu=index,name,driver_version,memory.total,memory.free --format=csv,noheader | \
    awk -F', ' '{printf "  GPU %s: %s\n    Driver: %s\n    Memory: %s total, %s free\n\n", $1, $2, $3, $4, $5}'

  # Check CUDA version (toolkit is optional; absence is not an error)
  if command -v nvcc &> /dev/null; then
    echo "CUDA Version:"
    # '|| true' keeps 'set -e -o pipefail' from aborting if "release" is
    # missing from the nvcc banner.
    nvcc --version | grep "release" | awk '{print "  " $0}' || true
    echo ""
  fi

  # Check Docker GPU support. Confirm docker exists before attempting the
  # container probe, so a missing Docker install does not masquerade as a
  # GPU-passthrough failure.
  echo "Checking Docker GPU support..."
  if command -v docker &> /dev/null \
      && docker run --rm --gpus all nvidia/cuda:12.0.0-base-ubuntu22.04 nvidia-smi &> /dev/null; then
    echo "✓ Docker can access GPU"
    echo ""
    echo "Recommendation: Use GPU-accelerated startup"
    echo "  ./start-with-gpu.sh"
  else
    echo "✗ Docker cannot access GPU"
    echo ""
    echo "Install NVIDIA Container Toolkit:"
    echo "  https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html"
    echo ""
    echo "After installation, restart Docker:"
    echo "  sudo systemctl restart docker"
  fi
else
  echo "ℹ No NVIDIA GPU detected"
  echo ""
  echo "Running Ollama on CPU is supported but slower."
  echo ""
  echo "Performance comparison:"
  echo "  CPU: ~1-2s per translation, ~8s per summary"
  echo "  GPU: ~0.3s per translation, ~2s per summary"
  echo ""
  echo "Recommendation: Use standard startup"
  echo "  docker-compose up -d"
fi

echo ""
echo "For more information, see: docs/OLLAMA_SETUP.md"