#!/bin/bash
# Check Ollama GPU status, run a quick performance test, and list available models via the backend API
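# Requires curl and python3; assumes the backend API is listening on http://localhost:5001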
echo "=========================================="
echo "Ollama GPU Status Check"
echo "=========================================="
echo ""
|
|
|
|
# Check GPU status
echo "1. GPU Status:"
echo "---"
curl -s http://localhost:5001/api/ollama/gpu-status | python3 -m json.tool
echo ""
echo ""
|
|
|
|
# Test performance
echo "2. Performance Test:"
echo "---"
curl -s http://localhost:5001/api/ollama/test | python3 -m json.tool
echo ""
echo ""
|
|
|
|
# List models
echo "3. Available Models:"
echo "---"
curl -s http://localhost:5001/api/ollama/models | python3 -m json.tool
echo ""
echo ""
|
|
|
|
echo "=========================================="
echo "Quick Summary:"
echo "=========================================="
# Extract key info
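# Re-query both endpoints and reduce each JSON response to a one-line summary;
# each pipeline falls back to "Error" if the API is unreachable or returns invalid JSON.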
GPU_STATUS=$(curl -s http://localhost:5001/api/ollama/gpu-status | python3 -c "import json,sys; data=json.load(sys.stdin); print('GPU Active' if data.get('gpu_in_use') else 'CPU Mode')" 2>/dev/null || echo "Error")
PERF=$(curl -s http://localhost:5001/api/ollama/test | python3 -c "import json,sys; data=json.load(sys.stdin); print(f\"{data.get('duration_seconds', 'N/A')}s - {data.get('performance', 'N/A')}\")" 2>/dev/null || echo "Error")
echo "GPU Status: $GPU_STATUS"
echo "Performance: $PERF"
echo ""
if [ "$GPU_STATUS" = "CPU Mode" ]; then
    echo "💡 TIP: Enable GPU for 5-10x faster processing:"
    echo "   docker-compose -f docker-compose.yml -f docker-compose.gpu.yml up -d"
    echo "   See docs/GPU_SETUP.md for details"
fi