llama: use host memory if device reports 0 memory (#18587)

This commit is contained in:
Aaron Teo
2026-01-09 05:34:56 +08:00
committed by GitHub
parent 480160d472
commit 046d5fd44e
4 changed files with 28 additions and 8 deletions
+2 -2
View File
@@ -4287,8 +4287,8 @@ static const char * ggml_backend_opencl_device_get_description(ggml_backend_dev_
}
// Report the device's free/total memory to the ggml backend layer.
// The OpenCL backend cannot reliably query device memory, so it reports
// 0 for both values; per this commit, a 0 report makes llama.cpp fall
// back to sizing against host memory instead of trusting the device.
static void ggml_backend_opencl_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    *free = 0;
    *total = 0;
    GGML_UNUSED(dev); // device handle not needed; silence unused-parameter warning
}