ggml-virtgpu: make the code thread safe (#19204)

* ggml-virtgpu: regenerate_remoting.py: add the ability to deprecate a function

* ggml-virtgpu: deprecate buffer_type is_host remoting

This remoting call is not necessary.

* ggml-virtgpu: stop using static vars as cache

The static init isn't thread safe.

* ggml-virtgpu: protect the use of the shared memory to transfer data

* ggml-virtgpu: make the remote calls thread-safe

* ggml-virtgpu: backend: don't continue if couldn't allocate the tensor memory

* ggml-virtgpu: add a cleanup function for consistency

* ggml-virtgpu: backend: don't crash if buft->iface.get_max_size is missing

* fix style and ordering

* Remove the static variable in apir_device_get_count

* ggml-virtgpu: improve the logging

* fix minor formatting changes from review
This commit is contained in:
Kevin Pouget
2026-02-04 03:46:18 +01:00
committed by GitHub
parent 2ceda3f662
commit 015deb9048
27 changed files with 397 additions and 237 deletions
+11 -10
View File
@@ -3,8 +3,8 @@
/* device */
void apir_device_get_device_count(struct virtgpu * gpu);
int apir_device_get_count(struct virtgpu * gpu);
const char * apir_device_get_name(struct virtgpu * gpu);
const char * apir_device_get_description(struct virtgpu * gpu);
char * apir_device_get_name(struct virtgpu * gpu);
char * apir_device_get_description(struct virtgpu * gpu);
uint32_t apir_device_get_type(struct virtgpu * gpu);
void apir_device_get_memory(struct virtgpu * gpu, size_t * free, size_t * total);
bool apir_device_supports_op(struct virtgpu * gpu, const ggml_tensor * op);
@@ -17,14 +17,15 @@ void apir_device_get_props(struct virtgpu * gpu,
apir_buffer_context_t apir_device_buffer_from_ptr(struct virtgpu * gpu, size_t size, size_t max_tensor_size);
/* buffer-type */
const char * apir_buffer_type_get_name(struct virtgpu * gpu, ggml_backend_buffer_type_t buft);
size_t apir_buffer_type_get_alignment(struct virtgpu * gpu, ggml_backend_buffer_type_t buft);
size_t apir_buffer_type_get_max_size(struct virtgpu * gpu, ggml_backend_buffer_type_t buft);
bool apir_buffer_type_is_host(struct virtgpu * gpu, ggml_backend_buffer_type_t buft);
apir_buffer_context_t apir_buffer_type_alloc_buffer(struct virtgpu * gpu,
ggml_backend_buffer_type_t buffer_buft,
size_t size);
size_t apir_buffer_type_get_alloc_size(struct virtgpu * gpu, ggml_backend_buffer_type_t buft, const ggml_tensor * op);
char * apir_buffer_type_get_name(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
size_t apir_buffer_type_get_alignment(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
size_t apir_buffer_type_get_max_size(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
apir_buffer_context_t apir_buffer_type_alloc_buffer(struct virtgpu * gpu,
apir_buffer_type_host_handle_t host_handle,
size_t size);
size_t apir_buffer_type_get_alloc_size(struct virtgpu * gpu,
apir_buffer_type_host_handle_t host_handle,
const ggml_tensor * op);
/* buffer */
void * apir_buffer_get_base(struct virtgpu * gpu, apir_buffer_context_t * buffer_context);