aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorNicolas James <nj3ahxac@gmail.com>2026-04-07 13:20:55 +1000
committerNicolas James <nj3ahxac@gmail.com>2026-04-07 13:20:55 +1000
commita65bdd8a0aa027c97391cea807d6d30df1c22ca4 (patch)
tree858a816d4455e3cc3c0ea67c5d6e4f8173edb2a7 /src
parent341b9b65a57dee1d4d16ec0994fe9a414b542ba5 (diff)
Fix potential race in DestroyInstance and DestroyDevice
Diffstat (limited to 'src')
-rw-r--r--src/layer.cc77
1 file changed, 39 insertions, 38 deletions
diff --git a/src/layer.cc b/src/layer.cc
index a4759fa..1218482 100644
--- a/src/layer.cc
+++ b/src/layer.cc
@@ -97,28 +97,30 @@ CreateInstance(const VkInstanceCreateInfo* pCreateInfo,
static VKAPI_ATTR void VKAPI_CALL
DestroyInstance(VkInstance instance, const VkAllocationCallbacks* allocator) {
-
- const auto destroy_instance_func = [&]() -> auto {
- const auto context = layer_context.get_context(instance);
- const auto lock = std::scoped_lock{layer_context.mutex};
-
- // Erase our physical devices owned by this instance from the global
- // context.
- for (const auto& [key, _] : context->physical_devices) {
- assert(layer_context.contexts.contains(key));
- layer_context.contexts.erase(key);
- }
-
- const auto key = layer_context.get_key(instance);
+ // These require special care because multiple threads might create a race
+ // condition by being given the same VkInstance dispatchable handle.
+ auto lock = std::unique_lock{layer_context.mutex};
+
+ const auto key = layer_context.get_key(instance);
+ const auto iter = layer_context.contexts.find(key);
+ assert(iter != std::end(layer_context.contexts));
+ const auto context =
+ std::dynamic_pointer_cast<InstanceContext>(iter->second);
+
+ // Erase our physical devices owned by this instance from the global
+ // context.
+ for (const auto& [key, _] : context->physical_devices) {
assert(layer_context.contexts.contains(key));
layer_context.contexts.erase(key);
+ }
- // Should be the last ptr now like DestroyDevice.
- assert(context.unique());
- return context->vtable.DestroyInstance;
- }();
+ layer_context.contexts.erase(iter);
+ // Should be the last ptr now like DestroyDevice.
+ assert(context.unique());
- destroy_instance_func(instance, allocator);
+ // Unlock now to avoid deadlocks if something calls back into us.
+ lock.unlock();
+ context->vtable.DestroyInstance(instance, allocator);
}
static VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(
@@ -264,31 +266,30 @@ static VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(
static VKAPI_ATTR void VKAPI_CALL
DestroyDevice(VkDevice device, const VkAllocationCallbacks* allocator) {
-
- const auto destroy_device_func = [&]() -> auto {
- const auto device_context = layer_context.get_context(device);
-
- const auto func = device_context->vtable.DestroyDevice;
- const auto lock = std::scoped_lock{layer_context.mutex};
- // Remove all owned queues from our global context pool.
- for (const auto& [queue, _] : device_context->queues) {
- const auto key = layer_context.get_key(queue);
- assert(layer_context.contexts.contains(key));
- layer_context.contexts.erase(key);
- }
-
- const auto key = layer_context.get_key(device);
+ // Similarly to DestroyInstance, this needs to be done carefully to avoid a
+ // race.
+ auto lock = std::unique_lock{layer_context.mutex};
+
+ const auto key = layer_context.get_key(device);
+ const auto iter = layer_context.contexts.find(key);
+ assert(iter != std::end(layer_context.contexts));
+ const auto context = dynamic_pointer_cast<DeviceContext>(iter->second);
+
+ // Remove all owned queues from our global context pool.
+ for (const auto& [queue, _] : context->queues) {
+ const auto key = layer_context.get_key(queue);
assert(layer_context.contexts.contains(key));
layer_context.contexts.erase(key);
+ }
- // Should be the last shared ptr now, so its destructor can be called.
- // The destructor should expect its owned queues to be unique as well.
- assert(device_context.unique());
+ layer_context.contexts.erase(iter);
- return func;
- }();
+ // Should be the last shared ptr now, so its destructor can be called.
+ // The destructor should expect its owned queues to be unique as well.
+ assert(context.unique());
- destroy_device_func(device, allocator);
+ lock.unlock();
+ context->vtable.DestroyDevice(device, allocator);
}
static VKAPI_ATTR void VKAPI_CALL