aboutsummaryrefslogtreecommitdiff
path: root/src/layer_context.hh
blob: 5c169262d4c9494402187ec307f67cb4b1c3a678 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
#ifndef LAYER_CONTEXT_HH_
#define LAYER_CONTEXT_HH_

#include <cassert>
#include <concepts>
#include <memory>
#include <mutex>
#include <type_traits>
#include <unordered_map>

#include <vulkan/vulkan_core.h>

#include "context.hh"
#include "device_context.hh"
#include "instance_context.hh"
#include "physical_device_context.hh"
#include "queue_context.hh"

// The purpose of this file is to provide a definition for the highest level
// entry point struct of our vulkan state.

namespace low_latency {

// All these templates do is make it so we can go from some DispatchableType
// to their respective context's with nice syntax. This lets us write something
// like this for all DispatchableTypes:
//
//     const auto device_context = get_context(some_vk_device);
//           ^ It was automatically deduced as DeviceContext, wow!

// Constrains a type to one of the four Vulkan dispatchable handle types.
// cv-qualifiers and references are stripped first, so e.g. `const VkDevice&`
// satisfies the concept as well.
template <typename Handle>
concept DispatchableType =
    std::same_as<std::remove_cvref_t<Handle>, VkQueue> ||
    std::same_as<std::remove_cvref_t<Handle>, VkDevice> ||
    std::same_as<std::remove_cvref_t<Handle>, VkPhysicalDevice> ||
    std::same_as<std::remove_cvref_t<Handle>, VkInstance>;

template <class D> struct context_for_t;
template <> struct context_for_t<VkInstance> {
    using context = InstanceContext;
};
template <> struct context_for_t<VkPhysicalDevice> {
    using context = PhysicalDeviceContext;
};
template <> struct context_for_t<VkDevice> {
    using context = DeviceContext;
};
template <> struct context_for_t<VkQueue> {
    using context = QueueContext;
};
template <DispatchableType D>
using dispatch_context_t = typename context_for_t<D>::context;

// Top-level entry point of the layer's Vulkan state. Owns one Context per
// known dispatchable handle and hands out typed access via get_context().
class LayerContext final : public Context {
  private:
    // If this is not null and set to 1 then VK_NV_low_latency2 should be
    // provided instead of VK_AMD_anti_lag.
    static constexpr auto EXPOSE_REFLEX_ENV = "LOW_LATENCY_LAYER_EXPOSE_REFLEX";

    // If this is not null and set to 1 then the card's vendor, id, and device
    // name will be modified to appear as a NVIDIA card.
    static constexpr auto SPOOF_NVIDIA_ENV = "LOW_LATENCY_LAYER_SPOOF_NVIDIA";

  public:
    // Constants for spoofing.
    static constexpr auto NVIDIA_VENDOR_ID = 0x10DE;
    static constexpr auto NVIDIA_DEVICE_ID = 0x2B85; // 5090
    static constexpr auto NVIDIA_DEVICE_NAME = "NVIDIA GeForce RTX 5090";

  public:
    // Guards `contexts`; get_context() takes it for every lookup.
    std::mutex mutex;
    // Maps get_key(handle) to the context tracking that handle's state.
    std::unordered_map<void*, std::shared_ptr<Context>> contexts;

    // Presumably cached from the two environment variables above at
    // construction time — the constructor body is defined elsewhere.
    bool should_expose_reflex = false;
    bool should_spoof_nvidia = false;

  public:
    LayerContext();
    virtual ~LayerContext();

  public:
    // Collapses a dispatchable handle to the void* key used by `contexts`.
    // The handle value itself is the key.
    template <DispatchableType DT> static void* get_key(const DT& dt) {
        return reinterpret_cast<void*>(dt);
    }

    // Looks up the context registered for `dt` and returns it as the concrete
    // context type for that handle (e.g. DeviceContext for a VkDevice).
    // Returns nullptr if the handle was never registered or its stored
    // context has a mismatched dynamic type; both cases also trip an assert
    // in debug builds. Thread-safe: holds `mutex` for the lookup.
    template <DispatchableType DT>
    std::shared_ptr<dispatch_context_t<DT>> get_context(const DT& dt) {
        const auto key = get_key(dt);

        const auto lock = std::scoped_lock{this->mutex};
        const auto it = this->contexts.find(key);
        assert(it != std::end(this->contexts));
        // Previously a miss fell through to dereferencing end() once asserts
        // were compiled out (NDEBUG); fail soft instead of invoking UB.
        if (it == std::end(this->contexts)) {
            return nullptr;
        }

        using context_t = dispatch_context_t<DT>;
        const auto ptr = std::dynamic_pointer_cast<context_t>(it->second);
        assert(ptr);
        return ptr;
    }
};

}; // namespace low_latency

#endif