blob: c0ab8825a56d44e886332d43eb286dd10c605ed1 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
|
#include "device_strategy.hh"

#include "device_context.hh"
#include "queue_strategy.hh"

#include <vulkan/vulkan_core.h>

#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>
namespace low_latency {
// Construct the anti-lag strategy; simply forwards the device context to the
// DeviceStrategy base. Remaining state uses its in-class defaults
// (presumably declared in device_strategy.hh — not visible here).
AntiLagDeviceStrategy::AntiLagDeviceStrategy(DeviceContext& device)
    : DeviceStrategy(device) {}
// Empty out-of-line destructor; likely anchors the class's key function in
// this translation unit — NOTE(review): confirm against the header.
AntiLagDeviceStrategy::~AntiLagDeviceStrategy() {}
// Handle a vkAntiLagUpdateAMD call: record the current anti-lag mode and
// FPS cap, then — when the app signals the input stage — wait for every
// queue submission collected since the last input stage to complete, and
// optionally sleep further so consecutive input releases honor maxFPS.
void AntiLagDeviceStrategy::notify_update(const VkAntiLagDataAMD& data) {
  auto lock = std::unique_lock{this->mutex};
  // Anti-lag counts as active for any mode other than explicit OFF.
  this->is_enabled = !(data.mode == VK_ANTI_LAG_MODE_OFF_AMD);
  // Translate the FPS cap into a minimum per-frame delay. A maxFPS of 0
  // means "uncapped", encoded here as a 0us delay.
  this->input_delay = [&]() -> std::chrono::microseconds {
    using namespace std::chrono;
    if (!data.maxFPS) {
      return 0us;
    }
    // One second divided by the cap = microseconds per frame.
    return duration_cast<microseconds>(1s) / data.maxFPS;
  }();
  // pPresentationInfo is optional; without it there is no stage to act on.
  if (!data.pPresentationInfo) {
    return;
  }
  // If we're at the present stage, stop collecting submissions by making
  // our frame_index nullopt.
  if (data.pPresentationInfo->stage == VK_ANTI_LAG_STAGE_PRESENT_AMD) {
    this->frame_index.reset();
    return;
  }
  // If we're at the input stage, start marking submissions as relevant.
  this->frame_index.emplace(data.pPresentationInfo->frameIndex);
  // Grab this before we unlock the mutex.
  const auto delay = this->input_delay;
  // Drop our own mutex before blocking below so other threads (e.g.
  // submission tracking) are not stalled while we wait on the GPU.
  lock.unlock();
  // We need to collect all queue submission and wait on them in this thread.
  // Input stage needs to wait for all queue submissions to complete.
  const auto queue_frame_spans = [&]() -> auto {
    auto queue_frame_spans = std::vector<std::unique_ptr<FrameSpan>>{};
    // Shared lock: we only read the device's queue map here.
    const auto device_lock = std::shared_lock{this->device.mutex};
    for (const auto& iter : this->device.queues) {
      const auto& queue = iter.second;
      // Every queue on an anti-lag device is expected to carry an
      // AntiLagQueueStrategy — the assert enforces that invariant.
      const auto strategy =
          dynamic_cast<AntiLagQueueStrategy*>(queue->strategy.get());
      assert(strategy);
      // Grab it from the queue, don't hold the lock.
      const auto queue_lock = std::scoped_lock{strategy->mutex};
      // Moving the unique_ptr already nulls the source; the explicit
      // reset() is redundant but documents the hand-off.
      queue_frame_spans.emplace_back(std::move(strategy->frame_span));
      strategy->frame_span.reset();
    }
    return queue_frame_spans;
  }();
  // Wait on outstanding work to complete. All locks are released here, so
  // waiting cannot deadlock against submission tracking.
  for (const auto& frame_span : queue_frame_spans) {
    if (frame_span) { // Can still be null here.
      frame_span->await_completed();
    }
  }
  // We might need to wait a little more time to meet our frame limit.
  // NOTE(review): previous_input_release appears to be a thread-safe
  // optional-like wrapper (has_value/get/set); we touch it after unlocking
  // this->mutex, so confirm it synchronizes internally.
  using namespace std::chrono;
  if (delay != 0us && this->previous_input_release.has_value()) {
    const auto last = this->previous_input_release.get();
    std::this_thread::sleep_until(last + delay);
  }
  this->previous_input_release.set(steady_clock::now());
}
// Report whether queue submissions should currently be tracked.
// True only while anti-lag is enabled AND a frame is being collected —
// frame_index is set at the input stage and cleared at the present stage.
bool AntiLagDeviceStrategy::should_track_submissions() {
  const auto lock = std::shared_lock{this->mutex};
  return this->is_enabled && this->frame_index.has_value();
}
// Stub - anti_lag doesn't differentiate between swapchains, so swapchain
// creation requires no per-swapchain bookkeeping here.
void AntiLagDeviceStrategy::notify_create_swapchain(
    const VkSwapchainKHR&, const VkSwapchainCreateInfoKHR&) {}
// Stub - again, AL (anti-lag) doesn't care about swapchains; nothing was
// recorded at creation, so nothing needs tearing down here.
void AntiLagDeviceStrategy::notify_destroy_swapchain(const VkSwapchainKHR&) {}
} // namespace low_latency
|