#include "timestamp_pool.hh"
#include "device_context.hh"
#include "queue_context.hh"
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <ranges>
#include <vector>
#include <vulkan/utility/vk_dispatch_table.h>
#include <vulkan/vulkan_core.h>
namespace low_latency {
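// A QueryChunk bundles one timestamp VkQueryPool with CHUNK_SIZE query slots,
// one primary command buffer per slot, and the set of slot indices that are
// still free to hand out.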
TimestampPool::QueryChunk::QueryChunk(const QueueContext& queue_context) {
const auto& device_context = queue_context.device_context;
const auto& vtable = device_context.vtable;
this->query_pool = [&]() {
const auto qpci = VkQueryPoolCreateInfo{
.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
.queryType = VK_QUERY_TYPE_TIMESTAMP,
.queryCount = QueryChunk::CHUNK_SIZE};
auto qp = VkQueryPool{};
vtable.CreateQueryPool(device_context.device, &qpci, nullptr, &qp);
return qp;
}();
constexpr auto key_range = std::views::iota(0u, QueryChunk::CHUNK_SIZE);
this->free_indices = std::make_unique<free_indices_t>(std::begin(key_range),
std::end(key_range));
this->command_buffers = [&, this]() -> auto {
auto cbs = std::make_unique<std::vector<VkCommandBuffer>>(CHUNK_SIZE);
const auto cbai = VkCommandBufferAllocateInfo{
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.commandPool = queue_context.command_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = static_cast<std::uint32_t>(std::size(*cbs)),
};
vtable.AllocateCommandBuffers(device_context.device, &cbai,
std::data(*cbs));
return cbs;
}();
}
TimestampPool::QueryChunk::~QueryChunk() = default;
TimestampPool::TimestampPool(QueueContext& queue_context)
: queue_context(queue_context) {
  // Allocate one chunk up front; it is likely more than enough.
auto query_chunk = std::make_shared<QueryChunk>(this->queue_context);
this->query_chunks.emplace(std::move(query_chunk));
}
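// Hands out a Handle that owns one free query slot, creating a new chunk on
// demand when every existing chunk is exhausted.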
std::shared_ptr<TimestampPool::Handle> TimestampPool::acquire() {
  // Find a chunk that still has free query indices, or insert a fresh one and
  // return it.
const auto not_empty_iter = [this]() -> auto {
const auto not_empty_iter =
std::ranges::find_if(this->query_chunks, [](const auto& qc) {
assert(qc);
return std::size(*qc->free_indices);
});
if (not_empty_iter != std::end(this->query_chunks)) {
return not_empty_iter;
}
const auto insert = std::make_shared<QueryChunk>(this->queue_context);
const auto [iter, did_insert] = this->query_chunks.emplace(insert);
assert(did_insert);
return iter;
}();
  // Grab any free index from the chunk and remove it from the free set.
  auto& indices = *(*not_empty_iter)->free_indices;
  const auto query_index = *std::begin(indices);
  // Erase outside of assert() so the removal still happens in NDEBUG builds.
  [[maybe_unused]] const auto erased = indices.erase(query_index);
  assert(erased == 1);
return std::make_shared<Handle>(*not_empty_iter, query_index);
}
TimestampPool::Handle::Handle(const std::shared_ptr<QueryChunk>& origin_chunk,
const std::uint64_t& query_index)
: query_pool(origin_chunk->query_pool), query_index(query_index),
origin_chunk(origin_chunk),
command_buffer((*origin_chunk->command_buffers)[query_index]) {}
TimestampPool::Handle::~Handle() {
  // If the owning chunk is already gone there is nothing to return the slot
  // to, so only re-insert the index while the chunk is still alive.
  if (const auto ptr = this->origin_chunk.lock(); ptr) {
    // Keep the insertion outside of assert() so it is not compiled out in
    // NDEBUG builds.
    [[maybe_unused]] const auto inserted =
        ptr->free_indices->insert(this->query_index).second;
    assert(inserted);
  }
}
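// Records a top-of-pipe timestamp into this handle's command buffer and a
// bottom-of-pipe timestamp into the tail handle's command buffer, so the pair
// can bracket work submitted between them.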
void TimestampPool::Handle::setup_command_buffers(
const Handle& tail, const QueueContext& queue_context) const {
const auto cbbi = VkCommandBufferBeginInfo{
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
};
const auto& device_context = queue_context.device_context;
const auto& vtable = device_context.vtable;
vtable.ResetQueryPoolEXT(device_context.device, this->query_pool,
this->query_index, 1);
vtable.BeginCommandBuffer(this->command_buffer, &cbbi);
vtable.CmdWriteTimestamp2KHR(this->command_buffer,
VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
this->query_pool, this->query_index);
vtable.EndCommandBuffer(this->command_buffer);
vtable.ResetQueryPoolEXT(device_context.device, tail.query_pool,
tail.query_index, 1);
vtable.ResetCommandBuffer(tail.command_buffer, 0);
vtable.BeginCommandBuffer(tail.command_buffer, &cbbi);
vtable.CmdWriteTimestamp2KHR(tail.command_buffer,
VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT,
tail.query_pool, tail.query_index);
vtable.EndCommandBuffer(tail.command_buffer);
}
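// Reads back the raw GPU tick value recorded for this handle's query, or
// std::nullopt while the result is not yet available.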
std::optional<std::uint64_t>
TimestampPool::Handle::get_ticks(const TimestampPool& pool) {
const auto& device_context = pool.queue_context.device_context;
const auto& vtable = device_context.vtable;
struct QueryResult {
std::uint64_t value;
std::uint64_t available;
};
auto query_result = QueryResult{};
  [[maybe_unused]] const auto r = vtable.GetQueryPoolResults(
      device_context.device, this->query_pool, this->query_index, 1,
      sizeof(query_result), &query_result, sizeof(query_result),
      VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
assert(r == VK_SUCCESS || r == VK_NOT_READY);
if (!query_result.available) {
return std::nullopt;
}
return query_result.value;
}
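// Frees every chunk's command buffers and destroys its query pool. Handles
// reference their chunk only through a weak pointer, so destroying them after
// the pool is gone is harmless.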
TimestampPool::~TimestampPool() {
const auto& device = this->queue_context.device_context.device;
const auto& vtable = this->queue_context.device_context.vtable;
for (const auto& query_chunk : this->query_chunks) {
    vtable.FreeCommandBuffers(
        device, this->queue_context.command_pool,
        static_cast<std::uint32_t>(std::size(*query_chunk->command_buffers)),
        std::data(*query_chunk->command_buffers));
vtable.DestroyQueryPool(device, query_chunk->query_pool, nullptr);
}
}
} // namespace low_latency