blob: bfdad2e2dfa50dbb0ba143b5754894bedfe40a7e (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
|
#ifndef TIMESTAMP_POOL_HH_
#define TIMESTAMP_POOL_HH_
// The purpose of this file is to provide the definition of a 'timestamp pool'.
// It manages blocks of timestamp query pools, hands them out when requested,
// and allocates more when (if) we run out.
// Usage:
// 1. Get a handle with .acquire().
// 2. Write start/end timestamp operations with the handle's pool and index
//    into the provided command buffer.
// 3. Read the timestamps via the handle's get_time(); it returns
//    std::nullopt if they're not yet available.
// 4. Destruct the handle to return the query index to the pool.
#include <vulkan/utility/vk_dispatch_table.h>
#include <vulkan/vulkan.hpp>

#include <chrono>
#include <cstdint>
#include <memory>
#include <optional>
#include <unordered_set>
#include <vector>

#include "device_context.hh"
namespace low_latency {
class QueueContext;
// Manages blocks of timestamp query pools, hands them out when requested,
// and allocates more when (if) we run out. Not copyable or movable: it is
// bound to a single QueueContext and owns the chunks it allocates.
class TimestampPool final {
private:
  QueueContext& queue_context;
  // A chunk of data which is useful for making timestamp queries.
  // Allows association of an index to a query pool and command buffer.
  // We reuse these when they're released.
  struct QueryChunk final {
  private:
    using free_indices_t = std::unordered_set<std::uint64_t>;
    // Number of query slots allocated per chunk.
    static constexpr auto CHUNK_SIZE = 512u;
  public:
    VkQueryPool query_pool;
    // Query indices currently unclaimed and available for handing out.
    std::unique_ptr<free_indices_t> free_indices;
    // Command buffers associated with this chunk's queries.
    // NOTE(review): presumably one per query slot — confirm against the .cc.
    std::unique_ptr<std::vector<VkCommandBuffer>> command_buffers;
  public:
    explicit QueryChunk(const QueueContext& queue_context);
    // Non-copyable, non-movable: owns Vulkan handles released in ~QueryChunk.
    QueryChunk(const QueryChunk&) = delete;
    QueryChunk(QueryChunk&&) = delete;
    QueryChunk& operator=(const QueryChunk&) = delete;
    QueryChunk& operator=(QueryChunk&&) = delete;
    ~QueryChunk();
  };
  // Chunks are held via shared_ptr so outstanding Handles can observe their
  // origin chunk through a weak_ptr without pinning its lifetime.
  std::unordered_set<std::shared_ptr<QueryChunk>> query_chunks;
public:
  // A handle represents a VkCommandBuffer and a query index.
  // Once the Handle goes out of scope, the query index will be returned
  // to the parent pool.
  struct Handle final {
  private:
    friend class TimestampPool;
  private:
    // For our spinlock functions this is the period in which we sleep
    // between attempts.
    static constexpr auto SPINLOCK_MAX_DELAY = std::chrono::microseconds(1);
  private:
    const TimestampPool& timestamp_pool;
    // Weak reference back to the chunk the index came from; the chunk may
    // be destroyed before the handle is.
    const std::weak_ptr<QueryChunk> origin_chunk;
  public:
    const VkQueryPool query_pool;
    const std::uint64_t query_index;
    const VkCommandBuffer command_buffer;
  public:
    Handle(const TimestampPool& timestamp_pool,
           const std::shared_ptr<QueryChunk>& origin_chunk,
           const std::uint64_t& query_index);
    // Non-copyable, non-movable: destruction returns the index exactly once.
    Handle(const Handle&) = delete;
    Handle(Handle&&) = delete;
    Handle& operator=(const Handle&) = delete;
    Handle& operator=(Handle&&) = delete;
    // Returns the query index to the origin chunk (if it still exists).
    ~Handle();
  public:
    // Records the timestamp commands for this handle into its command
    // buffer, with `tail` as the paired end-of-range handle.
    void setup_command_buffers(const Handle& tail,
                               const QueueContext& queue_context) const;
    // Attempts to get_time, but returns std::nullopt if it's not available
    // yet.
    std::optional<DeviceContext::Clock::time_point_t> get_time();
    // Calls get_time() repeatedly under a spinlock, or gives up at `until`
    // and returns std::nullopt.
    std::optional<DeviceContext::Clock::time_point_t>
    get_time_spinlock(const DeviceContext::Clock::time_point_t& until);
    // Calls get_time() repeatedly under a spinlock until it's available.
    DeviceContext::Clock::time_point_t get_time_spinlock();
    // Calls get_time with the assumption it's already available.
    DeviceContext::Clock::time_point_t get_time_required();
  };
public:
  explicit TimestampPool(QueueContext& queue_context);
  // Non-copyable, non-movable: referenced by outstanding Handles.
  TimestampPool(const TimestampPool&) = delete;
  TimestampPool(TimestampPool&&) = delete;
  TimestampPool& operator=(const TimestampPool&) = delete;
  TimestampPool& operator=(TimestampPool&&) = delete;
  ~TimestampPool();
public:
  // Hands out a Handle!
  std::shared_ptr<Handle> acquire();
};
} // namespace low_latency
#endif
|