path: root/src/server/resources.hh
#ifndef SERVER_RESOURCES_HH_
#define SERVER_RESOURCES_HH_

#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>

#include <boost/asio.hpp>
#include <boost/thread/thread_pool.hpp>

#include "server/chunk_data.hh"
#include "server/client.hh"
#include "server/database.hh"
#include "server/world.hh"
#include "shared/player.hh"
#include "shared/world.hh"

namespace server {
namespace resources {

// Occasionally the server needs access to certain objects quickly.
// These classes let the server take priority over the thread pool's work
// when necessary. Construct a low_priority_lock for work that can happen
// whenever, and a high_priority_lock for work that should happen right now.
template <typename T>
class lock_base {
private:
    T& obj;

protected:
    static inline std::mutex data_mutex;
    static inline std::mutex next_mutex;
    static inline std::mutex low_priority_mutex;

protected:
    lock_base(T& obj) noexcept : obj(obj) {}

public:
    lock_base(const lock_base&) = delete;
    // Moving is deleted: the derived destructors unconditionally unlock, so a
    // moved-from lock would unlock the mutexes a second time. Factory
    // functions can still return by value via C++17 guaranteed copy elision.
    lock_base(lock_base&&) = delete;
    virtual ~lock_base() = default;

    T& get() noexcept { return this->obj; }
    T& operator*() noexcept { return this->get(); }
    T* operator->() noexcept { return &this->get(); }
};

// Triple-mutex priority locking, per
// https://stackoverflow.com/questions/11666610/how-to-give-priority-to-privileged-thread-in-mutex-locking
// ty ecatmur! low_priority_mutex serializes the low-priority threads, so a
// high-priority thread only ever waits behind the one low-priority thread
// currently holding data_mutex (plus any earlier high-priority waiters).
template <typename T>
class low_priority_lock : public lock_base<T> {
public:
    low_priority_lock(T& t) noexcept : lock_base<T>(t) {
        lock_base<T>::low_priority_mutex.lock();
        lock_base<T>::next_mutex.lock();
        lock_base<T>::data_mutex.lock();
        lock_base<T>::next_mutex.unlock();
    }
    virtual ~low_priority_lock() noexcept {
        lock_base<T>::data_mutex.unlock();
        lock_base<T>::low_priority_mutex.unlock();
    }
};

template <typename T>
class high_priority_lock : public lock_base<T> {
public:
    high_priority_lock(T& t) noexcept : lock_base<T>(t) {
        lock_base<T>::next_mutex.lock();
        lock_base<T>::data_mutex.lock();
        lock_base<T>::next_mutex.unlock();
    }
    virtual ~high_priority_lock() noexcept {
        lock_base<T>::data_mutex.unlock();
    }
};
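
// A minimal usage sketch (names hypothetical). Both lock types over the same
// T share one set of static mutexes, so the main loop cuts ahead of queued
// workers:
//
//   int shared_counter = 0;
//
//   void pool_worker() {                      // many may queue behind each other
//       low_priority_lock<int> lock{shared_counter};
//       ++*lock;
//   }
//
//   void main_loop_tick() {                   // waits behind at most one worker
//       high_priority_lock<int> lock{shared_counter};
//       ++*lock;
//   }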

using chunk_map_key = shared::math::coords;
using chunk_map_value = std::unique_ptr<chunk_data>;
using chunk_map = std::unordered_map<chunk_map_key, chunk_map_value,
                                     decltype(&shared::world::chunk::hash),
                                     decltype(&shared::world::chunk::equal)>;
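
// Note: chunk_map's hasher and key-equal are function-pointer types, so a
// default-constructed chunk_map holds null callables. Construct it with the
// pointers, e.g. (the bucket count of 0 is a sketch, not a tuning choice):
//
//   chunk_map chunks{0, &shared::world::chunk::hash,
//                    &shared::world::chunk::equal};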

using client_map_key = shared::player::index_t;
using client_map_value = std::unique_ptr<server::client>;
using client_map = std::unordered_map<client_map_key, client_map_value>;

using pool_t = boost::asio::thread_pool;

struct resources {
    client_map& clients;
    chunk_map& chunks;
    pool_t& pool;
};

void init() noexcept;
void quit() noexcept;

low_priority_lock<resources> get_resources_lock() noexcept;
high_priority_lock<resources> get_resources_lock_immediate() noexcept;
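
// Sketch of typical use: the lock is released when it leaves scope, and the
// returned temporary binds without a move thanks to C++17 copy elision.
//
//   {
//       auto res = get_resources_lock();           // background/pool path
//       res->clients.size();
//   }                                              // lock released here
//   auto urgent = get_resources_lock_immediate();  // server-priority path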

inline void associate_client_chunk(const shared::math::coords& coords,
                                   client_map_value& client,
                                   chunk_map_value& chunk) noexcept {
    client->chunks.emplace(coords);
    chunk->players.emplace(client->index);
}
inline void disassociate_client_chunk(const shared::math::coords& coords,
                                      client_map_value& client,
                                      chunk_map_value& chunk) noexcept {
    client->chunks.erase(coords);
    chunk->players.erase(client->index);
}
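
// Sketch (player_index and coords hypothetical): link a client and a chunk
// while holding the resources lock.
//
//   auto res = get_resources_lock();
//   associate_client_chunk(coords, res->clients.at(player_index),
//                          res->chunks.at(coords));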

} // namespace resources
} // namespace server

#endif