// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/address_space.h"
#include "common/assert.h"

#define MAP_MEMBER(returnType)                                                                     \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap<           \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
#define MAP_MEMBER_CONST()                                                                         \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap<                      \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>

#define MM_MEMBER(returnType)                                                                      \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
        FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>

#define ALLOC_MEMBER(returnType)                                                                   \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
        FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
#define ALLOC_MEMBER_CONST()                                                                       \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits>                                           \
        FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
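
// For reference, the macros above just stamp out the template prologue that every out-of-line
// member definition of these class templates needs; e.g. MAP_MEMBER(void)::MapLocked(...) expands
// to the full template head and requires-clause followed by
// void FlatAddressSpaceMap<VaType, UnmappedVa, ...>::MapLocked(...).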

namespace Common {
MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
                                        std::function<void(VaType, VaType)> unmap_callback_)
    : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
    if (va_limit > VaMaximum) {
        ASSERT_MSG(false, "Invalid VA limit!");
    }
}

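// The address space is represented as a sorted vector of blocks: each block describes the region
// [block.virt, next_block.virt) and holds either the physical address it maps to or UnmappedPa.
// The vector is always terminated by an unmapped block, so the lookups below can rely on every
// block having a successor. An illustrative layout (not taken from real data):
//
//   blocks: {0x0000, Unmapped} {0x1000, phys A} {0x3000, Unmapped}
//
// describes an AS where [0x1000, 0x3000) is mapped to A and everything else is unmapped.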
MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) {
    VaType virt_end{virt + size};

    if (virt_end > va_limit) {
        ASSERT_MSG(false,
                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
                   virt_end, va_limit);
    }

    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
    if (block_end_successor == blocks.begin()) {
        ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
    }

    auto block_end_predecessor{std::prev(block_end_successor)};

    if (block_end_successor != blocks.end()) {
        // We have blocks in front of us; if one starts directly at virt_end then we don't have
        // to add a tail
        if (block_end_successor->virt != virt_end) {
            PaType tail_phys{[&]() -> PaType {
                if constexpr (!PaContigSplit) {
                    // Always propagate unmapped regions rather than calculating offset
                    return block_end_predecessor->phys;
                } else {
                    if (block_end_predecessor->Unmapped()) {
                        // Always propagate unmapped regions rather than calculating offset
                        return block_end_predecessor->phys;
                    } else {
                        return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
                    }
                }
            }()};

            if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
                // If this block's start would be overlapped by the map then reuse it as a tail
                // block, keeping its existing extra_info
                block_end_predecessor->virt = virt_end;
                block_end_predecessor->phys = tail_phys;

                // No longer the predecessor
                block_end_successor = block_end_predecessor--;
            } else {
                // Else insert a new one and we're done
                blocks.insert(block_end_successor,
                              {Block(virt, phys, extra_info),
                               Block(virt_end, tail_phys, block_end_predecessor->extra_info)});
                if (unmap_callback) {
                    unmap_callback(virt, size);
                }

                return;
            }
        }
    } else {
        // block_end_predecessor will always be unmapped, as the block list has to be terminated
        // by an unmapped chunk
        if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
            // Move the unmapped block start backwards
            block_end_predecessor->virt = virt_end;

            // No longer the predecessor
            block_end_successor = block_end_predecessor--;
        } else {
            // Else insert a new one and we're done
            blocks.insert(block_end_successor,
                          {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})});
            if (unmap_callback) {
                unmap_callback(virt, size);
            }

            return;
        }
    }

    auto block_start_successor{block_end_successor};

    // Walk the block vector to find the start successor as this is more efficient than another
    // binary search in most scenarios
    while (std::prev(block_start_successor)->virt >= virt) {
        block_start_successor--;
    }

    // Check that the start successor is either the end block or something in between
    if (block_start_successor->virt > virt_end) {
        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
    } else if (block_start_successor->virt == virt_end) {
        // We need to create a new block as there are none spare that we would overwrite
        blocks.insert(block_start_successor, Block(virt, phys, extra_info));
    } else {
        // Erase overwritten blocks
        if (auto erase_start{std::next(block_start_successor)};
            erase_start != block_end_successor) {
            blocks.erase(erase_start, block_end_successor);
        }

        // Reuse a block that would otherwise be overwritten as a start block
        block_start_successor->virt = virt;
        block_start_successor->phys = phys;
        block_start_successor->extra_info = extra_info;
    }

    if (unmap_callback) {
        unmap_callback(virt, size);
    }
}

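// UnmapLocked is the inverse of MapLocked: it punches an unmapped hole [virt, virt + size) into
// the block list, splitting the blocks at either boundary when they extend past the hole and
// erasing every block the hole fully covers, while preserving the invariant that no two unmapped
// blocks are adjacent.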
MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
    VaType virt_end{virt + size};

    if (virt_end > va_limit) {
        ASSERT_MSG(false,
                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
                   virt_end, va_limit);
    }

    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
    if (block_end_successor == blocks.begin()) {
        ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
                   virt_end);
    }

    auto block_end_predecessor{std::prev(block_end_successor)};

    auto walk_back_to_predecessor{[&](auto iter) {
        while (iter->virt >= virt) {
            iter--;
        }

        return iter;
    }};

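    // erase_blocks_with_end_unmapped handles the case where the block covering the end of the
    // unmapped range is itself unmapped: either the whole range collapses into an unmapped block
    // that also covers the start, or the unmapped end block is pulled back to begin at virt.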
    auto erase_blocks_with_end_unmapped{[&](auto unmapped_end) {
        auto block_start_predecessor{walk_back_to_predecessor(unmapped_end)};
        auto block_start_successor{std::next(block_start_predecessor)};

        auto erase_end{[&]() {
            if (block_start_predecessor->Unmapped()) {
                // If the start predecessor is unmapped then we can erase everything in our region
                // and be done
                return std::next(unmapped_end);
            } else {
                // Else reuse the unmapped end block as the start of our unmapped region, then
                // erase all blocks up to it
                unmapped_end->virt = virt;
                return unmapped_end;
            }
        }()};

        // We can't have two unmapped regions after each other
        if (erase_end != blocks.end() &&
            (erase_end == block_start_successor ||
             (block_start_predecessor->Unmapped() && erase_end->Unmapped()))) {
            ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
        }

        blocks.erase(block_start_successor, erase_end);
    }};

    // We can avoid any splitting logic if any of the following cases hold
    if (block_end_predecessor->Unmapped()) {
        if (block_end_predecessor->virt > virt) {
            erase_blocks_with_end_unmapped(block_end_predecessor);
        }

        if (unmap_callback) {
            unmap_callback(virt, size);
        }

        return; // The region is unmapped, bail out early
    } else if (block_end_successor == blocks.end()) {
        // This should never be reached, as the block list must always be terminated by an
        // unmapped block, which the case above would have handled
        ASSERT_MSG(false, "Unexpected Memory Manager state!");
    } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) {
        erase_blocks_with_end_unmapped(block_end_successor);

        if (unmap_callback) {
            unmap_callback(virt, size);
        }

        return; // The region is unmapped here and doesn't need splitting, bail out early
    } else if (block_end_successor->virt != virt_end) {
        // No block starts directly at virt_end, so we have to add a tail

        // The previous block is mapped, so the tail needs its physical address offset accordingly
        PaType tail_phys{[&]() {
            if constexpr (PaContigSplit) {
                return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
            } else {
                return block_end_predecessor->phys;
            }
        }()};

        if (block_end_predecessor->virt >= virt) {
            // If this block's start would be overlapped by the unmap then reuse it as a tail block
            block_end_predecessor->virt = virt_end;
            block_end_predecessor->phys = tail_phys;

            // No longer the predecessor
            block_end_successor = block_end_predecessor--;
        } else {
            blocks.insert(block_end_successor,
                          {Block(virt, UnmappedPa, {}),
                           Block(virt_end, tail_phys, block_end_predecessor->extra_info)});
            if (unmap_callback) {
                unmap_callback(virt, size);
            }

            // The previous block is mapped and ends before the unmapped region, so we're done
            return;
        }
    }

    // Walk the block vector to find the start predecessor as this is more efficient than another
    // binary search in most scenarios
    auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
    auto block_start_successor{std::next(block_start_predecessor)};

    if (block_start_successor->virt > virt_end) {
        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
    } else if (block_start_successor->virt == virt_end) {
        // There are no blocks between the start and the end that would let us skip inserting a
        // new one for the head

        // The previous block may be unmapped; if so, we don't need to insert an unmapped block
        // after it
        if (block_start_predecessor->Mapped()) {
            blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
        }
    } else if (block_start_predecessor->Unmapped()) {
        // If the previous block is unmapped then the head is already unmapped, so just erase the
        // fully overwritten blocks
        blocks.erase(block_start_successor, block_end_predecessor);
    } else {
        // Erase overwritten blocks, skipping the first one as we have written the unmapped start
        // block there
        if (auto erase_start{std::next(block_start_successor)};
            erase_start != block_end_successor) {
            blocks.erase(erase_start, block_end_successor);
        }

        // Add in the unmapped block header
        block_start_successor->virt = virt;
        block_start_successor->phys = UnmappedPa;
    }

    if (unmap_callback) {
        unmap_callback(virt, size);
    }
}

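// FlatAllocator reuses the map above with PaType = bool, so "mapped" simply means allocated.
// Allocate() bumps a linear cursor forwards and only falls back to a full gap search once the
// cursor would run past the VA limit. A minimal usage sketch (hypothetical template arguments
// and addresses, shown only for illustration):
//
//   Common::FlatAllocator<u64, 0, 32> allocator{0x10000, 1ULL << 30};
//   const u64 va = allocator.Allocate(0x2000); // carve out an 8 KiB range
//   allocator.AllocateFixed(0x80000, 0x1000);  // reserve 4 KiB at a fixed address
//   allocator.Free(va, 0x2000);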
ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
    : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}

ALLOC_MEMBER(VaType)::Allocate(VaType size) {
    std::scoped_lock lock(this->block_mutex);

    VaType alloc_start{UnmappedVa};
    VaType alloc_end{current_linear_alloc_end + size};

    // Avoid searching backwards in the address space if possible
    if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
        auto alloc_end_successor{
            std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
        if (alloc_end_successor == this->blocks.begin()) {
            ASSERT_MSG(false, "First block in AS map is invalid!");
        }

        auto alloc_end_predecessor{std::prev(alloc_end_successor)};
        if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
            alloc_start = current_linear_alloc_end;
        } else {
            // Skip over any fixed mappings in front of us
            while (alloc_end_successor != this->blocks.end()) {
                if (alloc_end_successor->virt - alloc_end_predecessor->virt < size ||
                    alloc_end_predecessor->Mapped()) {
                    // This gap is too small or already in use, move past it
                    alloc_end_predecessor = alloc_end_successor++;

                    // Use the VA limit to calculate if we can fit in the final block since it
                    // has no successor
                    if (alloc_end_successor == this->blocks.end()) {
                        alloc_end = alloc_end_predecessor->virt + size;

                        if (alloc_end >= alloc_end_predecessor->virt &&
                            alloc_end <= this->va_limit) {
                            alloc_start = alloc_end_predecessor->virt;
                        }
                    }
                } else {
                    // Found an unmapped gap in front of us that is large enough
                    alloc_start = alloc_end_predecessor->virt;
                    break;
                }
            }
        }
    }

    if (alloc_start != UnmappedVa) {
        current_linear_alloc_end = alloc_start + size;
    } else { // If linear allocation overflows the AS then find a gap
        if (this->blocks.size() <= 2) {
            ASSERT_MSG(false, "Unexpected allocator state!");
        }

        auto search_predecessor{this->blocks.begin()};
        auto search_successor{std::next(search_predecessor)};

        while (search_successor != this->blocks.end() &&
               (search_successor->virt - search_predecessor->virt < size ||
                search_predecessor->Mapped())) {
            search_predecessor = search_successor++;
        }

        if (search_successor != this->blocks.end()) {
            alloc_start = search_predecessor->virt;
        } else {
            return {}; // AS is full
        }
    }

    this->MapLocked(alloc_start, true, size, {});
    return alloc_start;
}

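// Fixed allocations and frees go straight through to the underlying map; since PaType is bool for
// the allocator (see address_space.h), mapping a range to true marks it as allocated and
// unmapping it releases it.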
ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
    this->Map(virt, true, size);
}

ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
    this->Unmap(virt, size);
}
} // namespace Common