#include <cstdint>
#include <cassert>
#include <array>
#include <algorithm>

#include "util.hh"
#include "phys_mm.hh"
static constexpr uintptr_t page_bytes = 4096;     // bytes per physical page
static constexpr uintptr_t ram_base = 0x80000000; // physical address of the first managed page
static constexpr unsigned phys_pages = 65536;     // enough for 256 MiB of RAM
static constexpr unsigned size_steps = 17;        // log2(256Mi)-log2(4Ki)+1

using idx_t = uint16_t; // must be able to represent 0..phys_pages-1
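
// Example: size_ln2 == 3 describes a block of 2^3 == 8 pages (32 KiB); the
// largest block (size_ln2 == size_steps-1 == 16) spans all 65536 pages.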

// Per-page block descriptor. Only the descriptor at the first page of each
// allocation block carries meaningful data.
struct pb_t {
    idx_t next_free_by_size; // next block in the free list for this size
    uint8_t size_ln2;        // block size as log2 of the page count
    unsigned used:1;         // block is currently allocated
    unsigned nbs_valid:1;    // next_free_by_size is valid (not the list end)
} __attribute__((packed));

static pb_t phys_blocks[phys_pages];

// Heads of the per-size free lists; phys_pages serves as the empty sentinel.
static unsigned size_starts[size_steps];

// Symbol provided by the linker script: end of the kernel image.
extern uint32_t _kernel_real_pt_end;
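
// Design note: the free lists are intrusive and link blocks by array index
// rather than by pointer, so a packed descriptor fits in four bytes and the
// whole table costs 256 KiB for 65536 pages.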

// Index of a descriptor within phys_blocks.
static unsigned _idx(pb_t const& pb) __attribute__((const));
static unsigned _idx(pb_t const& pb) {
    return &pb - phys_blocks;
}

// Descriptor of the block that directly follows pb in memory.
static pb_t& _next_by_pos(pb_t const& pb) __attribute__((pure));
static pb_t& _next_by_pos(pb_t const& pb) {
    auto idx = _idx(pb);
    assert(idx+_pow2(pb.size_ln2) < phys_pages);

    return phys_blocks[idx+_pow2(pb.size_ln2)];
}

// True if pb is the block at the highest position in memory.
static bool _is_last_pos(pb_t const& pb) __attribute__((pure));
static bool _is_last_pos(pb_t const& pb) {
    auto idx = _idx(pb);
    return (idx+_pow2(pb.size_ln2) >= phys_pages);
}
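
// Example: for an 8-page block (size_ln2 == 3) at index 16, _next_by_pos()
// returns the descriptor at index 24.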

// // Caution: Slow, performs linear search
// static pb_t& _prev_by_pos(pb_t const& pb) __attribute__((pure));
// static pb_t& _prev_by_pos(pb_t const& pb) {
//     auto idx = _idx(pb);
//     assert(idx != 0);
//
//     unsigned i = 0, prev_i;
//     while(i < idx) {
//         prev_i = i;
//         i = i + _pow2(phys_blocks[i].size_ln2);
//     }
//     assert(i == idx);
//     return phys_blocks[prev_i];
// }

// Next block in the free list for pb's size. Only valid if nbs_valid is set.
static pb_t& _next_by_size(pb_t const& pb) __attribute__((pure));
static pb_t& _next_by_size(pb_t const& pb) {
    assert(pb.nbs_valid);

    return phys_blocks[pb.next_free_by_size];
}

// Push elem onto the front of the free list anchored at head.
static void _ll_insert(unsigned& head, pb_t& elem) {
    if(head != phys_pages) {
        elem.next_free_by_size = head;
        elem.nbs_valid = true;
    } else
        elem.nbs_valid = false;

    head = _idx(elem);
}

// Unlink elem from the free list anchored at head.
static void _ll_remove(unsigned& head, pb_t& elem) {
    assert(head != phys_pages);

    // Removal from the head is fast
    if(head == _idx(elem)) {
        if(elem.nbs_valid)
            head = elem.next_free_by_size;
        else
            head = phys_pages;
        elem.nbs_valid = false;
    } else {
        // Search for the previous element
        pb_t* it = phys_blocks+head;
        while(&_next_by_size(*it) != &elem) {
            it = &_next_by_size(*it);
        }

        // Remove
        it->next_free_by_size = elem.next_free_by_size;
        it->nbs_valid = elem.nbs_valid;
        elem.nbs_valid = false;
    }
}
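
// Note: the lists are singly linked, so _ll_insert() and head removal are
// O(1), while unlinking an interior element is O(n) in the list length.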

// Returns true if block is the left half of the next larger
// allocation block, false if it is the right half
static bool _is_left(pb_t& block) __attribute__((pure));
static bool _is_left(pb_t& block) {
    auto idx = _idx(block);

    return !(idx & _pow2(block.size_ln2));
}

// Gets the left half of the allocation block of which block is the
// right half. Only valid if _is_left(block) would return false.
static pb_t& _get_left(pb_t& block) __attribute__((pure));
static pb_t& _get_left(pb_t& block) {
    return phys_blocks[_idx(block)-_pow2(block.size_ln2)];
}
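
// Example: the 2-page block at index 6 (binary 110) has bit 1 set, so it is
// the right half of the 4-page block starting at index 4, and _get_left()
// returns phys_blocks[6 - 2] == phys_blocks[4].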

// Split block into two buddies of half its size.
static void _split(pb_t& block) {
    assert(block.size_ln2 > 0);
    assert(!block.used);

    // Remove from free list for old size
    _ll_remove(size_starts[block.size_ln2], block);

    // Split
    --block.size_ln2;
    auto& next = _next_by_pos(block);
    next.size_ln2 = block.size_ln2;
    next.used = false;

    // Add to free list for new size
    _ll_insert(size_starts[block.size_ln2], block);
    _ll_insert(size_starts[block.size_ln2], next);
}
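
// Example: splitting a free 4-page block at index 8 yields two 2-page
// buddies at indices 8 and 10, both pushed onto size_starts[1].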

// Take block off its free list and mark it allocated.
static void _set_used(pb_t& block) {
    _ll_remove(size_starts[block.size_ln2], block);

    block.used = true;
}

// Allocate a block of 2^size pages at page index idx, splitting larger
// blocks as needed.
static void _alloc_at(unsigned idx, unsigned size) {
    auto& block = phys_blocks[idx];

    assert(block.size_ln2 >= size);
    assert(!block.used);

    while(block.size_ln2 > size)
        _split(block);

    _set_used(block);
}
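
// Example: _alloc_at(0, 9) splits the initial 65536-page block down to a
// 512-page block at index 0 and marks it used; the right halves created
// along the way stay on their respective free lists.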

// Returns the index of the first free block of at least 2^size pages,
// or phys_pages if none is available.
static unsigned _find_free(unsigned size) {
    for(unsigned i = size;i < size_steps;++i) {
        if(size_starts[i] != phys_pages)
            return size_starts[i];
    }

    return phys_pages;
}
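
// Example: a request of size 3 (8 pages) first checks size_starts[3]; if
// that list is empty, it falls through to larger sizes, whose blocks
// _alloc_at() later splits back down.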

// Recursively merge block with its free buddy, if possible
static pb_t& _merge(pb_t& block) {
    if(block.size_ln2 == size_steps-1)
        return block;

    if(_is_left(block) && !_next_by_pos(block).used &&
       (_next_by_pos(block).size_ln2 == block.size_ln2)) {
        // Remove from free list for old size
        _ll_remove(size_starts[block.size_ln2], block);
        _ll_remove(size_starts[block.size_ln2], _next_by_pos(block));

        // Merge
        ++block.size_ln2;

        // Add to free list for new size
        _ll_insert(size_starts[block.size_ln2], block);

        return _merge(block);
    } else if (!_is_left(block) && !_get_left(block).used &&
               (_get_left(block).size_ln2 == block.size_ln2)) {
        // Remove from free list for old size
        _ll_remove(size_starts[block.size_ln2], block);
        _ll_remove(size_starts[block.size_ln2], _get_left(block));

        // Merge
        auto& left = _get_left(block);
        ++left.size_ln2;

        // Add to free list for new size
        _ll_insert(size_starts[left.size_ln2], left);

        return _merge(left);
    }

    return block;
}
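
// Example merge cascade: freeing the single page at index 5 merges it with
// its buddy at index 4 into a 2-page block; if the 2-page block at index 6
// is also free, the recursion continues to a 4-page block at index 4, and
// so on up to size_steps-1.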

// Return the block starting at page index idx to the free lists.
static void _free_at(unsigned idx) {
    auto& block = phys_blocks[idx];
    assert(block.used);

    block.used = false;
    _ll_insert(size_starts[block.size_ln2], block);

    // Check merge
    _merge(block);
}

// Print one descriptor as "{index: next size [U]}".
static void _print_elem(pb_t const& block) {
    auto idx = _idx(block);
    assert(idx < phys_pages);

    printf(" {%u: ", idx);
    if(block.nbs_valid)
        printf("%u ", block.next_free_by_size);
    else
        printf("- ");
    printf("%u%s}", block.size_ln2, block.used?" U":"");
}

void phys_mm::init() {
    // All free lists start empty, except the largest size, which holds the
    // single block spanning all of memory.
    for(unsigned i = 0;i < size_steps-1;++i)
        size_starts[i] = phys_pages;
    size_starts[size_steps-1] = 0;

    phys_blocks[0].size_ln2 = size_steps-1;
    phys_blocks[0].used = false;
    phys_blocks[0].nbs_valid = false;

    // Reserve the pages already occupied by the kernel image.
    uintptr_t kernel_size = (uintptr_t)&_kernel_real_pt_end - ram_base;
    unsigned kernel_pages = kernel_size/page_bytes;
    if(kernel_size%page_bytes != 0)
        ++kernel_pages;

    // Note: _ln2() from util.hh is assumed to round up to the next
    // power-of-two exponent; a plain floor-log2 would under-reserve here.
    _alloc_at(0, _ln2(kernel_pages));
}
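
// Example: a 1.5 MiB kernel image occupies 384 pages; rounded up to the
// next power of two, that reserves 512 pages (2 MiB) at index 0.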

uintptr_t phys_mm::alloc(unsigned count) {
    assert(count > 0);

    // count is rounded up to a power-of-two block size (see the _ln2()
    // note in init()).
    unsigned i = _find_free(_ln2(count));
    if(i == phys_pages)
        throw bad_alloc{};

    _alloc_at(i, _ln2(count));

    return ram_base + page_bytes*i;
}

void phys_mm::free(uintptr_t base) {
    assert(base >= ram_base);
    assert((base - ram_base) % page_bytes == 0);

    unsigned idx = (base-ram_base)/page_bytes;
    _free_at(idx);
}
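
// Usage sketch (hypothetical caller):
//
//   uintptr_t p = phys_mm::alloc(3); // physical address of a block of at
//                                    // least 3 pages (rounded up to a
//                                    // power of two)
//   phys_mm::free(p);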

void phys_mm::print_state() {
    printf("Free lists:\n");
    for(unsigned i = 0;i < size_steps;++i) {
        printf("\t%u:", i);
        if(size_starts[i] != phys_pages) {
            pb_t* it = phys_blocks+size_starts[i];
            while(true) {
                _print_elem(*it);
                if(!it->nbs_valid)
                    break;

                it = &_next_by_size(*it);
            }
        }
        printf("\n");
    }

    printf("Blocks:\n");
    pb_t *it = phys_blocks;
    while (true) {
        _print_elem(*it);

        if(_is_last_pos(*it))
            break;

        it = &_next_by_pos(*it);
    }

    printf("\n\n");
}
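
// Illustrative output shape right after init(), assuming a 512-page kernel
// block (exact values depend on the kernel size):
//
//   Free lists:
//           ...
//           9: {512: - 9}
//           10: {1024: - 10}
//           ...
//   Blocks:
//    {0: - 9 U} {512: - 9} {1024: - 10} ...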

// Physical address of the first page past the kernel's block at index 0.
uintptr_t phys_mm::get_end_of_kernel_alloc() {
    pb_t& kern = phys_blocks[0];
    return ram_base + page_bytes*_idx(_next_by_pos(kern));
}