Rewrite heap allocation and linker section

gil 2024-05-18 21:09:22 -05:00
parent c7ccdc371d
commit 9e180a565a
3 changed files with 62 additions and 18 deletions

src/entry.rs

@@ -1,15 +1,59 @@
-use crate::abort;
 #[no_mangle]
 #[link_section = ".text.init"]
-unsafe extern "C" fn _enter() {
+unsafe extern "C" fn _entry() {
+    use core::arch::asm;
+    // let id = riscv::register::mhartid::read();
+    // write_tp(&id);
+    // TODO set up stack for all harts
+    // TODO set up CSRs for all harts
+    // if id != 0 { abort(); }
+    asm!(
+        /* Global pointer register `gp`:
+           push the current option stack to temporarily disable relaxation,
+           load the _global_pointer symbol (provided by the linker), then pop
+           the option stack. Relaxation must stay off for this instruction so
+           that it is emitted as:
+               1:
+               auipc gp, %pcrel_hi(_global_pointer)
+               addi gp, gp, %pcrel_lo(1b)
+           instead of:
+               mv gp, gp
+           which would do nothing. */
+        ".option push",
+        ".option norelax",
+        "la gp, _global_pointer",
+        ".option pop",
+    );
+    riscv::register::satp::write(0);
     let id = riscv::register::mhartid::read();
-    write_tp(&id);
-    // TODO: set up stack for all harts
-    // TODO: set up CSRs for all harts
-    if id != 0 { abort(); }
-    // TODO: clear BSS
-    // TODO: do hardware inits and wake other harts
+    if id != 0 { crate::abort(); }
+    // Clear BSS section
+    asm!(
+        "la t0, _bss_start",
+        "la t1, _bss_end",
+        "bgeu t0, t1, 2f",
+        "1:",
+        "sb zero, 0(t0)",
+        "addi t0, t0, 1",
+        "bne t0, t1, 1b",
+        "2:",
+        "la sp, _stack_end",
+        "li t0, (0b11 << 11) | (1 << 13)",
+        "csrw mstatus, t0",
+        "csrw mie, x0",
+    );
+    // TODO do hardware inits and wake other harts
 }
 #[inline]
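
For orientation (not part of this commit): in Rust terms, the BSS-clearing loop added above does roughly the following. Only the `_bss_start` and `_bss_end` symbol names are taken from the change; the rest is an illustrative sketch.

extern "C" {
    static mut _bss_start: u8;
    static mut _bss_end: u8;
}

// Zero every byte between the two linker-provided symbols, one byte at a
// time, mirroring the `sb zero, 0(t0)` / `addi t0, t0, 1` loop in the asm.
unsafe fn clear_bss() {
    let mut ptr = core::ptr::addr_of_mut!(_bss_start);
    let end = core::ptr::addr_of_mut!(_bss_end);
    while ptr < end {
        ptr.write_volatile(0);
        ptr = ptr.add(1);
    }
}

The `li t0, (0b11 << 11) | (1 << 13)` value written to mstatus appears to set MPP (bits 12:11) to machine mode and FS (bits 14:13) to Initial, and `csrw mie, x0` then masks all interrupts.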


@@ -14,23 +14,23 @@ static ALLOCATOR: LockedHeap = LockedHeap::empty();
 /// # Safety
 /// Must be called at most once.
 pub unsafe fn init() {
-    let heap_bottom;
+    let heap_start;
     let heap_size;
     // UNSAFE: This is fine, just loading some constants.
     unsafe {
         // using inline assembly is easier to access linker constants
         asm!(
-            "la {heap_bottom}, _kernel_heap_bottom",
-            "la {heap_size}, _kernel_heap_size",
-            heap_bottom = out(reg) heap_bottom,
+            "la {heap_start}, _heap_start",
+            "la {heap_size}, _heap_size",
+            heap_start = out(reg) heap_start,
             heap_size = out(reg) heap_size,
             options(nomem)
         )
     };
     println!(
         "Initialising kernel heap (bottom: {:#x}, size: {:#x})",
-        heap_bottom as usize, heap_size
+        heap_start as usize, heap_size
     );
     // UNSAFE: Fine to call at most once.
-    unsafe { ALLOCATOR.lock().init(heap_bottom, heap_size) };
+    unsafe { ALLOCATOR.lock().init(heap_start, heap_size) };
 }
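
For comparison (not part of this commit), the linker constants could also be read through extern statics instead of inline assembly, which is the alternative the "easier to access linker constants" comment alludes to. Only the `_heap_start` and `_heap_size` symbol names come from the change; the helper below is hypothetical.

// The linker-assigned *addresses* of these symbols carry the heap start and
// size, so the address of each symbol is the value we want, not its contents.
unsafe fn heap_bounds() -> (usize, usize) {
    extern "C" {
        static _heap_start: u8;
        static _heap_size: u8;
    }
    let start = core::ptr::addr_of!(_heap_start) as usize;
    let size = core::ptr::addr_of!(_heap_size) as usize;
    (start, size)
}

The values returned here would feed ALLOCATOR.lock().init(...) exactly as in the asm-based version above.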


@@ -52,7 +52,7 @@ SECTIONS {
     PROVIDE(_memory_end = ORIGIN(ram) + LENGTH(ram));
-    PROVIDE(_kernel_heap_bottom = _stack_end); # allocate heap to remaining physical memory
-    PROVIDE(_kernel_heap_top = ORIGIN(ram) + LENGTH(ram)); # top of heap is end of ram
-    PROVIDE(_kernel_heap_size = _kernel_heap_top - _kernel_heap_bottom); # capture size of heap
+    /* TODO Redefine heap sections */
+    PROVIDE(_heap_start = _stack_end); # allocate heap to remaining physical memory
+    PROVIDE(_heap_size = _memory_end - _heap_start); # capture size of heap
 }
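
To make the new size arithmetic concrete, here is a worked example with purely hypothetical numbers (a QEMU-virt-style board with 128 MiB of RAM at 0x8000_0000; the real values come from the memory regions in the linker script):

// Illustrative only; none of these constants exist in the kernel.
const RAM_ORIGIN: usize = 0x8000_0000;
const RAM_LENGTH: usize = 128 * 1024 * 1024;       // 0x0800_0000
const MEMORY_END: usize = RAM_ORIGIN + RAM_LENGTH; // _memory_end = 0x8800_0000
const STACK_END: usize = 0x8020_0000;              // assumed _stack_end
const HEAP_START: usize = STACK_END;               // _heap_start = 0x8020_0000
const HEAP_SIZE: usize = MEMORY_END - HEAP_START;  // _heap_size  = 0x07E0_0000 (126 MiB)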