From 65e6c16c4952d41502ca09042e24f4c7e6547536 Mon Sep 17 00:00:00 2001 From: tmu Date: Sun, 5 Mar 2023 22:00:28 -0800 Subject: [PATCH 01/57] Setup a general purpose allocator --- src/general-alloc.rs | 196 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 src/general-alloc.rs diff --git a/src/general-alloc.rs b/src/general-alloc.rs new file mode 100644 index 0000000..0cf7001 --- /dev/null +++ b/src/general-alloc.rs @@ -0,0 +1,196 @@ +/// General allocation, basically malloc/kalloc type thing +use crate::vm::palloc; +use crate::hw::param::*; +use core::mem::size_of; +use core::assert; + +/// This is either a data layer or an indirect layer +/// +/// If it is indirect, then the first u64 is the level. Level n points +/// to n-1, and 1 points to data layers +/// +/// If it is indirect, the second u64 is the valid/in use bits of the +/// corresponding u64s in the current header. +/// +/// If this is an indirect header, then all futher u64 are paried. The +/// even indexed (first) u64 is a pointer dwon one level. The odd +/// (second) one is the valid mask for that link. If the link is to a +/// data layer, then it corresponds to the parts of the data layer in +/// use. If the link is to another indirect layer, then ignore this +/// and decend and check the second u64 of that layer instead. (In +/// fact it should be marked invalid.) +/// +/// If this is a data layer, then the entire page is naturally aligned +/// data. By that I mean that a pow of 2 chunk of size n is n-byte +/// aligned. +type header = [u64; PAGE_SIZE/64]; + +pub struct GAlloc { + root: *mut header, +} + +// gives the index of the lowest set bit or None +fn lowest_set_bit(field: u64) -> Option { + let mut i = 0; + while (i < 64 && + !((field >> i) & 0x1)) { + i += 1; + } + match i { + 64 => { + None + }, + _ => { + i + } + } +} + +//same but for highest +fn highest_set_bit(field: u64) -> Option { + let mut i = 63; + while (i >= 0 && + !((field >> i) & 0x1)) { + i -= 1; + } + match i { + 0 => { + None + }, + _ => { + i + } + } +} + +// not efficient. make a lower bit mask with said # of ones +fn make_mask(mut num_ones: usize) -> u64 { + let mut out = 0; + while num_ones > 0 { + out = (out << 1) | 1; + num_ones -= 1; + } + out +} + +// pow of two that fits s +/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +fn round_up(mut s:u64) -> u64 { + s -= 1; + let mut i = 1; + while (i < 64) { + s |= s >> i; + } + s + 1 +} + +impl GAlloc { + pub fn new() -> Self { + let page = palloc() as *mut header; + page.0 = 1; + page.1 = 0; + // level 1 page with no valid pages + GAlloc { + root: page + } + } + + //TODO drop? 
What does that even mean here + + fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + let size = round_up(size) / 8; // pow 2, in usize units + let search_mask = make_mask(size); + + let mut i = 0; + while i < 64 { + if (dl_mask >> i) & search_mask == 0 { + // clear bits + return Some(i); + } else { + i += size; // skip size places + } + } + None + } + + fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { + if root[0] != 1 { + for i in (2..64).step_by(2) { + match walk_alloc(size, *(root.i)) { + None => {}, + ret => { return ret; } + } + } + return None; + } else { + let open: isize = -1; // neg if full, 0-63 for first empty + for i in (2..64).step_by(2) { + if (root[1] >> i) & 0x1 == 0 { + if open == -1 { open = i; } + continue; + } + + match search_data_layer(size, root[i+1]) { + None => {}, + Some(idx) => { + // found one, make pointer + let in_use = round_up(size) / 8; // how many to mark in use + root[i+1] = root[i+1] | (make_mask(in_use) << idx); + return Some(root[i].offset(idx)); + } + } + } + // couldn't find anything, try to add another indirect layer + if open >= 0 { + let mut page = palloc() as *mut header; + root[open] = page; + page[0] = root[0] - 1; + page[1] = 0; // entirely empty; + root[1] = root[1] | (1 << open); // down link valid + root[1] = root[1] & !(1 << (open+1)); // mask no longer valid + return walk_alloc(size, root[open]); + } + return None; + } + } + + pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); + match walk_alloc(size, self.root) { + Some(ret) => { Some(ret) }, + None => { + let new_root = palloc() as *mut header; + new_root[0] = self.root[0] + 1; + new_root[1] = 0x4; // single valid entry, old root + new_root[2] = self.root; + self.root = new_root; + walk_alloc(size, self.root) + } + } + } + + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); + if header[0] != 1 { + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + walk_dealloc(ptr, size, root[i]); + } + } else { + // bottom level, search for match + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + if root[i] as usize == test_ptr { + // match! + let offset = ptr as usize & (PAGE_SIZE - 1); + let clear_mask = make_mask(round_up(size) / 8); + root[i+1] = root[i+1] & !(clear_mask << offset); + } + } + } + } + + pub fn dealloc(mut self, ptr: *mut usize, size: usize) { + walk_dealloc(ptr, size, self.root); + } +} From f6a96647860a142a4a4e487de01b3e09e02f1946 Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 12:54:02 -0800 Subject: [PATCH 02/57] Move to inside vm. --- src/{general-alloc.rs => vm/galloc.rs} | 111 +++++++++++++++---------- 1 file changed, 68 insertions(+), 43 deletions(-) rename src/{general-alloc.rs => vm/galloc.rs} (75%) diff --git a/src/general-alloc.rs b/src/vm/galloc.rs similarity index 75% rename from src/general-alloc.rs rename to src/vm/galloc.rs index 0cf7001..aedb1b3 100644 --- a/src/general-alloc.rs +++ b/src/vm/galloc.rs @@ -1,8 +1,7 @@ /// General allocation, basically malloc/kalloc type thing -use crate::vm::palloc; use crate::hw::param::*; -use core::mem::size_of; use core::assert; +use crate::vm::palloc::PagePool; /// This is either a data layer or an indirect layer /// @@ -23,45 +22,45 @@ use core::assert; /// If this is a data layer, then the entire page is naturally aligned /// data. By that I mean that a pow of 2 chunk of size n is n-byte /// aligned. 
-type header = [u64; PAGE_SIZE/64]; +type Header = [u64; param::PAGE_SIZE/64]; pub struct GAlloc { - root: *mut header, + root: *mut Header, } // gives the index of the lowest set bit or None -fn lowest_set_bit(field: u64) -> Option { - let mut i = 0; - while (i < 64 && - !((field >> i) & 0x1)) { - i += 1; - } - match i { - 64 => { - None - }, - _ => { - i - } - } -} +// fn lowest_set_bit(field: u64) -> Option { +// let mut i = 0; +// while (i < 64 && +// !((field >> i) & 0x1)) { +// i += 1; +// } +// match i { +// 64 => { +// None +// }, +// _ => { +// i +// } +// } +// } //same but for highest -fn highest_set_bit(field: u64) -> Option { - let mut i = 63; - while (i >= 0 && - !((field >> i) & 0x1)) { - i -= 1; - } - match i { - 0 => { - None - }, - _ => { - i - } - } -} +// fn highest_set_bit(field: u64) -> Option { +// let mut i = 63; +// while (i >= 0 && +// !((field >> i) & 0x1)) { +// i -= 1; +// } +// match i { +// 0 => { +// None +// }, +// _ => { +// i +// } +// } +// } // not efficient. make a lower bit mask with said # of ones fn make_mask(mut num_ones: usize) -> u64 { @@ -84,9 +83,13 @@ fn round_up(mut s:u64) -> u64 { s + 1 } +fn get_page() -> *mut u8 { + unsafe { (*crate::vm::PAGEPOOL).palloc() } +} + impl GAlloc { - pub fn new() -> Self { - let page = palloc() as *mut header; + pub fn new(page_allocator: PagePool) -> Self { + let page = as *mut Header; page.0 = 1; page.1 = 0; // level 1 page with no valid pages @@ -95,6 +98,28 @@ impl GAlloc { } } + // readability helpers + fn set_level(mut self, level: u64) { + self.root.0 = level; + } + + fn set_valid_bits(mut self, bit_mask: u64) { + self.root.1 |= bit_mask; + } + + // clears the bits specified as arg + fn clear_valid_bits(mut self, bit_mask: u64) { + self.root.1 &= !bit_mask; + } + + fn level(self) -> u64 { + self.root.0 + } + + fn valid(self) -> u64 { + self.root.1 + } + //TODO drop? 
What does that even mean here fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { @@ -113,8 +138,8 @@ impl GAlloc { None } - fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { - if root[0] != 1 { + fn walk_alloc(size: usize, root: *mut Header) -> Option<*mut usize> { + if root.level() != 1 { for i in (2..64).step_by(2) { match walk_alloc(size, *(root.i)) { None => {}, @@ -125,7 +150,7 @@ impl GAlloc { } else { let open: isize = -1; // neg if full, 0-63 for first empty for i in (2..64).step_by(2) { - if (root[1] >> i) & 0x1 == 0 { + if (root.valid() >> i) & 0x1 == 0 { if open == -1 { open = i; } continue; } @@ -142,7 +167,7 @@ impl GAlloc { } // couldn't find anything, try to add another indirect layer if open >= 0 { - let mut page = palloc() as *mut header; + let mut page = palloc() as *mut Header; root[open] = page; page[0] = root[0] - 1; page[1] = 0; // entirely empty; @@ -159,7 +184,7 @@ impl GAlloc { match walk_alloc(size, self.root) { Some(ret) => { Some(ret) }, None => { - let new_root = palloc() as *mut header; + let new_root = palloc() as *mut Header; new_root[0] = self.root[0] + 1; new_root[1] = 0x4; // single valid entry, old root new_root[2] = self.root; @@ -169,9 +194,9 @@ impl GAlloc { } } - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut Header) { let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if header[0] != 1 { + if Header[0] != 1 { for i in (2..64).step_by(2) { if root[1] >> i == 0 {continue;} walk_dealloc(ptr, size, root[i]); From c5ad3ef9040156b614995fbcdc348a69f377b926 Mon Sep 17 00:00:00 2001 From: tmu Date: Sun, 5 Mar 2023 22:00:28 -0800 Subject: [PATCH 03/57] Setup a general purpose allocator --- src/general-alloc.rs | 196 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 src/general-alloc.rs diff --git a/src/general-alloc.rs b/src/general-alloc.rs new file mode 100644 index 0000000..0cf7001 --- /dev/null +++ b/src/general-alloc.rs @@ -0,0 +1,196 @@ +/// General allocation, basically malloc/kalloc type thing +use crate::vm::palloc; +use crate::hw::param::*; +use core::mem::size_of; +use core::assert; + +/// This is either a data layer or an indirect layer +/// +/// If it is indirect, then the first u64 is the level. Level n points +/// to n-1, and 1 points to data layers +/// +/// If it is indirect, the second u64 is the valid/in use bits of the +/// corresponding u64s in the current header. +/// +/// If this is an indirect header, then all futher u64 are paried. The +/// even indexed (first) u64 is a pointer dwon one level. The odd +/// (second) one is the valid mask for that link. If the link is to a +/// data layer, then it corresponds to the parts of the data layer in +/// use. If the link is to another indirect layer, then ignore this +/// and decend and check the second u64 of that layer instead. (In +/// fact it should be marked invalid.) +/// +/// If this is a data layer, then the entire page is naturally aligned +/// data. By that I mean that a pow of 2 chunk of size n is n-byte +/// aligned. 
+type header = [u64; PAGE_SIZE/64]; + +pub struct GAlloc { + root: *mut header, +} + +// gives the index of the lowest set bit or None +fn lowest_set_bit(field: u64) -> Option { + let mut i = 0; + while (i < 64 && + !((field >> i) & 0x1)) { + i += 1; + } + match i { + 64 => { + None + }, + _ => { + i + } + } +} + +//same but for highest +fn highest_set_bit(field: u64) -> Option { + let mut i = 63; + while (i >= 0 && + !((field >> i) & 0x1)) { + i -= 1; + } + match i { + 0 => { + None + }, + _ => { + i + } + } +} + +// not efficient. make a lower bit mask with said # of ones +fn make_mask(mut num_ones: usize) -> u64 { + let mut out = 0; + while num_ones > 0 { + out = (out << 1) | 1; + num_ones -= 1; + } + out +} + +// pow of two that fits s +/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +fn round_up(mut s:u64) -> u64 { + s -= 1; + let mut i = 1; + while (i < 64) { + s |= s >> i; + } + s + 1 +} + +impl GAlloc { + pub fn new() -> Self { + let page = palloc() as *mut header; + page.0 = 1; + page.1 = 0; + // level 1 page with no valid pages + GAlloc { + root: page + } + } + + //TODO drop? What does that even mean here + + fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + let size = round_up(size) / 8; // pow 2, in usize units + let search_mask = make_mask(size); + + let mut i = 0; + while i < 64 { + if (dl_mask >> i) & search_mask == 0 { + // clear bits + return Some(i); + } else { + i += size; // skip size places + } + } + None + } + + fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { + if root[0] != 1 { + for i in (2..64).step_by(2) { + match walk_alloc(size, *(root.i)) { + None => {}, + ret => { return ret; } + } + } + return None; + } else { + let open: isize = -1; // neg if full, 0-63 for first empty + for i in (2..64).step_by(2) { + if (root[1] >> i) & 0x1 == 0 { + if open == -1 { open = i; } + continue; + } + + match search_data_layer(size, root[i+1]) { + None => {}, + Some(idx) => { + // found one, make pointer + let in_use = round_up(size) / 8; // how many to mark in use + root[i+1] = root[i+1] | (make_mask(in_use) << idx); + return Some(root[i].offset(idx)); + } + } + } + // couldn't find anything, try to add another indirect layer + if open >= 0 { + let mut page = palloc() as *mut header; + root[open] = page; + page[0] = root[0] - 1; + page[1] = 0; // entirely empty; + root[1] = root[1] | (1 << open); // down link valid + root[1] = root[1] & !(1 << (open+1)); // mask no longer valid + return walk_alloc(size, root[open]); + } + return None; + } + } + + pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); + match walk_alloc(size, self.root) { + Some(ret) => { Some(ret) }, + None => { + let new_root = palloc() as *mut header; + new_root[0] = self.root[0] + 1; + new_root[1] = 0x4; // single valid entry, old root + new_root[2] = self.root; + self.root = new_root; + walk_alloc(size, self.root) + } + } + } + + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); + if header[0] != 1 { + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + walk_dealloc(ptr, size, root[i]); + } + } else { + // bottom level, search for match + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + if root[i] as usize == test_ptr { + // match! 
+ let offset = ptr as usize & (PAGE_SIZE - 1); + let clear_mask = make_mask(round_up(size) / 8); + root[i+1] = root[i+1] & !(clear_mask << offset); + } + } + } + } + + pub fn dealloc(mut self, ptr: *mut usize, size: usize) { + walk_dealloc(ptr, size, self.root); + } +} From 730a4a4158497ee7a925367a68c2715c16dd443c Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 17:19:41 -0800 Subject: [PATCH 04/57] Rebase onto 25 --- src/vm.rs | 3 +- src/vm/galloc.rs | 314 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 231 insertions(+), 86 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index bb678d6..50686af 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -2,13 +2,13 @@ pub mod palloc; pub mod ptable; pub mod process; +pub mod galloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; use ptable::{kpage_init, PageTable}; use process::Process; - use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. @@ -24,6 +24,7 @@ pub enum VmError { PartialPalloc, PallocFail, PfreeFail, + GNoSpace, } pub trait Resource {} diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index aedb1b3..e8a09ab 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -1,7 +1,7 @@ /// General allocation, basically malloc/kalloc type thing use crate::hw::param::*; use core::assert; -use crate::vm::palloc::PagePool; +use crate::vm::*; /// This is either a data layer or an indirect layer /// @@ -22,7 +22,30 @@ use crate::vm::palloc::PagePool; /// If this is a data layer, then the entire page is naturally aligned /// data. By that I mean that a pow of 2 chunk of size n is n-byte /// aligned. -type Header = [u64; param::PAGE_SIZE/64]; + +// I'd use page size but rust won't let me +// type Header = [u64; 4096/64]; + +#[repr(C)] +#[derive(Clone, Copy)] +struct HeaderPair { + valid: u64, + down: *mut Header, +} + +#[repr(C)] +#[derive(Clone, Copy)] +struct Indirect { + level: u64, + valid: u64, + contents: [HeaderPair; 255], +} + +#[derive(Clone, Copy)] +union Header { + data: [u64; 4096/64], + indirect: Indirect, +} pub struct GAlloc { root: *mut Header, @@ -63,7 +86,7 @@ pub struct GAlloc { // } // not efficient. 
make a lower bit mask with said # of ones -fn make_mask(mut num_ones: usize) -> u64 { +fn make_mask(mut num_ones: u64) -> u64 { let mut out = 0; while num_ones > 0 { out = (out << 1) | 1; @@ -77,52 +100,51 @@ fn make_mask(mut num_ones: usize) -> u64 { fn round_up(mut s:u64) -> u64 { s -= 1; let mut i = 1; - while (i < 64) { + while i < 64 { s |= s >> i; } s + 1 } -fn get_page() -> *mut u8 { - unsafe { (*crate::vm::PAGEPOOL).palloc() } +fn get_page() -> Result<*mut usize, VmError> { + match unsafe { (*PAGEPOOL).palloc() } { + Err(e) => { + Err(e) + }, + Ok(page) => { + Ok(page.addr as *mut usize) + } + } +} + + +impl Drop for GAlloc { + fn drop(&mut self) { + panic!("Dropped your general allocator") + } } impl GAlloc { - pub fn new(page_allocator: PagePool) -> Self { - let page = as *mut Header; - page.0 = 1; - page.1 = 0; + pub fn new() -> Self { + let page = match get_page() { + Err(e) => { + panic!("Couldn't initialize the header for general alloc: {:?}", e) + }, + Ok(addr) => { + addr as *mut Header + } + }; + unsafe { + (*page).indirect.level = 1; + (*page).indirect.valid = 0; + } // level 1 page with no valid pages GAlloc { root: page } } - // readability helpers - fn set_level(mut self, level: u64) { - self.root.0 = level; - } - - fn set_valid_bits(mut self, bit_mask: u64) { - self.root.1 |= bit_mask; - } - - // clears the bits specified as arg - fn clear_valid_bits(mut self, bit_mask: u64) { - self.root.1 &= !bit_mask; - } - - fn level(self) -> u64 { - self.root.0 - } - - fn valid(self) -> u64 { - self.root.1 - } - - //TODO drop? What does that even mean here - - fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + fn search_data_layer(size: u64, dl_mask: u64) -> Option { let size = round_up(size) / 8; // pow 2, in usize units let search_mask = make_mask(size); @@ -138,84 +160,206 @@ impl GAlloc { None } - fn walk_alloc(size: usize, root: *mut Header) -> Option<*mut usize> { - if root.level() != 1 { - for i in (2..64).step_by(2) { - match walk_alloc(size, *(root.i)) { - None => {}, - ret => { return ret; } + unsafe fn walk_alloc(size: usize, root: &mut Header) -> Result<*mut usize, VmError> { + let mut open: isize = -1; // neg if full, 0-31 for first empty + if root.indirect.level != 1 { + for i in 0..32 { + if (root.indirect.valid >> i) & 0x1 == 0{ + // invalid down link + if open != -1 { open = i; } + } else { + // this is a down link we can follow + let down = &mut (unsafe { + *root.indirect.contents[i as usize].down + }); + match Self::walk_alloc(size, down) { + Err(_) => {}, + ret => { return ret; } + } } } - return None; + // checked all valid down links and none of them are valid + // now check if we can add a valid one (was there a hole) + + if open != -1 { + let page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + // insert a new page + let p_ref = &mut (unsafe { *page }); + p_ref.indirect.level = root.indirect.level -1; + p_ref.indirect.valid = 0; + root.indirect.contents[open as usize].down = p_ref; + // root.indirect.contents[open as usize].valid is not needed, as p_ref is not a data layer + root.indirect.valid = root.indirect.valid | 1 << open; + return Self::walk_alloc(size, p_ref); + } + // no space and no holes for further intermediate levels + // in any case, pass the error up + return Err(VmError::GNoSpace); } else { - let open: isize = -1; // neg if full, 0-63 for first empty - for i in (2..64).step_by(2) { - if (root.valid() >> i) & 0x1 == 0 { - if open == -1 { open = i; } + // this 
is a level 1 layer, and points to data layers + for i in 0..32 { + let i = i as usize; + if (root.indirect.valid >> i) & 0x1 == 0 { + // this is a data page down link that isn't in use + if open == -1 { open = i as isize; } continue; } - match search_data_layer(size, root[i+1]) { + match Self::search_data_layer(size as u64, + root.indirect.contents[i].valid) { None => {}, Some(idx) => { - // found one, make pointer - let in_use = round_up(size) / 8; // how many to mark in use - root[i+1] = root[i+1] | (make_mask(in_use) << idx); - return Some(root[i].offset(idx)); + // found space, mark and make pointer + let in_use = round_up(size as u64) / 8; // how many to mark in use + root.indirect.contents[i].valid = + root.indirect.contents[i].valid | (make_mask(in_use) << idx); + let data_page = root.indirect.contents[i].down as *mut usize; + return Ok(unsafe { data_page.offset(idx as isize) }); } } } - // couldn't find anything, try to add another indirect layer - if open >= 0 { - let mut page = palloc() as *mut Header; - root[open] = page; - page[0] = root[0] - 1; - page[1] = 0; // entirely empty; - root[1] = root[1] | (1 << open); // down link valid - root[1] = root[1] & !(1 << (open+1)); // mask no longer valid - return walk_alloc(size, root[open]); + // couldn't find anything, try to add another data page + if open == -1 { + let open = open as usize; + let page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + root.indirect.contents[open].down = page; + root.indirect.contents[open].valid = 0; // all free + // don't set page meta, because this is a data page + root.indirect.valid = root.indirect.valid | (1 << open); // down link valid + return Self::walk_alloc(size, &mut (unsafe { + *(root.indirect.contents[open].down) + })); } - return None; + return Err(VmError::GNoSpace); } } - pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + pub fn alloc(mut self, size: usize) -> Result<*mut usize, VmError> { assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); - match walk_alloc(size, self.root) { - Some(ret) => { Some(ret) }, - None => { - let new_root = palloc() as *mut Header; - new_root[0] = self.root[0] + 1; - new_root[1] = 0x4; // single valid entry, old root - new_root[2] = self.root; - self.root = new_root; - walk_alloc(size, self.root) + match unsafe {Self::walk_alloc(size, &mut (*self.root)) } { + Ok(ret) => { Ok(ret) }, + Err(_) => { + // alloc failed. 
try to bump the root up (note that + // this may also fail if the issue was out of pages) + let mut page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + unsafe { + (*page).indirect.level = (*self.root).indirect.level + 1; // bump level + (*page).indirect.valid = 1; // single valid page (old root) + (*page).indirect.contents[0] = HeaderPair { + valid: 0, // unused since root is not a data page + down: self.root, + }; + } + self.root = page; + match unsafe { Self::walk_alloc(size, &mut (*self.root)) } { + Err(e) => { + Err(e) + }, + Ok(addr) => { + Ok(addr) + } + } } } } - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut Header) { - let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if Header[0] != 1 { - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - walk_dealloc(ptr, size, root[i]); + // returns (did_we_find_it, should_we_keep_this_branch) + unsafe fn walk_dealloc(ptr: *mut usize, size: usize, root: &mut Header) -> (bool, bool) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); // should match data_page base pointer + if root.indirect.level != 1 { + // down links are not data pages + let valid = root.indirect.valid; + if valid == 0 { + return (false, false); + } + let mut should_we_keep = false; + for i in 0..32 { + if (valid >> i) & 1 == 0 {continue;} + match Self::walk_dealloc(ptr, size, &mut (*root.indirect.contents[i].down)) { + (true, true) => { + return (true, true); + }, + (false, true) => { + // keep searching + should_we_keep = true; + }, + (found, false) => { + // trim branch and maybe report findings + root.indirect.valid = root.indirect.valid & !(1 << i); + // TODO free the said down link + if root.indirect.valid == 0 { + // nothing more to check, report findings + return (found, false); + } else if found { + return (true, true); + } + } + } + } + if should_we_keep { + return (false, true); + } else { + return (false, false); } } else { - // bottom level, search for match - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - if root[i] as usize == test_ptr { + // downlinks are data pages, search for match + let valid = root.indirect.valid; + for i in 0..32 { + if (valid >> i) & 1 == 0 {continue;} + if root.indirect.contents[i].down as usize == test_ptr { // match! 
let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(round_up(size) / 8); - root[i+1] = root[i+1] & !(clear_mask << offset); + let clear_mask = make_mask(round_up(size as u64) / 8); + root.indirect.contents[i].valid = + root.indirect.contents[i].valid & !(clear_mask << offset); + if root.indirect.contents[i].valid == 0 { + // free data page + // TODO free page + root.indirect.valid = valid & !(1 << i); + if root.indirect.valid == 0 { + // cleanup this indirect layer + return (true, false); + } else { + return (true, true); + } + } else { + return (true, true); + } } } + if valid == 0 { + return (false, false); + } else { + return (false, true); + } } } - pub fn dealloc(mut self, ptr: *mut usize, size: usize) { - walk_dealloc(ptr, size, self.root); + pub fn dealloc(&mut self, ptr: *mut usize, size: usize) { + unsafe { + // TODO consider mechanism for undoing root bump / when to do that + Self::walk_dealloc(ptr, size, &mut (*self.root)); + } } } From a185f0ec44ca838c0070fc7f41f0326e8d752ec4 Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 22:00:29 -0800 Subject: [PATCH 05/57] Rebase onto 25 --- src/mem.rs | 36 +++++++++++++++++++++++++----------- src/vm.rs | 2 ++ src/vm/galloc.rs | 39 ++------------------------------------- 3 files changed, 29 insertions(+), 48 deletions(-) diff --git a/src/mem.rs b/src/mem.rs index db0fb83..50b11a9 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,21 +1,34 @@ //! Kernel memory utilities use core::ops::{Deref, DerefMut}; +use core::mem::size_of; +use crate::vm::GALLOC; -/// Kernel heap allocated pointer. No guarantees on unique ownership or concurrent access. +/// Kernel heap allocated pointer. No guarantees on unique ownership +/// or concurrent access. pub struct Kbox { inner: *mut T, // NonNull, try NonNull later for lazy allocation impl. + size: usize, } impl Kbox { - pub fn new(mut data: T) -> Self { + pub fn new(data: T) -> Self { // How the allocater interface should be made use of. // Current constraints on allocator mean size_of::() must be less than 4Kb - // let new_ptr = global allocator, allocate us size_of::() bytes, please. - // new_ptr.write(data); <-- initialize newly allocated memory with our inner value. - //let new_ptr: *mut T = core::ptr::null_mut(); // Delete placeholder code. - let inner = &mut data; - Self { - inner, + let size = size_of::(); + match unsafe { (*GALLOC).alloc(size) } { + Err(e) => { + panic!("Kbox can't allocate: {:?}", e) + }, + Ok(ptr) => { + let new_ptr = ptr as *mut T; + unsafe { + *new_ptr = data; // <-- initialize newly allocated memory with our inner value. + Self { + inner: new_ptr, + size + } + } + } } } } @@ -25,7 +38,7 @@ unsafe impl Sync for Kbox {} impl Deref for Kbox { type Target = T; - + fn deref(&self) -> &Self::Target { unsafe { &*self.inner @@ -44,7 +57,8 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - // How to use the allocator interface. - // dealloc(self.inner) + unsafe { + (*GALLOC).dealloc(self.inner as *mut usize, self.size); + } } } diff --git a/src/vm.rs b/src/vm.rs index 50686af..288662b 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -7,6 +7,7 @@ pub mod galloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; +use galloc::GAlloc; use ptable::{kpage_init, PageTable}; use process::Process; use core::cell::OnceCell; @@ -47,6 +48,7 @@ pub struct TaskNode { /// kernel's page table struct. 
pub fn init() -> Result<(), PagePool>{ unsafe { PAGEPOOL.set(PagePool::new(bss_end(), dram_end()))?; } + log!(Debug, "Successfully initialized kernel page pool..."); // Map text, data, heap into kernel memory match kpage_init() { diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index e8a09ab..576c67d 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -51,40 +51,6 @@ pub struct GAlloc { root: *mut Header, } -// gives the index of the lowest set bit or None -// fn lowest_set_bit(field: u64) -> Option { -// let mut i = 0; -// while (i < 64 && -// !((field >> i) & 0x1)) { -// i += 1; -// } -// match i { -// 64 => { -// None -// }, -// _ => { -// i -// } -// } -// } - -//same but for highest -// fn highest_set_bit(field: u64) -> Option { -// let mut i = 63; -// while (i >= 0 && -// !((field >> i) & 0x1)) { -// i -= 1; -// } -// match i { -// 0 => { -// None -// }, -// _ => { -// i -// } -// } -// } - // not efficient. make a lower bit mask with said # of ones fn make_mask(mut num_ones: u64) -> u64 { let mut out = 0; @@ -99,8 +65,7 @@ fn make_mask(mut num_ones: u64) -> u64 { /* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ fn round_up(mut s:u64) -> u64 { s -= 1; - let mut i = 1; - while i < 64 { + for i in 1..64 { s |= s >> i; } s + 1 @@ -248,7 +213,7 @@ impl GAlloc { } } - pub fn alloc(mut self, size: usize) -> Result<*mut usize, VmError> { + pub fn alloc(&mut self, size: usize) -> Result<*mut usize, VmError> { assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); match unsafe {Self::walk_alloc(size, &mut (*self.root)) } { Ok(ret) => { Ok(ret) }, From 08311d5d3f172d09bdec687d5f53ef32919c1d91 Mon Sep 17 00:00:00 2001 From: tmu Date: Tue, 7 Mar 2023 10:13:14 -0800 Subject: [PATCH 06/57] align with new palloc and integrate with mem --- .gitignore | 2 + src/general-alloc.rs | 196 ------------------------------------------- src/mem.rs | 8 +- src/vm.rs | 14 ++++ src/vm/galloc.rs | 2 +- 5 files changed, 20 insertions(+), 202 deletions(-) delete mode 100644 src/general-alloc.rs diff --git a/.gitignore b/.gitignore index 4f8c6f7..0b8eca8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ /target *~ +*.o +*.ELF .rust* diff --git a/src/general-alloc.rs b/src/general-alloc.rs deleted file mode 100644 index 0cf7001..0000000 --- a/src/general-alloc.rs +++ /dev/null @@ -1,196 +0,0 @@ -/// General allocation, basically malloc/kalloc type thing -use crate::vm::palloc; -use crate::hw::param::*; -use core::mem::size_of; -use core::assert; - -/// This is either a data layer or an indirect layer -/// -/// If it is indirect, then the first u64 is the level. Level n points -/// to n-1, and 1 points to data layers -/// -/// If it is indirect, the second u64 is the valid/in use bits of the -/// corresponding u64s in the current header. -/// -/// If this is an indirect header, then all futher u64 are paried. The -/// even indexed (first) u64 is a pointer dwon one level. The odd -/// (second) one is the valid mask for that link. If the link is to a -/// data layer, then it corresponds to the parts of the data layer in -/// use. If the link is to another indirect layer, then ignore this -/// and decend and check the second u64 of that layer instead. (In -/// fact it should be marked invalid.) -/// -/// If this is a data layer, then the entire page is naturally aligned -/// data. By that I mean that a pow of 2 chunk of size n is n-byte -/// aligned. 
-type header = [u64; PAGE_SIZE/64]; - -pub struct GAlloc { - root: *mut header, -} - -// gives the index of the lowest set bit or None -fn lowest_set_bit(field: u64) -> Option { - let mut i = 0; - while (i < 64 && - !((field >> i) & 0x1)) { - i += 1; - } - match i { - 64 => { - None - }, - _ => { - i - } - } -} - -//same but for highest -fn highest_set_bit(field: u64) -> Option { - let mut i = 63; - while (i >= 0 && - !((field >> i) & 0x1)) { - i -= 1; - } - match i { - 0 => { - None - }, - _ => { - i - } - } -} - -// not efficient. make a lower bit mask with said # of ones -fn make_mask(mut num_ones: usize) -> u64 { - let mut out = 0; - while num_ones > 0 { - out = (out << 1) | 1; - num_ones -= 1; - } - out -} - -// pow of two that fits s -/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ -fn round_up(mut s:u64) -> u64 { - s -= 1; - let mut i = 1; - while (i < 64) { - s |= s >> i; - } - s + 1 -} - -impl GAlloc { - pub fn new() -> Self { - let page = palloc() as *mut header; - page.0 = 1; - page.1 = 0; - // level 1 page with no valid pages - GAlloc { - root: page - } - } - - //TODO drop? What does that even mean here - - fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { - let size = round_up(size) / 8; // pow 2, in usize units - let search_mask = make_mask(size); - - let mut i = 0; - while i < 64 { - if (dl_mask >> i) & search_mask == 0 { - // clear bits - return Some(i); - } else { - i += size; // skip size places - } - } - None - } - - fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { - if root[0] != 1 { - for i in (2..64).step_by(2) { - match walk_alloc(size, *(root.i)) { - None => {}, - ret => { return ret; } - } - } - return None; - } else { - let open: isize = -1; // neg if full, 0-63 for first empty - for i in (2..64).step_by(2) { - if (root[1] >> i) & 0x1 == 0 { - if open == -1 { open = i; } - continue; - } - - match search_data_layer(size, root[i+1]) { - None => {}, - Some(idx) => { - // found one, make pointer - let in_use = round_up(size) / 8; // how many to mark in use - root[i+1] = root[i+1] | (make_mask(in_use) << idx); - return Some(root[i].offset(idx)); - } - } - } - // couldn't find anything, try to add another indirect layer - if open >= 0 { - let mut page = palloc() as *mut header; - root[open] = page; - page[0] = root[0] - 1; - page[1] = 0; // entirely empty; - root[1] = root[1] | (1 << open); // down link valid - root[1] = root[1] & !(1 << (open+1)); // mask no longer valid - return walk_alloc(size, root[open]); - } - return None; - } - } - - pub fn alloc(mut self, size: usize) -> Option<*mut usize> { - assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); - match walk_alloc(size, self.root) { - Some(ret) => { Some(ret) }, - None => { - let new_root = palloc() as *mut header; - new_root[0] = self.root[0] + 1; - new_root[1] = 0x4; // single valid entry, old root - new_root[2] = self.root; - self.root = new_root; - walk_alloc(size, self.root) - } - } - } - - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { - let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if header[0] != 1 { - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - walk_dealloc(ptr, size, root[i]); - } - } else { - // bottom level, search for match - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - if root[i] as usize == test_ptr { - // match! 
- let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(round_up(size) / 8); - root[i+1] = root[i+1] & !(clear_mask << offset); - } - } - } - } - - pub fn dealloc(mut self, ptr: *mut usize, size: usize) { - walk_dealloc(ptr, size, self.root); - } -} diff --git a/src/mem.rs b/src/mem.rs index 50b11a9..ab575d8 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,7 +1,7 @@ //! Kernel memory utilities use core::ops::{Deref, DerefMut}; use core::mem::size_of; -use crate::vm::GALLOC; +use crate::vm::{galloc, gdealloc}; /// Kernel heap allocated pointer. No guarantees on unique ownership /// or concurrent access. @@ -15,7 +15,7 @@ impl Kbox { // How the allocater interface should be made use of. // Current constraints on allocator mean size_of::() must be less than 4Kb let size = size_of::(); - match unsafe { (*GALLOC).alloc(size) } { + match galloc(size) { Err(e) => { panic!("Kbox can't allocate: {:?}", e) }, @@ -57,8 +57,6 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - unsafe { - (*GALLOC).dealloc(self.inner as *mut usize, self.size); - } + gdealloc(self.inner as *mut usize, self.size); } } diff --git a/src/vm.rs b/src/vm.rs index 288662b..2d05515 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -15,6 +15,7 @@ use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. //static mut PAGEPOOL: PagePool = PagePool::new(bss_end(), dram_end()); static mut PAGEPOOL: OnceCell = OnceCell::new(); +static mut GALLOC: OnceCell = OnceCell::new(); /// Global kernel page table. pub static mut KPGTABLE: *mut PageTable = core::ptr::null_mut(); @@ -48,6 +49,7 @@ pub struct TaskNode { /// kernel's page table struct. pub fn init() -> Result<(), PagePool>{ unsafe { PAGEPOOL.set(PagePool::new(bss_end(), dram_end()))?; } + unsafe { GALLOC.set(GAlloc::new()); } log!(Debug, "Successfully initialized kernel page pool..."); // Map text, data, heap into kernel memory @@ -62,6 +64,18 @@ pub fn init() -> Result<(), PagePool>{ Ok(()) } +pub fn galloc(size: usize) -> Result<*mut usize, VmError> { + unsafe { + GALLOC.get_mut().unwrap().alloc(size) + } +} + +pub fn gdealloc(ptr: *mut usize, size: usize) { + unsafe { + GALLOC.get_mut().unwrap().dealloc(ptr, size) + } +} + pub unsafe fn test_palloc() { let allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); println!("allocd addr: {:?}", allocd.addr); diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index 576c67d..fa81087 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -72,7 +72,7 @@ fn round_up(mut s:u64) -> u64 { } fn get_page() -> Result<*mut usize, VmError> { - match unsafe { (*PAGEPOOL).palloc() } { + match unsafe { PAGEPOOL.get_mut().unwrap().palloc() } { Err(e) => { Err(e) }, From abb1c99ad87018a324983ec9d021dbbddcce8950 Mon Sep 17 00:00:00 2001 From: tmu Date: Sun, 5 Mar 2023 22:00:28 -0800 Subject: [PATCH 07/57] Setup a general purpose allocator --- src/general-alloc.rs | 196 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 src/general-alloc.rs diff --git a/src/general-alloc.rs b/src/general-alloc.rs new file mode 100644 index 0000000..0cf7001 --- /dev/null +++ b/src/general-alloc.rs @@ -0,0 +1,196 @@ +/// General allocation, basically malloc/kalloc type thing +use crate::vm::palloc; +use crate::hw::param::*; +use core::mem::size_of; +use core::assert; + +/// This is either a data layer or an indirect layer +/// +/// If it is indirect, then the first u64 is the level. 
Level n points +/// to n-1, and 1 points to data layers +/// +/// If it is indirect, the second u64 is the valid/in use bits of the +/// corresponding u64s in the current header. +/// +/// If this is an indirect header, then all futher u64 are paried. The +/// even indexed (first) u64 is a pointer dwon one level. The odd +/// (second) one is the valid mask for that link. If the link is to a +/// data layer, then it corresponds to the parts of the data layer in +/// use. If the link is to another indirect layer, then ignore this +/// and decend and check the second u64 of that layer instead. (In +/// fact it should be marked invalid.) +/// +/// If this is a data layer, then the entire page is naturally aligned +/// data. By that I mean that a pow of 2 chunk of size n is n-byte +/// aligned. +type header = [u64; PAGE_SIZE/64]; + +pub struct GAlloc { + root: *mut header, +} + +// gives the index of the lowest set bit or None +fn lowest_set_bit(field: u64) -> Option { + let mut i = 0; + while (i < 64 && + !((field >> i) & 0x1)) { + i += 1; + } + match i { + 64 => { + None + }, + _ => { + i + } + } +} + +//same but for highest +fn highest_set_bit(field: u64) -> Option { + let mut i = 63; + while (i >= 0 && + !((field >> i) & 0x1)) { + i -= 1; + } + match i { + 0 => { + None + }, + _ => { + i + } + } +} + +// not efficient. make a lower bit mask with said # of ones +fn make_mask(mut num_ones: usize) -> u64 { + let mut out = 0; + while num_ones > 0 { + out = (out << 1) | 1; + num_ones -= 1; + } + out +} + +// pow of two that fits s +/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +fn round_up(mut s:u64) -> u64 { + s -= 1; + let mut i = 1; + while (i < 64) { + s |= s >> i; + } + s + 1 +} + +impl GAlloc { + pub fn new() -> Self { + let page = palloc() as *mut header; + page.0 = 1; + page.1 = 0; + // level 1 page with no valid pages + GAlloc { + root: page + } + } + + //TODO drop? 
What does that even mean here + + fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + let size = round_up(size) / 8; // pow 2, in usize units + let search_mask = make_mask(size); + + let mut i = 0; + while i < 64 { + if (dl_mask >> i) & search_mask == 0 { + // clear bits + return Some(i); + } else { + i += size; // skip size places + } + } + None + } + + fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { + if root[0] != 1 { + for i in (2..64).step_by(2) { + match walk_alloc(size, *(root.i)) { + None => {}, + ret => { return ret; } + } + } + return None; + } else { + let open: isize = -1; // neg if full, 0-63 for first empty + for i in (2..64).step_by(2) { + if (root[1] >> i) & 0x1 == 0 { + if open == -1 { open = i; } + continue; + } + + match search_data_layer(size, root[i+1]) { + None => {}, + Some(idx) => { + // found one, make pointer + let in_use = round_up(size) / 8; // how many to mark in use + root[i+1] = root[i+1] | (make_mask(in_use) << idx); + return Some(root[i].offset(idx)); + } + } + } + // couldn't find anything, try to add another indirect layer + if open >= 0 { + let mut page = palloc() as *mut header; + root[open] = page; + page[0] = root[0] - 1; + page[1] = 0; // entirely empty; + root[1] = root[1] | (1 << open); // down link valid + root[1] = root[1] & !(1 << (open+1)); // mask no longer valid + return walk_alloc(size, root[open]); + } + return None; + } + } + + pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); + match walk_alloc(size, self.root) { + Some(ret) => { Some(ret) }, + None => { + let new_root = palloc() as *mut header; + new_root[0] = self.root[0] + 1; + new_root[1] = 0x4; // single valid entry, old root + new_root[2] = self.root; + self.root = new_root; + walk_alloc(size, self.root) + } + } + } + + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); + if header[0] != 1 { + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + walk_dealloc(ptr, size, root[i]); + } + } else { + // bottom level, search for match + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + if root[i] as usize == test_ptr { + // match! + let offset = ptr as usize & (PAGE_SIZE - 1); + let clear_mask = make_mask(round_up(size) / 8); + root[i+1] = root[i+1] & !(clear_mask << offset); + } + } + } + } + + pub fn dealloc(mut self, ptr: *mut usize, size: usize) { + walk_dealloc(ptr, size, self.root); + } +} From e305bf0d978949879efa9cbaa1fdbff13e7849bf Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 12:54:02 -0800 Subject: [PATCH 08/57] Move to inside vm. --- src/{general-alloc.rs => vm/galloc.rs} | 111 +++++++++++++++---------- 1 file changed, 68 insertions(+), 43 deletions(-) rename src/{general-alloc.rs => vm/galloc.rs} (75%) diff --git a/src/general-alloc.rs b/src/vm/galloc.rs similarity index 75% rename from src/general-alloc.rs rename to src/vm/galloc.rs index 0cf7001..aedb1b3 100644 --- a/src/general-alloc.rs +++ b/src/vm/galloc.rs @@ -1,8 +1,7 @@ /// General allocation, basically malloc/kalloc type thing -use crate::vm::palloc; use crate::hw::param::*; -use core::mem::size_of; use core::assert; +use crate::vm::palloc::PagePool; /// This is either a data layer or an indirect layer /// @@ -23,45 +22,45 @@ use core::assert; /// If this is a data layer, then the entire page is naturally aligned /// data. By that I mean that a pow of 2 chunk of size n is n-byte /// aligned. 
-type header = [u64; PAGE_SIZE/64]; +type Header = [u64; param::PAGE_SIZE/64]; pub struct GAlloc { - root: *mut header, + root: *mut Header, } // gives the index of the lowest set bit or None -fn lowest_set_bit(field: u64) -> Option { - let mut i = 0; - while (i < 64 && - !((field >> i) & 0x1)) { - i += 1; - } - match i { - 64 => { - None - }, - _ => { - i - } - } -} +// fn lowest_set_bit(field: u64) -> Option { +// let mut i = 0; +// while (i < 64 && +// !((field >> i) & 0x1)) { +// i += 1; +// } +// match i { +// 64 => { +// None +// }, +// _ => { +// i +// } +// } +// } //same but for highest -fn highest_set_bit(field: u64) -> Option { - let mut i = 63; - while (i >= 0 && - !((field >> i) & 0x1)) { - i -= 1; - } - match i { - 0 => { - None - }, - _ => { - i - } - } -} +// fn highest_set_bit(field: u64) -> Option { +// let mut i = 63; +// while (i >= 0 && +// !((field >> i) & 0x1)) { +// i -= 1; +// } +// match i { +// 0 => { +// None +// }, +// _ => { +// i +// } +// } +// } // not efficient. make a lower bit mask with said # of ones fn make_mask(mut num_ones: usize) -> u64 { @@ -84,9 +83,13 @@ fn round_up(mut s:u64) -> u64 { s + 1 } +fn get_page() -> *mut u8 { + unsafe { (*crate::vm::PAGEPOOL).palloc() } +} + impl GAlloc { - pub fn new() -> Self { - let page = palloc() as *mut header; + pub fn new(page_allocator: PagePool) -> Self { + let page = as *mut Header; page.0 = 1; page.1 = 0; // level 1 page with no valid pages @@ -95,6 +98,28 @@ impl GAlloc { } } + // readability helpers + fn set_level(mut self, level: u64) { + self.root.0 = level; + } + + fn set_valid_bits(mut self, bit_mask: u64) { + self.root.1 |= bit_mask; + } + + // clears the bits specified as arg + fn clear_valid_bits(mut self, bit_mask: u64) { + self.root.1 &= !bit_mask; + } + + fn level(self) -> u64 { + self.root.0 + } + + fn valid(self) -> u64 { + self.root.1 + } + //TODO drop? 
What does that even mean here fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { @@ -113,8 +138,8 @@ impl GAlloc { None } - fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { - if root[0] != 1 { + fn walk_alloc(size: usize, root: *mut Header) -> Option<*mut usize> { + if root.level() != 1 { for i in (2..64).step_by(2) { match walk_alloc(size, *(root.i)) { None => {}, @@ -125,7 +150,7 @@ impl GAlloc { } else { let open: isize = -1; // neg if full, 0-63 for first empty for i in (2..64).step_by(2) { - if (root[1] >> i) & 0x1 == 0 { + if (root.valid() >> i) & 0x1 == 0 { if open == -1 { open = i; } continue; } @@ -142,7 +167,7 @@ impl GAlloc { } // couldn't find anything, try to add another indirect layer if open >= 0 { - let mut page = palloc() as *mut header; + let mut page = palloc() as *mut Header; root[open] = page; page[0] = root[0] - 1; page[1] = 0; // entirely empty; @@ -159,7 +184,7 @@ impl GAlloc { match walk_alloc(size, self.root) { Some(ret) => { Some(ret) }, None => { - let new_root = palloc() as *mut header; + let new_root = palloc() as *mut Header; new_root[0] = self.root[0] + 1; new_root[1] = 0x4; // single valid entry, old root new_root[2] = self.root; @@ -169,9 +194,9 @@ impl GAlloc { } } - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut Header) { let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if header[0] != 1 { + if Header[0] != 1 { for i in (2..64).step_by(2) { if root[1] >> i == 0 {continue;} walk_dealloc(ptr, size, root[i]); From 482e9b9fb748780fcf2ff463ba578e87aa53b881 Mon Sep 17 00:00:00 2001 From: tmu Date: Sun, 5 Mar 2023 22:00:28 -0800 Subject: [PATCH 09/57] Setup a general purpose allocator --- src/general-alloc.rs | 196 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 src/general-alloc.rs diff --git a/src/general-alloc.rs b/src/general-alloc.rs new file mode 100644 index 0000000..0cf7001 --- /dev/null +++ b/src/general-alloc.rs @@ -0,0 +1,196 @@ +/// General allocation, basically malloc/kalloc type thing +use crate::vm::palloc; +use crate::hw::param::*; +use core::mem::size_of; +use core::assert; + +/// This is either a data layer or an indirect layer +/// +/// If it is indirect, then the first u64 is the level. Level n points +/// to n-1, and 1 points to data layers +/// +/// If it is indirect, the second u64 is the valid/in use bits of the +/// corresponding u64s in the current header. +/// +/// If this is an indirect header, then all futher u64 are paried. The +/// even indexed (first) u64 is a pointer dwon one level. The odd +/// (second) one is the valid mask for that link. If the link is to a +/// data layer, then it corresponds to the parts of the data layer in +/// use. If the link is to another indirect layer, then ignore this +/// and decend and check the second u64 of that layer instead. (In +/// fact it should be marked invalid.) +/// +/// If this is a data layer, then the entire page is naturally aligned +/// data. By that I mean that a pow of 2 chunk of size n is n-byte +/// aligned. 
+type header = [u64; PAGE_SIZE/64]; + +pub struct GAlloc { + root: *mut header, +} + +// gives the index of the lowest set bit or None +fn lowest_set_bit(field: u64) -> Option { + let mut i = 0; + while (i < 64 && + !((field >> i) & 0x1)) { + i += 1; + } + match i { + 64 => { + None + }, + _ => { + i + } + } +} + +//same but for highest +fn highest_set_bit(field: u64) -> Option { + let mut i = 63; + while (i >= 0 && + !((field >> i) & 0x1)) { + i -= 1; + } + match i { + 0 => { + None + }, + _ => { + i + } + } +} + +// not efficient. make a lower bit mask with said # of ones +fn make_mask(mut num_ones: usize) -> u64 { + let mut out = 0; + while num_ones > 0 { + out = (out << 1) | 1; + num_ones -= 1; + } + out +} + +// pow of two that fits s +/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +fn round_up(mut s:u64) -> u64 { + s -= 1; + let mut i = 1; + while (i < 64) { + s |= s >> i; + } + s + 1 +} + +impl GAlloc { + pub fn new() -> Self { + let page = palloc() as *mut header; + page.0 = 1; + page.1 = 0; + // level 1 page with no valid pages + GAlloc { + root: page + } + } + + //TODO drop? What does that even mean here + + fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + let size = round_up(size) / 8; // pow 2, in usize units + let search_mask = make_mask(size); + + let mut i = 0; + while i < 64 { + if (dl_mask >> i) & search_mask == 0 { + // clear bits + return Some(i); + } else { + i += size; // skip size places + } + } + None + } + + fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { + if root[0] != 1 { + for i in (2..64).step_by(2) { + match walk_alloc(size, *(root.i)) { + None => {}, + ret => { return ret; } + } + } + return None; + } else { + let open: isize = -1; // neg if full, 0-63 for first empty + for i in (2..64).step_by(2) { + if (root[1] >> i) & 0x1 == 0 { + if open == -1 { open = i; } + continue; + } + + match search_data_layer(size, root[i+1]) { + None => {}, + Some(idx) => { + // found one, make pointer + let in_use = round_up(size) / 8; // how many to mark in use + root[i+1] = root[i+1] | (make_mask(in_use) << idx); + return Some(root[i].offset(idx)); + } + } + } + // couldn't find anything, try to add another indirect layer + if open >= 0 { + let mut page = palloc() as *mut header; + root[open] = page; + page[0] = root[0] - 1; + page[1] = 0; // entirely empty; + root[1] = root[1] | (1 << open); // down link valid + root[1] = root[1] & !(1 << (open+1)); // mask no longer valid + return walk_alloc(size, root[open]); + } + return None; + } + } + + pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); + match walk_alloc(size, self.root) { + Some(ret) => { Some(ret) }, + None => { + let new_root = palloc() as *mut header; + new_root[0] = self.root[0] + 1; + new_root[1] = 0x4; // single valid entry, old root + new_root[2] = self.root; + self.root = new_root; + walk_alloc(size, self.root) + } + } + } + + fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); + if header[0] != 1 { + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + walk_dealloc(ptr, size, root[i]); + } + } else { + // bottom level, search for match + for i in (2..64).step_by(2) { + if root[1] >> i == 0 {continue;} + if root[i] as usize == test_ptr { + // match! 
+ let offset = ptr as usize & (PAGE_SIZE - 1); + let clear_mask = make_mask(round_up(size) / 8); + root[i+1] = root[i+1] & !(clear_mask << offset); + } + } + } + } + + pub fn dealloc(mut self, ptr: *mut usize, size: usize) { + walk_dealloc(ptr, size, self.root); + } +} From a9ff71e1db16003bafa4d76731c24c4c8997fef1 Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 17:19:41 -0800 Subject: [PATCH 10/57] Rebase onto 25 --- src/vm.rs | 3 +- src/vm/galloc.rs | 314 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 231 insertions(+), 86 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index b409ba5..a0e4bc2 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -2,13 +2,13 @@ pub mod palloc; pub mod ptable; pub mod process; +pub mod galloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; use ptable::{kpage_init, PageTable}; use process::Process; - use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. @@ -24,6 +24,7 @@ pub enum VmError { PartialPalloc, PallocFail, PfreeFail, + GNoSpace, } pub trait Resource {} diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index aedb1b3..e8a09ab 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -1,7 +1,7 @@ /// General allocation, basically malloc/kalloc type thing use crate::hw::param::*; use core::assert; -use crate::vm::palloc::PagePool; +use crate::vm::*; /// This is either a data layer or an indirect layer /// @@ -22,7 +22,30 @@ use crate::vm::palloc::PagePool; /// If this is a data layer, then the entire page is naturally aligned /// data. By that I mean that a pow of 2 chunk of size n is n-byte /// aligned. -type Header = [u64; param::PAGE_SIZE/64]; + +// I'd use page size but rust won't let me +// type Header = [u64; 4096/64]; + +#[repr(C)] +#[derive(Clone, Copy)] +struct HeaderPair { + valid: u64, + down: *mut Header, +} + +#[repr(C)] +#[derive(Clone, Copy)] +struct Indirect { + level: u64, + valid: u64, + contents: [HeaderPair; 255], +} + +#[derive(Clone, Copy)] +union Header { + data: [u64; 4096/64], + indirect: Indirect, +} pub struct GAlloc { root: *mut Header, @@ -63,7 +86,7 @@ pub struct GAlloc { // } // not efficient. 
make a lower bit mask with said # of ones -fn make_mask(mut num_ones: usize) -> u64 { +fn make_mask(mut num_ones: u64) -> u64 { let mut out = 0; while num_ones > 0 { out = (out << 1) | 1; @@ -77,52 +100,51 @@ fn make_mask(mut num_ones: usize) -> u64 { fn round_up(mut s:u64) -> u64 { s -= 1; let mut i = 1; - while (i < 64) { + while i < 64 { s |= s >> i; } s + 1 } -fn get_page() -> *mut u8 { - unsafe { (*crate::vm::PAGEPOOL).palloc() } +fn get_page() -> Result<*mut usize, VmError> { + match unsafe { (*PAGEPOOL).palloc() } { + Err(e) => { + Err(e) + }, + Ok(page) => { + Ok(page.addr as *mut usize) + } + } +} + + +impl Drop for GAlloc { + fn drop(&mut self) { + panic!("Dropped your general allocator") + } } impl GAlloc { - pub fn new(page_allocator: PagePool) -> Self { - let page = as *mut Header; - page.0 = 1; - page.1 = 0; + pub fn new() -> Self { + let page = match get_page() { + Err(e) => { + panic!("Couldn't initialize the header for general alloc: {:?}", e) + }, + Ok(addr) => { + addr as *mut Header + } + }; + unsafe { + (*page).indirect.level = 1; + (*page).indirect.valid = 0; + } // level 1 page with no valid pages GAlloc { root: page } } - // readability helpers - fn set_level(mut self, level: u64) { - self.root.0 = level; - } - - fn set_valid_bits(mut self, bit_mask: u64) { - self.root.1 |= bit_mask; - } - - // clears the bits specified as arg - fn clear_valid_bits(mut self, bit_mask: u64) { - self.root.1 &= !bit_mask; - } - - fn level(self) -> u64 { - self.root.0 - } - - fn valid(self) -> u64 { - self.root.1 - } - - //TODO drop? What does that even mean here - - fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { + fn search_data_layer(size: u64, dl_mask: u64) -> Option { let size = round_up(size) / 8; // pow 2, in usize units let search_mask = make_mask(size); @@ -138,84 +160,206 @@ impl GAlloc { None } - fn walk_alloc(size: usize, root: *mut Header) -> Option<*mut usize> { - if root.level() != 1 { - for i in (2..64).step_by(2) { - match walk_alloc(size, *(root.i)) { - None => {}, - ret => { return ret; } + unsafe fn walk_alloc(size: usize, root: &mut Header) -> Result<*mut usize, VmError> { + let mut open: isize = -1; // neg if full, 0-31 for first empty + if root.indirect.level != 1 { + for i in 0..32 { + if (root.indirect.valid >> i) & 0x1 == 0{ + // invalid down link + if open != -1 { open = i; } + } else { + // this is a down link we can follow + let down = &mut (unsafe { + *root.indirect.contents[i as usize].down + }); + match Self::walk_alloc(size, down) { + Err(_) => {}, + ret => { return ret; } + } } } - return None; + // checked all valid down links and none of them are valid + // now check if we can add a valid one (was there a hole) + + if open != -1 { + let page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + // insert a new page + let p_ref = &mut (unsafe { *page }); + p_ref.indirect.level = root.indirect.level -1; + p_ref.indirect.valid = 0; + root.indirect.contents[open as usize].down = p_ref; + // root.indirect.contents[open as usize].valid is not needed, as p_ref is not a data layer + root.indirect.valid = root.indirect.valid | 1 << open; + return Self::walk_alloc(size, p_ref); + } + // no space and no holes for further intermediate levels + // in any case, pass the error up + return Err(VmError::GNoSpace); } else { - let open: isize = -1; // neg if full, 0-63 for first empty - for i in (2..64).step_by(2) { - if (root.valid() >> i) & 0x1 == 0 { - if open == -1 { open = i; } + // this 
is a level 1 layer, and points to data layers + for i in 0..32 { + let i = i as usize; + if (root.indirect.valid >> i) & 0x1 == 0 { + // this is a data page down link that isn't in use + if open == -1 { open = i as isize; } continue; } - match search_data_layer(size, root[i+1]) { + match Self::search_data_layer(size as u64, + root.indirect.contents[i].valid) { None => {}, Some(idx) => { - // found one, make pointer - let in_use = round_up(size) / 8; // how many to mark in use - root[i+1] = root[i+1] | (make_mask(in_use) << idx); - return Some(root[i].offset(idx)); + // found space, mark and make pointer + let in_use = round_up(size as u64) / 8; // how many to mark in use + root.indirect.contents[i].valid = + root.indirect.contents[i].valid | (make_mask(in_use) << idx); + let data_page = root.indirect.contents[i].down as *mut usize; + return Ok(unsafe { data_page.offset(idx as isize) }); } } } - // couldn't find anything, try to add another indirect layer - if open >= 0 { - let mut page = palloc() as *mut Header; - root[open] = page; - page[0] = root[0] - 1; - page[1] = 0; // entirely empty; - root[1] = root[1] | (1 << open); // down link valid - root[1] = root[1] & !(1 << (open+1)); // mask no longer valid - return walk_alloc(size, root[open]); + // couldn't find anything, try to add another data page + if open == -1 { + let open = open as usize; + let page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + root.indirect.contents[open].down = page; + root.indirect.contents[open].valid = 0; // all free + // don't set page meta, because this is a data page + root.indirect.valid = root.indirect.valid | (1 << open); // down link valid + return Self::walk_alloc(size, &mut (unsafe { + *(root.indirect.contents[open].down) + })); } - return None; + return Err(VmError::GNoSpace); } } - pub fn alloc(mut self, size: usize) -> Option<*mut usize> { + pub fn alloc(mut self, size: usize) -> Result<*mut usize, VmError> { assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); - match walk_alloc(size, self.root) { - Some(ret) => { Some(ret) }, - None => { - let new_root = palloc() as *mut Header; - new_root[0] = self.root[0] + 1; - new_root[1] = 0x4; // single valid entry, old root - new_root[2] = self.root; - self.root = new_root; - walk_alloc(size, self.root) + match unsafe {Self::walk_alloc(size, &mut (*self.root)) } { + Ok(ret) => { Ok(ret) }, + Err(_) => { + // alloc failed. 
try to bump the root up (note that + // this may also fail if the issue was out of pages) + let mut page: *mut Header = match get_page() { + Err(e) => { + return Err(e); + }, + Ok(addr) => { + addr as *mut Header + } + }; + unsafe { + (*page).indirect.level = (*self.root).indirect.level + 1; // bump level + (*page).indirect.valid = 1; // single valid page (old root) + (*page).indirect.contents[0] = HeaderPair { + valid: 0, // unused since root is not a data page + down: self.root, + }; + } + self.root = page; + match unsafe { Self::walk_alloc(size, &mut (*self.root)) } { + Err(e) => { + Err(e) + }, + Ok(addr) => { + Ok(addr) + } + } } } } - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut Header) { - let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if Header[0] != 1 { - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - walk_dealloc(ptr, size, root[i]); + // returns (did_we_find_it, should_we_keep_this_branch) + unsafe fn walk_dealloc(ptr: *mut usize, size: usize, root: &mut Header) -> (bool, bool) { + let test_ptr = ptr as usize & !(PAGE_SIZE - 1); // should match data_page base pointer + if root.indirect.level != 1 { + // down links are not data pages + let valid = root.indirect.valid; + if valid == 0 { + return (false, false); + } + let mut should_we_keep = false; + for i in 0..32 { + if (valid >> i) & 1 == 0 {continue;} + match Self::walk_dealloc(ptr, size, &mut (*root.indirect.contents[i].down)) { + (true, true) => { + return (true, true); + }, + (false, true) => { + // keep searching + should_we_keep = true; + }, + (found, false) => { + // trim branch and maybe report findings + root.indirect.valid = root.indirect.valid & !(1 << i); + // TODO free the said down link + if root.indirect.valid == 0 { + // nothing more to check, report findings + return (found, false); + } else if found { + return (true, true); + } + } + } + } + if should_we_keep { + return (false, true); + } else { + return (false, false); } } else { - // bottom level, search for match - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - if root[i] as usize == test_ptr { + // downlinks are data pages, search for match + let valid = root.indirect.valid; + for i in 0..32 { + if (valid >> i) & 1 == 0 {continue;} + if root.indirect.contents[i].down as usize == test_ptr { // match! 
let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(round_up(size) / 8); - root[i+1] = root[i+1] & !(clear_mask << offset); + let clear_mask = make_mask(round_up(size as u64) / 8); + root.indirect.contents[i].valid = + root.indirect.contents[i].valid & !(clear_mask << offset); + if root.indirect.contents[i].valid == 0 { + // free data page + // TODO free page + root.indirect.valid = valid & !(1 << i); + if root.indirect.valid == 0 { + // cleanup this indirect layer + return (true, false); + } else { + return (true, true); + } + } else { + return (true, true); + } } } + if valid == 0 { + return (false, false); + } else { + return (false, true); + } } } - pub fn dealloc(mut self, ptr: *mut usize, size: usize) { - walk_dealloc(ptr, size, self.root); + pub fn dealloc(&mut self, ptr: *mut usize, size: usize) { + unsafe { + // TODO consider mechanism for undoing root bump / when to do that + Self::walk_dealloc(ptr, size, &mut (*self.root)); + } } } From bccf25a43b5e123c8a3797a915c86f0ceb531f4a Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 6 Mar 2023 22:00:29 -0800 Subject: [PATCH 11/57] Rebase onto 25 --- src/mem.rs | 36 +++++++++++++++++++++++++----------- src/vm.rs | 2 ++ src/vm/galloc.rs | 39 ++------------------------------------- 3 files changed, 29 insertions(+), 48 deletions(-) diff --git a/src/mem.rs b/src/mem.rs index db0fb83..50b11a9 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,21 +1,34 @@ //! Kernel memory utilities use core::ops::{Deref, DerefMut}; +use core::mem::size_of; +use crate::vm::GALLOC; -/// Kernel heap allocated pointer. No guarantees on unique ownership or concurrent access. +/// Kernel heap allocated pointer. No guarantees on unique ownership +/// or concurrent access. pub struct Kbox { inner: *mut T, // NonNull, try NonNull later for lazy allocation impl. + size: usize, } impl Kbox { - pub fn new(mut data: T) -> Self { + pub fn new(data: T) -> Self { // How the allocater interface should be made use of. // Current constraints on allocator mean size_of::() must be less than 4Kb - // let new_ptr = global allocator, allocate us size_of::() bytes, please. - // new_ptr.write(data); <-- initialize newly allocated memory with our inner value. - //let new_ptr: *mut T = core::ptr::null_mut(); // Delete placeholder code. - let inner = &mut data; - Self { - inner, + let size = size_of::(); + match unsafe { (*GALLOC).alloc(size) } { + Err(e) => { + panic!("Kbox can't allocate: {:?}", e) + }, + Ok(ptr) => { + let new_ptr = ptr as *mut T; + unsafe { + *new_ptr = data; // <-- initialize newly allocated memory with our inner value. + Self { + inner: new_ptr, + size + } + } + } } } } @@ -25,7 +38,7 @@ unsafe impl Sync for Kbox {} impl Deref for Kbox { type Target = T; - + fn deref(&self) -> &Self::Target { unsafe { &*self.inner @@ -44,7 +57,8 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - // How to use the allocator interface. - // dealloc(self.inner) + unsafe { + (*GALLOC).dealloc(self.inner as *mut usize, self.size); + } } } diff --git a/src/vm.rs b/src/vm.rs index a0e4bc2..1e35ebc 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -7,6 +7,7 @@ pub mod galloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; +use galloc::GAlloc; use ptable::{kpage_init, PageTable}; use process::Process; use core::cell::OnceCell; @@ -47,6 +48,7 @@ pub struct TaskNode { /// kernel's page table struct. 
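The Kbox above follows the usual owning-pointer shape: take a chunk from the allocator, move the value into it, hand out references through Deref/DerefMut, and return the chunk in Drop. A host-side sketch of that same shape, with std::alloc standing in for GALLOC; DemoBox and Pair are illustrative names only, not kernel types.

use std::alloc::{alloc, dealloc, Layout};
use std::ops::{Deref, DerefMut};

struct DemoBox<T> {
    inner: *mut T,
}

impl<T> DemoBox<T> {
    fn new(data: T) -> Self {
        let layout = Layout::new::<T>();
        let inner = unsafe { alloc(layout) as *mut T };
        assert!(!inner.is_null(), "allocation failed");
        unsafe { inner.write(data) }; // initialize the fresh chunk with the value
        DemoBox { inner }
    }
}

impl<T> Deref for DemoBox<T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.inner }
    }
}

impl<T> DerefMut for DemoBox<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.inner }
    }
}

impl<T> Drop for DemoBox<T> {
    fn drop(&mut self) {
        unsafe {
            self.inner.drop_in_place(); // run T's destructor in place
            dealloc(self.inner as *mut u8, Layout::new::<T>()); // give the chunk back
        }
    }
}

struct Pair {
    a: u64,
    b: u64,
}

fn main() {
    let mut p = DemoBox::new(Pair { a: 1, b: 2 });
    p.a = 10; // field access auto-derefs through DerefMut
    assert_eq!(p.a + p.b, 12);
} // p is dropped here and its chunk is freed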
pub fn init() -> Result<(), PagePool>{ unsafe { PAGEPOOL.set(PagePool::new(bss_end(), dram_end()))?; } + log!(Debug, "Successfully initialized kernel page pool..."); // Map text, data, heap into kernel memory match kpage_init() { diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index e8a09ab..576c67d 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -51,40 +51,6 @@ pub struct GAlloc { root: *mut Header, } -// gives the index of the lowest set bit or None -// fn lowest_set_bit(field: u64) -> Option { -// let mut i = 0; -// while (i < 64 && -// !((field >> i) & 0x1)) { -// i += 1; -// } -// match i { -// 64 => { -// None -// }, -// _ => { -// i -// } -// } -// } - -//same but for highest -// fn highest_set_bit(field: u64) -> Option { -// let mut i = 63; -// while (i >= 0 && -// !((field >> i) & 0x1)) { -// i -= 1; -// } -// match i { -// 0 => { -// None -// }, -// _ => { -// i -// } -// } -// } - // not efficient. make a lower bit mask with said # of ones fn make_mask(mut num_ones: u64) -> u64 { let mut out = 0; @@ -99,8 +65,7 @@ fn make_mask(mut num_ones: u64) -> u64 { /* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ fn round_up(mut s:u64) -> u64 { s -= 1; - let mut i = 1; - while i < 64 { + for i in 1..64 { s |= s >> i; } s + 1 @@ -248,7 +213,7 @@ impl GAlloc { } } - pub fn alloc(mut self, size: usize) -> Result<*mut usize, VmError> { + pub fn alloc(&mut self, size: usize) -> Result<*mut usize, VmError> { assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); match unsafe {Self::walk_alloc(size, &mut (*self.root)) } { Ok(ret) => { Ok(ret) }, From 732eb2c3f584b40c5640f97d5a80023e1a942d7b Mon Sep 17 00:00:00 2001 From: tmu Date: Tue, 7 Mar 2023 10:13:14 -0800 Subject: [PATCH 12/57] align with new palloc and integrate with mem --- .gitignore | 2 + src/general-alloc.rs | 196 ------------------------------------------- src/mem.rs | 8 +- src/vm.rs | 14 ++++ src/vm/galloc.rs | 2 +- 5 files changed, 20 insertions(+), 202 deletions(-) delete mode 100644 src/general-alloc.rs diff --git a/.gitignore b/.gitignore index 4f8c6f7..0b8eca8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ /target *~ +*.o +*.ELF .rust* diff --git a/src/general-alloc.rs b/src/general-alloc.rs deleted file mode 100644 index 0cf7001..0000000 --- a/src/general-alloc.rs +++ /dev/null @@ -1,196 +0,0 @@ -/// General allocation, basically malloc/kalloc type thing -use crate::vm::palloc; -use crate::hw::param::*; -use core::mem::size_of; -use core::assert; - -/// This is either a data layer or an indirect layer -/// -/// If it is indirect, then the first u64 is the level. Level n points -/// to n-1, and 1 points to data layers -/// -/// If it is indirect, the second u64 is the valid/in use bits of the -/// corresponding u64s in the current header. -/// -/// If this is an indirect header, then all futher u64 are paried. The -/// even indexed (first) u64 is a pointer dwon one level. The odd -/// (second) one is the valid mask for that link. If the link is to a -/// data layer, then it corresponds to the parts of the data layer in -/// use. If the link is to another indirect layer, then ignore this -/// and decend and check the second u64 of that layer instead. (In -/// fact it should be marked invalid.) -/// -/// If this is a data layer, then the entire page is naturally aligned -/// data. By that I mean that a pow of 2 chunk of size n is n-byte -/// aligned. 
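The layout comment above leans on two bit tricks that show up all over this allocator: rounding a request up to the next power of two (which is what makes a chunk naturally aligned) and building a mask of N low ones to mark chunks in use. A standalone check mirroring round_up and make_mask; the 8-byte slot size matches this early version (a later patch moves galloc to 64-byte chunks).

fn round_up(mut s: u64) -> u64 {
    s -= 1;
    for i in 1..64 {
        s |= s >> i; // smear the highest set bit into every lower position
    }
    s + 1
}

fn make_mask(mut num_ones: u64) -> u64 {
    let mut out = 0;
    while num_ones > 0 {
        out = (out << 1) | 1;
        num_ones -= 1;
    }
    out
}

fn main() {
    assert_eq!(round_up(3), 4);
    assert_eq!(round_up(24), 32);
    assert_eq!(round_up(64), 64);
    // a 24-byte request becomes one 32-byte, 32-byte-aligned chunk, and
    // make_mask(32 / 8) flags its four 8-byte slots as in use
    assert_eq!(make_mask(round_up(24) / 8), 0b1111);
}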
-type header = [u64; PAGE_SIZE/64]; - -pub struct GAlloc { - root: *mut header, -} - -// gives the index of the lowest set bit or None -fn lowest_set_bit(field: u64) -> Option { - let mut i = 0; - while (i < 64 && - !((field >> i) & 0x1)) { - i += 1; - } - match i { - 64 => { - None - }, - _ => { - i - } - } -} - -//same but for highest -fn highest_set_bit(field: u64) -> Option { - let mut i = 63; - while (i >= 0 && - !((field >> i) & 0x1)) { - i -= 1; - } - match i { - 0 => { - None - }, - _ => { - i - } - } -} - -// not efficient. make a lower bit mask with said # of ones -fn make_mask(mut num_ones: usize) -> u64 { - let mut out = 0; - while num_ones > 0 { - out = (out << 1) | 1; - num_ones -= 1; - } - out -} - -// pow of two that fits s -/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ -fn round_up(mut s:u64) -> u64 { - s -= 1; - let mut i = 1; - while (i < 64) { - s |= s >> i; - } - s + 1 -} - -impl GAlloc { - pub fn new() -> Self { - let page = palloc() as *mut header; - page.0 = 1; - page.1 = 0; - // level 1 page with no valid pages - GAlloc { - root: page - } - } - - //TODO drop? What does that even mean here - - fn search_data_layer(mut size: u64, dl_mask: u64) -> Option { - let size = round_up(size) / 8; // pow 2, in usize units - let search_mask = make_mask(size); - - let mut i = 0; - while i < 64 { - if (dl_mask >> i) & search_mask == 0 { - // clear bits - return Some(i); - } else { - i += size; // skip size places - } - } - None - } - - fn walk_alloc(size: usize, root: *mut header) -> Option<*mut usize> { - if root[0] != 1 { - for i in (2..64).step_by(2) { - match walk_alloc(size, *(root.i)) { - None => {}, - ret => { return ret; } - } - } - return None; - } else { - let open: isize = -1; // neg if full, 0-63 for first empty - for i in (2..64).step_by(2) { - if (root[1] >> i) & 0x1 == 0 { - if open == -1 { open = i; } - continue; - } - - match search_data_layer(size, root[i+1]) { - None => {}, - Some(idx) => { - // found one, make pointer - let in_use = round_up(size) / 8; // how many to mark in use - root[i+1] = root[i+1] | (make_mask(in_use) << idx); - return Some(root[i].offset(idx)); - } - } - } - // couldn't find anything, try to add another indirect layer - if open >= 0 { - let mut page = palloc() as *mut header; - root[open] = page; - page[0] = root[0] - 1; - page[1] = 0; // entirely empty; - root[1] = root[1] | (1 << open); // down link valid - root[1] = root[1] & !(1 << (open+1)); // mask no longer valid - return walk_alloc(size, root[open]); - } - return None; - } - } - - pub fn alloc(mut self, size: usize) -> Option<*mut usize> { - assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); - match walk_alloc(size, self.root) { - Some(ret) => { Some(ret) }, - None => { - let new_root = palloc() as *mut header; - new_root[0] = self.root[0] + 1; - new_root[1] = 0x4; // single valid entry, old root - new_root[2] = self.root; - self.root = new_root; - walk_alloc(size, self.root) - } - } - } - - fn walk_dealloc(ptr: *mut usize, size: usize, root: *mut header) { - let test_ptr = ptr as usize & !(PAGE_SIZE - 1); - if header[0] != 1 { - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - walk_dealloc(ptr, size, root[i]); - } - } else { - // bottom level, search for match - for i in (2..64).step_by(2) { - if root[1] >> i == 0 {continue;} - if root[i] as usize == test_ptr { - // match! 
- let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(round_up(size) / 8); - root[i+1] = root[i+1] & !(clear_mask << offset); - } - } - } - } - - pub fn dealloc(mut self, ptr: *mut usize, size: usize) { - walk_dealloc(ptr, size, self.root); - } -} diff --git a/src/mem.rs b/src/mem.rs index 50b11a9..ab575d8 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,7 +1,7 @@ //! Kernel memory utilities use core::ops::{Deref, DerefMut}; use core::mem::size_of; -use crate::vm::GALLOC; +use crate::vm::{galloc, gdealloc}; /// Kernel heap allocated pointer. No guarantees on unique ownership /// or concurrent access. @@ -15,7 +15,7 @@ impl Kbox { // How the allocater interface should be made use of. // Current constraints on allocator mean size_of::() must be less than 4Kb let size = size_of::(); - match unsafe { (*GALLOC).alloc(size) } { + match galloc(size) { Err(e) => { panic!("Kbox can't allocate: {:?}", e) }, @@ -57,8 +57,6 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - unsafe { - (*GALLOC).dealloc(self.inner as *mut usize, self.size); - } + gdealloc(self.inner as *mut usize, self.size); } } diff --git a/src/vm.rs b/src/vm.rs index 1e35ebc..72b5a10 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -15,6 +15,7 @@ use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. //static mut PAGEPOOL: PagePool = PagePool::new(bss_end(), dram_end()); static mut PAGEPOOL: OnceCell = OnceCell::new(); +static mut GALLOC: OnceCell = OnceCell::new(); /// Global kernel page table. pub static mut KPGTABLE: *mut PageTable = core::ptr::null_mut(); @@ -48,6 +49,7 @@ pub struct TaskNode { /// kernel's page table struct. pub fn init() -> Result<(), PagePool>{ unsafe { PAGEPOOL.set(PagePool::new(bss_end(), dram_end()))?; } + unsafe { GALLOC.set(GAlloc::new()); } log!(Debug, "Successfully initialized kernel page pool..."); // Map text, data, heap into kernel memory @@ -62,6 +64,18 @@ pub fn init() -> Result<(), PagePool>{ Ok(()) } +pub fn galloc(size: usize) -> Result<*mut usize, VmError> { + unsafe { + GALLOC.get_mut().unwrap().alloc(size) + } +} + +pub fn gdealloc(ptr: *mut usize, size: usize) { + unsafe { + GALLOC.get_mut().unwrap().dealloc(ptr, size) + } +} + pub unsafe fn test_palloc() { let allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); //println!("allocd addr: {:?}", allocd.addr); diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index 576c67d..fa81087 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -72,7 +72,7 @@ fn round_up(mut s:u64) -> u64 { } fn get_page() -> Result<*mut usize, VmError> { - match unsafe { (*PAGEPOOL).palloc() } { + match unsafe { PAGEPOOL.get_mut().unwrap().palloc() } { Err(e) => { Err(e) }, From a677d02b2e93b49d68a938702dfafda8a419bacb Mon Sep 17 00:00:00 2001 From: tmu Date: Wed, 8 Mar 2023 12:17:21 -0800 Subject: [PATCH 13/57] testing galloc and broken stack --- .gdbinit | 1 + src/alloc.rs | 391 ----------------------------------------------- src/lib.rs | 6 +- src/vm.rs | 41 ++++- src/vm/galloc.rs | 12 +- 5 files changed, 47 insertions(+), 404 deletions(-) delete mode 100644 src/alloc.rs diff --git a/.gdbinit b/.gdbinit index 6bd8185..f79dab7 100644 --- a/.gdbinit +++ b/.gdbinit @@ -5,3 +5,4 @@ add-inferior info threads set schedule-multiple on set output-radix 16 +set print pretty on diff --git a/src/alloc.rs b/src/alloc.rs deleted file mode 100644 index ea1b8c1..0000000 --- a/src/alloc.rs +++ /dev/null @@ -1,391 +0,0 @@ -/// This is the usable wrapper for alloc. 
It implements a general -/// allocate that could be used for pages or for small scale kernel -/// stuff. See Kalloc::new. -/// -/// -/// TODO fix the data path for things like start and end, I shouldn't -/// have to pass them around, but I also don't want them inside the -/// headers themselves. -/// -/// Externally, you should only use the Kalloc new and GlobalAlloc -/// functions. The rest may have assumptions that are not enforced by -/// the compiler -use core::alloc::*; -use core::cell::UnsafeCell; -use core::mem::{size_of, align_of_val}; - -use core::cmp::max; -use core::ptr::null_mut; - -/// General util function. Enforces the same meaning on start and end -/// everywhere. -fn out_of_bounds(addr: usize, heap_start: usize, heap_end: usize) -> bool { - addr < heap_start || addr >= heap_end -} - -/// Stores the overhead info for a chunk. -/// -/// This is directly before the chunk allocated for user memory that -/// it corresponds to. -#[derive(Debug)] -#[repr(C)] -pub struct KChunkHeader { - size: usize, // Size of the chunk including this header - alignment_offset: usize, // Number of bytes between end of header and beginning of user data - layout: Layout, // What alloc/realloc call was this a response to? - is_free: bool, // Is this chunk in use or not? -} - -impl KChunkHeader { - /// setup a header for a chunk of this size (including header) - /// that is not in use. - fn init_free(&mut self, size: usize) { - self.size = size; - self.is_free = true; - self.alignment_offset = 0; - } - - /// How many bytes between the header and the user data? - fn alignment_offset(&self) -> usize { - self.alignment_offset - } - - /// Total size of chunk with header, padding, and data - fn size(&self) -> usize { - self.size - } - - /// Size of the usable part of the chunk, the padding + the user data - fn size_with_padding(&self) -> usize { - self.size() - size_of::() - } - - /// The number of bytes the user can safely use. Does NOT include - /// padding or header. - fn user_size(&self) -> usize { - self.size() - size_of::() - self.alignment_offset() - } - - /// What call was this chunk grabbed in response to? - fn layout(&self) -> Layout { - self.layout - } - - /// Is this chunk up for grabs? - fn is_free(&self) -> bool { - self.is_free - } - - /// Update whether or not this chunk is in use. - fn set_is_free(&mut self, free: bool) { - self.is_free = free; - } - - /// Update the user visible size of this chunk by resizing the chunk. - fn set_user_size(&mut self, user_size: usize) { - self.size = user_size + size_of::() + self.alignment_offset(); - } - - /// Update size of this chunk. - fn set_size(&mut self, size: usize) { - self.size = size; - } - - /// Set the fingerprint of the call that put this into use. - fn set_layout(&mut self, layout: Layout) { - self.layout = layout; - } - - /// Set the padding between the header and the user data. - fn set_alignment_offset(&mut self, offset: usize) { - self.alignment_offset = offset; - } - - /// Gives a pointer to the first byte of padding. Check the size yourself. - /// - /// TODO boy this is not rustish. No idea if that's fixable though. - fn start_of_padding(&mut self) -> *mut u8 { - (((self as *mut KChunkHeader) as usize) + size_of::()) as *mut u8 - } - - /// Give a pointer to the start of user data. Check the size yourself. - fn user_data(&mut self) -> *mut u8 { - (((self as *mut KChunkHeader) as usize) + size_of::() + self.alignment_offset()) - as *mut u8 - } - - /// Returns a pointer to the next header or None. 
Takes the info - /// about the heap it is in to make the call about if it is at the - /// end of the header list. See file top comment. - unsafe fn next_chunk( - &mut self, - heap_start: usize, - heap_end: usize, - ) -> Option<*mut KChunkHeader> { - let next = (self as *mut KChunkHeader).byte_add(self.size()); - if out_of_bounds(next as usize, heap_start, heap_end) { - None - } else { - Some(next) - } - } - - /// Only safe on free chunks. Tries to merge this chunk with the - /// next if it is also free. Takes bounds for safety, see file top - /// comment. - unsafe fn attempt_merge(&mut self, heap_start: usize, heap_end: usize) { - let pos_next = self.next_chunk(heap_start, heap_end); - match pos_next { - Some(next_chunk) => { - if (*next_chunk).is_free() { - self.set_user_size(self.user_size() + (*next_chunk).size()); - } else { - // do nothing - } - } - None => { - // do nothing - } - } - } - - /// Does this chunk match this caller fingerprint? - fn matches_layout(&self, layout: &Layout) -> bool { - self.layout() == *layout - } -} - -// We want to be able to for loop over these headers, as a painless way to deal with them -struct KChunkIter { - start: usize, // heap start for this run of chunks - end: usize, // heap end for this run of chunks - current: Option<*mut KChunkHeader>, // where are we in the run, the value returned by the next call to next() -} - -impl Iterator for KChunkIter { - type Item = *mut KChunkHeader; - - fn next(&mut self) -> Option { - match self.current { - None => None, - Some(chunk) => unsafe { - self.current = (*self.current.unwrap()).next_chunk(self.start, self.end); - Some(chunk) - }, - } - } -} - -/// one allocation pool. Basically one "heap" -pub struct Kalloc { - heap_start: usize, // start of the managed region of memory - heap_end: usize, // end of the managed region of memory - pool: UnsafeCell<*mut KChunkHeader>, // the actual data as it should be accessed -} - -// Just so we can put it in a mutex -unsafe impl Sync for Kalloc {} - -impl Kalloc { - /// Make a new managed region given by the bounds. They are inclusive and exclusive respectively. - pub fn new(start: usize, end: usize) -> Self { - let first_chunk = start as *mut KChunkHeader; - unsafe { - (*first_chunk).init_free(end - start); - } - Kalloc { - heap_start: start, - heap_end: end, - pool: UnsafeCell::new(start as *mut KChunkHeader), - } - } - - /// Iterate over the chunks of this region. - fn mut_iter(&self) -> KChunkIter { - unsafe { - KChunkIter { - start: self.heap_start, - end: self.heap_end, - current: Some(*self.pool.get()), - } - } - } - - // TODO this and next can be replaced with stuff from core::pointer I think - - pub fn adjust_size_with_align(size: usize, align: usize) -> usize { - let mask: usize = align - 1; - if (size & mask) != 0 { - // has low order bits - (size & !mask) + align - } else { - // already aligned - size - } - } - - /// Adjust a pointer forward until it matches alignment at least. - /// - /// Returns a tuple of the changed pointer and the number of bytes - /// forward that it was moved - pub fn adjust_ptr_with_align(ptr: usize, align: usize) -> (usize , usize) { - let mask: usize = align - 1; - let addr: usize = ptr as usize; - if (addr & mask) != 0 { - // low order bits - ((addr & !mask) + align, align - (addr & mask)) - } else { - (ptr, 0) - } - } - - pub fn print_alloc(&self) { - for cptr in self.mut_iter() { - unsafe { - let chunk: &KChunkHeader = &*cptr; - print!("{:?}", *chunk); - } - } - } -} - -// Does what it says. 
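The alignment helpers a little above are easy to sanity-check on their own: for a power-of-two alignment, clear the low bits, bump up to the next multiple, and report how much padding that added (the second element of the returned tuple).

fn adjust_ptr_with_align(addr: usize, align: usize) -> (usize, usize) {
    let mask = align - 1;
    if addr & mask != 0 {
        ((addr & !mask) + align, align - (addr & mask)) // misaligned: round up
    } else {
        (addr, 0) // already aligned, no padding needed
    }
}

fn main() {
    assert_eq!(adjust_ptr_with_align(0x1000, 16), (0x1000, 0));
    assert_eq!(adjust_ptr_with_align(0x1003, 16), (0x1010, 13));
}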
-unsafe impl GlobalAlloc for Kalloc { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let internal_align = max(layout.align(), size_of::()); - let next_header_align = align_of_val(self); - - for cptr in self.mut_iter() { - let chunk: &mut KChunkHeader = &mut *cptr; - let first_usable_byte = chunk.start_of_padding() as usize; - let aligned_ptr = Kalloc::adjust_ptr_with_align( - first_usable_byte, internal_align).0 as usize; - let aligned_ptr_size = aligned_ptr + layout.size(); - let required_size = aligned_ptr_size - first_usable_byte as usize; - // ^ is how big the current chunk needs to be to accomidate the current request - let padded_size = chunk.size_with_padding(); - // ^ What is the maximum it can fit now? - if padded_size >= required_size && chunk.is_free() { - // this will work - - // min number bytes to the next header from the bottom of this header - let skip: usize = required_size; - - let next_header = Self::adjust_ptr_with_align( - first_usable_byte + skip, - next_header_align).0; - - - let new_chunk: *mut KChunkHeader = next_header as *mut KChunkHeader; - (*new_chunk).init_free(padded_size - skip); - // does not set layout of new chunk - - (*chunk).set_size(skip + size_of::()); - (*chunk).set_is_free(false); - (*chunk).set_layout(layout); - let ptr_and_offset = - Kalloc::adjust_ptr_with_align(first_usable_byte, internal_align); - chunk.set_alignment_offset(ptr_and_offset.1); - return ptr_and_offset.0 as *mut u8; - - } - } - return null_mut::(); // Can't find any space - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - let mut previous: Option<*mut KChunkHeader> = None; - for cptr in self.mut_iter() { - let chunk: &mut KChunkHeader = &mut *cptr; - if (*chunk).matches_layout(&layout) && (*chunk).user_data() == ptr { - // this is it - chunk.set_is_free(true); - chunk.attempt_merge(self.heap_start, self.heap_end); - if previous != None && (*previous.unwrap()).is_free() { - (*previous.unwrap()).attempt_merge(self.heap_start, self.heap_end); - } - return; - } else { - // keep moving - previous = Some(chunk as *mut KChunkHeader); - } - } - panic!("Dealloc Failure: Chunk not found. Have you changed your pointer or layout?"); - } - - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let ptr = self.alloc(layout); - ptr.write_bytes(0, layout.size()); - ptr - } - - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - return null_mut::(); - // let previous: Option<*mut KChunkHeader> = None; - // let internal_align = max(layout.align(), size_of::()); - // let internal_size = Kalloc::adjust_size_with_align(new_size, internal_align); - - // for cptr in self.mut_iter() { - // let chunk = &mut *cptr; - // if chunk.matches_layout(&layout) && (*chunk).user_data() == ptr { - // // this is it - // chunk.set_is_free(true); - // chunk.attempt_merge(self.heap_start, self.heap_end); - // if previous != None && (*previous.unwrap()).is_free() { - // let p_chunk = &mut *previous.unwrap(); - // p_chunk.attempt_merge(self.heap_start, self.heap_end); - // if p_chunk.size_with_padding() > internal_size { - // // we can use prev: copy and be wary of - // // overlap. 
we are writing forward from source - // // to dest, and dest comes before source, so - // // we should be fine - // p_chunk.set_is_free(false); - // p_chunk - // .set_layout(Layout::from_size_align(new_size, layout.align()).unwrap()); - - // let ptr_and_offset = Kalloc::adjust_ptr_with_align( - // p_chunk.start_of_padding(), - // internal_align, - // ); - // p_chunk.set_alignment_offset(ptr_and_offset.1); - - // let dest = p_chunk.user_data(); - // let src = chunk.user_data(); - // for off in 0..chunk.user_size() { - // dest.byte_offset(off as isize) - // .write(src.byte_offset(off as isize).read()); - // } - // return ptr_and_offset.0; - // } - // } else if chunk.size_with_padding() > internal_size { - // // prev wasn't free or didn't exist. But this - // // chunk that has been freed and merged is big - // // enough. We can avoid any data movement - // chunk.set_is_free(false); - // chunk.set_layout(Layout::from_size_align(new_size, layout.align()).unwrap()); - // return chunk.user_data(); - // } else { - // // neither prev nor the chunk we freed worked, just straight alloc - - // // TODO this re-traverses part of the array. fix it. - - // chunk.set_is_free(false); // avoid clobbering side effects of future alloc optimizations - - // let dest = - // self.alloc(Layout::from_size_align(new_size, layout.align()).unwrap()); - // if dest == null_mut::() { - // panic!("Realloc Failure: Couldn't allocate a new chunk."); - // } - // let src = chunk.user_data(); - // for off in 0..chunk.user_size() { - // dest.byte_offset(off as isize) - // .write(src.byte_offset(off as isize).read()); - // } - // chunk.set_is_free(true); - // return dest; - // } - // } - // } - // panic!("Realloc Failure: Chunk not found. Have you changed your pointer or layout?") - } -} diff --git a/src/lib.rs b/src/lib.rs index 55e169e..70a6a71 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,7 +110,11 @@ fn main() -> ! { } log!(Info, "Initialized the kernel page table..."); log!(Debug, "Testing page allocation and freeing..."); - unsafe { vm::test_palloc() }; + unsafe { + vm::test_palloc(); + } + log!(Debug, "Testing general subpage allocation..."); + vm::test_galloc(); } else { //Interrupt other harts to init kpgtable. trap::init(); diff --git a/src/vm.rs b/src/vm.rs index 72b5a10..b4f2d8e 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -4,6 +4,7 @@ pub mod ptable; pub mod process; pub mod galloc; +use core::ptr::null_mut; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; @@ -48,8 +49,20 @@ pub struct TaskNode { /// Finally we set the global kernel page table `KPGTABLE` variable to point to the /// kernel's page table struct. 
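The OnceCell-backed globals that init() below relies on follow the standard write-once pattern: the first set() succeeds and any later set() hands the value back as an Err, which is exactly what the "vm double init." panics catch. A tiny host-side check of that behavior; Pool here is just a placeholder type.

use core::cell::OnceCell;

struct Pool;

fn main() {
    let cell: OnceCell<Pool> = OnceCell::new();
    assert!(cell.set(Pool).is_ok()); // first initialization wins
    assert!(cell.set(Pool).is_err()); // a second initialization is rejected
    assert!(cell.get().is_some()); // the stored value stays available
}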
pub fn init() -> Result<(), PagePool>{ - unsafe { PAGEPOOL.set(PagePool::new(bss_end(), dram_end()))?; } - unsafe { GALLOC.set(GAlloc::new()); } + unsafe { + match PAGEPOOL.set(PagePool::new(bss_end(), dram_end())) { + Ok(_) => {}, + Err(_) => { + panic!("vm double init.") + } + } + match GALLOC.set(GAlloc::new()) { + Ok(_) => {}, + Err(_) => { + panic!("vm double init.") + } + } + } log!(Debug, "Successfully initialized kernel page pool..."); // Map text, data, heap into kernel memory @@ -66,13 +79,17 @@ pub fn init() -> Result<(), PagePool>{ pub fn galloc(size: usize) -> Result<*mut usize, VmError> { unsafe { - GALLOC.get_mut().unwrap().alloc(size) + GALLOC.get_mut() + .unwrap() + .alloc(size) } } pub fn gdealloc(ptr: *mut usize, size: usize) { unsafe { - GALLOC.get_mut().unwrap().dealloc(ptr, size) + GALLOC.get_mut() + .unwrap() + .dealloc(ptr, size) } } @@ -83,3 +100,19 @@ pub unsafe fn test_palloc() { let _ = PAGEPOOL.get_mut().unwrap().pfree(allocd); log!(Debug, "Successful test of page allocation and freeing..."); } + +pub fn test_galloc() { + let first = galloc(8).unwrap(); + println!("asked for 8 bytes, got {:?}", first as usize); + let second = galloc(16).unwrap(); + println!("asked for 16 bytes got {:?}", second as usize); + gdealloc(first, 8); + let third = galloc(16).unwrap(); + println!("freed first and asked for 16 bytes, should match first {:?}", third as usize); + + let mut hold: [*mut usize; 256] = [null_mut(); 256]; + for i in 0.. { + hold[i] = galloc(16).unwrap(); + } + println!("{:?} and {:?} should not be in the same page", first, hold[255]); +} diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index fa81087..1ec74fd 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -134,9 +134,7 @@ impl GAlloc { if open != -1 { open = i; } } else { // this is a down link we can follow - let down = &mut (unsafe { - *root.indirect.contents[i as usize].down - }); + let down = &mut *root.indirect.contents[i as usize].down; match Self::walk_alloc(size, down) { Err(_) => {}, ret => { return ret; } @@ -156,7 +154,7 @@ impl GAlloc { } }; // insert a new page - let p_ref = &mut (unsafe { *page }); + let p_ref = &mut *page; p_ref.indirect.level = root.indirect.level -1; p_ref.indirect.valid = 0; root.indirect.contents[open as usize].down = p_ref; @@ -186,7 +184,7 @@ impl GAlloc { root.indirect.contents[i].valid = root.indirect.contents[i].valid | (make_mask(in_use) << idx); let data_page = root.indirect.contents[i].down as *mut usize; - return Ok(unsafe { data_page.offset(idx as isize) }); + return Ok(data_page.offset(idx as isize)); } } } @@ -205,9 +203,7 @@ impl GAlloc { root.indirect.contents[open].valid = 0; // all free // don't set page meta, because this is a data page root.indirect.valid = root.indirect.valid | (1 << open); // down link valid - return Self::walk_alloc(size, &mut (unsafe { - *(root.indirect.contents[open].down) - })); + return Self::walk_alloc(size, &mut *(root.indirect.contents[open].down)); } return Err(VmError::GNoSpace); } From d5078b87d6bc4af669339576c94c22db0158d3e2 Mon Sep 17 00:00:00 2001 From: tmu Date: Wed, 8 Mar 2023 12:23:55 -0800 Subject: [PATCH 14/57] missed one --- src/vm/galloc.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index 85a7938..1ec74fd 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -184,11 +184,7 @@ impl GAlloc { root.indirect.contents[i].valid = root.indirect.contents[i].valid | (make_mask(in_use) << idx); let data_page = root.indirect.contents[i].down as *mut usize; -<<<<<<< HEAD 
return Ok(data_page.offset(idx as isize)); -======= - return Ok(unsafe { data_page.offset(idx as isize) }); ->>>>>>> origin/6-Allocator } } } From 228ed25f3c073d474af2d2a8f62310057e759856 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Wed, 8 Mar 2023 12:45:15 -0800 Subject: [PATCH 15/57] Add page align safety check to pfree --- src/vm/palloc.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index b5a32a8..b259df7 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -54,6 +54,10 @@ impl PagePool { /// Free a page of physical memory by inserting into the doubly /// linked free list in order. pub fn pfree(&mut self, page: Page) -> Result<(), VmError> { + if !is_multiple(page.addr.addr(), PAGE_SIZE) { + panic!("Free page addr not page aligned.") + } + let mut pool = self.pool.lock(); Ok(pool.free_page(page)) } From 76a2828d43a8f4d2a6d40dd611d093216bbf720e Mon Sep 17 00:00:00 2001 From: tmu Date: Wed, 8 Mar 2023 21:21:07 -0800 Subject: [PATCH 16/57] palloc free fix and misc --- src/log.rs | 3 ++- src/trap.rs | 4 ++-- src/vm.rs | 9 +++------ src/vm/galloc.rs | 45 +++++++++++++++++++++++++++++++++------------ src/vm/palloc.rs | 14 ++++++++++++-- 5 files changed, 52 insertions(+), 23 deletions(-) diff --git a/src/log.rs b/src/log.rs index 4f8d17a..d786845 100644 --- a/src/log.rs +++ b/src/log.rs @@ -5,7 +5,8 @@ macro_rules! print ($($args:tt)+) => ({ use core::fmt::Write; use crate::uart; - let _ = write!(uart::WRITER.lock(), $($args)+); + // let _ = write!(uart::WRITER.lock(), $($args)+); + let _ = write!(uart::Uart::new().lock(), $($args)+); }); } diff --git a/src/trap.rs b/src/trap.rs index b4446fd..bd485d0 100644 --- a/src/trap.rs +++ b/src/trap.rs @@ -40,7 +40,7 @@ pub extern "C" fn m_handler() { _ => { log::log!( Warning, - "Uncaught machine mode interupt. mcause: {:X}", + "Uncaught machine mode interupt. mcause: 0x{:x}", mcause ); panic!(); @@ -57,7 +57,7 @@ pub extern "C" fn s_handler() { _ => { log::log!( Warning, - "Uncaught supervisor mode interupt. scause: {:X}", + "Uncaught supervisor mode interupt. scause: 0x{:x}", cause ); panic!() diff --git a/src/vm.rs b/src/vm.rs index b4f2d8e..1f3dd27 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -4,7 +4,6 @@ pub mod ptable; pub mod process; pub mod galloc; -use core::ptr::null_mut; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; @@ -110,9 +109,7 @@ pub fn test_galloc() { let third = galloc(16).unwrap(); println!("freed first and asked for 16 bytes, should match first {:?}", third as usize); - let mut hold: [*mut usize; 256] = [null_mut(); 256]; - for i in 0.. { - hold[i] = galloc(16).unwrap(); - } - println!("{:?} and {:?} should not be in the same page", first, hold[255]); + let back_half = galloc(4096 / 2).unwrap(); + let next_page = galloc(16).unwrap(); + println!("{:?} and {:?} should not be in the same page", back_half, next_page); } diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs index 1ec74fd..26d4d83 100644 --- a/src/vm/galloc.rs +++ b/src/vm/galloc.rs @@ -11,7 +11,7 @@ use crate::vm::*; /// If it is indirect, the second u64 is the valid/in use bits of the /// corresponding u64s in the current header. /// -/// If this is an indirect header, then all futher u64 are paried. The +/// If this is an indirect header, then all futher u64 are paired. The /// even indexed (first) u64 is a pointer dwon one level. The odd /// (second) one is the valid mask for that link. 
If the link is to a /// data layer, then it corresponds to the parts of the data layer in @@ -22,10 +22,11 @@ use crate::vm::*; /// If this is a data layer, then the entire page is naturally aligned /// data. By that I mean that a pow of 2 chunk of size n is n-byte /// aligned. +/// +/// This allocator works on 64byte chunks -// I'd use page size but rust won't let me -// type Header = [u64; 4096/64]; +// should be 16 bytes big exactly #[repr(C)] #[derive(Clone, Copy)] struct HeaderPair { @@ -38,12 +39,12 @@ struct HeaderPair { struct Indirect { level: u64, valid: u64, - contents: [HeaderPair; 255], + contents: [HeaderPair; 256], // 4096 / 16 } #[derive(Clone, Copy)] union Header { - data: [u64; 4096/64], + data: [u64; 64], // 4096 / 64 indirect: Indirect, } @@ -71,17 +72,36 @@ fn round_up(mut s:u64) -> u64 { s + 1 } +/// Returns the needed number of units to contain the given size +fn chunk_size(mut size: u64) -> u64 { + size = round_up(size); + if size < 64 { + 1 + } else { + size / 64 + } +} + fn get_page() -> Result<*mut usize, VmError> { match unsafe { PAGEPOOL.get_mut().unwrap().palloc() } { Err(e) => { Err(e) }, Ok(page) => { - Ok(page.addr as *mut usize) + Ok(page.addr) } } } +fn free_page(addr: *mut usize) { + match unsafe { PAGEPOOL.get_mut().unwrap().pfree(Page::from(addr)) } { + Err(_) => { + panic!("Galloc double free passed to palloc"); + }, + Ok(()) => {} + } +} + impl Drop for GAlloc { fn drop(&mut self) { @@ -110,7 +130,7 @@ impl GAlloc { } fn search_data_layer(size: u64, dl_mask: u64) -> Option { - let size = round_up(size) / 8; // pow 2, in usize units + let size = chunk_size(size); // number of units let search_mask = make_mask(size); let mut i = 0; @@ -180,7 +200,7 @@ impl GAlloc { None => {}, Some(idx) => { // found space, mark and make pointer - let in_use = round_up(size as u64) / 8; // how many to mark in use + let in_use = chunk_size(size as u64); // how many to mark in use root.indirect.contents[i].valid = root.indirect.contents[i].valid | (make_mask(in_use) << idx); let data_page = root.indirect.contents[i].down as *mut usize; @@ -189,7 +209,7 @@ impl GAlloc { } } // couldn't find anything, try to add another data page - if open == -1 { + if open != -1 { let open = open as usize; let page: *mut Header = match get_page() { Err(e) => { @@ -268,7 +288,8 @@ impl GAlloc { (found, false) => { // trim branch and maybe report findings root.indirect.valid = root.indirect.valid & !(1 << i); - // TODO free the said down link + // free lower page + free_page(root.indirect.contents[i].down as *mut usize); if root.indirect.valid == 0 { // nothing more to check, report findings return (found, false); @@ -291,13 +312,13 @@ impl GAlloc { if root.indirect.contents[i].down as usize == test_ptr { // match! 
let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(round_up(size as u64) / 8); + let clear_mask = make_mask(chunk_size(size as u64)); root.indirect.contents[i].valid = root.indirect.contents[i].valid & !(clear_mask << offset); if root.indirect.contents[i].valid == 0 { // free data page - // TODO free page root.indirect.valid = valid & !(1 << i); + free_page(root.indirect.contents[i].down as *mut usize); if root.indirect.valid == 0 { // cleanup this indirect layer return (true, false); diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index b259df7..699bf97 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -91,7 +91,7 @@ impl Page { } } - /// Write pointers to the previous and next pointers of the doubly + /// Write pointers to the previous and next pointers of the doubly /// linked list to this page. We use the first 8 bytes of the page to /// store a ptr to the previous page, and the second 8 bytes to /// store a ptr to the next page. @@ -164,6 +164,7 @@ impl Pool { // in order to trigger the OutOfPages error. fn alloc_page(&mut self, mut page: Page) -> Page { let (prev, next) = page.read_free(); // prev is always 0x0 + log!(Debug, "called alloc, prev was {:?}", prev); assert_eq!(prev, 0x0 as *mut usize); if next.addr() == 0x0 { @@ -180,6 +181,7 @@ impl Pool { fn free_page(&mut self, mut page: Page) { let (mut head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); + log!(Debug, "Called free"); let addr = page.addr; page.zero(); @@ -195,7 +197,15 @@ impl Pool { while addr > head_next && head_next != 0x0 as *mut usize { (head_prev, head_next) = Page::from(head_next).read_free(); } - Page::from(head_next).write_prev(addr); + if head_next != 0x0 as *mut usize { + Page::from(head_next).write_prev(addr); // link back from next + } + if head_prev != 0x0 as *mut usize { + Page::from(head_prev).write_next(addr); // link forward from prev + } else { + // insert at the front + self.free = Some(page); + } page.write_free(head_prev, head_next); } } From 2a29944979f4c0fe866207dc2424a30ed2a4f66d Mon Sep 17 00:00:00 2001 From: tmu Date: Wed, 8 Mar 2023 22:18:53 -0800 Subject: [PATCH 17/57] small stuff and debugging. Need to extend stack? --- .gdbinit | 7 +++++++ src/lib.rs | 3 --- src/vm.rs | 6 ++---- src/vm/palloc.rs | 2 -- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.gdbinit b/.gdbinit index f79dab7..569e4af 100644 --- a/.gdbinit +++ b/.gdbinit @@ -6,3 +6,10 @@ info threads set schedule-multiple on set output-radix 16 set print pretty on +define hook-quit + set confirm off +end +define hook-kill + set confirm off +end + diff --git a/src/lib.rs b/src/lib.rs index 70a6a71..c0d021e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -105,9 +105,6 @@ fn main() -> ! { trap::init(); log!(Info, "Finished trap init..."); let _ = vm::init(); - unsafe { - (*vm::KPGTABLE).write_satp(); - } log!(Info, "Initialized the kernel page table..."); log!(Debug, "Testing page allocation and freeing..."); unsafe { diff --git a/src/vm.rs b/src/vm.rs index 1f3dd27..6f9a17a 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -16,8 +16,6 @@ use core::cell::OnceCell; //static mut PAGEPOOL: PagePool = PagePool::new(bss_end(), dram_end()); static mut PAGEPOOL: OnceCell = OnceCell::new(); static mut GALLOC: OnceCell = OnceCell::new(); -/// Global kernel page table. -pub static mut KPGTABLE: *mut PageTable = core::ptr::null_mut(); /// (Still growing) list of kernel VM system error cases. 
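A simplified, index-based model of the page free list being adjusted here: every free page carries prev/next links (the real pool writes them into the page's first 16 bytes), palloc() pops the head, and pfree() pushes a freed page back onto the front, as the later pfree change does. Indices stand in for page addresses.

struct DemoPool {
    head: Option<usize>,
    prev: Vec<Option<usize>>,
    next: Vec<Option<usize>>,
}

impl DemoPool {
    fn new(npages: usize) -> Self {
        let mut pool = DemoPool {
            head: if npages > 0 { Some(0) } else { None },
            prev: vec![None; npages],
            next: vec![None; npages],
        };
        for i in 0..npages {
            pool.prev[i] = if i == 0 { None } else { Some(i - 1) };
            pool.next[i] = if i + 1 < npages { Some(i + 1) } else { None };
        }
        pool
    }

    fn palloc(&mut self) -> Option<usize> {
        let page = self.head?; // take the head of the free list
        self.head = self.next[page];
        if let Some(n) = self.head {
            self.prev[n] = None; // new head has no predecessor
        }
        Some(page)
    }

    fn pfree(&mut self, page: usize) {
        self.prev[page] = None;
        self.next[page] = self.head; // push-to-front
        if let Some(old) = self.head {
            self.prev[old] = Some(page);
        }
        self.head = Some(page);
    }
}

fn main() {
    let mut pool = DemoPool::new(4);
    let a = pool.palloc().unwrap();
    let b = pool.palloc().unwrap();
    assert_eq!((a, b), (0, 1));
    pool.pfree(a);
    assert_eq!(pool.palloc().unwrap(), 0); // the freed page is handed out first
    assert_eq!(pool.palloc().unwrap(), 2);
}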
#[derive(Debug)] @@ -66,8 +64,8 @@ pub fn init() -> Result<(), PagePool>{ // Map text, data, heap into kernel memory match kpage_init() { - Ok(mut pt) => unsafe { - KPGTABLE = &mut pt; + Ok(pt) => { + pt.write_satp() }, Err(_) => { panic!(); diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index 699bf97..6e2a929 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -164,7 +164,6 @@ impl Pool { // in order to trigger the OutOfPages error. fn alloc_page(&mut self, mut page: Page) -> Page { let (prev, next) = page.read_free(); // prev is always 0x0 - log!(Debug, "called alloc, prev was {:?}", prev); assert_eq!(prev, 0x0 as *mut usize); if next.addr() == 0x0 { @@ -181,7 +180,6 @@ impl Pool { fn free_page(&mut self, mut page: Page) { let (mut head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); - log!(Debug, "Called free"); let addr = page.addr; page.zero(); From a857d5f29b4d7f0eab597c4b7b6ff6b968fd3628 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 11:06:42 -0700 Subject: [PATCH 18/57] Add per hart interrupt stack pages --- kernel.ld | 26 ++++++++++++++------------ src/asm/entry.s | 23 +++++++++++++++++------ src/hw/param.rs | 34 ++++++++++++++++++++++------------ 3 files changed, 53 insertions(+), 30 deletions(-) diff --git a/kernel.ld b/kernel.ld index a919f5b..0dc781b 100644 --- a/kernel.ld +++ b/kernel.ld @@ -6,20 +6,16 @@ MEMORY ram (wxa) : ORIGIN = 0x80000000, LENGTH = 128M } -/* If you don't Align(16), it breaks. If you try to page align .text, -it breaks. Align seems to only work INSIDE sections. To make mapping -nice. Sections should be page aligned. */ - SECTIONS { .text : { *(.text .text.*) . = ALIGN(0x1000); - PROVIDE(__text_end = .); + PROVIDE(_text_end = .); PROVIDE(_etext = .); } - PROVIDE(__global_pointer = .); + PROVIDE(_global_pointer = .); .rodata : { *(.srodata .srodata.*) @@ -37,21 +33,27 @@ SECTIONS /* lower guard page included in above */ .stacks : { . = ALIGN(0x1000); - PROVIDE(__stacks_start = .); - . = . + (4096 * 2 * 2); /* NHARTS with a guard page each, unstable */ - PROVIDE(__stacks_end = .); + PROVIDE(_stacks_start = .); + . = . + (4096 * 4 * 2); /* NHARTS with a guard page each, unstable */ + PROVIDE(_stacks_end = .); + } + .intstacks : { + . = ALIGN(0x1000); + PROVIDE(_intstacks_start = .); + . = . + (0x1000 * 4 * 2); + PROVIDE(_intstacks_end = .); } . = . + 4096; /* guard page */ /* stacks should start at stack end and alternate with guard pages going down */ .bss : { . = ALIGN(0x1000); - PROVIDE(__bss_start = .); + PROVIDE(_bss_start = .); *(.sbss .sbss.*) *(.bss .bss.*) . 
= ALIGN(0x1000); - PROVIDE(__bss_end = .); + PROVIDE(_bss_end = .); } PROVIDE(_end = .); - PROVIDE(__memory_end = ORIGIN(ram) + LENGTH(ram)); + PROVIDE(_memory_end = ORIGIN(ram) + LENGTH(ram)); } diff --git a/src/asm/entry.s b/src/asm/entry.s index 4a64c98..8bf801a 100644 --- a/src/asm/entry.s +++ b/src/asm/entry.s @@ -22,20 +22,31 @@ .option push .option norelax # Linker position data relative to gp - .extern __global_pointer - la gp, __global_pointer + .extern _global_pointer + la gp, _global_pointer .option pop # Set up stack per of hart ids according to linker script # Add 4k guard page per hart csrr a1, mhartid - sll a1, a1, 1 # Multiple hartid by 2 to get alternating pages - li a0, 0x1000 + #sll a1, a1, 1 # Multiple hartid by 2 to get alternating pages + li a0, 0x3000 mul a1, a1, a0 - .extern __stacks_end # Linker supplied - la a2, __stacks_end + .extern _stacks_end # Linker supplied + la a2, _stacks_end sub sp, a2, a1 + .extern _intstacks_end + csrr a1, mhartid + li a0, 0x4000 + mul a1, a1, a0 + la a2, _intstacks_end + sub a2, a2, a1 + csrw mscratch, a2 # Write per hart mscratch pad + li a0, 0x2000 + sub a2, a2, a0 # Move sp down by scratch pad page + guard page + csrw sscratch, a2 # Write per hart sscratch pad + # Jump to _start in src/main.rs .extern _start call _start diff --git a/src/hw/param.rs b/src/hw/param.rs index 4fbe3eb..c233a70 100644 --- a/src/hw/param.rs +++ b/src/hw/param.rs @@ -35,14 +35,16 @@ use core::ptr::addr_of_mut; // TODO consider reworking this to have a consistent naming scheme and // maybe a macro for the getter functions. extern "C" { - static mut __text_end: usize; - static mut __bss_start: usize; - static mut __bss_end: usize; - static mut __memory_end: usize; + static mut _text_end: usize; + static mut _bss_start: usize; + static mut _bss_end: usize; + static mut _memory_end: usize; static mut _roedata: usize; static mut _edata: usize; - static mut __stacks_start: usize; - static mut __stacks_end: usize; + static mut _stacks_start: usize; + static mut _stacks_end: usize; + static mut _intstacks_start: usize; + static mut _intstacks_end: usize; } /// CLINT base address. 
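For reference, the per-hart stack arithmetic that entry.s performs above, written out in plain Rust. The two end addresses are made-up placeholders for the linker symbols _stacks_end and _intstacks_end; only the per-hart strides (0x3000 for the regular stacks, 0x4000 for the interrupt stacks, and the 0x2000 step between mscratch and sscratch) come from the assembly and linker script.

const PAGE: usize = 0x1000;

fn hart_sp(stacks_end: usize, hartid: usize) -> usize {
    stacks_end - hartid * 3 * PAGE // two stack pages plus one guard page per hart
}

fn hart_scratch(intstacks_end: usize, hartid: usize) -> (usize, usize) {
    let mscratch = intstacks_end - hartid * 4 * PAGE; // four interrupt-stack pages per hart
    let sscratch = mscratch - 2 * PAGE; // step past the m-mode page and its guard page
    (mscratch, sscratch)
}

fn main() {
    let stacks_end = 0x8040_0000usize; // illustrative address only
    let intstacks_end = 0x8040_8000usize; // illustrative address only
    assert_eq!(hart_sp(stacks_end, 0), stacks_end);
    assert_eq!(hart_sp(stacks_end, 1), stacks_end - 0x3000);
    assert_eq!(hart_scratch(intstacks_end, 1), (intstacks_end - 0x4000, intstacks_end - 0x6000));
}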
@@ -55,15 +57,15 @@ pub const UART_BASE: usize = 0x10000000; pub const DRAM_BASE: *mut usize = 0x80000000 as *mut usize; pub fn text_end() -> *mut usize { - unsafe { addr_of_mut!(__text_end) } + unsafe { addr_of_mut!(_text_end) } } pub fn bss_end() -> *mut usize { - unsafe { addr_of_mut!(__bss_end) } + unsafe { addr_of_mut!(_bss_end) } } pub fn bss_start() -> *mut usize { - unsafe { addr_of_mut!(__bss_start) } + unsafe { addr_of_mut!(_bss_start) } } pub fn rodata_end() -> *mut usize { @@ -75,15 +77,23 @@ pub fn data_end() -> *mut usize { } pub fn stacks_start() -> *mut usize { - unsafe { addr_of_mut!(__stacks_start) } + unsafe { addr_of_mut!(_stacks_start) } } pub fn stacks_end() -> *mut usize { - unsafe { addr_of_mut!(__stacks_end) } + unsafe { addr_of_mut!(_stacks_end) } +} + +pub fn intstacks_start() -> *mut usize { + unsafe { addr_of_mut!(_intstacks_start) } +} + +pub fn intstacks_end() -> *mut usize { + unsafe { addr_of_mut!(_intstacks_end) } } pub fn dram_end() -> *mut usize { - unsafe { addr_of_mut!(__memory_end) } + unsafe { addr_of_mut!(_memory_end) } } pub static PAGE_SIZE: usize = 4096; From dcfe915c4ee7d93b61f492b9142a1ece492669b8 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 11:55:15 -0700 Subject: [PATCH 19/57] Add save and load gp regs macros --- src/asm/trap.s | 99 ++++++++++++-------------------------------------- 1 file changed, 23 insertions(+), 76 deletions(-) diff --git a/src/asm/trap.s b/src/asm/trap.s index 8351bfb..741b3e7 100644 --- a/src/asm/trap.s +++ b/src/asm/trap.s @@ -1,10 +1,4 @@ - .section .text -# This is the machine mode trap vector(not really). It exists -# to get us into the rust handler - .option norvc - .align 4 - .global __mtrapvec -__mtrapvec: +.macro save_gp_regs addi sp, sp, -256 sd x0, 0(sp) @@ -38,10 +32,9 @@ __mtrapvec: sd x28, 224(sp) sd x29, 232(sp) sd x30, 240(sp) +.endm - .extern m_handler - call m_handler - +.macro load_gp_regs ld x0, 0(sp) ld x1, 8(sp) ld x2, 16(sp) @@ -74,9 +67,24 @@ __mtrapvec: ld x30, 240(sp) addi sp, sp, 256 - mret +.endm + .section .text +# This is the machine mode trap vector(not really). 
It exists +# to get us into the rust handler + .option norvc + .align 4 + .global __mtrapvec +__mtrapvec: + csrrw sp, mscratch, sp + save_gp_regs + .extern m_handler + call m_handler + + load_gp_regs + csrrw sp, mscratch, sp + mret # This is the supervisor trap vector, it just exists to get # us into the rust handler elsewhere @@ -84,73 +92,12 @@ __mtrapvec: .align 4 .globl __strapvec __strapvec: - addi sp, sp, -256 - - sd x0, 0(sp) - sd x1, 8(sp) - sd x2, 16(sp) - sd x3, 24(sp) - sd x4, 32(sp) - sd x5, 40(sp) - sd x6, 48(sp) - sd x7, 56(sp) - sd x8, 64(sp) - sd x9, 72(sp) - sd x10, 80(sp) - sd x11, 88(sp) - sd x12, 96(sp) - sd x13, 104(sp) - sd x14, 112(sp) - sd x15, 120(sp) - sd x16, 128(sp) - sd x17, 136(sp) - sd x18, 144(sp) - sd x19, 152(sp) - sd x20, 160(sp) - sd x21, 168(sp) - sd x22, 176(sp) - sd x23, 184(sp) - sd x24, 192(sp) - sd x25, 200(sp) - sd x26, 208(sp) - sd x27, 216(sp) - sd x28, 224(sp) - sd x29, 232(sp) - sd x30, 240(sp) + csrrw sp, sscratch, sp + save_gp_regs .extern s_handler call s_handler - ld x0, 0(sp) - ld x1, 8(sp) - ld x2, 16(sp) - ld x3, 24(sp) - ld x5, 40(sp) - ld x6, 48(sp) - ld x7, 56(sp) - ld x8, 64(sp) - ld x9, 72(sp) - ld x10, 80(sp) - ld x11, 88(sp) - ld x12, 96(sp) - ld x13, 104(sp) - ld x14, 112(sp) - ld x15, 120(sp) - ld x16, 128(sp) - ld x17, 136(sp) - ld x18, 144(sp) - ld x19, 152(sp) - ld x20, 160(sp) - ld x21, 168(sp) - ld x22, 176(sp) - ld x23, 184(sp) - ld x24, 192(sp) - ld x25, 200(sp) - ld x26, 208(sp) - ld x27, 216(sp) - ld x28, 224(sp) - ld x29, 232(sp) - ld x30, 240(sp) - - addi sp, sp, 256 + load_gp_regs + csrrw sp, sscratch, sp sret From 4ae5187f3718f27a0bac29cad44c1889ff4acb37 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 11:55:50 -0700 Subject: [PATCH 20/57] Add kernel pagetable mapping for int stacks and increased hart stack --- src/vm/ptable.rs | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index cbad8c9..d034001 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -253,14 +253,16 @@ pub fn kpage_init() -> Result { "Succesfully mapped kernel data into kernel pgtable..." ); + // This maps hart 0, 1 stack pages in opposite order as entry.S. Shouln't necessarily be a + // problem. let base = stacks_start(); for s in 0..NHART { - let stack = unsafe { base.byte_add(PAGE_SIZE * (1 + s * 2)) }; + let stack = unsafe { base.byte_add(PAGE_SIZE * (1 + s * 3)) }; if let Err(kernel_stack) = page_map( kpage_table, stack, stack as *mut usize, - PAGE_SIZE, + PAGE_SIZE * 2, PTE_READ | PTE_WRITE, ) { return Err(kernel_stack); @@ -271,6 +273,40 @@ pub fn kpage_init() -> Result { s ); } + + // This maps hart 0, 1 stack pages in opposite order as entry.S. Shouln't necessarily be a + // problem. + let base = intstacks_start(); + for i in 0..NHART { + let m_intstack = unsafe { base.byte_add(PAGE_SIZE * (1 + i * 4)) }; + // Map hart i m-mode handler. 
+ if let Err(intstack_m) = page_map( + kpage_table, + m_intstack, + m_intstack as *mut usize, + PAGE_SIZE, + PTE_READ | PTE_WRITE + ) { + return Err(intstack_m) + } + // Map hart i s-mode handler + let s_intstack = unsafe { m_intstack.byte_add(PAGE_SIZE * 2) }; + if let Err(intstack_s) = page_map( + kpage_table, + s_intstack, + s_intstack as *mut usize, + PAGE_SIZE, + PTE_READ | PTE_WRITE + ) { + return Err(intstack_s) + } + log!( + Debug, + "Succesfully mapped interrupt stack for hart {} into kernel pgtable...", + i + ); + + } if let Err(bss_map) = page_map( kpage_table, From 11a52f1ea948d1895ea72bd55ee74e315ae26433 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 12:06:37 -0700 Subject: [PATCH 21/57] remove palloc debug logs and unecessary casts --- src/vm/palloc.rs | 2 -- src/vm/ptable.rs | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index 699bf97..6e2a929 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -164,7 +164,6 @@ impl Pool { // in order to trigger the OutOfPages error. fn alloc_page(&mut self, mut page: Page) -> Page { let (prev, next) = page.read_free(); // prev is always 0x0 - log!(Debug, "called alloc, prev was {:?}", prev); assert_eq!(prev, 0x0 as *mut usize); if next.addr() == 0x0 { @@ -181,7 +180,6 @@ impl Pool { fn free_page(&mut self, mut page: Page) { let (mut head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); - log!(Debug, "Called free"); let addr = page.addr; page.zero(); diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index d034001..e6f409f 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -261,7 +261,7 @@ pub fn kpage_init() -> Result { if let Err(kernel_stack) = page_map( kpage_table, stack, - stack as *mut usize, + stack, PAGE_SIZE * 2, PTE_READ | PTE_WRITE, ) { @@ -283,7 +283,7 @@ pub fn kpage_init() -> Result { if let Err(intstack_m) = page_map( kpage_table, m_intstack, - m_intstack as *mut usize, + m_intstack, PAGE_SIZE, PTE_READ | PTE_WRITE ) { @@ -294,7 +294,7 @@ pub fn kpage_init() -> Result { if let Err(intstack_s) = page_map( kpage_table, s_intstack, - s_intstack as *mut usize, + s_intstack, PAGE_SIZE, PTE_READ | PTE_WRITE ) { From 1aeaaf9ab1827df88d2dd0eb6ecc159e56d34447 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 12:21:47 -0700 Subject: [PATCH 22/57] fix linker stack size --- kernel.ld | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel.ld b/kernel.ld index 0dc781b..c48bff1 100644 --- a/kernel.ld +++ b/kernel.ld @@ -34,7 +34,7 @@ SECTIONS .stacks : { . = ALIGN(0x1000); PROVIDE(_stacks_start = .); - . = . + (4096 * 4 * 2); /* NHARTS with a guard page each, unstable */ + . = . 
+ (4096 * 3 * 2); /* NHARTS with a guard page each, unstable */ PROVIDE(_stacks_end = .); } .intstacks : { From f00184799681fa43046cc05fa1d6c8d483e3b0f1 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 12:37:02 -0700 Subject: [PATCH 23/57] Make pfree() push free-d page to head of free list --- src/vm/palloc.rs | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index 6e2a929..ab811ce 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -179,32 +179,23 @@ impl Pool { } fn free_page(&mut self, mut page: Page) { - let (mut head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); + let (head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); let addr = page.addr; page.zero(); match self.free { - Some(page) => { head_next = page.addr; } + Some(mut head) => { + head_next = page.addr; + head.write_prev(addr); + } None => { page.write_free(head_prev, head_next); self.free = Some(page); return; } } - - while addr > head_next && head_next != 0x0 as *mut usize { - (head_prev, head_next) = Page::from(head_next).read_free(); - } - if head_next != 0x0 as *mut usize { - Page::from(head_next).write_prev(addr); // link back from next - } - if head_prev != 0x0 as *mut usize { - Page::from(head_prev).write_next(addr); // link forward from prev - } else { - // insert at the front - self.free = Some(page); - } page.write_free(head_prev, head_next); + self.free = Some(page); } } From 982f791484c480e4dd8f8eb3b462f0b202482597 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 12:47:05 -0700 Subject: [PATCH 24/57] Convert ptr math macros to inline fn --- src/vm/ptable.rs | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index e6f409f..1400ba9 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -34,22 +34,19 @@ pub struct PageTable { base: PhysAddress, // Page Table located at base address. } -macro_rules! vpn { - ($p:expr, $l:expr) => { - (($p).addr()) >> (12 + 9 * $l) & 0x1FF - }; +#[inline(always)] +fn vpn(ptr: VirtAddress, level: usize) -> usize{ + ptr.addr() >> (12 + 9 * level) & 0x1FF } -macro_rules! PteToPhy { - ($p:expr) => { - ((($p) >> 10) << 12) as *mut usize - }; +#[inline(always)] +fn pte_to_phy(pte: PTEntry) -> PhysAddress { + ((pte >> 10) << 12) as *mut usize } -macro_rules! PhyToPte { - ($p:expr) => { - (((($p).addr()) >> 12) << 10) - }; +#[inline(always)] +fn phy_to_pte(ptr: PhysAddress) -> PTEntry { + ((ptr.addr()) >> 12) << 10 } macro_rules! PteGetFlag { @@ -64,10 +61,9 @@ macro_rules! PteSetFlag { }; } -macro_rules! PhyToSATP { - ($pte:expr) => { - (1 << 63) | ((($pte).addr()) >> 12) - }; +#[inline(always)] +fn phy_to_satp(ptr: PhysAddress) -> usize{ + (1 << 63) | (ptr.addr() >> 12) } macro_rules! 
PageAlignDown { @@ -94,7 +90,7 @@ fn read_pte(pte: *mut PTEntry) -> PTEntry { impl From for PageTable { fn from(pte: PTEntry) -> Self { PageTable { - base: PteToPhy!(pte), + base: pte_to_phy(pte), } } } @@ -106,7 +102,7 @@ impl PageTable { } pub fn write_satp(&self) { flush_tlb(); - write_satp(PhyToSATP!(self.base)); + write_satp(phy_to_satp(self.base)); flush_tlb(); } } @@ -118,7 +114,7 @@ unsafe fn walk(pt: PageTable, va: VirtAddress, alloc_new: bool) -> Result<*mut P let mut table = pt.clone(); assert!(va.addr() < VA_TOP); for level in (1..3).rev() { - let idx = vpn!(va, level); + let idx = vpn(va, level); let next: *mut PTEntry = table.index_mut(idx); table = match PteGetFlag!(*next, PTE_VALID) { true => PageTable::from(*next), @@ -129,8 +125,8 @@ unsafe fn walk(pt: PageTable, va: VirtAddress, alloc_new: bool) -> Result<*mut P .unwrap() .palloc() { Ok(pg) => { - *next = PteSetFlag!(PhyToPte!(pg.addr), PTE_VALID); - PageTable::from(PhyToPte!(pg.addr)) + *next = PteSetFlag!(phy_to_pte(pg.addr), PTE_VALID); + PageTable::from(phy_to_pte(pg.addr)) } Err(e) => return Err(e), } @@ -142,7 +138,7 @@ unsafe fn walk(pt: PageTable, va: VirtAddress, alloc_new: bool) -> Result<*mut P } // Last, return PTE leaf. Assuming we are all using 4K pages right now. // Caller's responsibility to check flags. - let idx = vpn!(va, 0); + let idx = vpn(va, 0); Ok(table.index_mut(idx)) } @@ -170,7 +166,7 @@ fn page_map( if read_pte(pte_addr) & PTE_VALID != 0 { return Err(VmError::PallocFail); } - set_pte(pte_addr, PteSetFlag!(PhyToPte!(phys), flag | PTE_VALID)); + set_pte(pte_addr, PteSetFlag!(phy_to_pte(phys), flag | PTE_VALID)); start = start.map_addr(|addr| addr + PAGE_SIZE); phys = phys.map_addr(|addr| addr + PAGE_SIZE); } From 1e9b1eb89d902993ca4422599fa7fdb7b9e8fa98 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 12:55:48 -0700 Subject: [PATCH 25/57] Add core::hint::spin_loop to mutex lock() --- src/lock/mutex.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/lock/mutex.rs b/src/lock/mutex.rs index 4b71a76..46bb371 100644 --- a/src/lock/mutex.rs +++ b/src/lock/mutex.rs @@ -7,6 +7,7 @@ /// Opportunity for improvement on interrupt safe locks. use core::cell::UnsafeCell; use core::sync::atomic::*; +use core::hint::spin_loop; /// Returned from successfully locking a mutex. pub struct MutexGuard<'a, T> { @@ -59,9 +60,9 @@ impl Mutex { /// after lock is acquired. pub fn lock(&self) -> MutexGuard { // Use Acquire memory order to load lock value. - // TODO: - // Spin loop improvement. 
- while self.lock_state.swap(1, Ordering::Acquire) == 1 {} + while self.lock_state.swap(1, Ordering::Acquire) == 1 { + spin_loop(); + } MutexGuard { mutex: self } } } From 5460b7b7cab5b8845a488444093dee9f3297feb1 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Mon, 13 Mar 2023 16:59:25 -0700 Subject: [PATCH 26/57] Add initial commit for experimental kalloc --- src/vm.rs | 4 +- src/vm/kalloc.rs | 106 +++++++++++++++++++++++++++++++++++++++++++++++ src/vm/ptable.rs | 4 +- 3 files changed, 111 insertions(+), 3 deletions(-) create mode 100644 src/vm/kalloc.rs diff --git a/src/vm.rs b/src/vm.rs index 6f9a17a..a3ffa24 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -3,12 +3,13 @@ pub mod palloc; pub mod ptable; pub mod process; pub mod galloc; +pub mod kalloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; use galloc::GAlloc; -use ptable::{kpage_init, PageTable}; +use ptable::kpage_init; //, PageTable}; use process::Process; use core::cell::OnceCell; @@ -25,6 +26,7 @@ pub enum VmError { PallocFail, PfreeFail, GNoSpace, + Koom, } pub trait Resource {} diff --git a/src/vm/kalloc.rs b/src/vm/kalloc.rs new file mode 100644 index 0000000..18337f1 --- /dev/null +++ b/src/vm/kalloc.rs @@ -0,0 +1,106 @@ +use core::mem::size_of; + +use crate::hw::param::PAGE_SIZE; +use super::{palloc::Page, VmError}; + +const MAX_CHUNK_SIZE: usize = 4088; // PAGE_SIZE - HEADER_SIZE = 4096 - 8 = 4088. +const HEADER_SIZE: usize = size_of::
(); +const HEADER_USED: usize = 0x1000; + +// 8-byte minimum allocation size, +// 4096-byte maximum allocation size. +// Guarantee that address of header + size = start of data. +// Size must be <= 4088 Bytes. +// Bits 0-11 are size (2^0 - (2^12 - 1)) +// Bit 12 is free/used. +#[repr(C)] +struct Header { + fields: usize, // Could be a union? +} + +impl From<*mut usize> for Header { + fn from(src: *mut usize) -> Self { + let fields = unsafe { src.read() }; + Header { fields } + } +} + +impl Header { + fn chunk_size(&self) -> usize { + self.fields & 0xFFF + } + + fn is_free(&self) -> bool { + self.fields & !HEADER_USED == 0 + } + + fn set_used(&mut self) { + self.fields = self.fields | HEADER_USED; + } + + // Clear size bits. Set size bits to size. + fn set_size(&mut self, size: usize) { + self.fields = (self.fields & !MAX_CHUNK_SIZE) | size; + } + + // Unsafe write header data to memory at dest. + fn write_to(&self, dest: *mut usize) { + unsafe { + dest.write_volatile(self.fields); + } + } + + // Takes an existing chunk and splits it into a chunk of 'new_size' + the remainder. + fn split(&mut self, new_size: usize, cur_addr: *mut usize) -> (Header, *mut usize) { + let old_size = self.chunk_size(); + let next_size = old_size - new_size; + self.set_size(new_size); + let next_addr = cur_addr.map_addr(|addr| addr + HEADER_SIZE + new_size); + let next_header = Header { fields: next_size }; + next_header.write_to(next_addr); + (next_header, next_addr) + } +} + +struct Kalloc { + head: *mut usize, // Address of next free header. + end: *mut usize, +} + +impl Kalloc { + fn new(start: Page) -> Self { + // Make sure start of allocation pool is page aligned. + assert_eq!(start.addr.addr() & (PAGE_SIZE - 1), 0); + let head = Header { fields: MAX_CHUNK_SIZE }; + head.write_to(start.addr); + + Kalloc { + head: start.addr, + end: unsafe { start.addr.byte_add(0x1000) }, + } + } + + fn alloc(&mut self, size: usize) -> Result<*mut usize, VmError> { + // Start tracks address of each header. + let mut start = self.head; + let mut head = Header::from(start); + + // Remove redundancy + use some helper fns. + while start != self.end { + let chunk_size = head.chunk_size(); + if chunk_size < size { + start = start.map_addr(|addr| addr + HEADER_SIZE + chunk_size); + head = Header::from(start); + } else if !head.is_free() { + start = start.map_addr(|addr| addr + HEADER_SIZE + chunk_size); + head = Header::from(start); + } else { + head.set_used(); + let (next, next_addr) = head.split(size, start); + next.write_to(next_addr); + return Ok(start.map_addr(|addr| addr + size)) + } + } + Err(VmError::Koom) + } +} diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index 1400ba9..67e6192 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -17,8 +17,8 @@ const PTE_GLOBAL: usize = 1 << 5; const PTE_ACCESSED: usize = 1 << 6; const PTE_DIRTY: usize = 1 << 7; -type VirtAddress = *mut usize; -type PhysAddress = *mut usize; +pub type VirtAddress = *mut usize; +pub type PhysAddress = *mut usize; type PTEntry = usize; /// Supervisor Address Translation and Protection. /// Section 4.1.12 of risc-v priviliged ISA manual. 
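The header layout introduced in the patch above packs a chunk's size into bits 0-11 of a
single usize and the in-use flag into bit 12, with the header stored immediately before the
chunk's data. Below is a minimal standalone sketch of that bit-packing; the names
(PackedHeader, SIZE_MASK, USED_FLAG) are illustrative and not the kernel's identifiers. Note
the sketch tests the used bit directly, which is also how a later patch in this series
corrects is_free().

    // Illustrative only: mirrors the size/used packing described in the comments above.
    const SIZE_MASK: usize = (1 << 12) - 1; // bits 0-11: chunk size in bytes
    const USED_FLAG: usize = 1 << 12;       // bit 12: chunk is in use

    #[derive(Copy, Clone)]
    struct PackedHeader(usize);

    impl PackedHeader {
        fn new(size: usize) -> Self {
            assert!(size <= SIZE_MASK);
            PackedHeader(size) // a fresh chunk starts free
        }
        fn size(self) -> usize { self.0 & SIZE_MASK }
        fn is_free(self) -> bool { self.0 & USED_FLAG == 0 }
        fn set_used(&mut self) { self.0 |= USED_FLAG; }
        fn set_free(&mut self) { self.0 &= !USED_FLAG; }
    }

    fn main() {
        let mut h = PackedHeader::new(24);
        assert!(h.is_free());
        h.set_used();
        assert_eq!(h.size(), 24); // size survives flag updates
        assert!(!h.is_free());
        h.set_free();
        assert!(h.is_free());
    }
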
From dd1a2fb096bbb83d3f1985c0fccf084dd2522479 Mon Sep 17 00:00:00 2001 From: tmu Date: Mon, 13 Mar 2023 17:46:08 -0700 Subject: [PATCH 27/57] arithmetic touchup and first pass at free --- src/vm/kalloc.rs | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/src/vm/kalloc.rs b/src/vm/kalloc.rs index 18337f1..f78a819 100644 --- a/src/vm/kalloc.rs +++ b/src/vm/kalloc.rs @@ -38,9 +38,13 @@ impl Header { self.fields = self.fields | HEADER_USED; } + fn set_unused(&mut self) { + self.fields = self.fields & !HEADER_USED; + } + // Clear size bits. Set size bits to size. fn set_size(&mut self, size: usize) { - self.fields = (self.fields & !MAX_CHUNK_SIZE) | size; + self.fields = (self.fields & !(0x1000 - 1)) | size; } // Unsafe write header data to memory at dest. @@ -56,7 +60,7 @@ impl Header { let next_size = old_size - new_size; self.set_size(new_size); let next_addr = cur_addr.map_addr(|addr| addr + HEADER_SIZE + new_size); - let next_header = Header { fields: next_size }; + let next_header = Header { fields: next_size - HEADER_SIZE }; // make space for inserted header next_header.write_to(next_addr); (next_header, next_addr) } @@ -80,27 +84,44 @@ impl Kalloc { } } - fn alloc(&mut self, size: usize) -> Result<*mut usize, VmError> { + fn alloc(&mut self, mut size: usize) -> Result<*mut usize, VmError> { // Start tracks address of each header. let mut start = self.head; let mut head = Header::from(start); + size = if size < 8 {8} else {size}; // Remove redundancy + use some helper fns. while start != self.end { let chunk_size = head.chunk_size(); - if chunk_size < size { - start = start.map_addr(|addr| addr + HEADER_SIZE + chunk_size); - head = Header::from(start); - } else if !head.is_free() { + if chunk_size < size || !head.is_free() { start = start.map_addr(|addr| addr + HEADER_SIZE + chunk_size); head = Header::from(start); } else { head.set_used(); - let (next, next_addr) = head.split(size, start); - next.write_to(next_addr); - return Ok(start.map_addr(|addr| addr + size)) + if size != chunk_size { + let (next, next_addr) = head.split(size, start); + next.write_to(next_addr); + } + return Ok(start.map_addr(|addr| addr + HEADER_SIZE)) } } Err(VmError::Koom) } + + // TODO if you call alloc in order and then free in order this + // doesn't merge, as you can't merge backwards. Consider a merging + // pass when allocting. 
+ fn free(&mut self, ptr: *mut usize) { + let chunk_loc = ptr.map_addr(|addr| addr - HEADER_SIZE); + let head = Header::from(chunk_loc); + assert!(!head.is_free(), "Kalloc double free."); + head.set_unused(); + let next = Header::from(chunk_loc.map_addr( + |addr| addr + HEADER_SIZE + head.chunk_size())); + if !(next.is_free()) { + // back to back free, merge + head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) + } + head.write_to(chunk_loc); + } } From 5998ee41206ff60ae1bcc2a35d0a3784125c949f Mon Sep 17 00:00:00 2001 From: tmu Date: Tue, 14 Mar 2023 09:39:59 -0700 Subject: [PATCH 28/57] Kbox with ?Sized and kalloc --- src/lib.rs | 1 + src/mem.rs | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c0d021e..114fbf6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,6 +7,7 @@ #![feature(panic_info_message)] #![feature(strict_provenance)] #![feature(once_cell)] +#![feature(unsized_fn_params)] #![allow(dead_code)] use core::panic::PanicInfo; diff --git a/src/mem.rs b/src/mem.rs index ab575d8..0a5e968 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,7 +1,7 @@ //! Kernel memory utilities use core::ops::{Deref, DerefMut}; -use core::mem::size_of; -use crate::vm::{galloc, gdealloc}; +use core::mem::size_of_val; +use crate::vm::{kalloc, kfree}; /// Kernel heap allocated pointer. No guarantees on unique ownership /// or concurrent access. @@ -10,17 +10,28 @@ pub struct Kbox { size: usize, } -impl Kbox { +impl Kbox { + /// Note that as this exists currently, data is passed by value + /// into new, which means that the initial contents of a box MUST + /// be composed on the stack and passed here to be copied into the + /// heap. Kbox contents will not change size during their + /// lifetime, so it must soak up as much stack space as it will + /// ever use. + /// + /// Also this may entail a stack->stack copy into this callee's + /// stack fram, I am not sure. It might be optimized as a pass by + /// reference with the compiler knowledge that it is a move under + /// the hood, but I really can't say. pub fn new(data: T) -> Self { // How the allocater interface should be made use of. // Current constraints on allocator mean size_of::() must be less than 4Kb - let size = size_of::(); - match galloc(size) { + let size = size_of_val::(&data); + match kalloc(size) { Err(e) => { panic!("Kbox can't allocate: {:?}", e) }, Ok(ptr) => { - let new_ptr = ptr as *mut T; + let new_ptr: *mut T = ptr.cast(); unsafe { *new_ptr = data; // <-- initialize newly allocated memory with our inner value. Self { @@ -57,6 +68,6 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - gdealloc(self.inner as *mut usize, self.size); + kfree(self.inner as *mut usize, self.size); } } From 3f62821a21e0002fcc193783ec021d1e7eca9b56 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 10:59:14 -0700 Subject: [PATCH 29/57] Add kalloc zone struct impl --- src/vm/kalloc.rs | 153 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 138 insertions(+), 15 deletions(-) diff --git a/src/vm/kalloc.rs b/src/vm/kalloc.rs index f78a819..16d5944 100644 --- a/src/vm/kalloc.rs +++ b/src/vm/kalloc.rs @@ -3,21 +3,56 @@ use core::mem::size_of; use crate::hw::param::PAGE_SIZE; use super::{palloc::Page, VmError}; -const MAX_CHUNK_SIZE: usize = 4088; // PAGE_SIZE - HEADER_SIZE = 4096 - 8 = 4088. +const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 = 4088. const HEADER_SIZE: usize = size_of::
(); -const HEADER_USED: usize = 0x1000; +const ZONE_SIZE: usize = 8; +const HEADER_USED: usize = 1 << 12; // Chunk is in use flag. -// 8-byte minimum allocation size, -// 4096-byte maximum allocation size. -// Guarantee that address of header + size = start of data. -// Size must be <= 4088 Bytes. +// 8 byte minimum allocation size, +// 4096-8-8=4080 byte maximum allocation size. +// Guarantee that address of header + header_size = start of data. +// Size must be <= 4080 Bytes. // Bits 0-11 are size (2^0 - (2^12 - 1)) -// Bit 12 is free/used. +// Bit 12 is Used. +// +// Header: +// ┌────────────────────────────────────┬─┬──────────────┐ +// │ Unused / Reserved │U│ Chunk Size │ +// └────────────────────────────────────┴─┴──────────────┘ +// 63 12 11 0 +// #[repr(C)] struct Header { fields: usize, // Could be a union? } +// An allocation zone is the internal representation of a page. +// Each zone contains the address of the next zone (page aligned), +// plus the number of in use chunks within the zone (refs count). +// +// Zone.next: +// ┌──────────────────────────────────────┬──────────────┐ +// │ next zone address (page aligned) │ refs count │ +// └──────────────────────────────────────┴──────────────┘ +// 63 11 0 +// +#[repr(C)] +struct Zone { + base: *mut usize, // This zone's address. + next: usize, // Next zone's address + this zone's ref count. +} + +struct Kalloc { + head: *mut usize, // Address of first zone. + end: *mut usize, +} + +enum KallocError { + MaxRefs, + MinRefs, + NullZone, +} + impl From<*mut usize> for Header { fn from(src: *mut usize) -> Self { let fields = unsafe { src.read() }; @@ -26,6 +61,11 @@ impl From<*mut usize> for Header { } impl Header { + fn new(size: usize) -> Self { + assert!(size <= MAX_CHUNK_SIZE); + Header { fields: size } + } + fn chunk_size(&self) -> usize { self.fields & 0xFFF } @@ -66,21 +106,104 @@ impl Header { } } -struct Kalloc { - head: *mut usize, // Address of next free header. - end: *mut usize, +// Assumes the first byte of a zone is the zone header. +// Next byte is the chunk header. +impl From<*mut usize> for Zone { + fn from(src: *mut usize) -> Self { + Zone { + base: src, + next: unsafe { src.read() } + } + } +} + +impl Zone { + fn new(base: *mut usize) -> Self { + Zone { + base, + next: 0x0, + } + } + + fn get_refs(&self) -> usize { + self.next & (4095) + } + + fn get_next(&self) -> Result { + let next_addr = self.next & !(PAGE_SIZE - 1); + if next_addr == 0x0 { + Err(KallocError::NullZone) + } else { + Ok(next_addr) + } + } + + // Read the next field to get the next zone address. + // Discard this zone's refs count. + // Write base address with next zone address and new refs count. + #[inline(always)] + unsafe fn write_refs(&mut self, new_count: usize) { + let next_addr = match self.get_next() { + Err(_) => 0x0, + Ok(ptr) => ptr, + }; + + self.base.write(next_addr | new_count); + } + + // Read the current next field to get the refs count. + // Discard this zone's next addr. + // Write base address with new next zone address and refs count. + unsafe fn write_next(&mut self, new_next: *mut usize) { + let refs = self.get_refs(); + self.base.write(new_next.addr() | refs); + } + + fn increment_refs(&mut self) -> Result<(), KallocError> { + let new_count = self.get_refs() + 1; + if new_count > 510 { + Err(KallocError::MaxRefs) + } else { + unsafe { self.write_refs(new_count); } + Ok(()) + } + } + + fn decrement_refs(&mut self) -> Result<(), KallocError> { + // Given a usize can't be < 0, I want to catch that and not cause a panic. 
+ // This may truly be unnecessary, but just want to be cautious. + let new_count = self.get_refs() - 1; + if (new_count as isize) < 0 { + Err(KallocError::MinRefs) + } else { + unsafe { self.write_refs(new_count); } + Ok(()) + } + } + + fn next_zone(&self) -> Result { + let next_addr = self.get_next()?; + Ok(Zone::from(next_addr as *mut usize)) + } +} + +unsafe fn write_zone_header_pair(zone: Zone, header: Header) { + let base = zone.base; + base.write(zone.next); + base.byte_add(1).write(header.fields); } impl Kalloc { fn new(start: Page) -> Self { // Make sure start of allocation pool is page aligned. assert_eq!(start.addr.addr() & (PAGE_SIZE - 1), 0); - let head = Header { fields: MAX_CHUNK_SIZE }; - head.write_to(start.addr); - + // New page is the first zone in the Kalloc pool. + let zone = Zone::new(start.addr); + let head = Header::new(MAX_CHUNK_SIZE); + unsafe { write_zone_header_pair(zone, head); } Kalloc { head: start.addr, - end: unsafe { start.addr.byte_add(0x1000) }, + end: start.addr.map_addr(|addr| addr + 0x1000), } } @@ -113,7 +236,7 @@ impl Kalloc { // pass when allocting. fn free(&mut self, ptr: *mut usize) { let chunk_loc = ptr.map_addr(|addr| addr - HEADER_SIZE); - let head = Header::from(chunk_loc); + let mut head = Header::from(chunk_loc); assert!(!head.is_free(), "Kalloc double free."); head.set_unused(); let next = Header::from(chunk_loc.map_addr( From 6a850dd3cb46a452e00d3f6afe32bf1ed41d71e1 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 11:29:47 -0700 Subject: [PATCH 30/57] Remove legacy allocator test --- src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 114fbf6..ec76e05 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -111,8 +111,6 @@ fn main() -> ! { unsafe { vm::test_palloc(); } - log!(Debug, "Testing general subpage allocation..."); - vm::test_galloc(); } else { //Interrupt other harts to init kpgtable. trap::init(); From 49ad3c354a62b4a7b550153f11f933eec24dffba Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 11:30:27 -0700 Subject: [PATCH 31/57] Remove legacy allocator and add pub fns for kalloc and kfree --- src/vm.rs | 48 ++++++++++-------------------------------------- 1 file changed, 10 insertions(+), 38 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index a3ffa24..429d356 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -3,20 +3,18 @@ pub mod palloc; pub mod ptable; pub mod process; pub mod galloc; -pub mod kalloc; +pub mod vmalloc; use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; -use galloc::GAlloc; use ptable::kpage_init; //, PageTable}; use process::Process; use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. -//static mut PAGEPOOL: PagePool = PagePool::new(bss_end(), dram_end()); static mut PAGEPOOL: OnceCell = OnceCell::new(); -static mut GALLOC: OnceCell = OnceCell::new(); +static mut VMALLOC: OnceCell = OnceCell::new(); /// (Still growing) list of kernel VM system error cases. #[derive(Debug)] @@ -41,6 +39,14 @@ pub struct TaskNode { next: Option>, } +pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { + unsafe { VMALLOC.get_mut().unwrap().alloc(size) } +} + +pub fn kfree(ptr: *mut usize) { + unsafe { VMALLOC.get_mut().unwrap().free(ptr) } +} + /// Initialize the kernel VM system. /// First, setup the kernel physical page pool. /// We start the pool at the end of the .bss section, and stop at the end of physical memory. 
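Unlike the removed gdealloc(ptr, size), the kfree wrapper added in the hunk above takes only a
pointer: the chunk's size and used bit live in the header the allocator stores immediately
before the returned address, so callers do not need to remember the allocation size. A
hypothetical call site for these wrappers is sketched below (the value written is illustrative;
a similar sequence appears later in the series' test_kalloc):

    // Illustrative use of the kalloc/kfree wrappers added above.
    let p = kalloc(16).expect("kernel heap exhausted"); // p: *mut usize
    unsafe { p.write(0xdead_beef); }
    kfree(p);
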
@@ -55,12 +61,6 @@ pub fn init() -> Result<(), PagePool>{ panic!("vm double init.") } } - match GALLOC.set(GAlloc::new()) { - Ok(_) => {}, - Err(_) => { - panic!("vm double init.") - } - } } log!(Debug, "Successfully initialized kernel page pool..."); @@ -76,21 +76,7 @@ pub fn init() -> Result<(), PagePool>{ Ok(()) } -pub fn galloc(size: usize) -> Result<*mut usize, VmError> { - unsafe { - GALLOC.get_mut() - .unwrap() - .alloc(size) - } -} -pub fn gdealloc(ptr: *mut usize, size: usize) { - unsafe { - GALLOC.get_mut() - .unwrap() - .dealloc(ptr, size) - } -} pub unsafe fn test_palloc() { let allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); @@ -99,17 +85,3 @@ pub unsafe fn test_palloc() { let _ = PAGEPOOL.get_mut().unwrap().pfree(allocd); log!(Debug, "Successful test of page allocation and freeing..."); } - -pub fn test_galloc() { - let first = galloc(8).unwrap(); - println!("asked for 8 bytes, got {:?}", first as usize); - let second = galloc(16).unwrap(); - println!("asked for 16 bytes got {:?}", second as usize); - gdealloc(first, 8); - let third = galloc(16).unwrap(); - println!("freed first and asked for 16 bytes, should match first {:?}", third as usize); - - let back_half = galloc(4096 / 2).unwrap(); - let next_page = galloc(16).unwrap(); - println!("{:?} and {:?} should not be in the same page", back_half, next_page); -} From fb700f207916fa269adc6a77673e79332d2a4c0f Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 11:31:06 -0700 Subject: [PATCH 32/57] Rename experimental allocator mod --- src/vm/{kalloc.rs => vmalloc.rs} | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) rename src/vm/{kalloc.rs => vmalloc.rs} (96%) diff --git a/src/vm/kalloc.rs b/src/vm/vmalloc.rs similarity index 96% rename from src/vm/kalloc.rs rename to src/vm/vmalloc.rs index 16d5944..1dc7d83 100644 --- a/src/vm/kalloc.rs +++ b/src/vm/vmalloc.rs @@ -1,7 +1,7 @@ use core::mem::size_of; use crate::hw::param::PAGE_SIZE; -use super::{palloc::Page, VmError}; +use super::palloc::Page; const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 = 4088. const HEADER_SIZE: usize = size_of::
(); @@ -42,15 +42,17 @@ struct Zone { next: usize, // Next zone's address + this zone's ref count. } -struct Kalloc { +pub struct Kalloc { head: *mut usize, // Address of first zone. end: *mut usize, } -enum KallocError { +#[derive(Debug)] +pub enum KallocError { MaxRefs, MinRefs, NullZone, + OOM, } impl From<*mut usize> for Header { @@ -207,7 +209,7 @@ impl Kalloc { } } - fn alloc(&mut self, mut size: usize) -> Result<*mut usize, VmError> { + pub fn alloc(&mut self, mut size: usize) -> Result<*mut usize, KallocError> { // Start tracks address of each header. let mut start = self.head; let mut head = Header::from(start); @@ -228,13 +230,13 @@ impl Kalloc { return Ok(start.map_addr(|addr| addr + HEADER_SIZE)) } } - Err(VmError::Koom) + Err(KallocError::OOM) } // TODO if you call alloc in order and then free in order this // doesn't merge, as you can't merge backwards. Consider a merging // pass when allocting. - fn free(&mut self, ptr: *mut usize) { + pub fn free(&mut self, ptr: *mut usize) { let chunk_loc = ptr.map_addr(|addr| addr - HEADER_SIZE); let mut head = Header::from(chunk_loc); assert!(!head.is_free(), "Kalloc double free."); From e713dbf1a5ef0cdc5c9afbd2fcefda6a94631d6a Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 12:57:49 -0700 Subject: [PATCH 33/57] Add public palloc() and pfree() wrapper functions --- src/vm.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/vm.rs b/src/vm.rs index 429d356..450d4a0 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -30,7 +30,7 @@ pub enum VmError { pub trait Resource {} pub struct TaskList { - head: Option>, // TODO:Convert to Option>> + head: Option>, } pub struct TaskNode { @@ -47,6 +47,14 @@ pub fn kfree(ptr: *mut usize) { unsafe { VMALLOC.get_mut().unwrap().free(ptr) } } +fn palloc() -> Result { + unsafe { PAGEPOOL.get_mut().unwrap().palloc() } +} + +fn pfree(page: Page) -> Result<(), VmError> { + unsafe { PAGEPOOL.get_mut().unwrap().pfree(page) } +} + /// Initialize the kernel VM system. /// First, setup the kernel physical page pool. /// We start the pool at the end of the .bss section, and stop at the end of physical memory. From 05c5da26619b16ae826be6525bf1f8058fc99a0c Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 13:01:05 -0700 Subject: [PATCH 34/57] Add zoned alloc() --- src/vm/vmalloc.rs | 99 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 76 insertions(+), 23 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 1dc7d83..352e404 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -1,7 +1,7 @@ use core::mem::size_of; use crate::hw::param::PAGE_SIZE; -use super::palloc::Page; +use super::{palloc::Page, palloc, pfree, VmError}; const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 = 4088. const HEADER_SIZE: usize = size_of::
(); @@ -15,7 +15,7 @@ const HEADER_USED: usize = 1 << 12; // Chunk is in use flag. // Bits 0-11 are size (2^0 - (2^12 - 1)) // Bit 12 is Used. // -// Header: +// Header.fields: // ┌────────────────────────────────────┬─┬──────────────┐ // │ Unused / Reserved │U│ Chunk Size │ // └────────────────────────────────────┴─┴──────────────┘ @@ -183,9 +183,45 @@ impl Zone { } } - fn next_zone(&self) -> Result { - let next_addr = self.get_next()?; - Ok(Zone::from(next_addr as *mut usize)) + fn next_zone(&self) -> Option { + match self.get_next() { + Ok(addr) => Some(Zone::from(addr as *mut usize)), + Err(_) => None, + } + } + + // Scan this zone for a free chunk of the right size. + // First 8 bytes of a zone is the Zone.next field. + // Second 8 bytes is the first header of the zone. + fn scan(&mut self, size: usize) -> Option<*mut usize> { + // If size is less than min alloc size (8 bytes), pad. + let size = if size < 8 { 8 } else { size }; + // Start and end (start + PAGE_SIZE) bounds of zone. + let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; + // Get the first header in the zone. + let head = unsafe { Header::from(curr) }; + + while curr < end { + let chunk_size = head.chunk_size(); + if chunk_size < size || !head.is_free() { + curr = curr.map_addr(|addr| addr + HEADER_SIZE + chunk_size); + head = Header::from(curr); + } else { + alloc_chunk(size, curr, self, &mut head); + return Some(curr.map_addr(|addr| addr + HEADER_SIZE)) + } + } + None + } +} + +fn alloc_chunk(size: usize, ptr: *mut usize, zone: &mut Zone, head: &mut Header) { + zone.increment_refs().expect("Maximum zone allocation limit exceeded."); + head.set_used(); + + if size != head.chunk_size() { + let (_, _) = head.split(size, ptr); + //next.write_to(next_addr); } } @@ -209,28 +245,45 @@ impl Kalloc { } } - pub fn alloc(&mut self, mut size: usize) -> Result<*mut usize, KallocError> { - // Start tracks address of each header. - let mut start = self.head; - let mut head = Header::from(start); - size = if size < 8 {8} else {size}; + fn grow_pool(&mut self, tail: Zone) -> Result<(Zone, Header), VmError> { + let page = palloc()?; + unsafe { tail.write_next(page.addr); } + let zone = Zone::new(page.addr); + let head = Header::new(MAX_CHUNK_SIZE); + unsafe { write_zone_header_pair(zone, head); } + Ok((zone, head)) + } - // Remove redundancy + use some helper fns. - while start != self.end { - let chunk_size = head.chunk_size(); - if chunk_size < size || !head.is_free() { - start = start.map_addr(|addr| addr + HEADER_SIZE + chunk_size); - head = Header::from(start); + /// Finds the first fit for the requested size. + /// 1. Scan first zone from first to last for a free chunk that fits. + /// 2a. If success: Return chunk's starting address (*mut usize). + /// 2b. Else, move to next zone and go back to step 1. + /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). + /// 4. If success, go to step 2a. Else, fail with OOM. 
+ pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { + let mut curr = self.head; + let mut zone = Some(Zone::from(curr)); + let mut trail = zone.unwrap(); + + while let Some(mut next_zone) = zone { + if let Some(ptr) = next_zone.scan(size) { + return Ok(ptr) } else { - head.set_used(); - if size != chunk_size { - let (next, next_addr) = head.split(size, start); - next.write_to(next_addr); - } - return Ok(start.map_addr(|addr| addr + HEADER_SIZE)) + trail = zone.unwrap(); + zone = zone.unwrap().next_zone(); } } - Err(KallocError::OOM) + + match self.grow_pool(trail) { + Ok((mut zone, mut head)) => { + let ptr = unsafe { zone.base.map_addr(|addr| addr + ZONE_SIZE + HEADER_SIZE) }; + alloc_chunk(size, ptr, &mut zone, &mut head); + Ok(ptr) + }, + Err(e) => Err(KallocError::OOM), + } + + //Err(KallocError::OOM) } // TODO if you call alloc in order and then free in order this From 104d71292080bc7cb8d2ca13178b3563eead1b65 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 14:47:58 -0700 Subject: [PATCH 35/57] Add allocator zoned freeing and merging --- src/mem.rs | 4 +- src/vm/vmalloc.rs | 96 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 71 insertions(+), 29 deletions(-) diff --git a/src/mem.rs b/src/mem.rs index 0a5e968..cd6a6aa 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -10,7 +10,7 @@ pub struct Kbox { size: usize, } -impl Kbox { +impl Kbox { /// Note that as this exists currently, data is passed by value /// into new, which means that the initial contents of a box MUST /// be composed on the stack and passed here to be copied into the @@ -68,6 +68,6 @@ impl DerefMut for Kbox { impl Drop for Kbox { fn drop(&mut self) { - kfree(self.inner as *mut usize, self.size); + kfree(self.inner as *mut usize); } } diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 352e404..0e6a1d6 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -37,6 +37,7 @@ struct Header { // 63 11 0 // #[repr(C)] +#[derive(Copy, Clone)] struct Zone { base: *mut usize, // This zone's address. next: usize, // Next zone's address + this zone's ref count. @@ -106,6 +107,14 @@ impl Header { next_header.write_to(next_addr); (next_header, next_addr) } + + fn merge(&mut self, next: Self, next_addr: *mut usize) { + assert!(next.is_free()); + assert!(self.is_free()); + let size = self.chunk_size() + next.chunk_size(); + self.set_size(size); + unsafe { next_addr.write(0); } + } } // Assumes the first byte of a zone is the zone header. @@ -130,7 +139,7 @@ impl Zone { fn get_refs(&self) -> usize { self.next & (4095) } - + fn get_next(&self) -> Result { let next_addr = self.next & !(PAGE_SIZE - 1); if next_addr == 0x0 { @@ -183,14 +192,29 @@ impl Zone { } } - fn next_zone(&self) -> Option { - match self.get_next() { - Ok(addr) => Some(Zone::from(addr as *mut usize)), - Err(_) => None, + fn next_zone(&self) -> Result { + if let Ok(addr) = self.get_next() { + Ok(Zone::from(addr as *mut usize)) + } else { + Err(KallocError::NullZone) + } + } + + // Only call from Kalloc.shrink_pool() to ensure this is not the first + // zone in the pool. + fn free_self(&mut self) { + assert!(self.get_refs() == 0); + let prev_base = unsafe { self.base.byte_sub(0x1000) }; + let mut prev_zone = Zone::from(prev_base); + if let Ok(next_zone) = self.next_zone() { + unsafe { prev_zone.write_next(next_zone.base); } + } else { + unsafe { prev_zone.write_next(0x0 as *mut usize); } } + let _ = pfree(Page::from(self.base)); } - // Scan this zone for a free chunk of the right size. 
+ // Scan this zone for the first free chunk of size >= requested size. // First 8 bytes of a zone is the Zone.next field. // Second 8 bytes is the first header of the zone. fn scan(&mut self, size: usize) -> Option<*mut usize> { @@ -199,13 +223,20 @@ impl Zone { // Start and end (start + PAGE_SIZE) bounds of zone. let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; // Get the first header in the zone. - let head = unsafe { Header::from(curr) }; + let mut head = Header::from(curr); while curr < end { let chunk_size = head.chunk_size(); if chunk_size < size || !head.is_free() { + let (mut prev, trail) = (head, curr); curr = curr.map_addr(|addr| addr + HEADER_SIZE + chunk_size); head = Header::from(curr); + + // TODO: Is not pretty, make pretty. + if prev.is_free() && head.is_free() { + prev.merge(head, curr); + (head, curr) = (prev, trail); + } } else { alloc_chunk(size, curr, self, &mut head); return Some(curr.map_addr(|addr| addr + HEADER_SIZE)) @@ -225,7 +256,7 @@ fn alloc_chunk(size: usize, ptr: *mut usize, zone: &mut Zone, head: &mut Header) } } -unsafe fn write_zone_header_pair(zone: Zone, header: Header) { +unsafe fn write_zone_header_pair(zone: &Zone, header: &Header) { let base = zone.base; base.write(zone.next); base.byte_add(1).write(header.fields); @@ -238,22 +269,28 @@ impl Kalloc { // New page is the first zone in the Kalloc pool. let zone = Zone::new(start.addr); let head = Header::new(MAX_CHUNK_SIZE); - unsafe { write_zone_header_pair(zone, head); } + unsafe { write_zone_header_pair(&zone, &head); } Kalloc { head: start.addr, end: start.addr.map_addr(|addr| addr + 0x1000), } } - fn grow_pool(&mut self, tail: Zone) -> Result<(Zone, Header), VmError> { + fn grow_pool(&self, tail: &mut Zone) -> Result<(Zone, Header), VmError> { let page = palloc()?; unsafe { tail.write_next(page.addr); } let zone = Zone::new(page.addr); let head = Header::new(MAX_CHUNK_SIZE); - unsafe { write_zone_header_pair(zone, head); } + unsafe { write_zone_header_pair(&zone, &head); } Ok((zone, head)) } + fn shrink_pool(&self, mut zone: Zone) { + if zone.base != self.head { + zone.free_self(); + } + } + /// Finds the first fit for the requested size. /// 1. Scan first zone from first to last for a free chunk that fits. /// 2a. If success: Return chunk's starting address (*mut usize). @@ -261,45 +298,50 @@ impl Kalloc { /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). /// 4. If success, go to step 2a. Else, fail with OOM. 
pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { - let mut curr = self.head; - let mut zone = Some(Zone::from(curr)); - let mut trail = zone.unwrap(); + let curr = self.head; + let end = self.end.map_addr(|addr| addr - 0x1000); + let mut zone = Zone::from(curr); + let mut trail = zone; - while let Some(mut next_zone) = zone { - if let Some(ptr) = next_zone.scan(size) { + while zone.base <= end { + if let Some(ptr) = zone.scan(size) { return Ok(ptr) } else { - trail = zone.unwrap(); - zone = zone.unwrap().next_zone(); + zone = zone.next_zone()?; } } - match self.grow_pool(trail) { + match self.grow_pool(&mut trail) { Ok((mut zone, mut head)) => { - let ptr = unsafe { zone.base.map_addr(|addr| addr + ZONE_SIZE + HEADER_SIZE) }; + let ptr = zone.base.map_addr(|addr| addr + ZONE_SIZE + HEADER_SIZE); alloc_chunk(size, ptr, &mut zone, &mut head); Ok(ptr) }, - Err(e) => Err(KallocError::OOM), + Err(_) => Err(KallocError::OOM), } - - //Err(KallocError::OOM) } // TODO if you call alloc in order and then free in order this // doesn't merge, as you can't merge backwards. Consider a merging // pass when allocting. pub fn free(&mut self, ptr: *mut usize) { - let chunk_loc = ptr.map_addr(|addr| addr - HEADER_SIZE); - let mut head = Header::from(chunk_loc); + // Assume that round down to nearest page is the current zone base addr. + let mut zone = Zone::from(ptr.map_addr(|addr| addr & !(PAGE_SIZE - 1))); + let head_ptr = ptr.map_addr(|addr| addr - HEADER_SIZE); + let mut head = Header::from(head_ptr); assert!(!head.is_free(), "Kalloc double free."); head.set_unused(); - let next = Header::from(chunk_loc.map_addr( + + if let Err(_) = zone.decrement_refs() { + self.shrink_pool(zone); + } + + let next = Header::from(head_ptr.map_addr( |addr| addr + HEADER_SIZE + head.chunk_size())); if !(next.is_free()) { // back to back free, merge head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) } - head.write_to(chunk_loc); + head.write_to(head_ptr); } } From f19c1f7c81acca43e63421d93cdd9a6a8476c5df Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 15:03:23 -0700 Subject: [PATCH 36/57] Fix write_zone_pair byte_add instead of add --- src/vm/vmalloc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 0e6a1d6..b517f39 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -259,11 +259,11 @@ fn alloc_chunk(size: usize, ptr: *mut usize, zone: &mut Zone, head: &mut Header) unsafe fn write_zone_header_pair(zone: &Zone, header: &Header) { let base = zone.base; base.write(zone.next); - base.byte_add(1).write(header.fields); + base.add(1).write(header.fields); } impl Kalloc { - fn new(start: Page) -> Self { + pub fn new(start: Page) -> Self { // Make sure start of allocation pool is page aligned. assert_eq!(start.addr.addr() & (PAGE_SIZE - 1), 0); // New page is the first zone in the Kalloc pool. From 1d27b583583bde45d5ee191c913e57169798d953 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 17:58:27 -0700 Subject: [PATCH 37/57] Fix header merge resizing --- src/lib.rs | 2 ++ src/vm.rs | 51 +++++++++++++++++++++++++++++++++++++++++++++-- src/vm/vmalloc.rs | 40 +++++++++++++++++++++++++------------ 3 files changed, 78 insertions(+), 15 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ec76e05..151692a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -111,6 +111,8 @@ fn main() -> ! 
{ unsafe { vm::test_palloc(); } + log!(Debug, "Testing kalloc and kfree..."); + vm::test_kalloc(); } else { //Interrupt other harts to init kpgtable. trap::init(); diff --git a/src/vm.rs b/src/vm.rs index 450d4a0..dbd28d6 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -43,7 +43,7 @@ pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { unsafe { VMALLOC.get_mut().unwrap().alloc(size) } } -pub fn kfree(ptr: *mut usize) { +pub fn kfree(ptr: *mut T) { unsafe { VMALLOC.get_mut().unwrap().free(ptr) } } @@ -72,7 +72,18 @@ pub fn init() -> Result<(), PagePool>{ } log!(Debug, "Successfully initialized kernel page pool..."); - // Map text, data, heap into kernel memory + unsafe { + match palloc() { + Ok(page) => { + if let Err(_) = VMALLOC.set(vmalloc::Kalloc::new(page)) { + panic!("VMALLOC double init...") + } + }, + Err(_) => panic!("Unable to allocate initial zone for vmalloc...") + } + } + + // Map text, data, stacks, heap into kernel page table. match kpage_init() { Ok(pt) => { pt.write_satp() @@ -93,3 +104,39 @@ pub unsafe fn test_palloc() { let _ = PAGEPOOL.get_mut().unwrap().pfree(allocd); log!(Debug, "Successful test of page allocation and freeing..."); } + +pub fn test_kalloc() { + use core::mem::size_of; + use core::ptr::write; + struct Atest { + xs: [u64; 4], + } + impl Atest { + fn new() -> Self { + let xs = [5; 4]; + Atest { xs } + } + } + let addr1 = kalloc(8).expect("Could not allocate addr1..."); + unsafe { addr1.write(0xdeadbeaf); } + + let addr2: *mut [u64; 2] = kalloc(16).expect("Could not allocate addr3...").cast(); + unsafe { write(addr2, [0x8BADF00D, 0xBAADF00D]) }; + + let t = Atest::new(); + let addr3: *mut Atest = kalloc(size_of::()).expect("Could not allocate addr3...").cast(); + unsafe { write(addr3, t); } + + kfree(addr1); + kfree(addr2); + kfree(addr3); + + let addr4 = kalloc(0xfc0).expect("Could not allocate addr4..."); + kfree(addr4); +} + + + + + + diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index b517f39..e2a76d7 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -74,7 +74,7 @@ impl Header { } fn is_free(&self) -> bool { - self.fields & !HEADER_USED == 0 + self.fields & HEADER_USED == 0 } fn set_used(&mut self) { @@ -93,7 +93,7 @@ impl Header { // Unsafe write header data to memory at dest. fn write_to(&self, dest: *mut usize) { unsafe { - dest.write_volatile(self.fields); + dest.write(self.fields); } } @@ -102,6 +102,7 @@ impl Header { let old_size = self.chunk_size(); let next_size = old_size - new_size; self.set_size(new_size); + self.write_to(cur_addr); let next_addr = cur_addr.map_addr(|addr| addr + HEADER_SIZE + new_size); let next_header = Header { fields: next_size - HEADER_SIZE }; // make space for inserted header next_header.write_to(next_addr); @@ -111,8 +112,9 @@ impl Header { fn merge(&mut self, next: Self, next_addr: *mut usize) { assert!(next.is_free()); assert!(self.is_free()); - let size = self.chunk_size() + next.chunk_size(); + let size = self.chunk_size() + HEADER_SIZE + next.chunk_size(); self.set_size(size); + //self.write_to(addr); unsafe { next_addr.write(0); } } } @@ -180,7 +182,7 @@ impl Zone { } } - fn decrement_refs(&mut self) -> Result<(), KallocError> { + fn decrement_refs(&mut self) -> Result { // Given a usize can't be < 0, I want to catch that and not cause a panic. // This may truly be unnecessary, but just want to be cautious. 
let new_count = self.get_refs() - 1; @@ -188,7 +190,7 @@ impl Zone { Err(KallocError::MinRefs) } else { unsafe { self.write_refs(new_count); } - Ok(()) + Ok(new_count) } } @@ -219,7 +221,11 @@ impl Zone { // Second 8 bytes is the first header of the zone. fn scan(&mut self, size: usize) -> Option<*mut usize> { // If size is less than min alloc size (8 bytes), pad. - let size = if size < 8 { 8 } else { size }; + let size = if size % 8 != 0 { + (size + 7) & !7 + } else { + size + }; // Start and end (start + PAGE_SIZE) bounds of zone. let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; // Get the first header in the zone. @@ -235,6 +241,7 @@ impl Zone { // TODO: Is not pretty, make pretty. if prev.is_free() && head.is_free() { prev.merge(head, curr); + prev.write_to(trail); (head, curr) = (prev, trail); } } else { @@ -249,6 +256,7 @@ impl Zone { fn alloc_chunk(size: usize, ptr: *mut usize, zone: &mut Zone, head: &mut Header) { zone.increment_refs().expect("Maximum zone allocation limit exceeded."); head.set_used(); + head.write_to(ptr); if size != head.chunk_size() { let (_, _) = head.split(size, ptr); @@ -324,7 +332,8 @@ impl Kalloc { // TODO if you call alloc in order and then free in order this // doesn't merge, as you can't merge backwards. Consider a merging // pass when allocting. - pub fn free(&mut self, ptr: *mut usize) { + pub fn free(&mut self, ptr: *mut T) { + let ptr: *mut usize = ptr.cast(); // Assume that round down to nearest page is the current zone base addr. let mut zone = Zone::from(ptr.map_addr(|addr| addr & !(PAGE_SIZE - 1))); let head_ptr = ptr.map_addr(|addr| addr - HEADER_SIZE); @@ -332,15 +341,20 @@ impl Kalloc { assert!(!head.is_free(), "Kalloc double free."); head.set_unused(); - if let Err(_) = zone.decrement_refs() { - self.shrink_pool(zone); + if let Ok(count) = zone.decrement_refs() { + if count == 0 { + self.shrink_pool(zone); + } + } else { + panic!("Negative zone refs count: {}", zone.get_refs()) } - let next = Header::from(head_ptr.map_addr( - |addr| addr + HEADER_SIZE + head.chunk_size())); - if !(next.is_free()) { + let next_ptr = ptr.map_addr(|addr| addr + head.chunk_size()); + let next = Header::from(next_ptr); + if next.is_free() { // back to back free, merge - head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) + //head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) + head.merge(next, next_ptr); } head.write_to(head_ptr); } From 2392613075f675c8323e34ea40e83ad8edef7438 Mon Sep 17 00:00:00 2001 From: tmu Date: Tue, 14 Mar 2023 18:47:09 -0700 Subject: [PATCH 38/57] possible worries and comment changes --- src/vm.rs | 10 +++++++--- src/vm/vmalloc.rs | 33 ++++++++++++++++++--------------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index dbd28d6..c3382b7 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -119,20 +119,24 @@ pub fn test_kalloc() { } let addr1 = kalloc(8).expect("Could not allocate addr1..."); unsafe { addr1.write(0xdeadbeaf); } - + let addr2: *mut [u64; 2] = kalloc(16).expect("Could not allocate addr3...").cast(); unsafe { write(addr2, [0x8BADF00D, 0xBAADF00D]) }; - + let t = Atest::new(); let addr3: *mut Atest = kalloc(size_of::()).expect("Could not allocate addr3...").cast(); unsafe { write(addr3, t); } - + kfree(addr1); kfree(addr2); kfree(addr3); let addr4 = kalloc(0xfc0).expect("Could not allocate addr4..."); + let addr5 = kalloc(8).expect("Could not allocate addr5..."); + unsafe {write(addr5, 0xee1f00d);} + kfree(addr5); kfree(addr4); + } diff 
--git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index e2a76d7..d215595 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -3,7 +3,7 @@ use core::mem::size_of; use crate::hw::param::PAGE_SIZE; use super::{palloc::Page, palloc, pfree, VmError}; -const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 = 4088. +const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 - 8 = 4080. const HEADER_SIZE: usize = size_of::
(); const ZONE_SIZE: usize = 8; const HEADER_USED: usize = 1 << 12; // Chunk is in use flag. @@ -119,12 +119,12 @@ impl Header { } } -// Assumes the first byte of a zone is the zone header. -// Next byte is the chunk header. +// Assumes the first usize of a zone is the zone header. +// Next usize is the chunk header. impl From<*mut usize> for Zone { fn from(src: *mut usize) -> Self { - Zone { - base: src, + Zone { + base: src, next: unsafe { src.read() } } } @@ -150,7 +150,7 @@ impl Zone { Ok(next_addr) } } - + // Read the next field to get the next zone address. // Discard this zone's refs count. // Write base address with next zone address and new refs count. @@ -174,8 +174,8 @@ impl Zone { fn increment_refs(&mut self) -> Result<(), KallocError> { let new_count = self.get_refs() + 1; - if new_count > 510 { - Err(KallocError::MaxRefs) + if new_count > 510 { + Err(KallocError::MaxRefs) } else { unsafe { self.write_refs(new_count); } Ok(()) @@ -201,13 +201,15 @@ impl Zone { Err(KallocError::NullZone) } } - + // Only call from Kalloc.shrink_pool() to ensure this is not the first // zone in the pool. fn free_self(&mut self) { assert!(self.get_refs() == 0); + todo!("Relies on sequential page allocation."); let prev_base = unsafe { self.base.byte_sub(0x1000) }; let mut prev_zone = Zone::from(prev_base); + // ^ BUG: not guaranteed sequential if let Ok(next_zone) = self.next_zone() { unsafe { prev_zone.write_next(next_zone.base); } } else { @@ -220,12 +222,13 @@ impl Zone { // First 8 bytes of a zone is the Zone.next field. // Second 8 bytes is the first header of the zone. fn scan(&mut self, size: usize) -> Option<*mut usize> { - // If size is less than min alloc size (8 bytes), pad. - let size = if size % 8 != 0 { + // Round to a 8 byte granularity + let size = if size % 8 != 0 { (size + 7) & !7 - } else { - size + } else { + size }; + // Start and end (start + PAGE_SIZE) bounds of zone. let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; // Get the first header in the zone. @@ -310,7 +313,7 @@ impl Kalloc { let end = self.end.map_addr(|addr| addr - 0x1000); let mut zone = Zone::from(curr); let mut trail = zone; - + while zone.base <= end { if let Some(ptr) = zone.scan(size) { return Ok(ptr) @@ -348,7 +351,7 @@ impl Kalloc { } else { panic!("Negative zone refs count: {}", zone.get_refs()) } - + let next_ptr = ptr.map_addr(|addr| addr + head.chunk_size()); let next = Header::from(next_ptr); if next.is_free() { From 16470b8506137c45e4857180bf1b3a87baf3c498 Mon Sep 17 00:00:00 2001 From: tmu Date: Tue, 14 Mar 2023 19:03:17 -0700 Subject: [PATCH 39/57] patch up shrink_pool as list traversal --- src/vm/vmalloc.rs | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index d215595..28fabf9 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -204,12 +204,12 @@ impl Zone { // Only call from Kalloc.shrink_pool() to ensure this is not the first // zone in the pool. 
- fn free_self(&mut self) { + fn free_self(&mut self, mut prev_zone: Zone) { assert!(self.get_refs() == 0); - todo!("Relies on sequential page allocation."); - let prev_base = unsafe { self.base.byte_sub(0x1000) }; - let mut prev_zone = Zone::from(prev_base); - // ^ BUG: not guaranteed sequential + // todo!("Relies on sequential page allocation."); + // let prev_base = unsafe { self.base.byte_sub(0x1000) }; + // let mut prev_zone = Zone::from(prev_base); + // // ^ BUG: not guaranteed sequential if let Ok(next_zone) = self.next_zone() { unsafe { prev_zone.write_next(next_zone.base); } } else { @@ -296,9 +296,20 @@ impl Kalloc { Ok((zone, head)) } - fn shrink_pool(&self, mut zone: Zone) { - if zone.base != self.head { - zone.free_self(); + fn shrink_pool(&self, mut to_free: Zone) { + if to_free.base != self.head { + let mut curr = Zone::new(self.head); + + while let Ok(next) = curr.next_zone() { + if to_free.base == next.base { + // found it + to_free.free_self(curr); + return; + } else { + curr = next; + } + } + panic!("Tried to free a zone that wasn't in the list...") } } @@ -346,6 +357,7 @@ impl Kalloc { if let Ok(count) = zone.decrement_refs() { if count == 0 { + // this is costly, as it's a list traversal self.shrink_pool(zone); } } else { From aa0d36e963318e1c9592789fcbe1dbdd87f93866 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 20:59:40 -0700 Subject: [PATCH 40/57] Make kalloc test unsafe --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 151692a..fab931c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,9 +110,9 @@ fn main() -> ! { log!(Debug, "Testing page allocation and freeing..."); unsafe { vm::test_palloc(); + log!(Debug, "Testing kalloc and kfree..."); + vm::test_kalloc(); } - log!(Debug, "Testing kalloc and kfree..."); - vm::test_kalloc(); } else { //Interrupt other harts to init kpgtable. 
trap::init(); From c8e9a36e113491618a0815911b9d578046598a9c Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 21:00:14 -0700 Subject: [PATCH 41/57] Add assert!s to kalloc test --- src/vm.rs | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index c3382b7..6a07c4a 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -105,7 +105,7 @@ pub unsafe fn test_palloc() { log!(Debug, "Successful test of page allocation and freeing..."); } -pub fn test_kalloc() { +pub unsafe fn test_kalloc() { use core::mem::size_of; use core::ptr::write; struct Atest { @@ -118,29 +118,40 @@ pub fn test_kalloc() { } } let addr1 = kalloc(8).expect("Could not allocate addr1..."); - unsafe { addr1.write(0xdeadbeaf); } + assert_eq!(addr1.sub(2).read(), 0x1); // Check zone refs + assert_eq!(addr1.sub(1).read(), 0x1008); // Check chunk header size + used + addr1.write(0xdeadbeaf); let addr2: *mut [u64; 2] = kalloc(16).expect("Could not allocate addr3...").cast(); - unsafe { write(addr2, [0x8BADF00D, 0xBAADF00D]) }; + assert_eq!(addr1.sub(2).read(), 0x2); // Check zone refs + assert_eq!((addr2 as *mut usize).sub(1).read(), 0x1010); // Check chunk header size + used + write(addr2, [0x8BADF00D, 0xBAADF00D]); let t = Atest::new(); let addr3: *mut Atest = kalloc(size_of::()).expect("Could not allocate addr3...").cast(); - unsafe { write(addr3, t); } + write(addr3, t); kfree(addr1); kfree(addr2); kfree(addr3); + assert_eq!(addr1.sub(2).read(), 0x0); // Check zone refs + assert_eq!((addr2 as *mut usize).sub(1).read(), 0x10); // Check chunk header size + used let addr4 = kalloc(0xfc0).expect("Could not allocate addr4..."); let addr5 = kalloc(8).expect("Could not allocate addr5..."); - unsafe {write(addr5, 0xee1f00d);} + write(addr5, 0xee1f00d); kfree(addr5); kfree(addr4); -} - - - - + let addr6: *mut [u64;510] = kalloc(0xff0).expect("Could not allocate addr6 (remainder of page)...").cast(); + // Don't do this: Will stack overflow. + // Foreboding for Kbox::new() correctness. + // let big_xs = [555; 510]; + // unsafe { write(addr6, big_xs); } + let addr7 = kalloc(8).expect("Could not allocate addr7..."); + kfree(addr6); + kfree(addr7); + log!(Debug, "Successful test of kalloc and kfree..."); +} From 8ca4222f406daee6b2ad3a5fbdc7f61f5d036163 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 21:00:57 -0700 Subject: [PATCH 42/57] Fix shrink_pool and accidental chunk merging after shrinking pool --- src/vm/vmalloc.rs | 86 ++++++++++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 34 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 28fabf9..75fa41e 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -54,6 +54,7 @@ pub enum KallocError { MinRefs, NullZone, OOM, + Void, } impl From<*mut usize> for Header { @@ -160,8 +161,8 @@ impl Zone { Err(_) => 0x0, Ok(ptr) => ptr, }; - - self.base.write(next_addr | new_count); + self.next = next_addr | new_count; + self.base.write(self.next); } // Read the current next field to get the refs count. 
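The hunk above keeps the cached Zone.next field and its in-memory copy in sync. That field
packs the next zone's page-aligned base address into the high bits and this zone's refs count
into the low 12 bits, matching get_next() and get_refs(). A small worked example of the packing
follows; the address value is hypothetical.

    // Hypothetical zone address; PAGE_SIZE is 4096 as in the kernel.
    let next_zone_base: usize = 0x8020_3000;  // page aligned, low 12 bits are zero
    let refs: usize = 3;                      // three live chunks in this zone
    let packed = next_zone_base | refs;       // what write_refs/write_next store
    assert_eq!(packed & !(4096 - 1), next_zone_base); // recovered by get_next()
    assert_eq!(packed & (4096 - 1), refs);            // recovered by get_refs()
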
@@ -194,11 +195,11 @@ impl Zone { } } - fn next_zone(&self) -> Result { + fn next_zone(&self) -> Option { if let Ok(addr) = self.get_next() { - Ok(Zone::from(addr as *mut usize)) + Some(Zone::from(addr as *mut usize)) } else { - Err(KallocError::NullZone) + None } } @@ -210,7 +211,7 @@ impl Zone { // let prev_base = unsafe { self.base.byte_sub(0x1000) }; // let mut prev_zone = Zone::from(prev_base); // // ^ BUG: not guaranteed sequential - if let Ok(next_zone) = self.next_zone() { + if let Some(next_zone) = self.next_zone() { unsafe { prev_zone.write_next(next_zone.base); } } else { unsafe { prev_zone.write_next(0x0 as *mut usize); } @@ -296,21 +297,28 @@ impl Kalloc { Ok((zone, head)) } - fn shrink_pool(&self, mut to_free: Zone) { - if to_free.base != self.head { - let mut curr = Zone::new(self.head); - - while let Ok(next) = curr.next_zone() { - if to_free.base == next.base { - // found it - to_free.free_self(curr); - return; + fn shrink_pool(&self, mut drop_zone: Zone) { + if drop_zone.base != self.head { + let mut curr_ptr = self.head; + //let mut curr_zone = Zone::from(curr_ptr); + + loop { + let curr_zone = Zone::from(curr_ptr); + + if let Some(next_zone) = curr_zone.next_zone() { + if drop_zone.base == next_zone.base { + drop_zone.free_self(curr_zone); + return; + } else { + curr_ptr = next_zone.base; + } } else { - curr = next; + break; } } - panic!("Tried to free a zone that wasn't in the list...") + panic!("Tried to free zone after: {:?}. Not in the pool...", curr_ptr); } + } /// Finds the first fit for the requested size. @@ -320,6 +328,7 @@ impl Kalloc { /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). /// 4. If success, go to step 2a. Else, fail with OOM. pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { + if size == 0 { return Err(KallocError::Void); } let curr = self.head; let end = self.end.map_addr(|addr| addr - 0x1000); let mut zone = Zone::from(curr); @@ -329,18 +338,21 @@ impl Kalloc { if let Some(ptr) = zone.scan(size) { return Ok(ptr) } else { - zone = zone.next_zone()?; - } - } - - match self.grow_pool(&mut trail) { - Ok((mut zone, mut head)) => { - let ptr = zone.base.map_addr(|addr| addr + ZONE_SIZE + HEADER_SIZE); - alloc_chunk(size, ptr, &mut zone, &mut head); - Ok(ptr) - }, - Err(_) => Err(KallocError::OOM), + zone = match zone.next_zone() { + Some(zone) => zone, + None => { + if let Ok((mut zone, mut head)) = self.grow_pool(&mut trail) { + let head_ptr = zone.base.map_addr(|addr| addr + ZONE_SIZE); + alloc_chunk(size, head_ptr, &mut zone, &mut head); + return Ok(head_ptr.map_addr(|addr| addr + HEADER_SIZE)) + } else { + return Err(KallocError::OOM) + } + } + } + }; } + Err(KallocError::OOM) } // TODO if you call alloc in order and then free in order this @@ -355,21 +367,27 @@ impl Kalloc { assert!(!head.is_free(), "Kalloc double free."); head.set_unused(); + let mut chunk_merge_flag = false; if let Ok(count) = zone.decrement_refs() { if count == 0 { // this is costly, as it's a list traversal self.shrink_pool(zone); + } else { + chunk_merge_flag = true; } + } else { panic!("Negative zone refs count: {}", zone.get_refs()) } - let next_ptr = ptr.map_addr(|addr| addr + head.chunk_size()); - let next = Header::from(next_ptr); - if next.is_free() { - // back to back free, merge - //head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) - head.merge(next, next_ptr); + if chunk_merge_flag { + let next_ptr = ptr.map_addr(|addr| addr + head.chunk_size()); + let next = Header::from(next_ptr); + if next.is_free() 
{ + // back to back free, merge + //head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) + head.merge(next, next_ptr); + } } head.write_to(head_ptr); } From c654f9f50480e9d6841bacb77e18f011d70d3975 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 21:05:50 -0700 Subject: [PATCH 43/57] Remove bitmap allocator --- src/vm.rs | 1 - src/vm/galloc.rs | 347 ----------------------------------------------- 2 files changed, 348 deletions(-) delete mode 100644 src/vm/galloc.rs diff --git a/src/vm.rs b/src/vm.rs index 6a07c4a..b8448a6 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -2,7 +2,6 @@ pub mod palloc; pub mod ptable; pub mod process; -pub mod galloc; pub mod vmalloc; use crate::hw::param::*; diff --git a/src/vm/galloc.rs b/src/vm/galloc.rs deleted file mode 100644 index 26d4d83..0000000 --- a/src/vm/galloc.rs +++ /dev/null @@ -1,347 +0,0 @@ -/// General allocation, basically malloc/kalloc type thing -use crate::hw::param::*; -use core::assert; -use crate::vm::*; - -/// This is either a data layer or an indirect layer -/// -/// If it is indirect, then the first u64 is the level. Level n points -/// to n-1, and 1 points to data layers -/// -/// If it is indirect, the second u64 is the valid/in use bits of the -/// corresponding u64s in the current header. -/// -/// If this is an indirect header, then all futher u64 are paired. The -/// even indexed (first) u64 is a pointer dwon one level. The odd -/// (second) one is the valid mask for that link. If the link is to a -/// data layer, then it corresponds to the parts of the data layer in -/// use. If the link is to another indirect layer, then ignore this -/// and decend and check the second u64 of that layer instead. (In -/// fact it should be marked invalid.) -/// -/// If this is a data layer, then the entire page is naturally aligned -/// data. By that I mean that a pow of 2 chunk of size n is n-byte -/// aligned. -/// -/// This allocator works on 64byte chunks - - -// should be 16 bytes big exactly -#[repr(C)] -#[derive(Clone, Copy)] -struct HeaderPair { - valid: u64, - down: *mut Header, -} - -#[repr(C)] -#[derive(Clone, Copy)] -struct Indirect { - level: u64, - valid: u64, - contents: [HeaderPair; 256], // 4096 / 16 -} - -#[derive(Clone, Copy)] -union Header { - data: [u64; 64], // 4096 / 64 - indirect: Indirect, -} - -pub struct GAlloc { - root: *mut Header, -} - -// not efficient. 
make a lower bit mask with said # of ones -fn make_mask(mut num_ones: u64) -> u64 { - let mut out = 0; - while num_ones > 0 { - out = (out << 1) | 1; - num_ones -= 1; - } - out -} - -// pow of two that fits s -/* stolen: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ -fn round_up(mut s:u64) -> u64 { - s -= 1; - for i in 1..64 { - s |= s >> i; - } - s + 1 -} - -/// Returns the needed number of units to contain the given size -fn chunk_size(mut size: u64) -> u64 { - size = round_up(size); - if size < 64 { - 1 - } else { - size / 64 - } -} - -fn get_page() -> Result<*mut usize, VmError> { - match unsafe { PAGEPOOL.get_mut().unwrap().palloc() } { - Err(e) => { - Err(e) - }, - Ok(page) => { - Ok(page.addr) - } - } -} - -fn free_page(addr: *mut usize) { - match unsafe { PAGEPOOL.get_mut().unwrap().pfree(Page::from(addr)) } { - Err(_) => { - panic!("Galloc double free passed to palloc"); - }, - Ok(()) => {} - } -} - - -impl Drop for GAlloc { - fn drop(&mut self) { - panic!("Dropped your general allocator") - } -} - -impl GAlloc { - pub fn new() -> Self { - let page = match get_page() { - Err(e) => { - panic!("Couldn't initialize the header for general alloc: {:?}", e) - }, - Ok(addr) => { - addr as *mut Header - } - }; - unsafe { - (*page).indirect.level = 1; - (*page).indirect.valid = 0; - } - // level 1 page with no valid pages - GAlloc { - root: page - } - } - - fn search_data_layer(size: u64, dl_mask: u64) -> Option { - let size = chunk_size(size); // number of units - let search_mask = make_mask(size); - - let mut i = 0; - while i < 64 { - if (dl_mask >> i) & search_mask == 0 { - // clear bits - return Some(i); - } else { - i += size; // skip size places - } - } - None - } - - unsafe fn walk_alloc(size: usize, root: &mut Header) -> Result<*mut usize, VmError> { - let mut open: isize = -1; // neg if full, 0-31 for first empty - if root.indirect.level != 1 { - for i in 0..32 { - if (root.indirect.valid >> i) & 0x1 == 0{ - // invalid down link - if open != -1 { open = i; } - } else { - // this is a down link we can follow - let down = &mut *root.indirect.contents[i as usize].down; - match Self::walk_alloc(size, down) { - Err(_) => {}, - ret => { return ret; } - } - } - } - // checked all valid down links and none of them are valid - // now check if we can add a valid one (was there a hole) - - if open != -1 { - let page: *mut Header = match get_page() { - Err(e) => { - return Err(e); - }, - Ok(addr) => { - addr as *mut Header - } - }; - // insert a new page - let p_ref = &mut *page; - p_ref.indirect.level = root.indirect.level -1; - p_ref.indirect.valid = 0; - root.indirect.contents[open as usize].down = p_ref; - // root.indirect.contents[open as usize].valid is not needed, as p_ref is not a data layer - root.indirect.valid = root.indirect.valid | 1 << open; - return Self::walk_alloc(size, p_ref); - } - // no space and no holes for further intermediate levels - // in any case, pass the error up - return Err(VmError::GNoSpace); - } else { - // this is a level 1 layer, and points to data layers - for i in 0..32 { - let i = i as usize; - if (root.indirect.valid >> i) & 0x1 == 0 { - // this is a data page down link that isn't in use - if open == -1 { open = i as isize; } - continue; - } - - match Self::search_data_layer(size as u64, - root.indirect.contents[i].valid) { - None => {}, - Some(idx) => { - // found space, mark and make pointer - let in_use = chunk_size(size as u64); // how many to mark in use - root.indirect.contents[i].valid = - root.indirect.contents[i].valid 
| (make_mask(in_use) << idx); - let data_page = root.indirect.contents[i].down as *mut usize; - return Ok(data_page.offset(idx as isize)); - } - } - } - // couldn't find anything, try to add another data page - if open != -1 { - let open = open as usize; - let page: *mut Header = match get_page() { - Err(e) => { - return Err(e); - }, - Ok(addr) => { - addr as *mut Header - } - }; - root.indirect.contents[open].down = page; - root.indirect.contents[open].valid = 0; // all free - // don't set page meta, because this is a data page - root.indirect.valid = root.indirect.valid | (1 << open); // down link valid - return Self::walk_alloc(size, &mut *(root.indirect.contents[open].down)); - } - return Err(VmError::GNoSpace); - } - } - - pub fn alloc(&mut self, size: usize) -> Result<*mut usize, VmError> { - assert!(size <= PAGE_SIZE, "GAlloc is only sub-page size"); - match unsafe {Self::walk_alloc(size, &mut (*self.root)) } { - Ok(ret) => { Ok(ret) }, - Err(_) => { - // alloc failed. try to bump the root up (note that - // this may also fail if the issue was out of pages) - let mut page: *mut Header = match get_page() { - Err(e) => { - return Err(e); - }, - Ok(addr) => { - addr as *mut Header - } - }; - unsafe { - (*page).indirect.level = (*self.root).indirect.level + 1; // bump level - (*page).indirect.valid = 1; // single valid page (old root) - (*page).indirect.contents[0] = HeaderPair { - valid: 0, // unused since root is not a data page - down: self.root, - }; - } - self.root = page; - match unsafe { Self::walk_alloc(size, &mut (*self.root)) } { - Err(e) => { - Err(e) - }, - Ok(addr) => { - Ok(addr) - } - } - } - } - } - - // returns (did_we_find_it, should_we_keep_this_branch) - unsafe fn walk_dealloc(ptr: *mut usize, size: usize, root: &mut Header) -> (bool, bool) { - let test_ptr = ptr as usize & !(PAGE_SIZE - 1); // should match data_page base pointer - if root.indirect.level != 1 { - // down links are not data pages - let valid = root.indirect.valid; - if valid == 0 { - return (false, false); - } - let mut should_we_keep = false; - for i in 0..32 { - if (valid >> i) & 1 == 0 {continue;} - match Self::walk_dealloc(ptr, size, &mut (*root.indirect.contents[i].down)) { - (true, true) => { - return (true, true); - }, - (false, true) => { - // keep searching - should_we_keep = true; - }, - (found, false) => { - // trim branch and maybe report findings - root.indirect.valid = root.indirect.valid & !(1 << i); - // free lower page - free_page(root.indirect.contents[i].down as *mut usize); - if root.indirect.valid == 0 { - // nothing more to check, report findings - return (found, false); - } else if found { - return (true, true); - } - } - } - } - if should_we_keep { - return (false, true); - } else { - return (false, false); - } - } else { - // downlinks are data pages, search for match - let valid = root.indirect.valid; - for i in 0..32 { - if (valid >> i) & 1 == 0 {continue;} - if root.indirect.contents[i].down as usize == test_ptr { - // match! 
- let offset = ptr as usize & (PAGE_SIZE - 1); - let clear_mask = make_mask(chunk_size(size as u64)); - root.indirect.contents[i].valid = - root.indirect.contents[i].valid & !(clear_mask << offset); - if root.indirect.contents[i].valid == 0 { - // free data page - root.indirect.valid = valid & !(1 << i); - free_page(root.indirect.contents[i].down as *mut usize); - if root.indirect.valid == 0 { - // cleanup this indirect layer - return (true, false); - } else { - return (true, true); - } - } else { - return (true, true); - } - } - } - if valid == 0 { - return (false, false); - } else { - return (false, true); - } - } - } - - pub fn dealloc(&mut self, ptr: *mut usize, size: usize) { - unsafe { - // TODO consider mechanism for undoing root bump / when to do that - Self::walk_dealloc(ptr, size, &mut (*self.root)); - } - } -} From a79ff2bdcaaf18bcfe4e9fccf919d2fb768a07d5 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 21:35:45 -0700 Subject: [PATCH 44/57] Move alloc size adjustment into Kalloc::alloc from Zone::scan --- src/vm/vmalloc.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 75fa41e..9478581 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -223,13 +223,6 @@ impl Zone { // First 8 bytes of a zone is the Zone.next field. // Second 8 bytes is the first header of the zone. fn scan(&mut self, size: usize) -> Option<*mut usize> { - // Round to a 8 byte granularity - let size = if size % 8 != 0 { - (size + 7) & !7 - } else { - size - }; - // Start and end (start + PAGE_SIZE) bounds of zone. let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; // Get the first header in the zone. @@ -328,7 +321,16 @@ impl Kalloc { /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). /// 4. If success, go to step 2a. Else, fail with OOM. pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { - if size == 0 { return Err(KallocError::Void); } + if size == 0 { + return Err(KallocError::Void); + } + // Round to a 8 byte granularity + let size = if size % 8 != 0 { + (size + 7) & !7 + } else { + size + }; + let curr = self.head; let end = self.end.map_addr(|addr| addr - 0x1000); let mut zone = Zone::from(curr); From ba727a8ef970ae34e3979d5ad24e57d0a1a56ca0 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 21:36:02 -0700 Subject: [PATCH 45/57] Fixup mod imports and add new test case --- src/vm.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index b8448a6..8e32f99 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -8,12 +8,13 @@ use crate::hw::param::*; use crate::mem::Kbox; use palloc::*; use ptable::kpage_init; //, PageTable}; +use vmalloc::Kalloc; use process::Process; use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. static mut PAGEPOOL: OnceCell = OnceCell::new(); -static mut VMALLOC: OnceCell = OnceCell::new(); +static mut VMALLOC: OnceCell = OnceCell::new(); /// (Still growing) list of kernel VM system error cases. 
#[derive(Debug)] @@ -42,6 +43,7 @@ pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { unsafe { VMALLOC.get_mut().unwrap().alloc(size) } } + pub fn kfree(ptr: *mut T) { unsafe { VMALLOC.get_mut().unwrap().free(ptr) } } @@ -148,7 +150,7 @@ pub unsafe fn test_kalloc() { // let big_xs = [555; 510]; // unsafe { write(addr6, big_xs); } - let addr7 = kalloc(8).expect("Could not allocate addr7..."); + let addr7 = kalloc(9).expect("Could not allocate addr7..."); kfree(addr6); kfree(addr7); From bd9df12e69119f413e5b92a9d8ce85106f3d01c6 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 23:08:11 -0700 Subject: [PATCH 46/57] Documentation + fix merge off the end of a zone bug --- src/vm/vmalloc.rs | 80 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 5 deletions(-) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 9478581..f7ab9ea 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -1,3 +1,4 @@ +//! Kernel Virtual Memory Allocator. use core::mem::size_of; use crate::hw::param::PAGE_SIZE; @@ -43,6 +44,70 @@ struct Zone { next: usize, // Next zone's address + this zone's ref count. } +/// Kernel Virtual Memory Allocator. +/// Kalloc is comprised of `Zones` (physical pages). Each +/// zone is broken up into smaller chunks as memory is allocated +/// and merged into larger chunks as memory is deallocated. +/// Each allocation, `x` , must satisfy `0<= x <= 4080` bytes. +/// All allocations will be automatically rounded up to be +/// 8 byte aligned. +/// +/// A generic zone with the first zone containing 1 in use chunk and +/// a second full zone with one in use chunk might look like: +/// ```text +/// Kalloc { +/// +/// ┌────────start +/// │ +/// ┌──┼────────end +/// │ │ +/// │ │ } +/// │ │ +/// │ │ ┌──────────────────────────────────────┬──────────────┐ +/// │ └─┬──┤► 0x80089e000 │ 0x1 │ 0x80089d000 +/// │ │ └──────────────────────────────────────┴──────────────┘ +/// │ │ 63 11 0 +/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ │ │ Unused / Reserved │1│ 0x008 │ 0x80089d008 +/// │ │ └────────────────────────────────────┴─┴──────────────┘ +/// │ │ 63 12 11 0 +/// │ │ ┌─────────────────────────────────────────────────────┐ +/// │ │ │ 0x8BADF00D │ 0x80089d010 +/// │ │ └─────────────────────────────────────────────────────┘ +/// │ │ 63 0 +/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ │ │ Unused / Reserved │0│ 0xfe0 │ 0x80089d018 +/// │ │ └────────────────────────────────────┴─┴──────────────┘ +/// │ │ 63 12 11 0 +/// │ │ +/// │ │ +/// │ │ ... +/// │ │ +/// │ │ ┌──────────────────────────────────────┬──────────────┐ +/// │ └──► 0x0 │ 0x1 │ 0x80089e000 +/// │ └──────────────────────────────────────┴──────────────┘ +/// │ 63 11 0 +/// │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ │ Unused / Reserved │1│ 0xff0 │ 0x80089e008 +/// │ └────────────────────────────────────┴─┴──────────────┘ +/// │ 63 12 11 0 +/// │ ┌─────────────────────────────────────────────────────┐ +/// │ │ 0x0 │ 0x80089e010 +/// │ └ ┘ +/// │ 63 │ 0 +/// │ │ +/// │ │ [usize; 510] +/// │ │ +/// │ │ +/// │ │ +/// │ ▼ +/// │ ┌ │ +/// │ │ 0x1fd │ 0x80089eff8 +/// │ └─────────────────────────────────────────────────────┘ +/// │ 63 0 +/// │ +/// └───────────────────────────────────────────────────────────────► 0x80089d000 +///``` pub struct Kalloc { head: *mut usize, // Address of first zone. 
end: *mut usize, @@ -268,6 +333,10 @@ unsafe fn write_zone_header_pair(zone: &Zone, header: &Header) { } impl Kalloc { + /// The virtual memory kernel allocator requires at least + /// one page to use as a `Zone`. On initialization, create + /// a new zone and initialize the memory with a zone and + /// chunk header. pub fn new(start: Page) -> Self { // Make sure start of allocation pool is page aligned. assert_eq!(start.addr.addr() & (PAGE_SIZE - 1), 0); @@ -319,7 +388,7 @@ impl Kalloc { /// 2a. If success: Return chunk's starting address (*mut usize). /// 2b. Else, move to next zone and go back to step 1. /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). - /// 4. If success, go to step 2a. Else, fail with OOM. + /// 4. If 3. success, allocate from first chunk in new page. Else, fail with OOM. pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { if size == 0 { return Err(KallocError::Void); @@ -357,9 +426,10 @@ impl Kalloc { Err(KallocError::OOM) } - // TODO if you call alloc in order and then free in order this - // doesn't merge, as you can't merge backwards. Consider a merging - // pass when allocting. + /// 1. Calculate the header offset from the data pointer. + /// 2. Calculate the zone offset from the data pointer. + /// 3. Check if zone refs count is 0, if so, release zone. + /// 4. If zone refs count != 0, try to merge this freed chunk. pub fn free(&mut self, ptr: *mut T) { let ptr: *mut usize = ptr.cast(); // Assume that round down to nearest page is the current zone base addr. @@ -385,7 +455,7 @@ impl Kalloc { if chunk_merge_flag { let next_ptr = ptr.map_addr(|addr| addr + head.chunk_size()); let next = Header::from(next_ptr); - if next.is_free() { + if next.is_free() && next_ptr < zone.base.map_addr(|addr| addr + 0x1000) { // back to back free, merge //head.set_size(head.chunk_size() + HEADER_SIZE + next.chunk_size()) head.merge(next, next_ptr); From 3863579f5d9578e4bf9f043034d587f8e2e11a2b Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 23:08:29 -0700 Subject: [PATCH 47/57] Fix cargo docs --- src/hw/riscv.rs | 2 +- src/lock/mutex.rs | 7 +++---- src/vm.rs | 14 +++++++++++--- src/vm/process.rs | 2 ++ 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/src/hw/riscv.rs b/src/hw/riscv.rs index f94496f..9797acf 100644 --- a/src/hw/riscv.rs +++ b/src/hw/riscv.rs @@ -221,7 +221,7 @@ pub fn read_pmpcfg0() -> usize { } /// Just for curiosity's sake: -/// https://github.com/rust-lang/rust/issues/82753 +/// /// tp := thread pointer register. /// This way we can query a hart's hartid and store it in tp reg. pub fn write_tp(id: u64) { diff --git a/src/lock/mutex.rs b/src/lock/mutex.rs index 46bb371..7980260 100644 --- a/src/lock/mutex.rs +++ b/src/lock/mutex.rs @@ -1,8 +1,6 @@ //! Spinlock mutex implementation /// Inspiration taken in no small part from the awesome: -/// + https://marabos.nl/atomics/building-locks.html#mutex -/// as well as: -/// + https://github.com/westerndigitalcorporation/RISC-V-Linux/blob/master/linux/Documentation/locking/mutex-design.txt +/// /// /// Opportunity for improvement on interrupt safe locks. 
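// A minimal sketch (not from these patches) of how the chunk-header words in
// the Kalloc zone diagram added in the previous patch decode, assuming bit 12
// is the "used" flag and the low 12 bits hold the chunk size; the helper name
// `decode_chunk_header` is hypothetical.
fn decode_chunk_header(fields: usize) -> (bool, usize) {
    const HEADER_USED: usize = 1 << 12;
    (fields & HEADER_USED != 0, fields & 0xfff)
}

fn chunk_header_example() {
    // 0x1008: the first header in the diagram, in use, 8-byte chunk.
    assert_eq!(decode_chunk_header(0x1008), (true, 8));
    // 0xfe0: the trailing header, free, 4064 bytes left in the zone.
    assert_eq!(decode_chunk_header(0xfe0), (false, 0xfe0));
}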
use core::cell::UnsafeCell; @@ -47,7 +45,8 @@ pub struct Mutex { unsafe impl Sync for Mutex {} impl Mutex { - /// https://doc.rust-lang.org/reference/const_eval.html + /// Reference: + /// pub const fn new(value: T) -> Self { Mutex { lock_state: AtomicU32::new(0), diff --git a/src/vm.rs b/src/vm.rs index 8e32f99..b70bfc4 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -27,23 +27,27 @@ pub enum VmError { Koom, } +/// Moving to `mod process` pub trait Resource {} +/// Moving to `mod ` pub struct TaskList { head: Option>, } +/// Moving to `mod ` pub struct TaskNode { proc: Option>, prev: Option>, next: Option>, } +/// See `vm::vmalloc::Kalloc::alloc`. pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { unsafe { VMALLOC.get_mut().unwrap().alloc(size) } } - +/// See `vm::vmalloc::Kalloc::free`. pub fn kfree(ptr: *mut T) { unsafe { VMALLOC.get_mut().unwrap().free(ptr) } } @@ -60,6 +64,7 @@ fn pfree(page: Page) -> Result<(), VmError> { /// First, setup the kernel physical page pool. /// We start the pool at the end of the .bss section, and stop at the end of physical memory. /// Next, we map physical memory into the kernel's physical memory 1:1. +/// Next, initialize the kernel virtual memory allocator pool. /// Finally we set the global kernel page table `KPGTABLE` variable to point to the /// kernel's page table struct. pub fn init() -> Result<(), PagePool>{ @@ -96,8 +101,7 @@ pub fn init() -> Result<(), PagePool>{ Ok(()) } - - +/// A test designed to be used with GDB. pub unsafe fn test_palloc() { let allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); //println!("allocd addr: {:?}", allocd.addr); @@ -106,6 +110,10 @@ pub unsafe fn test_palloc() { log!(Debug, "Successful test of page allocation and freeing..."); } +/// A test that is more insightful when run with GDB. +/// Likely missing some edge cases like: +/// + Free the last two chunks in a zone. Ensure you don't +/// try to merge out of zone bounds. pub unsafe fn test_kalloc() { use core::mem::size_of; use core::ptr::write; diff --git a/src/vm/process.rs b/src/vm/process.rs index f61bc5e..3752e40 100644 --- a/src/vm/process.rs +++ b/src/vm/process.rs @@ -1,3 +1,5 @@ +//! Process handle and utilities. + use crate::hw::HartContext; use crate::trap::TrapFrame; use crate::vm::ptable::PageTable; From 8caea325c460dca4b931b4f8269df429d20867bc Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 23:30:56 -0700 Subject: [PATCH 48/57] rustfmt --- src/vm.rs | 38 +++++++++--------- src/vm/palloc.rs | 6 ++- src/vm/process.rs | 4 +- src/vm/ptable.rs | 20 ++++------ src/vm/vmalloc.rs | 99 +++++++++++++++++++++++++++-------------------- 5 files changed, 90 insertions(+), 77 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index b70bfc4..f0eaf73 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -1,16 +1,16 @@ //! Virtual Memory pub mod palloc; -pub mod ptable; pub mod process; +pub mod ptable; pub mod vmalloc; use crate::hw::param::*; use crate::mem::Kbox; +use core::cell::OnceCell; use palloc::*; +use process::Process; use ptable::kpage_init; //, PageTable}; use vmalloc::Kalloc; -use process::Process; -use core::cell::OnceCell; /// Global physical page pool allocated by the kernel physical allocator. static mut PAGEPOOL: OnceCell = OnceCell::new(); @@ -67,10 +67,10 @@ fn pfree(page: Page) -> Result<(), VmError> { /// Next, initialize the kernel virtual memory allocator pool. /// Finally we set the global kernel page table `KPGTABLE` variable to point to the /// kernel's page table struct. 
-pub fn init() -> Result<(), PagePool>{ +pub fn init() -> Result<(), PagePool> { unsafe { match PAGEPOOL.set(PagePool::new(bss_end(), dram_end())) { - Ok(_) => {}, + Ok(_) => {} Err(_) => { panic!("vm double init.") } @@ -84,16 +84,14 @@ pub fn init() -> Result<(), PagePool>{ if let Err(_) = VMALLOC.set(vmalloc::Kalloc::new(page)) { panic!("VMALLOC double init...") } - }, - Err(_) => panic!("Unable to allocate initial zone for vmalloc...") + } + Err(_) => panic!("Unable to allocate initial zone for vmalloc..."), } } // Map text, data, stacks, heap into kernel page table. match kpage_init() { - Ok(pt) => { - pt.write_satp() - }, + Ok(pt) => pt.write_satp(), Err(_) => { panic!(); } @@ -127,24 +125,26 @@ pub unsafe fn test_kalloc() { } } let addr1 = kalloc(8).expect("Could not allocate addr1..."); - assert_eq!(addr1.sub(2).read(), 0x1); // Check zone refs - assert_eq!(addr1.sub(1).read(), 0x1008); // Check chunk header size + used + assert_eq!(addr1.sub(2).read(), 0x1); // Check zone refs + assert_eq!(addr1.sub(1).read(), 0x1008); // Check chunk header size + used addr1.write(0xdeadbeaf); let addr2: *mut [u64; 2] = kalloc(16).expect("Could not allocate addr3...").cast(); - assert_eq!(addr1.sub(2).read(), 0x2); // Check zone refs - assert_eq!((addr2 as *mut usize).sub(1).read(), 0x1010); // Check chunk header size + used + assert_eq!(addr1.sub(2).read(), 0x2); // Check zone refs + assert_eq!((addr2 as *mut usize).sub(1).read(), 0x1010); // Check chunk header size + used write(addr2, [0x8BADF00D, 0xBAADF00D]); let t = Atest::new(); - let addr3: *mut Atest = kalloc(size_of::()).expect("Could not allocate addr3...").cast(); + let addr3: *mut Atest = kalloc(size_of::()) + .expect("Could not allocate addr3...") + .cast(); write(addr3, t); kfree(addr1); kfree(addr2); kfree(addr3); - assert_eq!(addr1.sub(2).read(), 0x0); // Check zone refs - assert_eq!((addr2 as *mut usize).sub(1).read(), 0x10); // Check chunk header size + used + assert_eq!(addr1.sub(2).read(), 0x0); // Check zone refs + assert_eq!((addr2 as *mut usize).sub(1).read(), 0x10); // Check chunk header size + used let addr4 = kalloc(0xfc0).expect("Could not allocate addr4..."); let addr5 = kalloc(8).expect("Could not allocate addr5..."); @@ -152,7 +152,9 @@ pub unsafe fn test_kalloc() { kfree(addr5); kfree(addr4); - let addr6: *mut [u64;510] = kalloc(0xff0).expect("Could not allocate addr6 (remainder of page)...").cast(); + let addr6: *mut [u64; 510] = kalloc(0xff0) + .expect("Could not allocate addr6 (remainder of page)...") + .cast(); // Don't do this: Will stack overflow. // Foreboding for Kbox::new() correctness. // let big_xs = [555; 510]; diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index ab811ce..b2a6ca0 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -120,8 +120,10 @@ impl Page { /// Read the prev, next pointers of a page in the free list. fn read_free(&mut self) -> (*mut usize, *mut usize) { unsafe { - (self.addr.read_volatile() as *mut usize, - self.addr.add(1).read_volatile() as *mut usize) + ( + self.addr.read_volatile() as *mut usize, + self.addr.add(1).read_volatile() as *mut usize, + ) } } } diff --git a/src/vm/process.rs b/src/vm/process.rs index 3752e40..64fcf27 100644 --- a/src/vm/process.rs +++ b/src/vm/process.rs @@ -1,11 +1,11 @@ //! Process handle and utilities. 
+use crate::collection::BalBst; use crate::hw::HartContext; +use crate::mem::Kbox; use crate::trap::TrapFrame; use crate::vm::ptable::PageTable; use crate::vm::Resource; -use crate::collection::BalBst; -use crate::mem::Kbox; pub struct Process { id: usize, diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index 67e6192..47eedb4 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -35,7 +35,7 @@ pub struct PageTable { } #[inline(always)] -fn vpn(ptr: VirtAddress, level: usize) -> usize{ +fn vpn(ptr: VirtAddress, level: usize) -> usize { ptr.addr() >> (12 + 9 * level) & 0x1FF } @@ -62,7 +62,7 @@ macro_rules! PteSetFlag { } #[inline(always)] -fn phy_to_satp(ptr: PhysAddress) -> usize{ +fn phy_to_satp(ptr: PhysAddress) -> usize { (1 << 63) | (ptr.addr() >> 12) } @@ -120,10 +120,7 @@ unsafe fn walk(pt: PageTable, va: VirtAddress, alloc_new: bool) -> Result<*mut P true => PageTable::from(*next), false => { if alloc_new { - match PAGEPOOL - .get_mut() - .unwrap() - .palloc() { + match PAGEPOOL.get_mut().unwrap().palloc() { Ok(pg) => { *next = PteSetFlag!(phy_to_pte(pg.addr), PTE_VALID); PageTable::from(phy_to_pte(pg.addr)) @@ -269,7 +266,7 @@ pub fn kpage_init() -> Result { s ); } - + // This maps hart 0, 1 stack pages in opposite order as entry.S. Shouln't necessarily be a // problem. let base = intstacks_start(); @@ -281,9 +278,9 @@ pub fn kpage_init() -> Result { m_intstack, m_intstack, PAGE_SIZE, - PTE_READ | PTE_WRITE + PTE_READ | PTE_WRITE, ) { - return Err(intstack_m) + return Err(intstack_m); } // Map hart i s-mode handler let s_intstack = unsafe { m_intstack.byte_add(PAGE_SIZE * 2) }; @@ -292,16 +289,15 @@ pub fn kpage_init() -> Result { s_intstack, s_intstack, PAGE_SIZE, - PTE_READ | PTE_WRITE + PTE_READ | PTE_WRITE, ) { - return Err(intstack_s) + return Err(intstack_s); } log!( Debug, "Succesfully mapped interrupt stack for hart {} into kernel pgtable...", i ); - } if let Err(bss_map) = page_map( diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index f7ab9ea..33e6503 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -1,8 +1,8 @@ //! Kernel Virtual Memory Allocator. use core::mem::size_of; +use super::{palloc, palloc::Page, pfree, VmError}; use crate::hw::param::PAGE_SIZE; -use super::{palloc::Page, palloc, pfree, VmError}; const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 - 8 = 4080. const HEADER_SIZE: usize = size_of::
(); @@ -40,8 +40,8 @@ struct Header { #[repr(C)] #[derive(Copy, Clone)] struct Zone { - base: *mut usize, // This zone's address. - next: usize, // Next zone's address + this zone's ref count. + base: *mut usize, // This zone's address. + next: usize, // Next zone's address + this zone's ref count. } /// Kernel Virtual Memory Allocator. @@ -56,26 +56,26 @@ struct Zone { /// a second full zone with one in use chunk might look like: /// ```text /// Kalloc { -/// +/// /// ┌────────start /// │ /// ┌──┼────────end /// │ │ /// │ │ } /// │ │ -/// │ │ ┌──────────────────────────────────────┬──────────────┐ +/// │ │ ┌──────────────────────────────────────┬──────────────┐ (zone header) /// │ └─┬──┤► 0x80089e000 │ 0x1 │ 0x80089d000 /// │ │ └──────────────────────────────────────┴──────────────┘ /// │ │ 63 11 0 -/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ (chunk header) /// │ │ │ Unused / Reserved │1│ 0x008 │ 0x80089d008 /// │ │ └────────────────────────────────────┴─┴──────────────┘ /// │ │ 63 12 11 0 -/// │ │ ┌─────────────────────────────────────────────────────┐ +/// │ │ ┌─────────────────────────────────────────────────────┐ (data) /// │ │ │ 0x8BADF00D │ 0x80089d010 /// │ │ └─────────────────────────────────────────────────────┘ /// │ │ 63 0 -/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ │ ┌────────────────────────────────────┬─┬──────────────┐ (chunk header) /// │ │ │ Unused / Reserved │0│ 0xfe0 │ 0x80089d018 /// │ │ └────────────────────────────────────┴─┴──────────────┘ /// │ │ 63 12 11 0 @@ -83,15 +83,15 @@ struct Zone { /// │ │ /// │ │ ... /// │ │ -/// │ │ ┌──────────────────────────────────────┬──────────────┐ +/// │ │ ┌──────────────────────────────────────┬──────────────┐ (zone header) /// │ └──► 0x0 │ 0x1 │ 0x80089e000 /// │ └──────────────────────────────────────┴──────────────┘ /// │ 63 11 0 -/// │ ┌────────────────────────────────────┬─┬──────────────┐ +/// │ ┌────────────────────────────────────┬─┬──────────────┐ (chunk header) /// │ │ Unused / Reserved │1│ 0xff0 │ 0x80089e008 /// │ └────────────────────────────────────┴─┴──────────────┘ /// │ 63 12 11 0 -/// │ ┌─────────────────────────────────────────────────────┐ +/// │ ┌─────────────────────────────────────────────────────┐ (data) /// │ │ 0x0 │ 0x80089e010 /// │ └ ┘ /// │ 63 │ 0 @@ -101,11 +101,11 @@ struct Zone { /// │ │ /// │ │ /// │ ▼ -/// │ ┌ │ +/// │ ┌ │ (data) /// │ │ 0x1fd │ 0x80089eff8 /// │ └─────────────────────────────────────────────────────┘ /// │ 63 0 -/// │ +/// │ (end of pool) /// └───────────────────────────────────────────────────────────────► 0x80089d000 ///``` pub struct Kalloc { @@ -170,7 +170,9 @@ impl Header { self.set_size(new_size); self.write_to(cur_addr); let next_addr = cur_addr.map_addr(|addr| addr + HEADER_SIZE + new_size); - let next_header = Header { fields: next_size - HEADER_SIZE }; // make space for inserted header + let next_header = Header { + fields: next_size - HEADER_SIZE, + }; // make space for inserted header next_header.write_to(next_addr); (next_header, next_addr) } @@ -181,7 +183,9 @@ impl Header { let size = self.chunk_size() + HEADER_SIZE + next.chunk_size(); self.set_size(size); //self.write_to(addr); - unsafe { next_addr.write(0); } + unsafe { + next_addr.write(0); + } } } @@ -191,17 +195,14 @@ impl From<*mut usize> for Zone { fn from(src: *mut usize) -> Self { Zone { base: src, - next: unsafe { src.read() } + next: unsafe { src.read() }, } } } impl Zone { fn new(base: *mut 
usize) -> Self { - Zone { - base, - next: 0x0, - } + Zone { base, next: 0x0 } } fn get_refs(&self) -> usize { @@ -243,7 +244,9 @@ impl Zone { if new_count > 510 { Err(KallocError::MaxRefs) } else { - unsafe { self.write_refs(new_count); } + unsafe { + self.write_refs(new_count); + } Ok(()) } } @@ -255,7 +258,9 @@ impl Zone { if (new_count as isize) < 0 { Err(KallocError::MinRefs) } else { - unsafe { self.write_refs(new_count); } + unsafe { + self.write_refs(new_count); + } Ok(new_count) } } @@ -277,9 +282,13 @@ impl Zone { // let mut prev_zone = Zone::from(prev_base); // // ^ BUG: not guaranteed sequential if let Some(next_zone) = self.next_zone() { - unsafe { prev_zone.write_next(next_zone.base); } + unsafe { + prev_zone.write_next(next_zone.base); + } } else { - unsafe { prev_zone.write_next(0x0 as *mut usize); } + unsafe { + prev_zone.write_next(0x0 as *mut usize); + } } let _ = pfree(Page::from(self.base)); } @@ -289,7 +298,7 @@ impl Zone { // Second 8 bytes is the first header of the zone. fn scan(&mut self, size: usize) -> Option<*mut usize> { // Start and end (start + PAGE_SIZE) bounds of zone. - let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE/8)) }; + let (mut curr, end) = unsafe { (self.base.add(1), self.base.add(PAGE_SIZE / 8)) }; // Get the first header in the zone. let mut head = Header::from(curr); @@ -308,7 +317,7 @@ impl Zone { } } else { alloc_chunk(size, curr, self, &mut head); - return Some(curr.map_addr(|addr| addr + HEADER_SIZE)) + return Some(curr.map_addr(|addr| addr + HEADER_SIZE)); } } None @@ -316,7 +325,8 @@ impl Zone { } fn alloc_chunk(size: usize, ptr: *mut usize, zone: &mut Zone, head: &mut Header) { - zone.increment_refs().expect("Maximum zone allocation limit exceeded."); + zone.increment_refs() + .expect("Maximum zone allocation limit exceeded."); head.set_used(); head.write_to(ptr); @@ -343,7 +353,9 @@ impl Kalloc { // New page is the first zone in the Kalloc pool. let zone = Zone::new(start.addr); let head = Header::new(MAX_CHUNK_SIZE); - unsafe { write_zone_header_pair(&zone, &head); } + unsafe { + write_zone_header_pair(&zone, &head); + } Kalloc { head: start.addr, end: start.addr.map_addr(|addr| addr + 0x1000), @@ -352,10 +364,14 @@ impl Kalloc { fn grow_pool(&self, tail: &mut Zone) -> Result<(Zone, Header), VmError> { let page = palloc()?; - unsafe { tail.write_next(page.addr); } + unsafe { + tail.write_next(page.addr); + } let zone = Zone::new(page.addr); let head = Header::new(MAX_CHUNK_SIZE); - unsafe { write_zone_header_pair(&zone, &head); } + unsafe { + write_zone_header_pair(&zone, &head); + } Ok((zone, head)) } @@ -378,9 +394,11 @@ impl Kalloc { break; } } - panic!("Tried to free zone after: {:?}. Not in the pool...", curr_ptr); + panic!( + "Tried to free zone after: {:?}. Not in the pool...", + curr_ptr + ); } - } /// Finds the first fit for the requested size. @@ -390,15 +408,11 @@ impl Kalloc { /// 3. If no zone had a fit, then try to allocate a new zone (palloc()). /// 4. If 3. success, allocate from first chunk in new page. Else, fail with OOM. 
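// A hedged usage sketch of the allocation path documented above; illustrative
// only, assuming a `Page` already obtained from palloc, and the wrapper name
// `kalloc_usage_sketch` is hypothetical.
fn kalloc_usage_sketch(page: Page) -> Result<(), KallocError> {
    let mut k = Kalloc::new(page); // one zone holding a single MAX_CHUNK_SIZE free chunk
    let a = k.alloc(9)?;           // request is rounded up to 16 bytes
    let b = k.alloc(32)?;          // carved out of the same zone, refs now 2
    k.free(a);                     // decrements the zone ref count, may merge chunks
    k.free(b);                     // drops the zone's last reference
    Ok(())
}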
pub fn alloc(&mut self, size: usize) -> Result<*mut usize, KallocError> { - if size == 0 { - return Err(KallocError::Void); + if size == 0 { + return Err(KallocError::Void); } // Round to a 8 byte granularity - let size = if size % 8 != 0 { - (size + 7) & !7 - } else { - size - }; + let size = if size % 8 != 0 { (size + 7) & !7 } else { size }; let curr = self.head; let end = self.end.map_addr(|addr| addr - 0x1000); @@ -407,7 +421,7 @@ impl Kalloc { while zone.base <= end { if let Some(ptr) = zone.scan(size) { - return Ok(ptr) + return Ok(ptr); } else { zone = match zone.next_zone() { Some(zone) => zone, @@ -415,9 +429,9 @@ impl Kalloc { if let Ok((mut zone, mut head)) = self.grow_pool(&mut trail) { let head_ptr = zone.base.map_addr(|addr| addr + ZONE_SIZE); alloc_chunk(size, head_ptr, &mut zone, &mut head); - return Ok(head_ptr.map_addr(|addr| addr + HEADER_SIZE)) + return Ok(head_ptr.map_addr(|addr| addr + HEADER_SIZE)); } else { - return Err(KallocError::OOM) + return Err(KallocError::OOM); } } } @@ -447,7 +461,6 @@ impl Kalloc { } else { chunk_merge_flag = true; } - } else { panic!("Negative zone refs count: {}", zone.get_refs()) } From fb039d290e6ed20703f5614c0b2c658b37905455 Mon Sep 17 00:00:00 2001 From: ethandmd Date: Tue, 14 Mar 2023 23:33:47 -0700 Subject: [PATCH 49/57] rust lint --- src/vm.rs | 2 +- src/vm/palloc.rs | 11 +++++----- src/vm/ptable.rs | 56 ++++++++++++++++------------------------------- src/vm/vmalloc.rs | 11 ++++------ 4 files changed, 30 insertions(+), 50 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index f0eaf73..151d8bb 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -81,7 +81,7 @@ pub fn init() -> Result<(), PagePool> { unsafe { match palloc() { Ok(page) => { - if let Err(_) = VMALLOC.set(vmalloc::Kalloc::new(page)) { + if VMALLOC.set(vmalloc::Kalloc::new(page)).is_err() { panic!("VMALLOC double init...") } } diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index b2a6ca0..5566b18 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -59,7 +59,8 @@ impl PagePool { } let mut pool = self.pool.lock(); - Ok(pool.free_page(page)) + pool.free_page(page); + Ok(()) } } @@ -136,14 +137,14 @@ impl Pool { let mut free = Page::new(bottom); let mut pa = bottom.map_addr(|addr| addr + chunk_size); //let tmp = FreeNode::new(0x0 as *mut usize, pa); // First free page 'prev' == 0x0 => none. - free.write_free(0x0 as *mut usize, pa); + free.write_free(core::ptr::null_mut::(), pa); let last = top.map_addr(|addr| addr - chunk_size); // Init the remainder of the free list. while pa < top { let prev_pa = pa.map_addr(|addr| addr - chunk_size); let next_pa = if pa == last { - 0x0 as *mut usize + core::ptr::null_mut::() } else { pa.map_addr(|addr| addr + chunk_size) }; @@ -166,7 +167,7 @@ impl Pool { // in order to trigger the OutOfPages error. 
fn alloc_page(&mut self, mut page: Page) -> Page { let (prev, next) = page.read_free(); // prev is always 0x0 - assert_eq!(prev, 0x0 as *mut usize); + assert_eq!(prev, core::ptr::null_mut::()); if next.addr() == 0x0 { self.free = None; @@ -181,7 +182,7 @@ impl Pool { } fn free_page(&mut self, mut page: Page) { - let (head_prev, mut head_next) = (0x0 as *mut usize, 0x0 as *mut usize); + let (head_prev, mut head_next) = (core::ptr::null_mut::(), core::ptr::null_mut::()); let addr = page.addr; page.zero(); diff --git a/src/vm/ptable.rs b/src/vm/ptable.rs index 47eedb4..6c712f2 100644 --- a/src/vm/ptable.rs +++ b/src/vm/ptable.rs @@ -111,7 +111,7 @@ impl PageTable { // Returns Either PTE or None, callers responsibility to use PTE // or allocate a new page. unsafe fn walk(pt: PageTable, va: VirtAddress, alloc_new: bool) -> Result<*mut PTEntry, VmError> { - let mut table = pt.clone(); + let mut table = pt; assert!(va.addr() < VA_TOP); for level in (1..3).rev() { let idx = vpn(va, level); @@ -193,54 +193,46 @@ pub fn kpage_init() -> Result { base: base.addr as *mut usize, }; - if let Err(uart_map) = page_map( + page_map( kpage_table, UART_BASE as *mut usize, UART_BASE as *mut usize, PAGE_SIZE, PTE_READ | PTE_WRITE, - ) { - return Err(uart_map); - } + )?; log!(Debug, "Successfully mapped UART into kernel pgtable..."); - if let Err(kernel_text) = page_map( + page_map( kpage_table, DRAM_BASE, DRAM_BASE as *mut usize, text_end().addr() - DRAM_BASE.addr(), PTE_READ | PTE_EXEC, - ) { - return Err(kernel_text); - } + )?; log!( Debug, "Succesfully mapped kernel text into kernel pgtable..." ); - if let Err(kernel_rodata) = page_map( + page_map( kpage_table, text_end(), text_end() as *mut usize, rodata_end().addr() - text_end().addr(), PTE_READ, - ) { - return Err(kernel_rodata); - } + )?; log!( Debug, "Succesfully mapped kernel rodata into kernel pgtable..." ); - if let Err(kernel_data) = page_map( + page_map( kpage_table, rodata_end(), rodata_end() as *mut usize, data_end().addr() - rodata_end().addr(), PTE_READ | PTE_WRITE, - ) { - return Err(kernel_data); - } + )?; log!( Debug, "Succesfully mapped kernel data into kernel pgtable..." @@ -251,15 +243,13 @@ pub fn kpage_init() -> Result { let base = stacks_start(); for s in 0..NHART { let stack = unsafe { base.byte_add(PAGE_SIZE * (1 + s * 3)) }; - if let Err(kernel_stack) = page_map( + page_map( kpage_table, stack, stack, PAGE_SIZE * 2, PTE_READ | PTE_WRITE, - ) { - return Err(kernel_stack); - } + )?; log!( Debug, "Succesfully mapped kernel stack {} into kernel pgtable...", @@ -273,26 +263,22 @@ pub fn kpage_init() -> Result { for i in 0..NHART { let m_intstack = unsafe { base.byte_add(PAGE_SIZE * (1 + i * 4)) }; // Map hart i m-mode handler. 
- if let Err(intstack_m) = page_map( + page_map( kpage_table, m_intstack, m_intstack, PAGE_SIZE, PTE_READ | PTE_WRITE, - ) { - return Err(intstack_m); - } + )?; // Map hart i s-mode handler let s_intstack = unsafe { m_intstack.byte_add(PAGE_SIZE * 2) }; - if let Err(intstack_s) = page_map( + page_map( kpage_table, s_intstack, s_intstack, PAGE_SIZE, PTE_READ | PTE_WRITE, - ) { - return Err(intstack_s); - } + )?; log!( Debug, "Succesfully mapped interrupt stack for hart {} into kernel pgtable...", @@ -300,26 +286,22 @@ pub fn kpage_init() -> Result { ); } - if let Err(bss_map) = page_map( + page_map( kpage_table, bss_start(), bss_start(), bss_end().addr() - bss_start().addr(), PTE_READ | PTE_WRITE, - ) { - return Err(bss_map); - } + )?; log!(Debug, "Succesfully mapped kernel bss..."); - if let Err(heap_map) = page_map( + page_map( kpage_table, bss_end(), bss_end(), dram_end().addr() - bss_end().addr(), PTE_READ | PTE_WRITE, - ) { - return Err(heap_map); - } + )?; log!(Debug, "Succesfully mapped kernel heap..."); Ok(kpage_table) diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs index 33e6503..aa15aa6 100644 --- a/src/vm/vmalloc.rs +++ b/src/vm/vmalloc.rs @@ -144,11 +144,11 @@ impl Header { } fn set_used(&mut self) { - self.fields = self.fields | HEADER_USED; + self.fields |= HEADER_USED; } fn set_unused(&mut self) { - self.fields = self.fields & !HEADER_USED; + self.fields &= !HEADER_USED; } // Clear size bits. Set size bits to size. @@ -223,10 +223,7 @@ impl Zone { // Write base address with next zone address and new refs count. #[inline(always)] unsafe fn write_refs(&mut self, new_count: usize) { - let next_addr = match self.get_next() { - Err(_) => 0x0, - Ok(ptr) => ptr, - }; + let next_addr = self.get_next().unwrap_or(0x0); self.next = next_addr | new_count; self.base.write(self.next); } @@ -287,7 +284,7 @@ impl Zone { } } else { unsafe { - prev_zone.write_next(0x0 as *mut usize); + prev_zone.write_next(core::ptr::null_mut::()); } } let _ = pfree(Page::from(self.base)); From 01031a6a5c1c840687cac0a8518c3ccbcc1c927c Mon Sep 17 00:00:00 2001 From: tmu Date: Thu, 16 Mar 2023 09:18:13 -0700 Subject: [PATCH 50/57] Multi-page palloc --- src/vm.rs | 5 +- src/vm/palloc.rs | 168 ++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 147 insertions(+), 26 deletions(-) diff --git a/src/vm.rs b/src/vm.rs index 151d8bb..c00b1cc 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -101,10 +101,13 @@ pub fn init() -> Result<(), PagePool> { /// A test designed to be used with GDB. 
pub unsafe fn test_palloc() { - let allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); + let mut allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); //println!("allocd addr: {:?}", allocd.addr); allocd.addr.write(0xdeadbeaf); let _ = PAGEPOOL.get_mut().unwrap().pfree(allocd); + allocd = PAGEPOOL.get_mut().unwrap().palloc_plural(2).unwrap(); + allocd.addr.write_bytes(5, PAGE_SIZE * 2); + let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(allocd, 2); log!(Debug, "Successful test of page allocation and freeing..."); } diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs index 5566b18..f4db55a 100644 --- a/src/vm/palloc.rs +++ b/src/vm/palloc.rs @@ -47,7 +47,10 @@ impl PagePool { let mut pool = self.pool.lock(); match pool.free { None => Err(VmError::OutOfPages), - Some(page) => Ok(pool.alloc_page(page)), + Some(page) => match pool.alloc_pages(page, 1) { + Err(_) => Err(VmError::OutOfPages), + Ok(ptr) => Ok(ptr), + }, } } @@ -59,7 +62,31 @@ impl PagePool { } let mut pool = self.pool.lock(); - pool.free_page(page); + pool.free_pages(page, 1); + Ok(()) + } + + pub fn palloc_plural(&mut self, num_pages: usize) -> Result { + assert!(num_pages != 0, "tried to allocate zero pages"); + let mut pool = self.pool.lock(); + match pool.free { + None => Err(VmError::OutOfPages), + Some(page) => match pool.alloc_pages(page, num_pages) { + Err(_) => Err(VmError::OutOfPages), + // ^ TODO consider partial allocations? + Ok(ptr) => Ok(ptr), + }, + } + } + + pub fn pfree_plural(&mut self, page: Page, num_pages: usize) -> Result<(), VmError> { + assert!(num_pages != 0, "tried to allocate zero pages"); + if !is_multiple(page.addr.addr(), PAGE_SIZE) { + panic!("Free page addr not page aligned.") + } + + let mut pool = self.pool.lock(); + pool.free_pages(page, num_pages); Ok(()) } } @@ -129,6 +156,11 @@ impl Page { } } +enum PageError { + NoGap, + // maybe more? +} + impl Pool { /// Setup a doubly linked list of chunks from the bottom to top addresses. /// Assume chunk will generally be PAGE_SIZE. @@ -161,44 +193,130 @@ impl Pool { } } - // Remove the current head of the doubly linked list and replace it - // with the next free page in the list. // If this is the last free page in the pool, set the free pool to None // in order to trigger the OutOfPages error. - fn alloc_page(&mut self, mut page: Page) -> Page { - let (prev, next) = page.read_free(); // prev is always 0x0 - assert_eq!(prev, core::ptr::null_mut::()); + fn alloc_pages(&mut self, mut page: Page, num_pages: usize) -> Result { + let (prev, mut next) = page.read_free(); // prev is always 0x0 + let example_null = core::ptr::null_mut::(); + assert_eq!(prev, example_null); + // we don't use prev after this point + + let mut start_region = page; + // ^ the first page of a contigous free region, we will take + // start_region through page (inclusive) on success - if next.addr() == 0x0 { - self.free = None; + while (page.addr as usize - start_region.addr as usize) / 0x1000 < num_pages - 1 { + // until it's big enough + + while next as usize == page.addr as usize + 0x1000 && + (page.addr as usize - start_region.addr as usize) / 0x1000 < num_pages - 1 + { + // until its big enough or there was a gap + page = Page::from(next); + (_, next) = page.read_free(); + if next as usize == 0x0 { + return Err(PageError::NoGap); + // ran off the end + } + } + + if next as usize != page.addr as usize + 0x1000 { + // too short! 
+ start_region = Page::from(next); + } + } + + // we found it + + // would love this as a match but no dice + let (sr_prev, _) = start_region.read_free(); + if sr_prev == example_null { + // this was the first chunk + if next as usize == 0x0 { + self.free = None; + } else { + // remember next here is next from page + let mut next_page = Page::from(next); + next_page.write_prev(example_null); + self.free = Some(next_page); + } } else { - let mut new = Page::from(next); - new.write_prev(prev); - self.free = Some(new); + let before_region = sr_prev; + // not first chunk in pool + Page::from(before_region).write_next(next); + Page::from(next).write_prev(before_region); + + } + + // we found it + // zero them all out + let mut cur = start_region; + while cur.addr as usize <= page.addr as usize { + cur.zero(); + cur = Page::from(cur.addr.map_addr(|addr| addr + 0x1000)); } - page.zero(); - page + return Ok(start_region); } - fn free_page(&mut self, mut page: Page) { - let (head_prev, mut head_next) = (core::ptr::null_mut::(), core::ptr::null_mut::()); - let addr = page.addr; - page.zero(); + fn free_pages(&mut self, mut page: Page, num_pages: usize) { + assert!(num_pages != 0, "Tried to free zero pages"); + let example_null = core::ptr::null_mut::(); + + let mut region_end = page; + { + let mut region_prev: Option = None; + while (region_end.addr as usize - page.addr as usize ) / 0x1000 < num_pages { + region_end.zero(); + match region_prev { + None => {}, + Some(mut prev) => { + region_end.write_prev(prev.addr); + prev.write_next(region_end.addr); + } + } + region_prev = Some(region_end); + region_end = Page::from(region_end.addr.map_addr(|addr| addr + 0x1000)) + } + } + // zeroed and internally linked match self.free { Some(mut head) => { - head_next = page.addr; - head.write_prev(addr); - } + // special case, insert at beginning + if head.addr as usize > page.addr as usize { + head.write_prev(page.addr); + page.write_next(head.addr); + page.write_prev(example_null); + self.free = Some(page); + } else { + // will insert after insert_location + let mut head_next = head.read_free().1; + while head_next != example_null && + (head_next as usize) < page.addr as usize { + head = Page::from(head_next); + head_next = head.read_free().1; + } + + if head_next == example_null { + // insert at the end + region_end.write_next(example_null); + page.write_prev(head.addr); + head.write_next(page.addr); + } else { + head.write_next(page.addr); + page.write_prev(head.addr); + region_end.write_next(head_next); + Page::from(head_next).write_prev(region_end.addr); + } + } + }, None => { - page.write_free(head_prev, head_next); + page.write_prev(example_null); + region_end.write_next(example_null); self.free = Some(page); - return; } } - page.write_free(head_prev, head_next); - self.free = Some(page); } } From accb05498d31f75a22ff5502e6f57171b6f39149 Mon Sep 17 00:00:00 2001 From: tmu Date: Thu, 16 Mar 2023 14:37:22 -0700 Subject: [PATCH 51/57] build alloc --- .cargo/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 68d5333..1cf484d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,6 +1,6 @@ [unstable] build-std-features = ["compiler-builtins-mem"] -build-std = ["core", "compiler_builtins"] +build-std = ["core", "compiler_builtins", "alloc"] [build] From 9efefe80d9244e9cbf29f2abc091d35cae820b53 Mon Sep 17 00:00:00 2001 From: tmu Date: Thu, 16 Mar 2023 14:37:47 -0700 Subject: [PATCH 52/57] GlobalAlloc is go --- src/lib.rs | 4 +- 
src/mem.rs | 73 ------------------------- src/vm.rs | 136 ++++++++++++++++++---------------------------- src/vm/global.rs | 113 ++++++++++++++++++++++++++++++++++++++ src/vm/palloc.rs | 10 ++-- src/vm/process.rs | 8 ++- src/vm/vmalloc.rs | 2 +- 7 files changed, 179 insertions(+), 167 deletions(-) delete mode 100644 src/mem.rs create mode 100644 src/vm/global.rs diff --git a/src/lib.rs b/src/lib.rs index fab931c..6cf0154 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,6 +10,7 @@ #![feature(unsized_fn_params)] #![allow(dead_code)] use core::panic::PanicInfo; +extern crate alloc; #[macro_use] pub mod log; @@ -18,7 +19,6 @@ pub mod hw; pub mod lock; pub mod trap; pub mod vm; -pub mod mem; pub mod collection; //pub mod alloc; @@ -110,8 +110,6 @@ fn main() -> ! { log!(Debug, "Testing page allocation and freeing..."); unsafe { vm::test_palloc(); - log!(Debug, "Testing kalloc and kfree..."); - vm::test_kalloc(); } } else { //Interrupt other harts to init kpgtable. diff --git a/src/mem.rs b/src/mem.rs deleted file mode 100644 index cd6a6aa..0000000 --- a/src/mem.rs +++ /dev/null @@ -1,73 +0,0 @@ -//! Kernel memory utilities -use core::ops::{Deref, DerefMut}; -use core::mem::size_of_val; -use crate::vm::{kalloc, kfree}; - -/// Kernel heap allocated pointer. No guarantees on unique ownership -/// or concurrent access. -pub struct Kbox { - inner: *mut T, // NonNull, try NonNull later for lazy allocation impl. - size: usize, -} - -impl Kbox { - /// Note that as this exists currently, data is passed by value - /// into new, which means that the initial contents of a box MUST - /// be composed on the stack and passed here to be copied into the - /// heap. Kbox contents will not change size during their - /// lifetime, so it must soak up as much stack space as it will - /// ever use. - /// - /// Also this may entail a stack->stack copy into this callee's - /// stack fram, I am not sure. It might be optimized as a pass by - /// reference with the compiler knowledge that it is a move under - /// the hood, but I really can't say. - pub fn new(data: T) -> Self { - // How the allocater interface should be made use of. - // Current constraints on allocator mean size_of::() must be less than 4Kb - let size = size_of_val::(&data); - match kalloc(size) { - Err(e) => { - panic!("Kbox can't allocate: {:?}", e) - }, - Ok(ptr) => { - let new_ptr: *mut T = ptr.cast(); - unsafe { - *new_ptr = data; // <-- initialize newly allocated memory with our inner value. - Self { - inner: new_ptr, - size - } - } - } - } - } -} - -unsafe impl Send for Kbox {} -unsafe impl Sync for Kbox {} - -impl Deref for Kbox { - type Target = T; - - fn deref(&self) -> &Self::Target { - unsafe { - &*self.inner - } - } -} - -impl DerefMut for Kbox { - - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { - &mut *self.inner - } - } -} - -impl Drop for Kbox { - fn drop(&mut self) { - kfree(self.inner as *mut usize); - } -} diff --git a/src/vm.rs b/src/vm.rs index c00b1cc..d83eb1d 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -1,20 +1,51 @@ //! Virtual Memory -pub mod palloc; +mod palloc; pub mod process; pub mod ptable; pub mod vmalloc; +pub mod global; use crate::hw::param::*; -use crate::mem::Kbox; use core::cell::OnceCell; +use core::alloc::{GlobalAlloc, Layout}; +use alloc::boxed::Box; + use palloc::*; use process::Process; use ptable::kpage_init; //, PageTable}; -use vmalloc::Kalloc; +use global::Galloc; /// Global physical page pool allocated by the kernel physical allocator. 
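// Illustrative aside, not part of the patch: once a #[global_allocator] is
// registered, the `alloc` crate's collection types work kernel-wide; a small
// hedged sketch, with the helper name `global_alloc_sketch` being hypothetical.
fn global_alloc_sketch() {
    use alloc::{boxed::Box, vec::Vec};
    let word = Box::new(0xdeadbeaf_usize); // small layout: served from the sub-page pool
    let mut buf: Vec<u8> = Vec::with_capacity(2 * 4096); // large layout: served by palloc_plural
    buf.push(*word as u8);
    drop(buf);  // freed back through the global allocator's dealloc
    drop(word);
}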
static mut PAGEPOOL: OnceCell = OnceCell::new(); -static mut VMALLOC: OnceCell = OnceCell::new(); +#[global_allocator] +static mut GLOBAL: GlobalWrapper = GlobalWrapper { inner: OnceCell::new(), }; + +struct GlobalWrapper { + inner: OnceCell, +} + +unsafe impl GlobalAlloc for GlobalWrapper { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + self.inner.get().unwrap().alloc(layout) + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + self.inner.get().unwrap().dealloc(ptr, layout) + } + + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + self.inner.get().unwrap().alloc_zeroed(layout) + } + + unsafe fn realloc( + &self, + ptr: *mut u8, + layout: Layout, + new_size: usize + ) -> *mut u8 { + self.inner.get().unwrap().realloc(ptr, layout, new_size) + } +} /// (Still growing) list of kernel VM system error cases. #[derive(Debug)] @@ -32,25 +63,25 @@ pub trait Resource {} /// Moving to `mod ` pub struct TaskList { - head: Option>, + head: Option>, } /// Moving to `mod ` pub struct TaskNode { - proc: Option>, - prev: Option>, - next: Option>, + proc: Option>, + prev: Option>, + next: Option>, } /// See `vm::vmalloc::Kalloc::alloc`. -pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { - unsafe { VMALLOC.get_mut().unwrap().alloc(size) } -} +// pub fn kalloc(size: usize) -> Result<*mut usize, vmalloc::KallocError> { +// unsafe { VMALLOC.get_mut().unwrap().alloc(size) } +// } /// See `vm::vmalloc::Kalloc::free`. -pub fn kfree(ptr: *mut T) { - unsafe { VMALLOC.get_mut().unwrap().free(ptr) } -} +// pub fn kfree(ptr: *mut T) { +// unsafe { VMALLOC.get_mut().unwrap().free(ptr) } +// } fn palloc() -> Result { unsafe { PAGEPOOL.get_mut().unwrap().palloc() } @@ -79,13 +110,11 @@ pub fn init() -> Result<(), PagePool> { log!(Debug, "Successfully initialized kernel page pool..."); unsafe { - match palloc() { - Ok(page) => { - if VMALLOC.set(vmalloc::Kalloc::new(page)).is_err() { - panic!("VMALLOC double init...") - } + match GLOBAL.inner.set(Galloc::new(PAGEPOOL.get_mut().unwrap())) { + Ok(_) => {}, + Err(_) => { + panic!("vm double init.") } - Err(_) => panic!("Unable to allocate initial zone for vmalloc..."), } } @@ -101,71 +130,12 @@ pub fn init() -> Result<(), PagePool> { /// A test designed to be used with GDB. pub unsafe fn test_palloc() { - let mut allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap(); + let mut allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap().addr; //println!("allocd addr: {:?}", allocd.addr); - allocd.addr.write(0xdeadbeaf); - let _ = PAGEPOOL.get_mut().unwrap().pfree(allocd); + allocd.write(0xdeadbeaf); + let _ = PAGEPOOL.get_mut().unwrap().pfree(Page::from(allocd)); allocd = PAGEPOOL.get_mut().unwrap().palloc_plural(2).unwrap(); - allocd.addr.write_bytes(5, PAGE_SIZE * 2); + allocd.write_bytes(5, PAGE_SIZE * 2); let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(allocd, 2); log!(Debug, "Successful test of page allocation and freeing..."); } - -/// A test that is more insightful when run with GDB. -/// Likely missing some edge cases like: -/// + Free the last two chunks in a zone. Ensure you don't -/// try to merge out of zone bounds. 
-pub unsafe fn test_kalloc() { - use core::mem::size_of; - use core::ptr::write; - struct Atest { - xs: [u64; 4], - } - impl Atest { - fn new() -> Self { - let xs = [5; 4]; - Atest { xs } - } - } - let addr1 = kalloc(8).expect("Could not allocate addr1..."); - assert_eq!(addr1.sub(2).read(), 0x1); // Check zone refs - assert_eq!(addr1.sub(1).read(), 0x1008); // Check chunk header size + used - addr1.write(0xdeadbeaf); - - let addr2: *mut [u64; 2] = kalloc(16).expect("Could not allocate addr3...").cast(); - assert_eq!(addr1.sub(2).read(), 0x2); // Check zone refs - assert_eq!((addr2 as *mut usize).sub(1).read(), 0x1010); // Check chunk header size + used - write(addr2, [0x8BADF00D, 0xBAADF00D]); - - let t = Atest::new(); - let addr3: *mut Atest = kalloc(size_of::()) - .expect("Could not allocate addr3...") - .cast(); - write(addr3, t); - - kfree(addr1); - kfree(addr2); - kfree(addr3); - assert_eq!(addr1.sub(2).read(), 0x0); // Check zone refs - assert_eq!((addr2 as *mut usize).sub(1).read(), 0x10); // Check chunk header size + used - - let addr4 = kalloc(0xfc0).expect("Could not allocate addr4..."); - let addr5 = kalloc(8).expect("Could not allocate addr5..."); - write(addr5, 0xee1f00d); - kfree(addr5); - kfree(addr4); - - let addr6: *mut [u64; 510] = kalloc(0xff0) - .expect("Could not allocate addr6 (remainder of page)...") - .cast(); - // Don't do this: Will stack overflow. - // Foreboding for Kbox::new() correctness. - // let big_xs = [555; 510]; - // unsafe { write(addr6, big_xs); } - - let addr7 = kalloc(9).expect("Could not allocate addr7..."); - kfree(addr6); - kfree(addr7); - - log!(Debug, "Successful test of kalloc and kfree..."); -} diff --git a/src/vm/global.rs b/src/vm/global.rs new file mode 100644 index 0000000..063feb6 --- /dev/null +++ b/src/vm/global.rs @@ -0,0 +1,113 @@ +/// Global allocator on top of vmalloc and palloc +use core::alloc::{Layout, GlobalAlloc}; +use core::cell::UnsafeCell; +use crate::vm::palloc::PagePool; +use crate::vm::vmalloc::{MAX_CHUNK_SIZE, Kalloc}; +use crate::param::PAGE_SIZE; + +pub struct Galloc { + pool: *mut PagePool, + small_pool: UnsafeCell, +} + +impl Galloc { + pub fn new(pool: &mut PagePool) -> Self { + let small_pool_start = pool.palloc().expect("Could not initalize GlobalAlloc small pool"); + Galloc { + pool, + small_pool: UnsafeCell::new(Kalloc::new(small_pool_start)), + } + } +} + +impl Drop for Galloc { + fn drop(&mut self) { + panic!("Dropped the general allocator") + } +} + +/// Returns the number of pages to request from the page allocator or +/// zero to request from the sub-page allocator +fn decide_internal_scheme(layout: Layout) -> usize { + match layout.size() { + 0 => { panic!("Tried zero size alloc") }, + 1..=MAX_CHUNK_SIZE => { + // try use small allocator + match layout.align() { + 0..=8 => {0}, + _ => {1}, + // ^ alignment too large for Kalloc, round up to a page + } + }, + req_size => { + // use page allocator + (req_size + PAGE_SIZE - 1) / PAGE_SIZE + } + } +} + +unsafe impl GlobalAlloc for Galloc { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() > PAGE_SIZE { + panic!("Page+ alignemnt requested in alloc"); + } + + let num_pages = decide_internal_scheme(layout); + + if num_pages == 0 { + match (*self.small_pool.get()).alloc(layout.size()) { + Ok(ptr) => { ptr as *mut u8 }, + Err(e) => { panic!("Small allocation failed {:?}", e) } + } + } else { + match (*self.pool).palloc_plural(num_pages) { + Ok(ptr) => { ptr as *mut u8 }, + Err(e) => { panic!("Page allocation failed {:?}", e)} + } + } + } + + 
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        if layout.align() > PAGE_SIZE {
+            panic!("Page+ alignment requested in dealloc");
+        }
+
+        let num_pages = decide_internal_scheme(layout);
+
+        if num_pages == 0 {
+            (*self.small_pool.get()).free(ptr as *mut usize)
+        } else {
+            match (*self.pool).pfree_plural(ptr as *mut usize, num_pages) {
+                Ok(_) => {},
+                Err(e) => { panic!("Page deallocation failed {:?}", e)}
+            }
+        }
+
+    }
+
+    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+        let out = self.alloc(layout);
+
+        let num_pages = decide_internal_scheme(layout);
+        if num_pages == 0 {
+            out.write_bytes(0, layout.size());
+            out
+        } else {
+            // palloc already zeros
+            out
+        }
+    }
+
+    unsafe fn realloc(
+        &self,
+        ptr: *mut u8,
+        layout: Layout,
+        new_size: usize
+    ) -> *mut u8 {
+        // TODO improve
+        let out = self.alloc(Layout::from_size_align(new_size, layout.align()).unwrap());
+        core::intrinsics::copy_nonoverlapping(ptr, out, core::cmp::min(layout.size(), new_size));
+        self.dealloc(ptr, layout);
+        out
+    }
+}
diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs
index f4db55a..8830f11 100644
--- a/src/vm/palloc.rs
+++ b/src/vm/palloc.rs
@@ -66,7 +66,7 @@ impl PagePool {
         Ok(())
     }
 
-    pub fn palloc_plural(&mut self, num_pages: usize) -> Result<Page, VmError> {
+    pub fn palloc_plural(&mut self, num_pages: usize) -> Result<*mut usize, VmError> {
         assert!(num_pages != 0, "tried to allocate zero pages");
         let mut pool = self.pool.lock();
         match pool.free {
@@ -74,19 +74,19 @@ impl PagePool {
             Some(page) => match pool.alloc_pages(page, num_pages) {
                 Err(_) => Err(VmError::OutOfPages),
                 // ^ TODO consider partial allocations?
-                Ok(ptr) => Ok(ptr),
+                Ok(ptr) => Ok(ptr.addr),
             },
         }
     }
 
-    pub fn pfree_plural(&mut self, page: Page, num_pages: usize) -> Result<(), VmError> {
+    pub fn pfree_plural(&mut self, page: *mut usize, num_pages: usize) -> Result<(), VmError> {
         assert!(num_pages != 0, "tried to allocate zero pages");
-        if !is_multiple(page.addr.addr(), PAGE_SIZE) {
+        if !is_multiple(page.addr(), PAGE_SIZE) {
             panic!("Free page addr not page aligned.")
         }
 
         let mut pool = self.pool.lock();
-        pool.free_pages(page, num_pages);
+        pool.free_pages(Page::from(page), num_pages);
         Ok(())
     }
 }
diff --git a/src/vm/process.rs b/src/vm/process.rs
index 64fcf27..9b08549 100644
--- a/src/vm/process.rs
+++ b/src/vm/process.rs
@@ -1,15 +1,19 @@
 //! Process handle and utilities.
+// use alloc::boxed::Box;
+
+// extern crate alloc;
+
+use alloc::boxed::Box;
 
 use crate::collection::BalBst;
 use crate::hw::HartContext;
-use crate::mem::Kbox;
 use crate::trap::TrapFrame;
 use crate::vm::ptable::PageTable;
 use crate::vm::Resource;
 
 pub struct Process {
     id: usize,
-    address_space: BalBst<Kbox<dyn Resource>>, // todo: Balanced BST of Resources
+    address_space: BalBst<Box<dyn Resource>>, // todo: Balanced BST of Resources
     state: ProcessState,
     pgtbl: PageTable,
     trapframe: TrapFrame,
diff --git a/src/vm/vmalloc.rs b/src/vm/vmalloc.rs
index aa15aa6..6bcd2cc 100644
--- a/src/vm/vmalloc.rs
+++ b/src/vm/vmalloc.rs
@@ -4,7 +4,7 @@ use core::mem::size_of;
 use super::{palloc, palloc::Page, pfree, VmError};
 use crate::hw::param::PAGE_SIZE;
 
-const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 - 8 = 4080.
+pub const MAX_CHUNK_SIZE: usize = 4080; // PAGE_SIZE - ZONE_HEADER_SIZE - HEADER_SIZE = 4096 - 8 - 8 = 4080.
 const HEADER_SIZE: usize = size_of::<Header>();
 const ZONE_SIZE: usize = 8;
 const HEADER_USED: usize = 1 << 12; // Chunk is in use flag.

From bca452fccf06f11392d94befd502e99742a6008c Mon Sep 17 00:00:00 2001
From: tmu
Date: Thu, 16 Mar 2023 17:25:14 -0700
Subject: [PATCH 53/57] update formatting and memlayout

---
 src/asm/entry.s     | 22 +++++++++++-----------
 src/vm/memlayout.md | 18 ++++++++++++++----
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/src/asm/entry.s b/src/asm/entry.s
index 8bf801a..46b6e43 100644
--- a/src/asm/entry.s
+++ b/src/asm/entry.s
@@ -36,19 +36,19 @@
         la a2, _stacks_end
         sub sp, a2, a1
 
-	.extern _intstacks_end
-	csrr a1, mhartid
-	li a0, 0x4000
-	mul a1, a1, a0
-	la a2, _intstacks_end
-	sub a2, a2, a1
-	csrw mscratch, a2 # Write per hart mscratch pad
-	li a0, 0x2000
-	sub a2, a2, a0 # Move sp down by scratch pad page + guard page
-	csrw sscratch, a2 # Write per hart sscratch pad
+        .extern _intstacks_end
+        csrr a1, mhartid
+        li a0, 0x4000
+        mul a1, a1, a0
+        la a2, _intstacks_end
+        sub a2, a2, a1
+        csrw mscratch, a2 # Write per hart mscratch pad
+        li a0, 0x2000
+        sub a2, a2, a0 # Move sp down by scratch pad page + guard page
+        csrw sscratch, a2 # Write per hart sscratch pad
 
         # Jump to _start in src/main.rs
-	.extern _start
+        .extern _start
         call _start
spin:
         wfi
diff --git a/src/vm/memlayout.md b/src/vm/memlayout.md
index a964a48..33e9c5e 100644
--- a/src/vm/memlayout.md
+++ b/src/vm/memlayout.md
@@ -9,13 +9,23 @@
 ├──────────────────────┤
 │ .bss                 │
 ├──────────────────────┤
-│ ...                  │
-├──────────────────────┤
-│ Hart 1 guard         │
-│ Hart 1 stack         │
+│ guard                │
+│ H0 M-mode stack      │
+│ guard                │
+│ H0 S-mode stack      │
+│ guard                │
+│ H1 M-mode stack      │
+│ guard                │
+│ H1 S-mode stack      │
 ├──────────────────────┤
 │ Hart 0 guard         │
 │ Hart 0 stack         │
+│                      │
+├──────────────────────┤
+│ Hart 1 guard         │
+│ Hart 1 stack         │
+│                      │
+│ Bottom guard         │
 ├──────────────────────┤
 │ .data                │
 ├──────────────────────┤

From 286b29fadd90a8499917ea63d80443861838f308 Mon Sep 17 00:00:00 2001
From: ethandmd
Date: Thu, 16 Mar 2023 21:35:58 -0700
Subject: [PATCH 54/57] Fix write_bytes count in palloc test

---
 src/vm.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/vm.rs b/src/vm.rs
index d83eb1d..cff1240 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -134,8 +134,8 @@ pub unsafe fn test_palloc() {
     //println!("allocd addr: {:?}", allocd.addr);
     allocd.write(0xdeadbeaf);
     let _ = PAGEPOOL.get_mut().unwrap().pfree(Page::from(allocd));
-    allocd = PAGEPOOL.get_mut().unwrap().palloc_plural(2).unwrap();
-    allocd.write_bytes(5, PAGE_SIZE * 2);
-    let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(allocd, 2);
+    allocd = PAGEPOOL.get_mut().unwrap().palloc_plural(5).unwrap();
+    allocd.write_bytes(5, 512 * 2);
+    let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(allocd, 5);
     log!(Debug, "Successful test of page allocation and freeing...");
 }

From bd72aba99b8e6f56ff07f04d37fe601908c58fac Mon Sep 17 00:00:00 2001
From: ethandmd
Date: Thu, 16 Mar 2023 21:36:34 -0700
Subject: [PATCH 55/57] Link allocd pages back to free list

---
 src/vm/palloc.rs | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/src/vm/palloc.rs b/src/vm/palloc.rs
index 8830f11..f645139 100644
--- a/src/vm/palloc.rs
+++ b/src/vm/palloc.rs
@@ -205,7 +205,7 @@ impl Pool {
 
        // ^ the first page of a contiguous free region, we will take
        // start_region through page (inclusive) on success
-        while (page.addr as usize - start_region.addr as usize) / 0x1000 < num_pages - 1 {
+        while (page.addr.map_addr(|addr| addr - start_region.addr.addr())).addr() / 0x1000 < num_pages - 1 {
 
             // until it's big enough
             while next as usize == page.addr as usize + 0x1000 &&
@@ -263,37 +263,38 @@ impl Pool {
         assert!(num_pages != 0, "Tried to free zero pages");
         let example_null = core::ptr::null_mut::<usize>();
 
-        let mut region_end = page;
-        {
-            let mut region_prev: Option<Page> = None;
-            while (region_end.addr as usize - page.addr as usize ) / 0x1000 < num_pages {
-                region_end.zero();
-                match region_prev {
-                    None => {},
-                    Some(mut prev) => {
-                        region_end.write_prev(prev.addr);
-                        prev.write_next(region_end.addr);
-                    }
+        let mut region_end = Page::from(page.addr.map_addr(|addr| addr + (num_pages - 1) * 0x1000));
+        let stop = region_end.addr.map_addr(|addr| addr + 0x1000);
+        let mut prev_page: Option<Page> = None;
+        let mut curr_page = page;
+        while curr_page.addr < stop {
+            curr_page.zero();
+            let next_page = Page::from(curr_page.addr.map_addr(|addr| addr + 0x1000));
+            match prev_page {
+                None => { curr_page.write_next(next_page.addr); },
+                Some(mut prev) => {
+                    curr_page.write_prev(prev.addr);
+                    prev.write_next(curr_page.addr);
                 }
-                region_prev = Some(region_end);
-                region_end = Page::from(region_end.addr.map_addr(|addr| addr + 0x1000))
             }
+            (prev_page, curr_page) = (Some(curr_page), next_page);
+        }
 
         // zeroed and internally linked
         match self.free {
             Some(mut head) => {
                 // special case, insert at beginning
-                if head.addr as usize > page.addr as usize {
-                    head.write_prev(page.addr);
-                    page.write_next(head.addr);
+                if head.addr > region_end.addr {
+                    head.write_prev(region_end.addr);
+                    region_end.write_next(head.addr);
                     page.write_prev(example_null);
                     self.free = Some(page);
                 } else {
                     // will insert after insert_location
                     let mut head_next = head.read_free().1;
                     while head_next != example_null &&
-                        (head_next as usize) < page.addr as usize {
+                        head_next < region_end.addr {
                         head = Page::from(head_next);
                         head_next = head.read_free().1;
                     }

From cc4cc0a1bcea52ce0011577b9b46096e225c9653 Mon Sep 17 00:00:00 2001
From: ethandmd
Date: Thu, 16 Mar 2023 21:47:20 -0700
Subject: [PATCH 56/57] Add AB-AB palloc-pfree test

---
 src/vm.rs | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/vm.rs b/src/vm.rs
index cff1240..0163d11 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -129,13 +129,17 @@ pub fn init() -> Result<(), PagePool> {
 }
 
 /// A test designed to be used with GDB.
+/// Allocate A, then B. Free A, then B.
 pub unsafe fn test_palloc() {
-    let mut allocd = PAGEPOOL.get_mut().unwrap().palloc().unwrap().addr;
-    //println!("allocd addr: {:?}", allocd.addr);
-    allocd.write(0xdeadbeaf);
-    let _ = PAGEPOOL.get_mut().unwrap().pfree(Page::from(allocd));
-    allocd = PAGEPOOL.get_mut().unwrap().palloc_plural(5).unwrap();
-    allocd.write_bytes(5, 512 * 2);
-    let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(allocd, 5);
+    let one = PAGEPOOL.get_mut().unwrap().palloc().unwrap();
+    one.addr.write(0xdeadbeaf);
+
+    let many = PAGEPOOL.get_mut().unwrap().palloc_plural(5).unwrap();
+    many.write_bytes(5, 512 * 2);
+
+    let _ = PAGEPOOL.get_mut().unwrap().pfree(one);
+    let _ = PAGEPOOL.get_mut().unwrap().pfree_plural(many, 5);
+
     log!(Debug, "Successful test of page allocation and freeing...");
 }
+

From 7f57885304af96b2fe0aed4c45bfc51b04b3ad7a Mon Sep 17 00:00:00 2001
From: ethandmd
Date: Thu, 16 Mar 2023 22:23:56 -0700
Subject: [PATCH 57/57] Add alloc crate tests

---
 src/lib.rs |  4 +++-
 src/vm.rs  | 23 +++++++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/src/lib.rs b/src/lib.rs
index 6cf0154..7bac8bc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -107,9 +107,11 @@ fn main() -> ! {
         log!(Info, "Finished trap init...");
         let _ = vm::init();
         log!(Info, "Initialized the kernel page table...");
-        log!(Debug, "Testing page allocation and freeing...");
         unsafe {
+            log!(Debug, "Testing page allocation and freeing...");
             vm::test_palloc();
+            log!(Debug, "Testing galloc allocation and freeing...");
+            vm::test_galloc();
         }
     } else {
         //Interrupt other harts to init kpgtable.
diff --git a/src/vm.rs b/src/vm.rs
index 0163d11..afe4a9a 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -143,3 +143,26 @@ pub unsafe fn test_palloc() {
     log!(Debug, "Successful test of page allocation and freeing...");
 }
 
+pub unsafe fn test_galloc() {
+    use alloc::collections;
+    {
+        // Simple test. It works!
+        let mut one = Box::new(5);
+        let a_one: *mut u32 = one.as_mut();
+        assert_eq!(*one, *a_one);
+
+        // Slightly more interesting... it also works! Look at GDB
+        // and watch for the zone headers + chunk headers indicating 'in use' and
+        // 'chunk size'. Then watch as these go out of scope.
+        let mut one_vec: Box<collections::VecDeque<u32>> = Box::new(collections::VecDeque::new());
+        one_vec.push_back(555);
+        one_vec.push_front(111);
+        let _a_vec: *mut collections::VecDeque<u32> = one_vec.as_mut();
+    }
+
+    { // More than a page.
+        let mut big: Box<[u64; 513]> = Box::new([0x8BADF00D;513]);
+        let _a_big = big.as_mut();
+    }
+}
+
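
Context for the series above: the Box and VecDeque uses in test_galloc only compile once some GlobalAlloc implementation is registered with #[global_allocator], and that wiring is not shown in these patches. The following is a minimal illustrative sketch of how a Galloc-style allocator is typically registered; GlobalShim, GLOBAL, and the init method are assumed names for this sketch, it presumes Galloc is reachable as crate::vm::global::Galloc, and it presumes init runs once during early boot (for example right after vm::init()) before the first alloc-crate allocation.

    use core::alloc::{GlobalAlloc, Layout};
    use core::cell::UnsafeCell;
    use crate::vm::global::Galloc;

    // Shim so the #[global_allocator] static can exist before the page pool does.
    struct GlobalShim {
        inner: UnsafeCell<Option<Galloc>>,
    }

    // Safety: in this sketch the cell is only written by hart 0 during early boot,
    // before any other hart allocates; a real kernel would guard it with a lock.
    unsafe impl Sync for GlobalShim {}

    #[global_allocator]
    static GLOBAL: GlobalShim = GlobalShim { inner: UnsafeCell::new(None) };

    impl GlobalShim {
        // Called once, before the first Box/VecDeque is created.
        pub unsafe fn init(&self, galloc: Galloc) {
            *self.inner.get() = Some(galloc);
        }
    }

    unsafe impl GlobalAlloc for GlobalShim {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            (*self.inner.get()).as_ref().expect("galloc not initialized").alloc(layout)
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            (*self.inner.get()).as_ref().expect("galloc not initialized").dealloc(ptr, layout)
        }
    }

With a registration along these lines, alloc::boxed::Box and alloc::collections::VecDeque route every allocation and free through Galloc's alloc/dealloc, which is exactly what PATCH 57's test_galloc exercises.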