From 9c7691cb8aaf03c411e65483b67e803705152dbf Mon Sep 17 00:00:00 2001 From: yufeng <321353225@qq.com> Date: Thu, 22 Feb 2024 00:29:09 +0800 Subject: [PATCH] optimize some code --- .cargo/config.toml | 4 +- Cargo.lock | 38 +- Cargo.toml | 2 - arch/src/lib.rs | 15 + arch/src/riscv64/page_table/sv39.rs | 20 +- arch/src/x86_64/mod.rs | 31 -- arch/src/x86_64/page_table.rs | 27 +- crates/page_table/Cargo.toml | 15 - crates/page_table/src/arch/aarch64.rs | 22 -- crates/page_table/src/arch/mod.rs | 8 - crates/page_table/src/arch/riscv.rs | 30 -- crates/page_table/src/arch/x86_64.rs | 16 - crates/page_table/src/bits64.rs | 402 -------------------- crates/page_table/src/lib.rs | 124 ------ crates/page_table_entry/Cargo.toml | 18 - crates/page_table_entry/src/arch/aarch64.rs | 240 ------------ crates/page_table_entry/src/arch/mod.rs | 9 - crates/page_table_entry/src/arch/riscv.rs | 130 ------- crates/page_table_entry/src/arch/x86_64.rs | 114 ------ crates/page_table_entry/src/lib.rs | 74 ---- 20 files changed, 37 insertions(+), 1302 deletions(-) delete mode 100644 crates/page_table/Cargo.toml delete mode 100644 crates/page_table/src/arch/aarch64.rs delete mode 100644 crates/page_table/src/arch/mod.rs delete mode 100644 crates/page_table/src/arch/riscv.rs delete mode 100644 crates/page_table/src/arch/x86_64.rs delete mode 100644 crates/page_table/src/bits64.rs delete mode 100644 crates/page_table/src/lib.rs delete mode 100644 crates/page_table_entry/Cargo.toml delete mode 100644 crates/page_table_entry/src/arch/aarch64.rs delete mode 100644 crates/page_table_entry/src/arch/mod.rs delete mode 100644 crates/page_table_entry/src/arch/riscv.rs delete mode 100644 crates/page_table_entry/src/arch/x86_64.rs delete mode 100644 crates/page_table_entry/src/lib.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index ca27ce1..d75b01b 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,7 +1,7 @@ # 编译的目标平台 [build] -# target = 'riscv64imac-unknown-none-elf' -target = 
'x86_64-unknown-none' +target = 'riscv64imac-unknown-none-elf' +# target = 'x86_64-unknown-none' # This flags also can be set from every target. rustflags = [ diff --git a/Cargo.lock b/Cargo.lock index 13a7172..60b6370 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "aarch64-cpu" -version = "9.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac42a04a61c19fc8196dd728022a784baecc5d63d7e256c01ad1b3fbfab26287" -dependencies = [ - "tock-registers 0.8.1", -] - [[package]] name = "ahash" version = "0.8.3" @@ -152,7 +143,7 @@ version = "7.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdecfbb28672ad3664e71ae05a398a52df430d86d660691501b28968cc4467e6" dependencies = [ - "tock-registers 0.7.0", + "tock-registers", ] [[package]] @@ -601,7 +592,7 @@ dependencies = [ "raw-cpuid", "riscv 0.7.0", "spin 0.9.8", - "tock-registers 0.7.0", + "tock-registers", "x86_64", ] @@ -774,25 +765,6 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" -[[package]] -name = "page_table" -version = "0.1.0" -dependencies = [ - "log", - "memory_addr", - "page_table_entry", -] - -[[package]] -name = "page_table_entry" -version = "0.1.0" -dependencies = [ - "aarch64-cpu", - "bitflags 2.4.2", - "memory_addr", - "x86_64", -] - [[package]] name = "paste" version = "1.0.14" @@ -1045,12 +1017,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee8fba06c1f4d0b396ef61a54530bb6b28f0dc61c38bc8bc5a5a48161e6282e" -[[package]] -name = "tock-registers" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "696941a0aee7e276a165a978b37918fd5d22c55c3d6bda197813070ca9c0f21c" - [[package]] name = "toml" version = "0.5.11" diff --git a/Cargo.toml b/Cargo.toml index 
bd78451..b8dba16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,8 +10,6 @@ members = [ "crates/timestamp", "crates/vfscore", "crates/cv1811-sd", - "crates/page_table", - "crates/page_table_entry", "crates/memory_addr", "crates/crate_interface", diff --git a/arch/src/lib.rs b/arch/src/lib.rs index 4468699..8e6bfa6 100644 --- a/arch/src/lib.rs +++ b/arch/src/lib.rs @@ -106,3 +106,18 @@ pub fn clear_bss() { .fill(0); } } + +bitflags::bitflags! { + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct MappingFlags: u64 { + const None = 0; + const U = 1 << 0; + const R = 1 << 1; + const W = 1 << 2; + const X = 1 << 3; + const A = 1 << 4; + const D = 1 << 5; + const Device = 1 << 6; + const Cache = 1 << 7; + } +} diff --git a/arch/src/riscv64/page_table/sv39.rs b/arch/src/riscv64/page_table/sv39.rs index 9752c03..9e5f689 100644 --- a/arch/src/riscv64/page_table/sv39.rs +++ b/arch/src/riscv64/page_table/sv39.rs @@ -135,17 +135,6 @@ pub fn get_pte_list(paddr: PhysAddr) -> &'static mut [PTE] { unsafe { core::slice::from_raw_parts_mut(paddr.get_mut_ptr::(), PAGE_ITEM_COUNT) } } -fn destory_pte_leaf(paddr: PhysAddr) { - let pte_list = get_pte_list(paddr); - for pte in pte_list { - if pte.is_leaf() { - destory_pte_leaf(pte.to_ppn().into()); - ArchInterface::frame_unalloc(pte.to_ppn()); - } - } - ArchInterface::frame_unalloc(paddr.into()); -} - #[derive(Debug)] pub struct PageTable(pub(crate) PhysAddr); @@ -244,6 +233,13 @@ impl PageTable { impl Drop for PageTable { fn drop(&mut self) { - destory_pte_leaf(self.0); + for root_pte in get_pte_list(self.0)[..0x100].iter().filter(|x| x.is_leaf()) { + get_pte_list(root_pte.to_ppn().into()) + .iter() + .filter(|x| x.is_leaf()) + .for_each(|x| ArchInterface::frame_unalloc(x.to_ppn())); + ArchInterface::frame_unalloc(root_pte.to_ppn()); + } + ArchInterface::frame_unalloc(self.0.into()); } } \ No newline at end of file diff --git a/arch/src/x86_64/mod.rs b/arch/src/x86_64/mod.rs index e25ba69..2068a46 100644 
--- a/arch/src/x86_64/mod.rs +++ b/arch/src/x86_64/mod.rs @@ -23,37 +23,6 @@ use x86_64::instructions::port::PortWriteOnly; use crate::x86_64::multiboot::use_multiboot; -#[link_section = ".data.prepage.entry"] -static KERNEL_PDPT: PDPT = { - let mut arr: PDPT = [PDPTEntry(0); PAGE_SIZE_ENTRIES]; - // 0x00000000_80000000 -> 0x80000000 (1G) - // arr[0] = PDPTEntry::new(PAddr(0x0), PDPTFlags::P | PDPTFlags::RW | PDPTFlags::PS); - // arr[1] = PDPTEntry::new(PAddr(0x40000000), PDPTFlags::P | PDPTFlags::RW | PDPTFlags::PS); - // arr[2] = PDPTEntry::new(PAddr(0x80000000), PDPTFlags::P | PDPTFlags::RW | PDPTFlags::PS); - // arr[3] = PDPTEntry::new(PAddr(0xc0000000), PDPTFlags::P | PDPTFlags::RW | PDPTFlags::PS); - arr[0] = PDPTEntry(0x0 | 0x83); - arr[1] = PDPTEntry(0x40000000 | 0x83); - arr[2] = PDPTEntry(0x80000000 | 0x83); - arr[3] = PDPTEntry(0xc0000000 | 0x83); - arr -}; - -// #[link_section = ".data.prepage.entry"] -// static PAGE_TABLE: PML4 = { -// let mut arr: PML4 = [PML4Entry(0); PAGE_SIZE_ENTRIES]; - -// // arr[2] = PTE::from_addr(0x8000_0000, PTEFlags::ADVRWX); -// // arr[0x100] = PTE::from_addr(0x0000_0000, PTEFlags::ADGVRWX); -// // arr[0x101] = PTE::from_addr(0x4000_0000, PTEFlags::ADGVRWX); -// // arr[0x102] = PTE::from_addr(0x8000_0000, PTEFlags::ADGVRWX); -// // arr[0x106] = PTE::from_addr(0x8000_0000, PTEFlags::ADVRWX); -// // arr[0] = PML4Entry::new(PAddr(KERNEL_PDPT.as_ptr() as u64 - VIRT_ADDR_START as u64), PML4Flags::P | PML4Flags::RW); -// let ptr = &KERNEL_PDPT as *const [PDPTEntry; PAGE_SIZE_ENTRIES] as *const PDPTEntry; -// let paddr: u64 = unsafe { transmute(ptr.sub(VIRT_ADDR_START)) }; -// arr[0] = PML4Entry(paddr | 3); -// arr -// }; - pub fn shutdown() -> ! 
{ unsafe { PortWriteOnly::new(0x604).write(0x2000u16) }; diff --git a/arch/src/x86_64/page_table.rs b/arch/src/x86_64/page_table.rs index 573302f..744871d 100644 --- a/arch/src/x86_64/page_table.rs +++ b/arch/src/x86_64/page_table.rs @@ -5,7 +5,6 @@ use crate::{ArchInterface, PhysAddr, PhysPage, VirtAddr, VirtPage, PAGE_ITEM_COU use super::sigtrx::get_trx_mapping; - #[derive(Copy, Clone, Debug)] pub struct PTE(usize); impl PTE { @@ -122,17 +121,6 @@ pub fn get_pte_list(paddr: PhysAddr) -> &'static mut [PTE] { unsafe { core::slice::from_raw_parts_mut(paddr.get_mut_ptr::(), PAGE_ITEM_COUNT) } } -fn destory_pte_leaf(paddr: PhysAddr) { - let pte_list = get_pte_list(paddr); - for pte in pte_list { - if pte.is_leaf() { - destory_pte_leaf(pte.to_ppn().into()); - ArchInterface::frame_unalloc(pte.to_ppn()); - } - } - ArchInterface::frame_unalloc(paddr.into()); -} - #[derive(Debug)] pub struct PageTable(pub(crate) PhysAddr); @@ -169,8 +157,7 @@ impl PageTable { } #[inline] - pub fn map(&self, ppn: PhysPage, vpn: VirtPage, flags: PTEFlags, level: usize) - { + pub fn map(&self, ppn: PhysPage, vpn: VirtPage, flags: PTEFlags, level: usize) { // TODO: Add huge page support. 
let mut pte_list = get_pte_list(self.0); for i in (1..level).rev() { @@ -225,9 +212,15 @@ impl PageTable { impl Drop for PageTable { fn drop(&mut self) { - destory_pte_leaf(self.0); + for root_pte in get_pte_list(self.0)[..0x100].iter().filter(|x| x.is_leaf()) { + get_pte_list(root_pte.to_ppn().into()) + .iter() + .filter(|x| x.is_leaf()) + .for_each(|x| ArchInterface::frame_unalloc(x.to_ppn())); + ArchInterface::frame_unalloc(root_pte.to_ppn()); + } + ArchInterface::frame_unalloc(self.0.into()); } } -pub fn switch_to_kernel_page_table() { -} +pub fn switch_to_kernel_page_table() {} diff --git a/crates/page_table/Cargo.toml b/crates/page_table/Cargo.toml deleted file mode 100644 index 4178458..0000000 --- a/crates/page_table/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "page_table" -version = "0.1.0" -edition = "2021" -authors = ["Yuekai Jia "] -description = "Generic page table structures for various hardware architectures" -license = "GPL-3.0-or-later OR Apache-2.0" -homepage = "https://github.com/rcore-os/arceos" -repository = "https://github.com/rcore-os/arceos/tree/main/crates/page_table" -documentation = "https://rcore-os.github.io/arceos/page_table/index.html" - -[dependencies] -log = "0.4" -memory_addr = { path = "../memory_addr" } -page_table_entry = { path = "../page_table_entry" } diff --git a/crates/page_table/src/arch/aarch64.rs b/crates/page_table/src/arch/aarch64.rs deleted file mode 100644 index e652544..0000000 --- a/crates/page_table/src/arch/aarch64.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! AArch64 specific page table structures. - -use crate::{PageTable64, PagingMetaData}; -use page_table_entry::aarch64::A64PTE; - -/// Metadata of AArch64 page tables. 
-#[derive(Copy, Clone)] -pub struct A64PagingMetaData; - -impl const PagingMetaData for A64PagingMetaData { - const LEVELS: usize = 4; - const PA_MAX_BITS: usize = 48; - const VA_MAX_BITS: usize = 48; - - fn vaddr_is_valid(vaddr: usize) -> bool { - let top_bits = vaddr >> Self::VA_MAX_BITS; - top_bits == 0 || top_bits == 0xffff - } -} - -/// AArch64 VMSAv8-64 translation table. -pub type A64PageTable = PageTable64; diff --git a/crates/page_table/src/arch/mod.rs b/crates/page_table/src/arch/mod.rs deleted file mode 100644 index 13fff18..0000000 --- a/crates/page_table/src/arch/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[cfg(any(target_arch = "x86_64", doc))] -pub mod x86_64; - -#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))] -pub mod riscv; - -#[cfg(any(target_arch = "aarch64", doc))] -pub mod aarch64; diff --git a/crates/page_table/src/arch/riscv.rs b/crates/page_table/src/arch/riscv.rs deleted file mode 100644 index 6c15678..0000000 --- a/crates/page_table/src/arch/riscv.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! RISC-V specific page table structures. - -use crate::{PageTable64, PagingMetaData}; -use page_table_entry::riscv::Rv64PTE; - -/// Metadata of RISC-V Sv39 page tables. -#[derive(Clone, Copy)] -pub struct Sv39MetaData; - -/// Metadata of RISC-V Sv48 page tables. -#[derive(Clone, Copy)] -pub struct Sv48MetaData; - -impl const PagingMetaData for Sv39MetaData { - const LEVELS: usize = 3; - const PA_MAX_BITS: usize = 56; - const VA_MAX_BITS: usize = 39; -} - -impl const PagingMetaData for Sv48MetaData { - const LEVELS: usize = 4; - const PA_MAX_BITS: usize = 56; - const VA_MAX_BITS: usize = 48; -} - -/// Sv39: Page-Based 39-bit (3 levels) Virtual-Memory System. -pub type Sv39PageTable = PageTable64; - -/// Sv48: Page-Based 48-bit (4 levels) Virtual-Memory System. 
-pub type Sv48PageTable = PageTable64; diff --git a/crates/page_table/src/arch/x86_64.rs b/crates/page_table/src/arch/x86_64.rs deleted file mode 100644 index 627f8f4..0000000 --- a/crates/page_table/src/arch/x86_64.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! x86 specific page table structures. - -use crate::{PageTable64, PagingMetaData}; -use page_table_entry::x86_64::X64PTE; - -/// metadata of x86_64 page tables. -pub struct X64PagingMetaData; - -impl const PagingMetaData for X64PagingMetaData { - const LEVELS: usize = 4; - const PA_MAX_BITS: usize = 52; - const VA_MAX_BITS: usize = 48; -} - -/// x86_64 page table. -pub type X64PageTable = PageTable64; diff --git a/crates/page_table/src/bits64.rs b/crates/page_table/src/bits64.rs deleted file mode 100644 index 2560153..0000000 --- a/crates/page_table/src/bits64.rs +++ /dev/null @@ -1,402 +0,0 @@ -extern crate alloc; - -use alloc::{vec, vec::Vec}; -use core::marker::PhantomData; - -use memory_addr::{PhysAddr, VirtAddr, PAGE_SIZE_4K}; - -use crate::{GenericPTE, PagingIf, PagingMetaData}; -use crate::{MappingFlags, PageSize, PagingError, PagingResult}; - -const ENTRY_COUNT: usize = 512; - -const fn p4_index(vaddr: VirtAddr) -> usize { - (vaddr.as_usize() >> (12 + 27)) & (ENTRY_COUNT - 1) -} - -const fn p3_index(vaddr: VirtAddr) -> usize { - (vaddr.as_usize() >> (12 + 18)) & (ENTRY_COUNT - 1) -} - -const fn p2_index(vaddr: VirtAddr) -> usize { - (vaddr.as_usize() >> (12 + 9)) & (ENTRY_COUNT - 1) -} - -const fn p1_index(vaddr: VirtAddr) -> usize { - (vaddr.as_usize() >> 12) & (ENTRY_COUNT - 1) -} - -/// A generic page table struct for 64-bit platform. -/// -/// It also tracks all intermediate level tables. They will be deallocated -/// When the [`PageTable64`] itself is dropped. -pub struct PageTable64 { - root_paddr: PhysAddr, - intrm_tables: Vec, - _phantom: PhantomData<(M, PTE, IF)>, -} - -impl PageTable64 { - /// Creates a new page table instance or returns the error. 
- /// - /// It will allocate a new page for the root page table. - pub fn try_new() -> PagingResult { - let root_paddr = Self::alloc_table()?; - Ok(Self { - root_paddr, - intrm_tables: vec![root_paddr], - _phantom: PhantomData, - }) - } - - /// Returns the physical address of the root page table. - pub const fn root_paddr(&self) -> PhysAddr { - self.root_paddr - } - - /// Maps a virtual page to a physical frame with the given `page_size` - /// and mapping `flags`. - /// - /// The virtual page starts with `vaddr`, amd the physical frame starts with - /// `target`. If the addresses is not aligned to the page size, they will be - /// aligned down automatically. - /// - /// Returns [`Err(PagingError::AlreadyMapped)`](PagingError::AlreadyMapped) - /// if the mapping is already present. - pub fn map( - &mut self, - vaddr: VirtAddr, - target: PhysAddr, - page_size: PageSize, - flags: MappingFlags, - ) -> PagingResult { - let entry = self.get_entry_mut_or_create(vaddr, page_size)?; - if !entry.is_unused() { - return Err(PagingError::AlreadyMapped); - } - *entry = GenericPTE::new_page(target.align_down(page_size), flags, page_size.is_huge()); - Ok(()) - } - /// Same as `PageTable64::map()`. This function will error if entry doesn't exist. Should be - /// used to edit PTE in page fault handler. - pub fn map_overwrite( - &mut self, - vaddr: VirtAddr, - target: PhysAddr, - page_size: PageSize, - flags: MappingFlags, - ) -> PagingResult { - let entry = self.get_entry_mut_or_create(vaddr, page_size)?; - - if entry.is_unused() { - return Err(PagingError::NotMapped); - } - *entry = GenericPTE::new_page(target.align_down(page_size), flags, page_size.is_huge()); - Ok(()) - } - - /// Unmaps the mapping starts with `vaddr`. - /// - /// Returns [`Err(PagingError::NotMapped)`](PagingError::NotMapped) if the - /// mapping is not present. 
- pub fn unmap(&mut self, vaddr: VirtAddr) -> PagingResult<(PhysAddr, PageSize)> { - let (entry, size) = self.get_entry_mut(vaddr)?; - if entry.is_unused() { - return Err(PagingError::NotMapped); - } - let paddr = entry.paddr(); - entry.clear(); - Ok((paddr, size)) - } - - /// Query the result of the mapping starts with `vaddr`. - /// - /// Returns the physical address of the target frame, mapping flags, and - /// the page size. - /// - /// Returns [`Err(PagingError::NotMapped)`](PagingError::NotMapped) if the - /// mapping is not present. - pub fn query(&self, vaddr: VirtAddr) -> PagingResult<(PhysAddr, MappingFlags, PageSize)> { - let (entry, size) = self.get_entry_mut(vaddr)?; - if entry.is_unused() { - return Err(PagingError::NotMapped); - } - let off = vaddr.align_offset(size); - Ok((entry.paddr() + off, entry.flags(), size)) - } - - /// Updates the target or flags of the mapping starts with `vaddr`. If the - /// corresponding argument is `None`, it will not be updated. - /// - /// Returns the page size of the mapping. - /// - /// Returns [`Err(PagingError::NotMapped)`](PagingError::NotMapped) if the - /// mapping is not present. - pub fn update( - &mut self, - vaddr: VirtAddr, - paddr: Option, - flags: Option, - ) -> PagingResult { - let (entry, size) = self.get_entry_mut(vaddr)?; - if let Some(paddr) = paddr { - entry.set_paddr(paddr); - } - if let Some(flags) = flags { - entry.set_flags(flags, size.is_huge()); - } - Ok(size) - } - - /// Map a contiguous virtual memory region to a contiguous physical memory - /// region with the given mapping `flags`. - /// - /// The virtual and physical memory regions start with `vaddr` and `paddr` - /// respectively. The region size is `size`. The addresses and `size` must - /// be aligned to 4K, otherwise it will return [`Err(PagingError::NotAligned)`]. - /// - /// When `allow_huge` is true, it will try to map the region with huge pages - /// if possible. Otherwise, it will map the region with 4K pages. 
- /// - /// [`Err(PagingError::NotAligned)`]: PagingError::NotAligned - pub fn map_region( - &mut self, - vaddr: VirtAddr, - paddr: PhysAddr, - size: usize, - flags: MappingFlags, - allow_huge: bool, - ) -> PagingResult { - if !vaddr.is_aligned(PageSize::Size4K) - || !paddr.is_aligned(PageSize::Size4K) - || !memory_addr::is_aligned(size, PageSize::Size4K.into()) - { - return Err(PagingError::NotAligned); - } - trace!( - "map_region({:#x}): [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}", - self.root_paddr(), - vaddr, - vaddr + size, - paddr, - paddr + size, - flags, - ); - let mut vaddr = vaddr; - let mut paddr = paddr; - let mut size = size; - while size > 0 { - let page_size = if allow_huge { - if vaddr.is_aligned(PageSize::Size1G) - && paddr.is_aligned(PageSize::Size1G) - && size >= PageSize::Size1G as usize - { - PageSize::Size1G - } else if vaddr.is_aligned(PageSize::Size2M) - && paddr.is_aligned(PageSize::Size2M) - && size >= PageSize::Size2M as usize - { - PageSize::Size2M - } else { - PageSize::Size4K - } - } else { - PageSize::Size4K - }; - self.map(vaddr, paddr, page_size, flags).inspect_err(|e| { - error!( - "failed to map page: {:#x?}({:?}) -> {:#x?}, {:?}", - vaddr, page_size, paddr, e - ) - })?; - vaddr += page_size as usize; - paddr += page_size as usize; - size -= page_size as usize; - } - Ok(()) - } - - /// Unmap a contiguous virtual memory region. - /// - /// The region must be mapped before using [`PageTable64::map_region`], or - /// unexpected behaviors may occur. 
- pub fn unmap_region(&mut self, vaddr: VirtAddr, size: usize) -> PagingResult { - trace!( - "unmap_region({:#x}) [{:#x}, {:#x})", - self.root_paddr(), - vaddr, - vaddr + size, - ); - let mut vaddr = vaddr; - let mut size = size; - while size > 0 { - let (_, page_size) = self - .unmap(vaddr) - .inspect_err(|e| error!("failed to unmap page: {:#x?}, {:?}", vaddr, e))?; - assert!(vaddr.is_aligned(page_size)); - assert!(page_size as usize <= size); - vaddr += page_size as usize; - size -= page_size as usize; - } - Ok(()) - } - /// Walk the page table recursively. - /// - /// When reaching the leaf page table, call `func` on the current page table - /// entry. The max number of enumerations in one table is limited by `limit`. - /// - /// The arguments of `func` are: - /// - Current level (starts with `0`): `usize` - /// - The index of the entry in the current-level table: `usize` - /// - The virtual address that is mapped to the entry: [`VirtAddr`] - /// - The reference of the entry: [`&PTE`](GenericPTE) - pub fn walk(&self, limit: usize, func: &F) -> PagingResult - where - F: Fn(usize, usize, VirtAddr, &PTE), - { - self.walk_recursive( - self.table_of(self.root_paddr()), - 0, - VirtAddr::from(0), - limit, - func, - ) - } -} - -// Private implements. 
-impl PageTable64 { - fn alloc_table() -> PagingResult { - if let Some(paddr) = IF::alloc_frame() { - let ptr = IF::phys_to_virt(paddr).as_mut_ptr(); - unsafe { core::ptr::write_bytes(ptr, 0, PAGE_SIZE_4K) }; - Ok(paddr) - } else { - Err(PagingError::NoMemory) - } - } - - fn table_of<'a>(&self, paddr: PhysAddr) -> &'a [PTE] { - let ptr = IF::phys_to_virt(paddr).as_ptr() as _; - unsafe { core::slice::from_raw_parts(ptr, ENTRY_COUNT) } - } - - fn table_of_mut<'a>(&self, paddr: PhysAddr) -> &'a mut [PTE] { - let ptr = IF::phys_to_virt(paddr).as_mut_ptr() as _; - unsafe { core::slice::from_raw_parts_mut(ptr, ENTRY_COUNT) } - } - - fn next_table_mut<'a>(&self, entry: &PTE) -> PagingResult<&'a mut [PTE]> { - if !entry.is_present() { - Err(PagingError::NotMapped) - } else if entry.is_huge() { - Err(PagingError::MappedToHugePage) - } else { - Ok(self.table_of_mut(entry.paddr())) - } - } - - fn next_table_mut_or_create<'a>(&mut self, entry: &mut PTE) -> PagingResult<&'a mut [PTE]> { - if entry.is_unused() { - let paddr = Self::alloc_table()?; - self.intrm_tables.push(paddr); - *entry = GenericPTE::new_table(paddr); - Ok(self.table_of_mut(paddr)) - } else { - self.next_table_mut(entry) - } - } - - pub fn get_entry_mut(&self, vaddr: VirtAddr) -> PagingResult<(&mut PTE, PageSize)> { - let p3 = if M::LEVELS == 3 { - self.table_of_mut(self.root_paddr()) - } else if M::LEVELS == 4 { - let p4 = self.table_of_mut(self.root_paddr()); - let p4e = &mut p4[p4_index(vaddr)]; - self.next_table_mut(p4e)? 
- } else { - unreachable!() - }; - let p3e = &mut p3[p3_index(vaddr)]; - if p3e.is_huge() { - return Ok((p3e, PageSize::Size1G)); - } - - let p2 = self.next_table_mut(p3e)?; - let p2e = &mut p2[p2_index(vaddr)]; - if p2e.is_huge() { - return Ok((p2e, PageSize::Size2M)); - } - - let p1 = self.next_table_mut(p2e)?; - let p1e = &mut p1[p1_index(vaddr)]; - Ok((p1e, PageSize::Size4K)) - } - - fn get_entry_mut_or_create( - &mut self, - vaddr: VirtAddr, - page_size: PageSize, - ) -> PagingResult<&mut PTE> { - let p3 = if M::LEVELS == 3 { - self.table_of_mut(self.root_paddr()) - } else if M::LEVELS == 4 { - let p4 = self.table_of_mut(self.root_paddr()); - let p4e = &mut p4[p4_index(vaddr)]; - self.next_table_mut_or_create(p4e)? - } else { - unreachable!() - }; - let p3e = &mut p3[p3_index(vaddr)]; - if page_size == PageSize::Size1G { - return Ok(p3e); - } - - let p2 = self.next_table_mut_or_create(p3e)?; - let p2e = &mut p2[p2_index(vaddr)]; - if page_size == PageSize::Size2M { - return Ok(p2e); - } - - let p1 = self.next_table_mut_or_create(p2e)?; - let p1e = &mut p1[p1_index(vaddr)]; - Ok(p1e) - } - - fn walk_recursive( - &self, - table: &[PTE], - level: usize, - start_vaddr: VirtAddr, - limit: usize, - func: &F, - ) -> PagingResult - where - F: Fn(usize, usize, VirtAddr, &PTE), - { - let mut n = 0; - for (i, entry) in table.iter().enumerate() { - let vaddr = start_vaddr + (i << (12 + (M::LEVELS - 1 - level) * 9)); - if entry.is_present() { - func(level, i, vaddr, entry); - if level < M::LEVELS - 1 && !entry.is_huge() { - let table_entry = self.next_table_mut(entry)?; - self.walk_recursive(table_entry, level + 1, vaddr, limit, func)?; - } - n += 1; - if n >= limit { - break; - } - } - } - Ok(()) - } -} - -impl Drop for PageTable64 { - fn drop(&mut self) { - for frame in &self.intrm_tables { - IF::dealloc_frame(*frame); - } - } -} diff --git a/crates/page_table/src/lib.rs b/crates/page_table/src/lib.rs deleted file mode 100644 index 9e781de..0000000 --- 
a/crates/page_table/src/lib.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! This crate provides generic, unified, architecture-independent, and OS-free -//! page table structures for various hardware architectures. -//! -//! The core struct is [`PageTable64`]. OS-functions and -//! architecture-dependent types are provided by generic parameters: -//! -//! - `M`: The architecture-dependent metadata, requires to implement -//! the [`PagingMetaData`] trait. -//! - `PTE`: The architecture-dependent page table entry, requires to implement -//! the [`GenericPTE`] trait. -//! - `IF`: OS-functions such as physical memory allocation, requires to -//! implement the [`PagingIf`] trait. -//! -//! Currently supported architectures and page table structures: -//! -//! - x86: [`x86_64::X64PageTable`] -//! - ARM: [`aarch64::A64PageTable`] -//! - RISC-V: [`riscv::Sv39PageTable`], [`riscv::Sv48PageTable`] - -#![no_std] -#![feature(const_trait_impl)] -#![feature(doc_auto_cfg)] -#![feature(result_option_inspect)] - -#[macro_use] -extern crate log; - -mod arch; -mod bits64; - -use memory_addr::{PhysAddr, VirtAddr}; - -pub use self::arch::*; -pub use self::bits64::PageTable64; - -#[doc(no_inline)] -pub use page_table_entry::{GenericPTE, MappingFlags}; - -/// The error type for page table operation failures. -#[derive(Debug)] -pub enum PagingError { - /// Cannot allocate memory. - NoMemory, - /// The address is not aligned to the page size. - NotAligned, - /// The mapping is not present. - NotMapped, - /// The mapping is already present. - AlreadyMapped, - /// The page table entry represents a huge page, but the target physical - /// frame is 4K in size. - MappedToHugePage, -} - -/// The specialized `Result` type for page table operations. -pub type PagingResult = Result; - -/// The **architecture-dependent** metadata that must be provided for -/// [`PageTable64`]. -#[const_trait] -pub trait PagingMetaData: Sync + Send + Sized { - /// The number of levels of the hardware page table. 
- const LEVELS: usize; - /// The maximum number of bits of physical address. - const PA_MAX_BITS: usize; - /// The maximum number of bits of virtual address. - const VA_MAX_BITS: usize; - - /// The maximum physical address. - const PA_MAX_ADDR: usize = (1 << Self::PA_MAX_BITS) - 1; - - /// Whether a given physical address is valid. - #[inline] - fn paddr_is_valid(paddr: usize) -> bool { - paddr <= Self::PA_MAX_ADDR // default - } - - /// Whether a given virtual address is valid. - #[inline] - fn vaddr_is_valid(vaddr: usize) -> bool { - // default: top bits sign extended - let top_mask = usize::MAX << (Self::VA_MAX_BITS - 1); - (vaddr & top_mask) == 0 || (vaddr & top_mask) == top_mask - } -} - -/// The low-level **OS-dependent** helpers that must be provided for -/// [`PageTable64`]. -pub trait PagingIf: Sized { - /// Request to allocate a 4K-sized physical frame. - fn alloc_frame() -> Option; - /// Request to free a allocated physical frame. - fn dealloc_frame(paddr: PhysAddr); - /// Returns a virtual address that maps to the given physical address. - /// - /// Used to access the physical memory directly in page table implementation. - fn phys_to_virt(paddr: PhysAddr) -> VirtAddr; -} - -/// The page sizes supported by the hardware page table. -#[repr(usize)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum PageSize { - /// Size of 4 kilobytes (212 bytes). - Size4K = 0x1000, - /// Size of 2 megabytes (221 bytes). - Size2M = 0x20_0000, - /// Size of 1 gigabytes (230 bytes). - Size1G = 0x4000_0000, -} - -impl PageSize { - /// Whether this page size is considered huge (larger than 4K). 
- pub const fn is_huge(self) -> bool { - matches!(self, Self::Size1G | Self::Size2M) - } -} - -impl From for usize { - #[inline] - fn from(size: PageSize) -> usize { - size as usize - } -} diff --git a/crates/page_table_entry/Cargo.toml b/crates/page_table_entry/Cargo.toml deleted file mode 100644 index a5a35df..0000000 --- a/crates/page_table_entry/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "page_table_entry" -version = "0.1.0" -edition = "2021" -authors = ["Yuekai Jia "] -description = "Page table entry definition for various hardware architectures" -license = "GPL-3.0-or-later OR Apache-2.0" -homepage = "https://github.com/rcore-os/arceos" -repository = "https://github.com/rcore-os/arceos/tree/main/crates/page_table_entry" -documentation = "https://rcore-os.github.io/arceos/page_table_entry/index.html" - -[dependencies] -bitflags = "2.2" -memory_addr = { path = "../memory_addr" } -aarch64-cpu = "9.3" # TODO: put it in [target.'cfg(target_arch = "aarch64")'.dependencies] - -[target.'cfg(target_arch = "x86_64")'.dependencies] -x86_64 = "0.14" diff --git a/crates/page_table_entry/src/arch/aarch64.rs b/crates/page_table_entry/src/arch/aarch64.rs deleted file mode 100644 index 5b88b97..0000000 --- a/crates/page_table_entry/src/arch/aarch64.rs +++ /dev/null @@ -1,240 +0,0 @@ -//! AArch64 VMSAv8-64 translation table format descriptors. - -use aarch64_cpu::registers::MAIR_EL1; -use core::fmt; -use memory_addr::PhysAddr; - -use crate::{GenericPTE, MappingFlags}; - -bitflags::bitflags! { - /// Memory attribute fields in the VMSAv8-64 translation table format descriptors. - #[derive(Debug)] - pub struct DescriptorAttr: u64 { - // Attribute fields in stage 1 VMSAv8-64 Block and Page descriptors: - - /// Whether the descriptor is valid. - const VALID = 1 << 0; - /// The descriptor gives the address of the next level of translation table or 4KB page. - /// (not a 2M, 1G block) - const NON_BLOCK = 1 << 1; - /// Memory attributes index field. 
- const ATTR_INDX = 0b111 << 2; - /// Non-secure bit. For memory accesses from Secure state, specifies whether the output - /// address is in Secure or Non-secure memory. - const NS = 1 << 5; - /// Access permission: accessable at EL0. - const AP_EL0 = 1 << 6; - /// Access permission: read-only. - const AP_RO = 1 << 7; - /// Shareability: Inner Shareable (otherwise Outer Shareable). - const INNER = 1 << 8; - /// Shareability: Inner or Outer Shareable (otherwise Non-shareable). - const SHAREABLE = 1 << 9; - /// The Access flag. - const AF = 1 << 10; - /// The not global bit. - const NG = 1 << 11; - /// Indicates that 16 adjacent translation table entries point to contiguous memory regions. - const CONTIGUOUS = 1 << 52; - /// The Privileged execute-never field. - const PXN = 1 << 53; - /// The Execute-never or Unprivileged execute-never field. - const UXN = 1 << 54; - - // Next-level attributes in stage 1 VMSAv8-64 Table descriptors: - - /// PXN limit for subsequent levels of lookup. - const PXN_TABLE = 1 << 59; - /// XN limit for subsequent levels of lookup. - const XN_TABLE = 1 << 60; - /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted. - const AP_NO_EL0_TABLE = 1 << 61; - /// Access permissions limit for subsequent levels of lookup: write access not permitted. - const AP_NO_WRITE_TABLE = 1 << 62; - /// For memory accesses from Secure state, specifies the Security state for subsequent - /// levels of lookup. - const NS_TABLE = 1 << 63; - } -} - -/// The memory attributes index field in the descriptor, which is used to index -/// into the MAIR (Memory Attribute Indirection Register). 
-#[repr(u64)] -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum MemAttr { - /// Device-nGnRE memory - Device = 0, - /// Normal memory - Normal = 1, - /// Normal non-cacheable memory - NormalNonCacheable = 2, -} - -impl DescriptorAttr { - #[allow(clippy::unusual_byte_groupings)] - const ATTR_INDEX_MASK: u64 = 0b111_00; - - /// Constructs a descriptor from the memory index, leaving the other fields - /// empty. - pub const fn from_mem_attr(idx: MemAttr) -> Self { - let mut bits = (idx as u64) << 2; - if matches!(idx, MemAttr::Normal | MemAttr::NormalNonCacheable) { - bits |= Self::INNER.bits() | Self::SHAREABLE.bits(); - } - Self::from_bits_retain(bits) - } - - /// Returns the memory attribute index field. - pub const fn mem_attr(&self) -> Option { - let idx = (self.bits() & Self::ATTR_INDEX_MASK) >> 2; - Some(match idx { - 0 => MemAttr::Device, - 1 => MemAttr::Normal, - 2 => MemAttr::NormalNonCacheable, - _ => return None, - }) - } -} - -impl MemAttr { - /// The MAIR_ELx register should be set to this value to match the memory - /// attributes in the descriptors. 
- pub const MAIR_VALUE: u64 = { - // Device-nGnRE memory - let attr0 = MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck.value; - // Normal memory - let attr1 = MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc.value - | MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc.value; - let attr2 = MAIR_EL1::Attr2_Normal_Inner::NonCacheable.value - + MAIR_EL1::Attr2_Normal_Outer::NonCacheable.value; - attr0 | attr1 | attr2 // 0x44_ff_04 - }; -} - -impl From<DescriptorAttr> for MappingFlags { - fn from(attr: DescriptorAttr) -> Self { - let mut flags = Self::empty(); - if attr.contains(DescriptorAttr::VALID) { - flags |= Self::READ; - } - if !attr.contains(DescriptorAttr::AP_RO) { - flags |= Self::WRITE; - } - if attr.contains(DescriptorAttr::AP_EL0) { - flags |= Self::USER; - if !attr.contains(DescriptorAttr::UXN) { - flags |= Self::EXECUTE; - } - } else if !attr.intersects(DescriptorAttr::PXN) { - flags |= Self::EXECUTE; - } - match attr.mem_attr() { - Some(MemAttr::Device) => flags |= Self::DEVICE, - Some(MemAttr::NormalNonCacheable) => flags |= Self::UNCACHED, - _ => {} - } - flags - } -} - -impl From<MappingFlags> for DescriptorAttr { - fn from(flags: MappingFlags) -> Self { - let mut attr = if flags.contains(MappingFlags::DEVICE) { - Self::from_mem_attr(MemAttr::Device) - } else if flags.contains(MappingFlags::UNCACHED) { - Self::from_mem_attr(MemAttr::NormalNonCacheable) - } else { - Self::from_mem_attr(MemAttr::Normal) - }; - if flags.contains(MappingFlags::READ) { - attr |= Self::VALID; - } - if !flags.contains(MappingFlags::WRITE) { - attr |= Self::AP_RO; - } - if flags.contains(MappingFlags::USER) { - attr |= Self::AP_EL0 | Self::PXN; - if !flags.contains(MappingFlags::EXECUTE) { - attr |= Self::UXN; - } - } else { - attr |= Self::UXN; - if !flags.contains(MappingFlags::EXECUTE) { - attr |= Self::PXN; - } - } - attr - } -} - -/// A VMSAv8-64 translation table descriptor. 
-/// -/// Note that the **AttrIndx\[2:0\]** (bit\[4:2\]) field is set to `0` for device -/// memory, and `1` for normal memory. The system must configure the MAIR_ELx -/// system register accordingly. -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct A64PTE(u64); - -impl A64PTE { - const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000; // bits 12..48 - - /// Creates an empty descriptor with all bits set to zero. - pub const fn empty() -> Self { - Self(0) - } -} - -impl GenericPTE for A64PTE { - fn new_page(paddr: PhysAddr, flags: MappingFlags, is_huge: bool) -> Self { - let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF; - if !is_huge { - attr |= DescriptorAttr::NON_BLOCK; - } - Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn new_table(paddr: PhysAddr) -> Self { - let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID; - Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn paddr(&self) -> PhysAddr { - PhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize) - } - fn flags(&self) -> MappingFlags { - DescriptorAttr::from_bits_truncate(self.0).into() - } - fn set_paddr(&mut self, paddr: PhysAddr) { - self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK) - } - fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) { - let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF; - if !is_huge { - attr |= DescriptorAttr::NON_BLOCK; - } - self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits(); - } - - fn is_unused(&self) -> bool { - self.0 == 0 - } - fn is_present(&self) -> bool { - DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID) - } - fn is_huge(&self) -> bool { - !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK) - } - fn clear(&mut self) { - self.0 = 0 - } -} - -impl fmt::Debug for A64PTE { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("A64PTE"); - 
f.field("raw", &self.0) - .field("paddr", &self.paddr()) - .field("attr", &DescriptorAttr::from_bits_truncate(self.0)) - .field("flags", &self.flags()) - .finish() - } -} diff --git a/crates/page_table_entry/src/arch/mod.rs b/crates/page_table_entry/src/arch/mod.rs deleted file mode 100644 index 9491f95..0000000 --- a/crates/page_table_entry/src/arch/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[cfg(target_arch = "x86_64")] -pub mod x86_64; - -#[doc(cfg(any(target_arch = "riscv32", target_arch = "riscv64")))] -pub mod riscv; - -// TODO: `#[cfg(any(target_arch = "aarch64", doc))]` does not work. -#[doc(cfg(target_arch = "aarch64"))] -pub mod aarch64; diff --git a/crates/page_table_entry/src/arch/riscv.rs b/crates/page_table_entry/src/arch/riscv.rs deleted file mode 100644 index 16c9fc5..0000000 --- a/crates/page_table_entry/src/arch/riscv.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! RISC-V page table entries. - -use core::fmt; -use memory_addr::PhysAddr; - -use crate::{GenericPTE, MappingFlags}; - -bitflags::bitflags! { - /// Page-table entry flags. - #[derive(Debug)] - pub struct PTEFlags: usize { - /// Whether the PTE is valid. - const V = 1 << 0; - /// Whether the page is readable. - const R = 1 << 1; - /// Whether the page is writable. - const W = 1 << 2; - /// Whether the page is executable. - const X = 1 << 3; - /// Whether the page is accessible to user mode. - const U = 1 << 4; - /// Designates a global mapping. - const G = 1 << 5; - /// Indicates the virtual page has been read, written, or fetched from - /// since the last time the A bit was cleared. - const A = 1 << 6; - /// Indicates the virtual page has been written since the last time the - /// D bit was cleared. 
- const D = 1 << 7; - } -} - -impl From<PTEFlags> for MappingFlags { - fn from(f: PTEFlags) -> Self { - let mut ret = Self::empty(); - if f.contains(PTEFlags::R) { - ret |= Self::READ; - } - if f.contains(PTEFlags::W) { - ret |= Self::WRITE; - } - if f.contains(PTEFlags::X) { - ret |= Self::EXECUTE; - } - if f.contains(PTEFlags::U) { - ret |= Self::USER; - } - ret - } -} - -impl From<MappingFlags> for PTEFlags { - fn from(f: MappingFlags) -> Self { - if f.is_empty() { - return Self::empty(); - } - let mut ret = Self::V; - if f.contains(MappingFlags::READ) { - ret |= Self::R; - } - if f.contains(MappingFlags::WRITE) { - ret |= Self::W; - } - if f.contains(MappingFlags::EXECUTE) { - ret |= Self::X; - } - if f.contains(MappingFlags::USER) { - ret |= Self::U; - } - ret - } -} - -/// Sv39 and Sv48 page table entry for RV64 systems. -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct Rv64PTE(u64); - -impl Rv64PTE { - const PHYS_ADDR_MASK: u64 = (1 << 54) - (1 << 10); // bits 10..54 -} - -impl GenericPTE for Rv64PTE { - fn new_page(paddr: PhysAddr, flags: MappingFlags, _is_huge: bool) -> Self { - let flags = PTEFlags::from(flags) | PTEFlags::A | PTEFlags::D; - debug_assert!(flags.intersects(PTEFlags::R | PTEFlags::X)); - Self(flags.bits() as u64 | ((paddr.as_usize() >> 2) as u64 & Self::PHYS_ADDR_MASK)) - } - fn new_table(paddr: PhysAddr) -> Self { - Self(PTEFlags::V.bits() as u64 | ((paddr.as_usize() >> 2) as u64 & Self::PHYS_ADDR_MASK)) - } - fn paddr(&self) -> PhysAddr { - PhysAddr::from(((self.0 & Self::PHYS_ADDR_MASK) << 2) as usize) - } - fn flags(&self) -> MappingFlags { - PTEFlags::from_bits_truncate(self.0 as usize).into() - } - fn set_paddr(&mut self, paddr: PhysAddr) { - self.0 = (self.0 & !Self::PHYS_ADDR_MASK) - | ((paddr.as_usize() as u64 >> 2) & Self::PHYS_ADDR_MASK); - } - fn set_flags(&mut self, flags: MappingFlags, _is_huge: bool) { - let flags = PTEFlags::from(flags) | PTEFlags::A | PTEFlags::D; - debug_assert!(flags.intersects(PTEFlags::R | PTEFlags::X)); - self.0 = 
(self.0 & Self::PHYS_ADDR_MASK) | flags.bits() as u64; - } - - fn is_unused(&self) -> bool { - self.0 == 0 - } - fn is_present(&self) -> bool { - PTEFlags::from_bits_truncate(self.0 as usize).contains(PTEFlags::V) - } - fn is_huge(&self) -> bool { - PTEFlags::from_bits_truncate(self.0 as usize).intersects(PTEFlags::R | PTEFlags::X) - } - fn clear(&mut self) { - self.0 = 0 - } -} - -impl fmt::Debug for Rv64PTE { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("Rv64PTE"); - f.field("raw", &self.0) - .field("paddr", &self.paddr()) - .field("flags", &self.flags()) - .finish() - } -} diff --git a/crates/page_table_entry/src/arch/x86_64.rs b/crates/page_table_entry/src/arch/x86_64.rs deleted file mode 100644 index 9ae024c..0000000 --- a/crates/page_table_entry/src/arch/x86_64.rs +++ /dev/null @@ -1,114 +0,0 @@ -//! x86 page table entries on 64-bit paging. - -use core::fmt; -use memory_addr::PhysAddr; - -pub use x86_64::structures::paging::page_table::PageTableFlags as PTF; - -use crate::{GenericPTE, MappingFlags}; - -impl From<PTF> for MappingFlags { - fn from(f: PTF) -> Self { - if f.is_empty() { - return Self::empty(); - } - let mut ret = Self::READ; - if f.contains(PTF::WRITABLE) { - ret |= Self::WRITE; - } - if !f.contains(PTF::NO_EXECUTE) { - ret |= Self::EXECUTE; - } - if f.contains(PTF::USER_ACCESSIBLE) { - ret |= Self::USER; - } - if f.contains(PTF::NO_CACHE) { - ret |= Self::UNCACHED; - } - ret - } -} - -impl From<MappingFlags> for PTF { - fn from(f: MappingFlags) -> Self { - if f.is_empty() { - return Self::empty(); - } - let mut ret = Self::PRESENT; - if f.contains(MappingFlags::WRITE) { - ret |= Self::WRITABLE; - } - if !f.contains(MappingFlags::EXECUTE) { - ret |= Self::NO_EXECUTE; - } - if f.contains(MappingFlags::USER) { - ret |= Self::USER_ACCESSIBLE; - } - if f.contains(MappingFlags::DEVICE) || f.contains(MappingFlags::UNCACHED) { - ret |= Self::NO_CACHE | Self::WRITE_THROUGH; - } - ret - } -} - -/// An x86_64 page table entry. 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct X64PTE(u64); - -impl X64PTE { - const PHYS_ADDR_MASK: u64 = 0x000f_ffff_ffff_f000; // bits 12..52 -} - -impl GenericPTE for X64PTE { - fn new_page(paddr: PhysAddr, flags: MappingFlags, is_huge: bool) -> Self { - let mut flags = PTF::from(flags); - if is_huge { - flags |= PTF::HUGE_PAGE; - } - Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn new_table(paddr: PhysAddr) -> Self { - let flags = PTF::PRESENT | PTF::WRITABLE | PTF::USER_ACCESSIBLE; - Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn paddr(&self) -> PhysAddr { - PhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize) - } - fn flags(&self) -> MappingFlags { - PTF::from_bits_truncate(self.0).into() - } - fn set_paddr(&mut self, paddr: PhysAddr) { - self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK) - } - fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) { - let mut flags = PTF::from(flags); - if is_huge { - flags |= PTF::HUGE_PAGE; - } - self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags.bits() - } - - fn is_unused(&self) -> bool { - self.0 == 0 - } - fn is_present(&self) -> bool { - PTF::from_bits_truncate(self.0).contains(PTF::PRESENT) - } - fn is_huge(&self) -> bool { - PTF::from_bits_truncate(self.0).contains(PTF::HUGE_PAGE) - } - fn clear(&mut self) { - self.0 = 0 - } -} - -impl fmt::Debug for X64PTE { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("X64PTE"); - f.field("raw", &self.0) - .field("paddr", &self.paddr()) - .field("flags", &self.flags()) - .finish() - } -} diff --git a/crates/page_table_entry/src/lib.rs b/crates/page_table_entry/src/lib.rs deleted file mode 100644 index 8b774d2..0000000 --- a/crates/page_table_entry/src/lib.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! This crate provides the definition of page table entry for various hardware -//! architectures. -//! -//! 
Currently supported architectures and page table entry types: -//! -//! - x86: [`x86_64::X64PTE`] -//! - ARM: [`aarch64::A64PTE`] -//! - RISC-V: [`riscv::Rv64PTE`] -//! -//! All these types implement the [`GenericPTE`] trait, which provides unified -//! methods for manipulating various page table entries. - -#![no_std] -#![feature(doc_auto_cfg)] -#![feature(doc_cfg)] - -mod arch; - -use core::fmt::Debug; -use memory_addr::PhysAddr; - -pub use self::arch::*; - -bitflags::bitflags! { - /// Generic page table entry flags that indicate the corresponding mapped - /// memory region permissions and attributes. - #[derive(Debug, Clone, Copy)] - pub struct MappingFlags: usize { - /// The memory is mapped for lazy map - const Lazy = 0; - /// The memory is readable. - const READ = 1 << 0; - /// The memory is writable. - const WRITE = 1 << 1; - /// The memory is executable. - const EXECUTE = 1 << 2; - /// The memory is user accessible. - const USER = 1 << 3; - /// The memory is device memory. - const DEVICE = 1 << 4; - /// The memory is uncached. - const UNCACHED = 1 << 5; - } -} - -/// A generic page table entry. -/// -/// All architecture-specific page table entry types implement this trait. -pub trait GenericPTE: Debug + Clone + Copy + Sync + Send + Sized { - /// Creates a page table entry point to a terminate page or block. - fn new_page(paddr: PhysAddr, flags: MappingFlags, is_huge: bool) -> Self; - /// Creates a page table entry point to a next level page table. - fn new_table(paddr: PhysAddr) -> Self; - - /// Returns the physical address mapped by this entry. - fn paddr(&self) -> PhysAddr; - /// Returns the flags of this entry. - fn flags(&self) -> MappingFlags; - - /// Set mapped physical address of the entry. - fn set_paddr(&mut self, paddr: PhysAddr); - /// Set flags of the entry. - fn set_flags(&mut self, flags: MappingFlags, is_huge: bool); - - /// Returns whether this entry is zero. 
- fn is_unused(&self) -> bool; - /// Returns whether this entry flag indicates present. - fn is_present(&self) -> bool; - /// For non-last level translation, returns whether this entry maps to a - /// huge frame. - fn is_huge(&self) -> bool; - /// Set this entry to zero. - fn clear(&mut self); -}