Skip to content

Commit

Permalink
kernel: make core-local kernel instance statically addressable
Browse files Browse the repository at this point in the history
  • Loading branch information
Qix- committed Sep 12, 2024
1 parent 4996d2c commit e4e7ec9
Show file tree
Hide file tree
Showing 12 changed files with 175 additions and 54 deletions.
10 changes: 8 additions & 2 deletions oro-arch-aarch64/src/boot/secondary.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,12 @@
//! Secondary core (application processor) boot routine.
use crate::{mem::address_space::AddressSpaceLayout, psci::PsciMethod};
use crate::{
mem::{
address_space::{AddressSpaceLayout, Ttbr1Handle},
segment::Segment,
},
psci::PsciMethod,
};
use core::{
arch::asm,
ffi::CStr,
Expand Down Expand Up @@ -350,7 +356,7 @@ unsafe fn boot_secondary(

// Allocate a new stack for it...
let stack_segment = AddressSpaceLayout::kernel_stack();
let stack_end = stack_segment.range(&mapper).1 & !0xFFF;
let stack_end = <&Segment as AddressSegment<Ttbr1Handle>>::range(&stack_segment).1 & !0xFFF;

for stack_virt in (stack_end - stack_pages * 4096..stack_end).step_by(4096) {
let page = pfa.allocate().ok_or(SecondaryBootError::OutOfMemory)?;
Expand Down
4 changes: 3 additions & 1 deletion oro-arch-aarch64/src/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,9 @@ pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
/// Must be called _exactly once_ per core, per core lifetime
/// (i.e. boot, or powerdown/subsequent bringup).
pub unsafe fn boot() -> ! {
let _kernel = Kernel::new(KERNEL_STATE.assume_init_ref());
// SAFETY(qix-): THIS MUST ABSOLUTELY BE FIRST.
let _kernel = Kernel::initialize_for_core(KERNEL_STATE.assume_init_ref())
.expect("failed to initialize kernel");

oro_debug::dbg!("boot");

Expand Down
40 changes: 40 additions & 0 deletions oro-arch-aarch64/src/mem/address_space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ impl AddressSpaceLayout {
/// There is no associated descriptor for this index;
/// it's used however needed for the boot process.
pub const BOOT_RESERVED_IDX: usize = 350;
/// The segment for the kernel core-local data.
pub const KERNEL_CORE_LOCAL_IDX: usize = 375;
/// The segment for the ring registry
pub const KERNEL_RING_REGISTRY_IDX: usize = 400;
/// The segment for the module instance registry
Expand Down Expand Up @@ -422,6 +424,44 @@ unsafe impl AddressSpace for AddressSpaceLayout {
&DESCRIPTOR
}

fn kernel_core_local() -> Self::SupervisorSegment {
    // Descriptor for the kernel's core-local data segment. The segment
    // spans exactly one top-level (L0) entry, is accessible only from
    // the kernel, and is never executable at any exception level.
    #[expect(clippy::missing_docs_in_private_items)]
    // SAFETY: the descriptor constants below form a valid, self-consistent
    // SAFETY: segment definition; the templates are only used by the mapper
    // SAFETY: when populating page tables for this index.
    static DESCRIPTOR: Segment = unsafe {
        Segment {
            // Start and end are the same index: the segment occupies a
            // single L0 slot (KERNEL_CORE_LOCAL_IDX).
            valid_range: (
                AddressSpaceLayout::KERNEL_CORE_LOCAL_IDX,
                AddressSpaceLayout::KERNEL_CORE_LOCAL_IDX,
            ),
            // Intermediate (table) descriptors at L0..L2: valid,
            // kernel-only table access, no-exec for both user and kernel.
            l0_template: L0PageTableDescriptor::new()
                .with_valid()
                .with_table_access_permissions(PageTableEntryTableAccessPerm::KernelOnly)
                .with_user_no_exec()
                .with_kernel_no_exec(),
            l1_table_template: L1PageTableDescriptor::new()
                .with_valid()
                .with_table_access_permissions(PageTableEntryTableAccessPerm::KernelOnly)
                .with_user_no_exec()
                .with_kernel_no_exec(),
            l2_table_template: L2PageTableDescriptor::new()
                .with_valid()
                .with_table_access_permissions(PageTableEntryTableAccessPerm::KernelOnly)
                .with_user_no_exec()
                .with_kernel_no_exec(),
            // Leaf (L3) block descriptor: kernel read/write, no user
            // access, no-exec, non-secure, normal (cacheable) memory
            // via the MAIR index.
            l3_template: L3PageTableBlockDescriptor::new()
                .with_valid()
                .with_block_access_permissions(
                    PageTableEntryBlockAccessPerm::KernelRWUserNoAccess,
                )
                .with_user_no_exec()
                .with_kernel_no_exec()
                .with_not_secure()
                .with_mair_index(MairEntry::NormalMemory.index() as u64),
        }
    };

    &DESCRIPTOR
}

fn kernel_ring_registry() -> Self::SupervisorSegment {
#[expect(clippy::missing_docs_in_private_items)]
static DESCRIPTOR: Segment = unsafe {
Expand Down
2 changes: 1 addition & 1 deletion oro-arch-aarch64/src/mem/segment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,7 @@ impl Segment {
}

unsafe impl<Handle: TtbrHandle> AddressSegment<Handle> for &'static Segment {
fn range(&self, _handle: &Handle) -> (usize, usize) {
fn range(&self) -> (usize, usize) {
let start = (self.valid_range.0 << 39) | Handle::VIRT_START;
// TODO(qix-): Assumes a 48-bit virtual address space for each TT; will need
// TODO(qix-): to adjust this when other addressing modes are supported.
Expand Down
1 change: 1 addition & 0 deletions oro-arch-x86_64/src/boot/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use oro_boot_protocol::{memory_map::MemoryMapKind, MemoryMapEntry, MemoryMapEntr
use oro_debug::{dbg, dbg_warn};
use oro_macro::assert;
use oro_mem::{
mapper::AddressSegment,
pfa::{alloc::Alloc, filo::FiloPageFrameAllocator},
translate::{OffsetTranslator, Translator},
};
Expand Down
4 changes: 3 additions & 1 deletion oro-arch-x86_64/src/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,9 @@ pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
/// Must be called _exactly once_ per core, per core lifetime
/// (i.e. boot, or powerdown/subsequent bringup).
pub unsafe fn boot() -> ! {
let _kernel = Kernel::new(KERNEL_STATE.assume_init_ref());
// SAFETY(qix-): THIS MUST ABSOLUTELY BE FIRST.
let _kernel = Kernel::initialize_for_core(KERNEL_STATE.assume_init_ref())
.expect("failed to initialize kernel");

oro_debug::dbg!("boot");

Expand Down
21 changes: 21 additions & 0 deletions oro-arch-x86_64/src/mem/address_space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -281,6 +281,27 @@ unsafe impl AddressSpace for AddressSpaceLayout {
&DESCRIPTOR
}

fn kernel_core_local() -> Self::SupervisorSegment {
    // Descriptor for the kernel's core-local data segment: a single
    // top-level page table index, kernel read/write, non-executable.
    #[expect(clippy::missing_docs_in_private_items)]
    const DESCRIPTOR: AddressSegment = AddressSegment {
        // Start and end are the same index: the segment occupies
        // exactly one top-level slot (KERNEL_CORE_LOCAL_IDX).
        valid_range: (
            AddressSpaceLayout::KERNEL_CORE_LOCAL_IDX,
            AddressSpaceLayout::KERNEL_CORE_LOCAL_IDX,
        ),
        // Leaf entry template: global, present, writable, no-exec.
        entry_template: PageTableEntry::new()
            .with_global()
            .with_present()
            .with_no_exec()
            .with_writable(),
        // Intermediate (table) entry template: present, writable,
        // no-exec; note it is intentionally not marked global.
        intermediate_entry_template: PageTableEntry::new()
            .with_present()
            .with_no_exec()
            .with_writable(),
    };

    &DESCRIPTOR
}

fn kernel_ring_registry() -> Self::SupervisorSegment {
#[expect(clippy::missing_docs_in_private_items)]
const DESCRIPTOR: AddressSegment = AddressSegment {
Expand Down
45 changes: 16 additions & 29 deletions oro-arch-x86_64/src/mem/segment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,33 +61,6 @@ pub struct AddressSegment {
}

impl AddressSegment {
/// Returns the virtual range of the segment.
///
/// On x86_64, there's no need for handle information (the segment
/// occupies the same range regardless of the type of address space,
/// either user/supervisor).
///
/// This function can return the addresses directly, without a mapper handle,
/// and is the function called by the `Segment` trait implementation, too.
#[must_use]
pub fn range(&self) -> (usize, usize) {
// Get the current paging level.
match PagingLevel::current_from_cpu() {
PagingLevel::Level4 => {
(
sign_extend!(L4, self.valid_range.0 << 39),
sign_extend!(L4, (self.valid_range.1 << 39) | 0x0000_007F_FFFF_FFFF),
)
}
PagingLevel::Level5 => {
(
sign_extend!(L5, self.valid_range.0 << 48),
sign_extend!(L5, (self.valid_range.1 << 48) | 0x0000_FFFF_FFFF_FFFF),
)
}
}
}

/// Returns the page table entry for the given virtual address,
/// allocating intermediate page tables as necessary.
unsafe fn entry<'a, A, P, Handle: MapperHandle>(
Expand Down Expand Up @@ -383,8 +356,22 @@ impl AddressSegment {

unsafe impl Segment<AddressSpaceHandle> for &'static AddressSegment {
// TODO(qix-): Once const trait methods are stabilitized, make this const.
fn range(&self, _handle: &AddressSpaceHandle) -> (usize, usize) {
AddressSegment::range(self)
fn range(&self) -> (usize, usize) {
// Get the current paging level.
match PagingLevel::current_from_cpu() {
PagingLevel::Level4 => {
(
sign_extend!(L4, self.valid_range.0 << 39),
sign_extend!(L4, (self.valid_range.1 << 39) | 0x0000_007F_FFFF_FFFF),
)
}
PagingLevel::Level5 => {
(
sign_extend!(L5, self.valid_range.0 << 48),
sign_extend!(L5, (self.valid_range.1 << 48) | 0x0000_FFFF_FFFF_FFFF),
)
}
}
}

fn provision_as_shared<A, P>(
Expand Down
2 changes: 1 addition & 1 deletion oro-boot/src/map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ pub fn map_kernel_stack<
let last_stack_page_virt =
<<TargetAddressSpace as AddressSpace>::SupervisorSegment as AddressSegment<
<TargetAddressSpace as AddressSpace>::SupervisorHandle,
>>::range(&kernel_stack_segment, supervisor_space)
>>::range(&kernel_stack_segment)
.1 & !0xFFF;

// make sure top guard page is unmapped
Expand Down
78 changes: 69 additions & 9 deletions oro-kernel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,9 @@
// NOTE(qix-): https://github.com/rust-lang/rust/issues/95174
#![feature(adt_const_params)]

use oro_macro::assert;
use oro_mem::{
mapper::{AddressSpace, MapError},
mapper::{AddressSegment, AddressSpace, MapError},
pfa::alloc::Alloc,
translate::Translator,
};
Expand All @@ -26,9 +27,9 @@ pub mod thread;

/// Core-local instance of the Oro kernel.
///
/// Intended to live on the core's respective stack,
/// living for the lifetime of the core (and destroyed
/// and re-created on core powerdown/subsequent bringup).
/// This object's constructor sets up a core-local
/// mapping of itself such that it can be accessed
/// from anywhere in the kernel as a static reference.
pub struct Kernel<Pfa, Pat, AddrSpace, IntCtrl>
where
Pfa: Alloc + 'static,
Expand All @@ -47,7 +48,11 @@ where
AddrSpace: AddressSpace,
IntCtrl: InterruptController,
{
/// Creates a new core-local instance of the Kernel.
/// Initializes a new core-local instance of the Oro kernel.
///
/// The [`AddressSpace::kernel_core_local()`] segment must
/// be empty prior to calling this function, else it will
/// return [`MapError::Exists`].
///
/// # Safety
/// Must only be called once per CPU session (i.e.
Expand All @@ -58,8 +63,45 @@ where
/// The `state` given to the kernel must be shared for all
/// instances of the kernel that wish to partake in the same
/// Oro kernel universe.
pub unsafe fn new(state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>) -> Self {
Self { state }
pub unsafe fn initialize_for_core(
    state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>,
) -> Result<&'static Self, MapError> {
    // The instance is backed by exactly one 4KiB page (mapped below),
    // so the type must fit within a single page.
    assert::fits::<Self, 4096>();

    let mapper = AddrSpace::current_supervisor_space(&state.pat);
    let core_local_segment = AddrSpace::kernel_core_local();

    // The kernel instance lives at the very base of the core-local segment.
    let kernel_base = core_local_segment.range().0;
    debug_assert!((kernel_base as *mut Self).is_aligned());

    {
        // Scope the PFA lock to just the allocation and mapping.
        let mut pfa = state.pfa.lock::<IntCtrl>();
        let phys = pfa.allocate().ok_or(MapError::OutOfMemory)?;
        // Per the doc comment: returns `MapError::Exists` if the segment
        // was already mapped (i.e. this was called twice on one core).
        core_local_segment.map(&mapper, &mut *pfa, &state.pat, kernel_base, phys)?;
    }

    // SAFETY: `kernel_base` was just mapped above and is aligned and
    // SAFETY: large enough for `Self` (asserted above). `write` is used
    // SAFETY: so the uninitialized destination is never read or dropped.
    let kernel_ptr = kernel_base as *mut Self;
    kernel_ptr.write(Self { state });

    Ok(&*kernel_ptr)
}

/// Returns a reference to the core-local kernel instance.
///
/// # Assumed Safety
/// Not marked `unsafe`: under pretty much every circumstance the
/// core-local instance is set up before any other kernel code runs
/// on the core. Calling this before [`Self::initialize_for_core()`]
/// has completed on the current core is undefined behavior.
///
/// Architectures **must** ensure [`Self::initialize_for_core()`]
/// is invoked as soon as possible after the core boots.
#[must_use]
pub fn get() -> &'static Self {
    // The instance always sits at the base of the core-local segment.
    let base = AddrSpace::kernel_core_local().range().0;
    // SAFETY(qix-): `initialize_for_core()` wrote a valid `Self` at
    // SAFETY(qix-): this address before any other code on this core ran.
    unsafe { &*(base as *const Self) }
}

/// Returns the underlying [`KernelState`] for this kernel instance.
Expand All @@ -81,6 +123,8 @@ where
/// The shared, spinlocked page frame allocator (PFA) for the
/// entire system.
pfa: UnfairCriticalSpinlock<Pfa>,
/// The physical address translator.
pat: Pat,
/// Ring registry.
ring_registry: registry::Registry<ring::Ring, IntCtrl, AddrSpace, Pat>,
}
Expand All @@ -96,6 +140,10 @@ where
/// once for all cores at boot time.
///
/// # Safety
/// This function must ONLY be called by the primary core,
/// only at boot time, and only before any other cores are brought up,
/// exactly once.
///
/// This function sets up shared page table mappings that MUST be
/// shared across cores. The caller MUST initialize the kernel
/// state (this struct) prior to booting _any other cores_
Expand All @@ -105,7 +153,15 @@ where
let ring_registry = {
let mut pfa_lock = pfa.lock::<IntCtrl>();

registry::Registry::new(pat, &mut *pfa_lock, AddrSpace::kernel_ring_registry())?
let reg = registry::Registry::new(
pat.clone(),
&mut *pfa_lock,
AddrSpace::kernel_ring_registry(),
)?;

let _ = pfa_lock;

reg
};

let root_ring_id = ring_registry.insert_permanent(
Expand All @@ -117,7 +173,11 @@ where
)?;
assert_eq!(root_ring_id, 0, "root ring ID must be 0");

Ok(Self { pfa, ring_registry })
Ok(Self {
pfa,
pat,
ring_registry,
})
}

/// Returns the underlying PFA belonging to the kernel state.
Expand Down
9 changes: 3 additions & 6 deletions oro-kernel/src/registry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ where
segment.provision_as_shared(&mapper, pfa, &pat)?;

Ok(Self {
base: segment.range(&mapper).0 as *mut _,
base: segment.range().0 as *mut _,
bookkeeping: UnfairCriticalSpinlock::new(RegistryBookkeeping::new()),
pat,
segment,
Expand Down Expand Up @@ -185,10 +185,7 @@ where
let byte_offset = bk.total_count * size_of::<MaybeUninit<ItemFrame<T>>>();
let byte_offset_end = byte_offset + size_of::<MaybeUninit<ItemFrame<T>>>();

if unlikely!(
(self.segment.range(&self.mapper).0 + byte_offset_end - 1)
> self.segment.range(&self.mapper).1
) {
if unlikely!((self.segment.range().0 + byte_offset_end - 1) > self.segment.range().1) {
return Err(MapError::VirtOutOfRange);
}

Expand All @@ -204,7 +201,7 @@ where
let page = pfa.allocate().ok_or(MapError::OutOfMemory)?;

// TODO(qix-): If PFAs ever support more than 4K pages, this will need to be updated.
let virt = self.segment.range(&self.mapper).0 + page_id * 4096;
let virt = self.segment.range().0 + page_id * 4096;
if let Err(err) =
self.segment
.map(&self.mapper, &mut *pfa, &self.pat, virt, page)
Expand Down
Loading

0 comments on commit e4e7ec9

Please sign in to comment.