aarch64: switch to new physical address system and global translator
Qix- committed Oct 18, 2024
1 parent 2480f9f commit 553a4e8
Showing 9 changed files with 168 additions and 167 deletions.
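The change, as a hedged sketch rather than a verbatim excerpt (call shapes are taken from the hunks below; the name linear_offset simply stands for whatever value the boot path computed): previously an OffsetTranslator was constructed after the linear map was built and threaded through every call site that needed to turn a physical address into a usable pointer; now a single global translator is registered once, and call sites wrap physical addresses in Phys.

    // Before: an explicit translator value, passed around by hand.
    let pat = OffsetTranslator::new(linear_offset);
    let page_table = pat.translate_mut::<PageTable>(crate::asm::load_ttbr1());

    // After: one global translator, configured once during boot...
    #[oro_macro::oro_global_translator]
    static mut GLOBAL_PAT: OffsetTranslator = OffsetTranslator::new(0);

    GLOBAL_PAT.set_offset(linear_offset);

    // ...after which physical addresses are converted through Phys wherever needed.
    let page_table =
        Phys::from_address_unchecked(crate::asm::load_ttbr1()).as_mut_unchecked::<PageTable>();
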
31 changes: 14 additions & 17 deletions oro-arch-aarch64/src/boot/memory.rs
@@ -7,7 +7,8 @@ use oro_debug::{dbg, dbg_warn};
use oro_macro::assert;
use oro_mem::{
pfa::{alloc::Alloc, filo::FiloPageFrameAllocator},
translate::{OffsetTranslator, Translator},
phys::{Phys, PhysAddr},
translate::OffsetTranslator,
};

use crate::{
@@ -22,14 +23,15 @@ use crate::{
},
};

/// The global physical address translator for the kernel.
#[oro_macro::oro_global_translator]
static mut GLOBAL_PAT: OffsetTranslator = OffsetTranslator::new(0);

/// Prepared memory items configured after preparing the memory
/// space for the kernel at boot time.
pub struct PreparedMemory {
/// A physical address translator usable with the
/// prepared memory.
pub pat: OffsetTranslator,
/// A page frame allocator usable with the prepared memory.
pub pfa: FiloPageFrameAllocator<OffsetTranslator>,
pub pfa: FiloPageFrameAllocator,
}

/// Prepares the kernel memory after transfer from the boot stage
@@ -79,11 +81,10 @@ pub unsafe fn prepare_memory() -> PreparedMemory {
let linear_offset = linear_map_regions(&otf, &mut pfa_iter, mmap_iter)
.expect("ran out of memory while linear mapping regions");

// Now make a new PFA with the linear map offset.
let pat = OffsetTranslator::new(
GLOBAL_PAT.set_offset(
usize::try_from(linear_offset).expect("linear offset doesn't fit into a usize"),
);
let mut pfa = FiloPageFrameAllocator::new(pat.clone());
let mut pfa = FiloPageFrameAllocator::new();

// Consume the MMAP PFA and free all memory that isn't used by the
// linear map intermediate page table entries.
@@ -95,14 +96,9 @@ pub unsafe fn prepare_memory() -> PreparedMemory {
continue;
}

let base = region.base + region.used;

// NOTE(qix-): Technically the saturating sub isn't necessary here
// NOTE(qix-): assuming the bootloader has done its job correctly.
// NOTE(qix-): However it's good to keep the spaceship flying.
let length = region.length.saturating_sub(region.used);
let base = region.base;
let aligned_base = (base + 4095) & !4095;
let length = length.saturating_sub(aligned_base - base);
let length = region.length.saturating_sub(aligned_base - base);

debug_assert_eq!(aligned_base % 4096, 0);
debug_assert_eq!(length % 4096, 0);
@@ -122,7 +118,8 @@ pub unsafe fn prepare_memory() -> PreparedMemory {
}

// Now unmap the recursive entry.
let page_table = pat.translate_mut::<PageTable>(crate::asm::load_ttbr1());
let page_table =
Phys::from_address_unchecked(crate::asm::load_ttbr1()).as_mut_unchecked::<PageTable>();
(*page_table)[RIDX].reset();
(*page_table)[RIDX + 1].reset();
(*page_table)[RIDX + 2].reset();
@@ -131,7 +128,7 @@ pub unsafe fn prepare_memory() -> PreparedMemory {
// Flush everything and finish.
crate::asm::invalid_tlb_el1_all();

PreparedMemory { pat, pfa }
PreparedMemory { pfa }
}

/// Maps all regions to a linear map in the current virtual address space.
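The page rounding in the memory.rs hunk above is easiest to see with concrete numbers. The values below are hypothetical, chosen so that the end of the region stays page-aligned, which is what the debug asserts in that hunk expect.

    // Hypothetical region: base 0x1000_0010, length 0x3FF0 (so it ends at 0x1000_4000).
    let base: u64 = 0x1000_0010;
    let aligned_base = (base + 4095) & !4095;       // rounds up to 0x1000_1000
    let slack = aligned_base - base;                // 0xFF0 bytes skipped at the front
    let length = 0x3FF0u64.saturating_sub(slack);   // 0x3000, still a multiple of 4096
    // saturating_sub clamps length to zero if the region is smaller than the slack.
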
9 changes: 5 additions & 4 deletions oro-arch-aarch64/src/boot/mod.rs
@@ -9,6 +9,7 @@ mod protocol;
mod secondary;

use oro_debug::dbg;
use oro_mem::phys::{Phys, PhysAddr};

/// The number of pages to allocate for the secondary core stacks.
// TODO(qix-): Make this configurable.
@@ -26,21 +27,21 @@ const SECONDARY_STACK_PAGES: usize = 16;
pub unsafe fn boot_primary() -> ! {
crate::asm::disable_interrupts();

let memory::PreparedMemory { pfa, pat } = memory::prepare_memory();
let memory::PreparedMemory { pfa } = memory::prepare_memory();

// We now have a valid physical map; let's re-init
// any MMIO loggers with that offset.
#[cfg(debug_assertions)]
oro_debug::init_with_offset(pat.offset());
oro_debug::init_with_offset(Phys::from_address_unchecked(0).virt());

// Initialize the primary core.
crate::init::initialize_primary(pat.clone(), pfa);
crate::init::initialize_primary(pfa);

{
let mut pfa = crate::init::KERNEL_STATE.assume_init_ref().pfa().lock();

// Boot secondaries.
let num_cores = secondary::boot_secondaries(&mut *pfa, &pat, SECONDARY_STACK_PAGES);
let num_cores = secondary::boot_secondaries(&mut *pfa, SECONDARY_STACK_PAGES);
dbg!("continuing with {num_cores} cores");
}

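One note on the init_with_offset change above: the linear-map offset is no longer read back from pat.offset(). Assuming an offset-style translator, where virtual = physical + offset, the same value falls out of the global translator as the virtual address that physical address zero maps to, which is what the new call site computes.

    // Assumption: virt(p) == p + offset for the offset translator,
    // so the virtual address of physical 0 is the offset itself.
    let linear_offset = Phys::from_address_unchecked(0).virt();
    oro_debug::init_with_offset(linear_offset);
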
42 changes: 21 additions & 21 deletions oro-arch-aarch64/src/boot/secondary.rs
@@ -13,7 +13,7 @@ use oro_macro::{asm_buffer, assert};
use oro_mem::{
mapper::{AddressSegment, AddressSpace, MapError},
pfa::alloc::Alloc,
translate::{OffsetTranslator, Translator},
phys::{Phys, PhysAddr},
};
use oro_type::Be;

@@ -33,11 +33,7 @@ use crate::{
/// # Safety
/// This function is inherently unsafe and must only be called
/// once at kernel boot by the bootstrap processor (primary core).
pub unsafe fn boot_secondaries(
pfa: &mut impl Alloc,
pat: &OffsetTranslator,
stack_pages: usize,
) -> usize {
pub unsafe fn boot_secondaries(pfa: &mut impl Alloc, stack_pages: usize) -> usize {
// Get the devicetree blob.
let DeviceTreeKind::V0(dtb) = super::protocol::DTB_REQUEST
.response()
@@ -49,7 +45,11 @@ pub unsafe fn boot_secondaries(
let DeviceTreeDataV0 { base, length } = dtb.assume_init_ref();
dbg!("got DeviceTree blob of {} bytes", length);

let dtb = FdtHeader::from(pat.translate::<u8>(*base), Some(*length)).expect("dtb is invalid");
let dtb = FdtHeader::from(
Phys::from_address_unchecked(*base).as_ptr().unwrap(),
Some(*length),
)
.expect("dtb is invalid");
let boot_cpuid = dtb.phys_id();
dbg!("dtb is valid; primary core id is {boot_cpuid}");

@@ -188,7 +188,7 @@ pub unsafe fn boot_secondaries(
continue;
}

if let Err(err) = boot_secondary(pfa, pat, psci, cpu_id, reg_val, stack_pages) {
if let Err(err) = boot_secondary(pfa, psci, cpu_id, reg_val, stack_pages) {
dbg_err!("failed to boot cpu {cpu_id} ({reg_val}): {err:?}");
}

@@ -326,35 +326,34 @@ const SECONDARY_BOOT_STUB: &[u8] = &asm_buffer! {
/// Attempts to boot a single secondary core.
unsafe fn boot_secondary(
pfa: &mut impl Alloc,
pat: &OffsetTranslator,
psci: PsciMethod,
cpu_id: u64,
reg_val: u64,
stack_pages: usize,
) -> Result<(), SecondaryBootError> {
// Get the primary handle.
let primary_mapper = AddressSpaceLayout::current_supervisor_space(pat);
let primary_mapper = AddressSpaceLayout::current_supervisor_space();

// Create a new supervisor address space based on the current address space.
let mapper = AddressSpaceLayout::duplicate_supervisor_space_shallow(&primary_mapper, pfa, pat)
let mapper = AddressSpaceLayout::duplicate_supervisor_space_shallow(&primary_mapper, pfa)
.ok_or(SecondaryBootError::OutOfMemory)?;

// Also create an empty mapper for the TTBR0_EL1 space.
let lower_mapper = AddressSpaceLayout::new_supervisor_space_ttbr0(pfa, pat)
let lower_mapper = AddressSpaceLayout::new_supervisor_space_ttbr0(pfa)
.ok_or(SecondaryBootError::OutOfMemory)?;

// Allocate the boot stubs (maximum 4096 bytes).
let boot_phys = pfa.allocate().ok_or(SecondaryBootError::OutOfMemory)?;
let boot_virt = pat.translate_mut::<[u8; 4096]>(boot_phys);
let boot_virt = Phys::from_address_unchecked(boot_phys).as_mut_ptr_unchecked::<[u8; 4096]>();
(&mut *boot_virt)[..SECONDARY_BOOT_STUB.len()].copy_from_slice(SECONDARY_BOOT_STUB);

// Direct map the boot stubs into the lower page table.
AddressSpaceLayout::stubs()
.map(&lower_mapper, pfa, pat, boot_phys as usize, boot_phys)
.map(&lower_mapper, pfa, boot_phys as usize, boot_phys)
.map_err(SecondaryBootError::MapError)?;

// Forget the stack in the upper address space.
AddressSpaceLayout::kernel_stack().unmap_without_reclaim(&mapper, pat);
AddressSpaceLayout::kernel_stack().unmap_without_reclaim(&mapper);

// Allocate a new stack for it...
let stack_segment = AddressSpaceLayout::kernel_stack();
@@ -363,7 +362,7 @@ unsafe fn boot_secondary(
for stack_virt in (stack_end - stack_pages * 4096..stack_end).step_by(4096) {
let page = pfa.allocate().ok_or(SecondaryBootError::OutOfMemory)?;
stack_segment
.map(&mapper, pfa, pat, stack_virt, page)
.map(&mapper, pfa, stack_virt, page)
.map_err(SecondaryBootError::MapError)?;
}

@@ -374,17 +373,18 @@ unsafe fn boot_secondary(
// Write the boot init block.
assert::fits::<BootInitBlock, 4096>();
let init_block_phys = pfa.allocate().ok_or(SecondaryBootError::OutOfMemory)?;
let init_block_ptr = pat.translate_mut::<BootInitBlock>(init_block_phys);
let init_block_ptr =
Phys::from_address_unchecked(init_block_phys).as_mut_ptr_unchecked::<BootInitBlock>();
debug_assert!(init_block_ptr.is_aligned());
init_block_ptr.write(BootInitBlock {
core_id: cpu_id,
ttbr0_el1: lower_mapper.base_phys,
ttbr1_el1: mapper.base_phys,
ttbr0_el1: lower_mapper.base_phys.address_u64(),
ttbr1_el1: mapper.base_phys.address_u64(),
tcr_el1: tcr,
stack_pointer: stack_end as u64,
mair: mair_val,
entry_point: boot_secondary_entry as *const u8 as u64,
linear_offset: pat.offset() as u64,
linear_offset: Phys::from_address_unchecked(0).virt() as u64,
primary_flag: AtomicBool::new(false),
secondary_flag: AtomicBool::new(false),
});
@@ -401,7 +401,7 @@ unsafe fn boot_secondary(
core::hint::spin_loop();
}

// Unmap the
// XXX(qix-): Do we need to unmap something here?

Ok(())
}
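For the secondary bring-up above, the flow itself is unchanged; only the call shapes differ. Sketched with the new API (the identity-mapping rationale is an inference, not something the commit states): allocate a frame, write the boot stub into it through the global translator, then map it into the secondary's TTBR0 space at its own physical address, presumably so the stub executes at the same address before and after the MMU is brought up.

    // Allocate a frame for the stub and write it through the linear map.
    let boot_phys = pfa.allocate().ok_or(SecondaryBootError::OutOfMemory)?;
    let boot_virt = Phys::from_address_unchecked(boot_phys).as_mut_ptr_unchecked::<[u8; 4096]>();
    (&mut *boot_virt)[..SECONDARY_BOOT_STUB.len()].copy_from_slice(SECONDARY_BOOT_STUB);

    // Direct-map it (virtual address == physical address) in the lower page table.
    AddressSpaceLayout::stubs()
        .map(&lower_mapper, pfa, boot_phys as usize, boot_phys)
        .map_err(SecondaryBootError::MapError)?;
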
8 changes: 4 additions & 4 deletions oro-arch-aarch64/src/init.rs
@@ -4,7 +4,6 @@
use core::mem::MaybeUninit;

use oro_kernel::KernelState;
use oro_mem::translate::OffsetTranslator;
use spin::mutex::fair::FairMutex;

/// The global kernel state. Initialized once during boot
@@ -17,7 +16,7 @@ pub static mut KERNEL_STATE: MaybeUninit<KernelState<crate::Arch>> = MaybeUninit
/// Must be called exactly once for the lifetime of the system,
/// only by the boot processor at boot time (_not_ at any
/// subsequent bringup).
pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: crate::Pfa) {
pub unsafe fn initialize_primary(pfa: crate::Pfa) {
#[cfg(debug_assertions)]
{
use core::sync::atomic::{AtomicBool, Ordering};
@@ -35,7 +34,7 @@ pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: crate::Pfa) {

// SAFETY(qix-): We know what we're doing here.
#[expect(static_mut_refs)]
KernelState::init(&mut KERNEL_STATE, pat, FairMutex::new(pfa))
KernelState::init(&mut KERNEL_STATE, FairMutex::new(pfa))
.expect("failed to create global kernel state");
}

@@ -48,8 +47,9 @@ pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: crate::Pfa) {
pub unsafe fn boot() -> ! {
// SAFETY(qix-): THIS MUST ABSOLUTELY BE FIRST.
let _kernel = crate::Kernel::initialize_for_core(
0, // TODO(qix-): pass in the core ID
KERNEL_STATE.assume_init_ref(),
crate::CoreState { unused: 0 },
(),
)
.expect("failed to initialize kernel");

7 changes: 3 additions & 4 deletions oro-arch-aarch64/src/lib.rs
@@ -41,7 +41,7 @@ pub mod reg;
pub(crate) mod init;

use oro_elf::{ElfClass, ElfEndianness, ElfMachine};
use oro_mem::{pfa::filo::FiloPageFrameAllocator, translate::OffsetTranslator};
use oro_mem::pfa::filo::FiloPageFrameAllocator;

/// The ELF class for the AArch64 architecture.
pub const ELF_CLASS: ElfClass = ElfClass::Class64;
@@ -54,25 +54,24 @@ pub const ELF_MACHINE: ElfMachine = ElfMachine::Aarch64;

/// Type alias for the PFA (page frame allocator) implementation used
/// by the architecture.
pub(crate) type Pfa = FiloPageFrameAllocator<OffsetTranslator>;
pub(crate) type Pfa = FiloPageFrameAllocator;

/// Zero-sized type for specifying the architecture-specific types
/// used throughout the `oro-kernel` crate.
pub(crate) struct Arch;

impl oro_kernel::Arch for Arch {
type AddrSpace = crate::mem::address_space::AddressSpaceLayout;
type Pat = OffsetTranslator;
type Pfa = Pfa;
}

/// Type alias for the Oro kernel core-local instance type.
pub(crate) type Kernel = oro_kernel::Kernel<Arch>;

/// Architecture-specific core-local state.
#[expect(dead_code)] // XXX DEBUG
pub(crate) struct CoreState {
// XXX DEBUG
#[doc(hidden)]
#[expect(dead_code)]
pub unused: u64,
}
(Diffs for the remaining 4 changed files are not shown.)
