From 4c254b2030a94083461c26670be2ef8a8d029b08 Mon Sep 17 00:00:00 2001
From: ZhiyuanSue <108735103+ZhiyuanSue@users.noreply.github.com>
Date: Mon, 26 Aug 2024 16:23:23 +0800
Subject: [PATCH] Clean fastpath restore (#7)

* add the aarch64 clean fastpath restore

* try to add the riscv fastpath_restore, but failed

* use a separate assembly file (fastpath_restore.S) to solve the problem

* clean some warnings
---
 kernel/src/ffi.rs                             |  1 -
 kernel/src/interrupt/mod.rs                   |  2 +-
 kernel/src/kernel/fastpath.rs                 | 67 +++++++++++++++++--
 kernel/src/kernel/fastpath_restore.S          | 40 +++++++++++
 kernel/src/kernel/mod.rs                      |  2 +
 .../syscall/invocation/decode/arch/aarch64.rs |  1 -
 sel4_common/src/fault.rs                      |  2 +-
 sel4_vspace/src/arch/aarch64/asid.rs          |  2 +-
 sel4_vspace/src/arch/aarch64/machine.rs       |  4 +-
 sel4_vspace/src/arch/riscv64/utils.rs         |  4 +-
 10 files changed, 109 insertions(+), 16 deletions(-)
 create mode 100644 kernel/src/kernel/fastpath_restore.S

diff --git a/kernel/src/ffi.rs b/kernel/src/ffi.rs
index cde85cc..8c11076 100644
--- a/kernel/src/ffi.rs
+++ b/kernel/src/ffi.rs
@@ -7,7 +7,6 @@ extern "C" {
     pub fn init_plat();
     pub fn tcbDebugAppend(action: *mut tcb_t);
     pub fn tcbDebugRemove(tcb: *mut tcb_t);
-    pub fn fastpath_restore(badge: usize, msgInfo: usize, cur_thread: *mut tcb_t);
 }
 
 #[cfg(feature = "ENABLE_SMP")]
diff --git a/kernel/src/interrupt/mod.rs b/kernel/src/interrupt/mod.rs
index af46410..822d4f5 100644
--- a/kernel/src/interrupt/mod.rs
+++ b/kernel/src/interrupt/mod.rs
@@ -1,6 +1,6 @@
 pub mod handler;
 
-#[cfg(target_arch="riscv64")]
+#[cfg(target_arch = "riscv64")]
 use crate::BIT;
 #[cfg(target_arch = "riscv64")]
 use core::arch::asm;
diff --git a/kernel/src/kernel/fastpath.rs b/kernel/src/kernel/fastpath.rs
index f41777a..8d00587 100644
--- a/kernel/src/kernel/fastpath.rs
+++ b/kernel/src/kernel/fastpath.rs
@@ -1,7 +1,6 @@
 use crate::MASK;
 use crate::{
     config::seL4_MsgLengthBits,
-    ffi::fastpath_restore,
     syscall::{slowpath, SysCall, SysReplyRecv},
 };
 use core::intrinsics::{likely, unlikely};
@@ -131,6 +130,64 @@ pub fn fastpath_copy_mrs(length: usize, src: &mut tcb_t, dest: &mut tcb_t) {
 //         __restore_fp(badge, msgInfo, cur_thread_regs);
 //     }
 // }
+#[inline]
+#[no_mangle]
+#[cfg(target_arch = "aarch64")]
+pub fn fastpath_restore(_badge: usize, _msgInfo: usize, cur_thread: *mut tcb_t) {
+    use core::arch::asm;
+    unsafe {
+        (*cur_thread).tcbArch.load_thread_local();
+        asm!(
+            "mov sp, {} \n",
+            /* Restore thread's SPSR, LR, and SP */
+            "ldp x21, x22, [sp, #31 * 8] \n",
+            "ldr x23, [sp, #33 * 8] \n",
+            "msr sp_el0, x21 \n",
+            // #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
+            // "msr elr_el2, x22 \n"
+            // "msr spsr_el2, x23 \n"
+            // #else
+            "msr elr_el1, x22 \n",
+            "msr spsr_el1, x23 \n",
+            // #endif
+
+            /* Restore remaining registers */
+            "ldp x2, x3, [sp, #16 * 1] \n",
+            "ldp x4, x5, [sp, #16 * 2] \n",
+            "ldp x6, x7, [sp, #16 * 3] \n",
+            "ldp x8, x9, [sp, #16 * 4] \n",
+            "ldp x10, x11, [sp, #16 * 5] \n",
+            "ldp x12, x13, [sp, #16 * 6] \n",
+            "ldp x14, x15, [sp, #16 * 7] \n",
+            "ldp x16, x17, [sp, #16 * 8] \n",
+            "ldp x18, x19, [sp, #16 * 9] \n",
+            "ldp x20, x21, [sp, #16 * 10] \n",
+            "ldp x22, x23, [sp, #16 * 11] \n",
+            "ldp x24, x25, [sp, #16 * 12] \n",
+            "ldp x26, x27, [sp, #16 * 13] \n",
+            "ldp x28, x29, [sp, #16 * 14] \n",
+            "ldr x30, [sp, #30 * 8] \n",
+            "eret ",
+            in(reg) (*cur_thread).tcbArch.raw_ptr()
+        );
+    }
+    panic!("unreachable")
+}
+
+#[inline]
+#[no_mangle]
+#[cfg(target_arch = "riscv64")]
+pub fn fastpath_restore(_badge: usize, _msgInfo: usize, cur_thread: *mut tcb_t) {
+    #[cfg(feature = "ENABLE_SMP")]
+    {}
+    extern "C" {
+        pub fn __fastpath_restore(badge: usize, msgInfo: usize, cur_thread_reg: usize);
+    }
+    unsafe {
+        __fastpath_restore(_badge, _msgInfo, (*cur_thread).tcbArch.raw_ptr());
+    }
+    panic!("unreachable")
+}
 
 #[inline]
 #[no_mangle]
@@ -205,9 +262,7 @@ pub fn fastpath_call(cptr: usize, msgInfo: usize) {
     info.set_caps_unwrapped(0);
     let msgInfo1 = info.to_word();
     let badge = ep_cap.get_ep_badge();
-    unsafe {
-        fastpath_restore(badge, msgInfo1, get_currenct_thread());
-    }
+    fastpath_restore(badge, msgInfo1, get_currenct_thread());
 }
 
 #[inline]
@@ -288,7 +343,7 @@ pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize) {
         EPState_Recv,
     );
 
-    unsafe {
+    // unsafe {
         let node = convert_to_mut_type_ref::<cte_t>(caller_slot.cteMDBNode.get_prev());
         mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(&mut node.cteMDBNode, 0, 1, 1);
         caller_slot.cap = cap_t::new_null_cap();
@@ -302,5 +357,5 @@ pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize) {
         info.set_caps_unwrapped(0);
         let msg_info1 = info.to_word();
         fastpath_restore(0, msg_info1, get_currenct_thread() as *mut tcb_t);
-    }
+    // }
 }
diff --git a/kernel/src/kernel/fastpath_restore.S b/kernel/src/kernel/fastpath_restore.S
new file mode 100644
index 0000000..147ed19
--- /dev/null
+++ b/kernel/src/kernel/fastpath_restore.S
@@ -0,0 +1,40 @@
+__fastpath_restore:
+    mv t0, a2
+    ld ra, (0*8)(t0)
+    ld sp, (1*8)(t0)
+    ld gp, (2*8)(t0)
+    /* skip tp */
+    /* skip x5/t0 */
+    ld t2, (6*8)(t0)
+    ld s0, (7*8)(t0)
+    ld s1, (8*8)(t0)
+    ld a2, (11*8)(t0)
+    ld a3, (12*8)(t0)
+    ld a4, (13*8)(t0)
+    ld a5, (14*8)(t0)
+    ld a6, (15*8)(t0)
+    ld a7, (16*8)(t0)
+    ld s2, (17*8)(t0)
+    ld s3, (18*8)(t0)
+    ld s4, (19*8)(t0)
+    ld s5, (20*8)(t0)
+    ld s6, (21*8)(t0)
+    ld s7, (22*8)(t0)
+    ld s8, (23*8)(t0)
+    ld s9, (24*8)(t0)
+    ld s10, (25*8)(t0)
+    ld s11, (26*8)(t0)
+    ld t3, (27*8)(t0)
+    ld t4, (28*8)(t0)
+    ld t5, (29*8)(t0)
+    ld t6, (30*8)(t0)
+    ld t1, (3*8)(t0)
+    add tp, t1, x0
+    ld t1, (34*8)(t0)
+    csrw sepc, t1
+    csrw sscratch, t0
+    ld t1, (32*8)(t0)
+    csrw sstatus, t1
+    ld t1, (5*8)(t0)
+    ld t0, (4*8)(t0)
+    sret
\ No newline at end of file
diff --git a/kernel/src/kernel/mod.rs b/kernel/src/kernel/mod.rs
index a4dcaec..9f9ccbd 100644
--- a/kernel/src/kernel/mod.rs
+++ b/kernel/src/kernel/mod.rs
@@ -1,3 +1,5 @@
 pub mod boot;
 pub mod fastpath;
 pub mod fault;
+#[cfg(target_arch = "riscv64")]
+core::arch::global_asm!(include_str!("fastpath_restore.S"));
\ No newline at end of file
diff --git a/kernel/src/syscall/invocation/decode/arch/aarch64.rs b/kernel/src/syscall/invocation/decode/arch/aarch64.rs
index 184dea3..9b21b7e 100644
--- a/kernel/src/syscall/invocation/decode/arch/aarch64.rs
+++ b/kernel/src/syscall/invocation/decode/arch/aarch64.rs
@@ -712,7 +712,6 @@ fn decode_upper_page_directory_unmap(ctSlot: &mut cte_t) -> exception_t {
     exception_t::EXCEPTION_NONE
 }
 
-
 fn decode_page_directory_unmap(ctSlot: &mut cte_t) -> exception_t {
     let cap = &mut ctSlot.cap;
     if cap.get_pd_is_mapped() != 0 {
diff --git a/sel4_common/src/fault.rs b/sel4_common/src/fault.rs
index 3fc29b4..02795f9 100644
--- a/sel4_common/src/fault.rs
+++ b/sel4_common/src/fault.rs
@@ -141,4 +141,4 @@ impl lookup_fault_t {
     pub fn get_lookup_fault_type(&self) -> LookupFaultType {
        unsafe { core::mem::transmute::<u8, LookupFaultType>(self.get_type() as u8) }
     }
-}
\ No newline at end of file
+}
diff --git a/sel4_vspace/src/arch/aarch64/asid.rs b/sel4_vspace/src/arch/aarch64/asid.rs
index dd7696c..9379fc6 100644
--- a/sel4_vspace/src/arch/aarch64/asid.rs
+++ b/sel4_vspace/src/arch/aarch64/asid.rs
@@ -109,4 +109,4 @@ pub fn write_it_asid_pool(it_ap_cap: &cap_t, it_vspace_cap: &cap_t) {
     let asid_map = asid_map_t::new_vspace(it_vspace_cap.get_pgd_base_ptr());
     ap[IT_ASID] = asid_map;
     set_asid_pool_by_index(IT_ASID >> asidLowBits, ap as *const _ as usize);
-}
\ No newline at end of file
+}
diff --git a/sel4_vspace/src/arch/aarch64/machine.rs b/sel4_vspace/src/arch/aarch64/machine.rs
index 45cbf21..ff79780 100644
--- a/sel4_vspace/src/arch/aarch64/machine.rs
+++ b/sel4_vspace/src/arch/aarch64/machine.rs
@@ -66,7 +66,6 @@ pub fn invalidate_local_tlb_va_asid(mva_plus_asid: usize) {
     isb();
 }
 
-
 #[inline(always)]
 pub fn clean_by_va_pou(vaddr: usize, _paddr: usize) {
     unsafe {
@@ -151,7 +150,6 @@ pub fn clean_cache_range_poc(start: usize, end: usize, pstart: usize) {
     }
 }
 
-
 #[inline]
 pub fn clean_cache_range_pou(start: usize, end: usize, pstart: usize) {
     for idx in LINE_INDEX(start)..LINE_INDEX(end) + 1 {
@@ -332,4 +330,4 @@ pub enum mair_types {
     NORMAL_NC,
     NORMAL,
     NORMAL_WT,
-}
\ No newline at end of file
+}
diff --git a/sel4_vspace/src/arch/riscv64/utils.rs b/sel4_vspace/src/arch/riscv64/utils.rs
index 610d20e..aa74a79 100644
--- a/sel4_vspace/src/arch/riscv64/utils.rs
+++ b/sel4_vspace/src/arch/riscv64/utils.rs
@@ -70,8 +70,8 @@ impl PTE {
     pub fn get_ptr(&self) -> usize {
         self as *const Self as usize
     }
-    #[inline]
-    pub fn get_mut_ptr(&mut self) -> usize {
+    #[inline]
+    pub fn get_mut_ptr(&mut self) -> usize {
         self as *mut Self as usize
     }
 }
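
Note on the riscv64 path above: a likely reason the inline riscv attempt mentioned in the commit message failed is that the restore sequence must leave every general-purpose register holding exactly the value loaded from the TCB context when sret executes, while the compiler is free to allocate, spill, or reuse registers around an asm! block. The patch therefore assembles __fastpath_restore from fastpath_restore.S via global_asm! and reaches it through an extern "C" declaration. The sketch below shows only that wiring in isolation; __demo_add and demo_call are hypothetical names for illustration and do not appear in the patch.

    // Minimal sketch of the global_asm! + extern "C" pattern used for
    // __fastpath_restore above; __demo_add and demo_call are hypothetical names.
    #[cfg(target_arch = "riscv64")]
    core::arch::global_asm!(
        ".global __demo_add",
        "__demo_add:",
        "    add a0, a0, a1", // a0 = a0 + a1; a0 is the return register in the RISC-V C ABI
        "    ret",
    );

    #[cfg(target_arch = "riscv64")]
    extern "C" {
        fn __demo_add(a: usize, b: usize) -> usize;
    }

    #[cfg(target_arch = "riscv64")]
    pub fn demo_call() -> usize {
        // Sound as long as the assembly honours the C calling convention for its arguments.
        unsafe { __demo_add(2, 3) }
    }

The same wiring appears in the patch itself: kernel/src/kernel/mod.rs pulls fastpath_restore.S in with global_asm!, and the riscv64 fastpath_restore in fastpath.rs calls it through its local extern "C" block.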