diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 85217e6..bb30888 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -44,7 +44,6 @@ jobs: strategy: matrix: platform: [spike, qemu-arm-virt] - mcs: [off, on] include: - platform: qemu-arm-virt arch: aarch64 @@ -78,11 +77,63 @@ jobs: env: ARCH: ${{ matrix.arch }} PLATFORM: ${{ matrix.platform }} - MCS: ${{ matrix.mcs }} run: | echo $ARCH echo $PLATFORM - cd rel4_kernel && ./build.py -p $PLATFORM -m $MCS + cd rel4_kernel && ./build.py -p $PLATFORM -m off + - name: simulate + env: + ARCH: ${{ matrix.arch }} + PLATFORM: ${{ matrix.platform }} + run: cd rel4_kernel/build && ./simulate > 1.log + timeout-minutes: 2 + continue-on-error: true + - run: cat rel4_kernel/build/1.log + - name: Check Result + run: rel4_kernel/.github/workflows/parse.py rel4_kernel/build/1.log + sel4-test-mcs: + # if: ${{ contains(github.event.head_commit.message, 'git subrepo')}} + runs-on: ubuntu-latest + strategy: + matrix: + platform: [spike, qemu-arm-virt] + include: + - platform: qemu-arm-virt + arch: aarch64 + - platform: spike + arch: riscv64 + container: + image: yfblock/rel4-dev:1.2 + options: --user=root + defaults: + run: + working-directory: ./sel4-test + steps: + - run: mkdir sel4-test + working-directory: . 
+ - uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: nightly-2024-02-01 + components: rust-src rustfmt + rustflags: + target: riscv64imac-unknown-none-elf aarch64-unknown-none-softfloat + - name: Install qemu + run: apt update && apt -y install qemu-system-misc qemu-system-aarch64 + - name: Clone Menifest && Sync repositories + run: | + repo init -u https://github.com/rel4team/sel4test-manifest.git -b ci-test + sed -i "19c\ \t" .repo/manifests/default.xml + repo sync + - run: cd kernel && git checkout mi_dev + # - run: cd rel4_kernel && git checkout mi_dev + - name: Build + env: + ARCH: ${{ matrix.arch }} + PLATFORM: ${{ matrix.platform }} + run: | + echo $ARCH + echo $PLATFORM + cd rel4_kernel && ./build.py -p $PLATFORM -m on - name: simulate env: ARCH: ${{ matrix.arch }} diff --git a/kernel/src/arch/aarch64/c_traps.rs b/kernel/src/arch/aarch64/c_traps.rs index 14af6db..71460f0 100644 --- a/kernel/src/arch/aarch64/c_traps.rs +++ b/kernel/src/arch/aarch64/c_traps.rs @@ -101,6 +101,7 @@ pub fn c_handle_syscall(_cptr: usize, _msgInfo: usize, syscall: usize) { // if hart_id() == 0 { // debug!("c_handle_syscall: syscall: {},", syscall as isize); // } + // sel4_common::println!("c handle syscall"); slowpath(syscall); // debug!("c_handle_syscall complete"); } diff --git a/kernel/src/arch/aarch64/exception.rs b/kernel/src/arch/aarch64/exception.rs index be29151..f555868 100644 --- a/kernel/src/arch/aarch64/exception.rs +++ b/kernel/src/arch/aarch64/exception.rs @@ -2,7 +2,6 @@ use crate::arch::aarch64::consts::ARMDataAbort; use crate::arch::aarch64::consts::ARMPrefetchAbort; use crate::compatibility::lookupIPCBuffer; use crate::halt; -use crate::kernel::boot::current_fault; use crate::object::lookupCapAndSlot; use crate::strnlen; use crate::syscall::handle_fault; @@ -15,6 +14,7 @@ use aarch64_cpu::registers::Readable; use aarch64_cpu::registers::TTBR0_EL1; use log::debug; use sel4_common::arch::ArchReg::*; +use sel4_common::ffi::current_fault; use 
sel4_common::platform::timer; use sel4_common::platform::Timer_func; use sel4_common::print; @@ -116,6 +116,7 @@ pub fn handleVMFaultEvent(vm_faultType: usize) -> exception_t { if status != exception_t::EXCEPTION_NONE { handle_fault(get_currenct_thread()); } + // sel4_common::println!("handle vm fault event"); schedule(); activateThread(); exception_t::EXCEPTION_NONE diff --git a/kernel/src/arch/riscv/exception.rs b/kernel/src/arch/riscv/exception.rs index f920263..0beb7c4 100644 --- a/kernel/src/arch/riscv/exception.rs +++ b/kernel/src/arch/riscv/exception.rs @@ -2,7 +2,6 @@ use super::read_stval; use crate::compatibility::lookupIPCBuffer; use crate::config::*; use crate::halt; -use crate::kernel::boot::current_fault; use crate::object::lookupCapAndSlot; use crate::strnlen; use crate::syscall::handle_fault; @@ -12,6 +11,7 @@ use crate::syscall::{ }; use log::debug; use sel4_common::arch::ArchReg::*; +use sel4_common::ffi::current_fault; use sel4_common::platform::read_time; use sel4_common::print; use sel4_common::sel4_config::seL4_MsgMaxLength; diff --git a/kernel/src/config.rs b/kernel/src/config.rs index 20560ea..ccc2d42 100644 --- a/kernel/src/config.rs +++ b/kernel/src/config.rs @@ -89,11 +89,25 @@ pub const RISCVLoadPageFault: usize = 13; pub const RISCVStorePageFault: usize = 15; pub const RISCVSupervisorTimer: usize = 9223372036854775813; +pub const thread_control_caps_update_ipc_buffer: usize = 0x1; +pub const thread_control_caps_update_space: usize = 0x2; +pub const thread_control_caps_update_fault: usize = 0x4; +pub const thread_control_caps_update_timeout: usize = 0x8; + +pub const thread_control_sched_update_priority: usize = 0x1; +pub const thread_control_sched_update_mcp: usize = 0x2; +pub const thread_control_sched_update_sc: usize = 0x4; +pub const thread_control_sched_update_fault: usize = 0x8; + pub const thread_control_update_priority: usize = 0x1; pub const thread_control_update_ipc_buffer: usize = 0x2; pub const thread_control_update_space: 
usize = 0x4; pub const thread_control_update_mcp: usize = 0x8; +pub const thread_control_update_sc: usize = 0x10; +pub const thread_control_update_fault: usize = 0x20; +pub const thread_control_update_timeout: usize = 0x40; + pub const seL4_WordBits: usize = 64; pub const seL4_UserTop: usize = 0x00007fffffffffff; diff --git a/kernel/src/interfaces_impl/cspace.rs b/kernel/src/interfaces_impl/cspace.rs index 8ac58cc..fe7b3cc 100644 --- a/kernel/src/interfaces_impl/cspace.rs +++ b/kernel/src/interfaces_impl/cspace.rs @@ -1,3 +1,5 @@ +use core::usize; + use crate::config::CONFIG_MAX_NUM_WORK_UNITS_PER_PREEMPTION; // use crate::ffi::tcbDebugRemove; use crate::interrupt::{deletingIRQHandler, isIRQPending, setIRQState, IRQState}; @@ -6,12 +8,16 @@ use crate::syscall::safe_unbind_notification; use sel4_common::sel4_config::{tcbCNodeEntries, tcbCTable, tcbVTable}; use sel4_common::structures::exception_t; use sel4_common::structures_gen::{cap, cap_null_cap, cap_tag, endpoint, notification}; -use sel4_common::utils::convert_to_mut_type_ref; +use sel4_common::utils::{ + convert_to_mut_type_ref, convert_to_option_mut_type_ref, convert_to_option_type_ref, +}; use sel4_cspace::capability::cap_func; use sel4_cspace::compatibility::{ZombieType_ZombieTCB, Zombie_new}; use sel4_cspace::interface::finaliseCap_ret; use sel4_ipc::{endpoint_func, notification_func, Transfer}; -use sel4_task::{get_currenct_thread, ksWorkUnitsCompleted, tcb_t}; +use sel4_task::{get_currenct_thread, ksWorkUnitsCompleted, tcb_t, ThreadState}; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::{reply::reply_t, sched_context::sched_context_t}; #[cfg(target_arch = "riscv64")] use sel4_vspace::find_vspace_for_asid; #[cfg(target_arch = "aarch64")] @@ -190,6 +196,12 @@ pub fn finaliseCap(capability: &cap, _final: bool, _exposed: bool) -> finaliseCa let ntfn = convert_to_mut_type_ref::( cap::cap_notification_cap(capability).get_capNtfnPtr() as usize, ); + #[cfg(feature = "KERNEL_MCS")] + if let Some(sc) = 
convert_to_option_mut_type_ref::( + ntfn.get_ntfnSchedContext() as usize, + ) { + sc.schedContext_unbindNtfn(); + } ntfn.safe_unbind_tcb(); ntfn.cacncel_all_signal(); } @@ -197,7 +209,32 @@ pub fn finaliseCap(capability: &cap, _final: bool, _exposed: bool) -> finaliseCa fc_ret.cleanupInfo = cap_null_cap::new().unsplay(); return fc_ret; } - cap_tag::cap_reply_cap | cap_tag::cap_null_cap | cap_tag::cap_domain_cap => { + cap_tag::cap_reply_cap => { + #[cfg(feature = "KERNEL_MCS")] + if _final { + if let Some(reply) = convert_to_option_mut_type_ref::( + cap::cap_reply_cap(capability).get_capReplyPtr() as usize, + ) { + if reply.replyTCB != 0 { + match convert_to_mut_type_ref::(reply.replyTCB).get_state() { + ThreadState::ThreadStateBlockedOnReply => { + reply.remove(convert_to_mut_type_ref::(reply.replyTCB)); + } + ThreadState::ThreadStateBlockedOnReceive => { + convert_to_mut_type_ref::(reply.replyTCB).cancel_ipc(); + } + _ => { + panic!("invalid tcb state"); + } + } + } + } + } + fc_ret.remainder = cap_null_cap::new().unsplay(); + fc_ret.cleanupInfo = cap_null_cap::new().unsplay(); + return fc_ret; + } + cap_tag::cap_null_cap | cap_tag::cap_domain_cap => { fc_ret.remainder = cap_null_cap::new().unsplay(); fc_ret.cleanupInfo = cap_null_cap::new().unsplay(); return fc_ret; @@ -236,6 +273,16 @@ pub fn finaliseCap(capability: &cap, _final: bool, _exposed: bool) -> finaliseCa }; let cte_ptr = tcb.get_cspace_mut_ref(tcbCTable); safe_unbind_notification(tcb); + #[cfg(feature = "KERNEL_MCS")] + if let Some(sc) = + convert_to_option_mut_type_ref::(tcb.tcbSchedContext) + { + sc.schedContext_unbindTCB(tcb); + if sc.scYieldFrom != 0 { + convert_to_mut_type_ref::(sc.scYieldFrom) + .schedContext_completeYieldTo(); + } + } tcb.cancel_ipc(); tcb.suspend(); // #[cfg(feature="DEBUG_BUILD")] diff --git a/kernel/src/kernel/boot.rs b/kernel/src/kernel/boot.rs index 073ac92..3a78c5a 100644 --- a/kernel/src/kernel/boot.rs +++ b/kernel/src/kernel/boot.rs @@ -14,12 +14,6 @@ use 
crate::structures::{extra_caps_t, syscall_error_t}; // #[link_section = ".boot.bss"] pub static mut current_lookup_fault: lookup_fault = lookup_fault(Bitfield { arr: [0; 2] }); -#[no_mangle] -// #[link_section = ".boot.bss"] -pub static mut current_fault: seL4_Fault = seL4_Fault { - 0: Bitfield { arr: [0; 2usize] }, -}; - #[no_mangle] // #[link_section = ".boot.bss"] pub static mut current_syscall_error: syscall_error_t = syscall_error_t { diff --git a/kernel/src/kernel/fastpath.rs b/kernel/src/kernel/fastpath.rs index 5b00bb4..5c37464 100644 --- a/kernel/src/kernel/fastpath.rs +++ b/kernel/src/kernel/fastpath.rs @@ -4,9 +4,13 @@ use crate::{ syscall::{slowpath, SysCall, SysReplyRecv}, }; use core::intrinsics::{likely, unlikely}; +#[cfg(feature = "KERNEL_MCS")] +use sched_context::sched_context_t; use sel4_common::arch::msgRegister; use sel4_common::message_info::seL4_MessageInfo_func; use sel4_common::shared_types_bf_gen::seL4_MessageInfo; +#[cfg(feature = "KERNEL_MCS")] +use sel4_common::structures_gen::call_stack; use sel4_common::structures_gen::{ cap, cap_cnode_cap, cap_null_cap, cap_page_table_cap, cap_reply_cap, cap_tag, endpoint, mdb_node, notification, seL4_Fault_tag, thread_state, @@ -17,6 +21,8 @@ use sel4_common::{ }; use sel4_cspace::interface::*; use sel4_ipc::*; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::reply::reply_t; use sel4_task::*; use sel4_vspace::*; @@ -239,16 +245,18 @@ pub fn fastpath_call(cptr: usize, msgInfo: usize) { if unlikely((ep_cap.get_capCanGrant() == 0) && (ep_cap.get_capCanGrantReply() == 0)) { slowpath(SysCall as usize); } - // #ifdef CONFIG_KERNEL_MCS - // if (unlikely(dest->tcbSchedContext != NULL)) { - // slowpath(SysCall); - // } - - // reply_t *reply = thread_state_get_replyObject_np(dest->tcbState); - // if (unlikely(reply == NULL)) { - // slowpath(SysCall); - // } - // #endif + #[cfg(feature = "KERNEL_MCS")] + { + if unlikely(dest.tcbSchedContext != 0) { + slowpath(SysCall as usize); + } + 
assert!(dest.tcbState.get_tcbQueued() == 0); + assert!(dest.tcbState.get_tcbInReleaseQueue() == 0); + let reply = dest.tcbState.get_replyObject(); + if unlikely(reply == 0) { + slowpath(SysCall as usize); + } + } #[cfg(feature = "ENABLE_SMP")] if unlikely(get_currenct_thread().tcbAffinity != dest.tcbAffinity) { slowpath(SysCall as usize); @@ -268,25 +276,31 @@ pub fn fastpath_call(cptr: usize, msgInfo: usize) { #[cfg(feature = "KERNEL_MCS")] { - // TODO: MCS - // #ifdef CONFIG_KERNEL_MCS - // thread_state_ptr_set_replyObject_np(&dest->tcbState, 0); - // thread_state_ptr_set_replyObject_np(&NODE_STATE(ksCurThread)->tcbState, REPLY_REF(reply)); - // reply->replyTCB = NODE_STATE(ksCurThread); - - // sched_context_t *sc = NODE_STATE(ksCurThread)->tcbSchedContext; - // sc->scTcb = dest; - // dest->tcbSchedContext = sc; - // NODE_STATE(ksCurThread)->tcbSchedContext = NULL; - - // reply_t *old_caller = sc->scReply; - // reply->replyPrev = call_stack_new(REPLY_REF(sc->scReply), false); - // if (unlikely(old_caller)) { - // old_caller->replyNext = call_stack_new(REPLY_REF(reply), false); - // } - // reply->replyNext = call_stack_new(SC_REF(sc), true); - // sc->scReply = reply; - // #endif + let reply = dest.tcbState.get_replyObject(); + assert!(dest.tcbState.get_tcbQueued() == 0); + assert!(dest.tcbState.get_tcbInReleaseQueue() == 0); + dest.tcbState.set_replyObject(0); + + assert!(current.tcbState.get_tcbQueued() == 0); + assert!(current.tcbState.get_tcbInReleaseQueue() == 0); + current.tcbState.set_replyObject(reply); + + convert_to_mut_type_ref::(reply as usize).replyTCB = unsafe { ksCurThread }; + + let sc = convert_to_mut_type_ref::(current.tcbSchedContext); + sc.scTcb = dest.get_ptr(); + dest.tcbSchedContext = sc.get_ptr(); + current.tcbSchedContext = 0; + + let old_caller = convert_to_mut_type_ref::(sc.scReply); + convert_to_mut_type_ref::(reply as usize).replyPrev = + call_stack::new(sc.scReply as u64, 0); + if unlikely(old_caller.get_ptr() != 0) { + 
old_caller.replyNext = call_stack::new(reply, 0); + } + convert_to_mut_type_ref::(reply as usize).replyNext = + call_stack::new(sc.get_ptr() as u64, 1); + sc.scReply = reply as usize; } #[cfg(not(feature = "KERNEL_MCS"))] { @@ -432,9 +446,6 @@ pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize) { pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize, reply: usize) { // debug!("enter fastpath_reply_recv"); - use sel4_common::reply::reply_t; - use sel4_ipc::endpoint_func; - use sel4_task::sched_context::sched_context_t; let current = get_currenct_thread(); let mut info = seL4_MessageInfo::from_word(msgInfo); let length = info.get_length() as usize; @@ -479,20 +490,6 @@ pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize, reply: usize) { if unlikely(ep.get_ep_state() == EPState::Send) { slowpath(SysReplyRecv as usize); } - // #ifdef CONFIG_KERNEL_MCS - // /* Get the reply address */ - // reply_t *reply_ptr = REPLY_PTR(cap_reply_cap_get_capReplyPtr(reply_cap)); - // /* check that its valid and at the head of the call chain - // and that the current thread's SC is going to be donated. */ - // if (unlikely(reply_ptr->replyTCB == NULL || - // call_stack_get_isHead(reply_ptr->replyNext) == 0 || - // SC_PTR(call_stack_get_callStackPtr(reply_ptr->replyNext)) != NODE_STATE(ksCurThread)->tcbSchedContext)) { - // slowpath(SysReplyRecv); - // } - - // /* Determine who the caller is. 
*/ - // caller = reply_ptr->replyTCB; - // #endif /* Get the reply address */ let reply_ptr = convert_to_mut_type_ref::(reply_cap.get_capReplyPtr() as usize); /* check that its valid and at the head of the call chain @@ -526,18 +523,13 @@ pub fn fastpath_reply_recv(cptr: usize, msgInfo: usize, reply: usize) { if unlikely(caller.tcbSchedContext != 0) { slowpath(SysReplyRecv as usize); } + assert!(current.tcbState.get_replyObject() == 0); thread_state_ptr_mset_blockingObject_tsType( &mut current.tcbState, ep.get_ptr(), ThreadState::ThreadStateBlockedOnReceive as usize, ); - // #ifdef CONFIG_KERNEL_MCS - // /* unlink reply object from caller */ - // thread_state_ptr_set_replyObject_np(&caller->tcbState, 0); - // /* set the reply object */ - // thread_state_ptr_set_replyObject_np(&NODE_STATE(ksCurThread)->tcbState, REPLY_REF(reply_ptr)); - // reply_ptr->replyTCB = NODE_STATE(ksCurThread); caller.tcbState.set_replyObject(0); current .tcbState diff --git a/kernel/src/object/mod.rs b/kernel/src/object/mod.rs index db412f6..20a82fd 100644 --- a/kernel/src/object/mod.rs +++ b/kernel/src/object/mod.rs @@ -1,4 +1,4 @@ -use crate::structures::lookupCapAndSlot_ret_t; +use crate::structures::{lookupCapAndSlot_ret_t, lookupCap_ret_t}; use crate::syscall::handle_fault; use sel4_common::arch::MessageLabel; use sel4_common::structures::exception_t; @@ -63,3 +63,21 @@ pub extern "C" fn lookupCapAndSlot(thread: *mut tcb_t, cPtr: usize) -> lookupCap ret } } +#[no_mangle] +pub fn lookup_cap(thread: *mut tcb_t, cPtr: usize) -> lookupCap_ret_t { + let lu_ret = unsafe { (*thread).lookup_slot(cPtr) }; + if lu_ret.status != exception_t::EXCEPTION_NONE { + let ret = lookupCap_ret_t { + status: lu_ret.status, + capability: cap_null_cap::new().unsplay(), + }; + return ret; + } + unsafe { + let ret = lookupCap_ret_t { + status: exception_t::EXCEPTION_NONE, + capability: (*lu_ret.slot).capability.clone(), + }; + ret + } +} diff --git a/kernel/src/syscall/invocation/decode/arch/aarch64.rs 
b/kernel/src/syscall/invocation/decode/arch/aarch64.rs index ad2d5f5..2c85541 100644 --- a/kernel/src/syscall/invocation/decode/arch/aarch64.rs +++ b/kernel/src/syscall/invocation/decode/arch/aarch64.rs @@ -264,6 +264,7 @@ fn decode_frame_invocation( call: bool, buffer: &seL4_IPCBuffer, ) -> exception_t { + // sel4_common::println!("decode frame invocation {}",label as usize); match label { MessageLabel::ARMPageMap => decode_frame_map(length, frame_slot, buffer), MessageLabel::ARMPageUnmap => { diff --git a/kernel/src/syscall/invocation/decode/decode_cnode_invocation.rs b/kernel/src/syscall/invocation/decode/decode_cnode_invocation.rs index c78c2fa..4ae4cc3 100644 --- a/kernel/src/syscall/invocation/decode/decode_cnode_invocation.rs +++ b/kernel/src/syscall/invocation/decode/decode_cnode_invocation.rs @@ -27,6 +27,7 @@ pub fn decode_cnode_invocation( capability: &cap_cnode_cap, buffer: &seL4_IPCBuffer, ) -> exception_t { + // sel4_common::println!("decode cnode invocation {}", invLabel as usize); if invLabel < MessageLabel::CNodeRevoke || invLabel as usize > CNODE_LAST_INVOCATION { debug!("CNodeCap: Illegal Operation attempted."); unsafe { diff --git a/kernel/src/syscall/invocation/decode/decode_sched_invocation.rs b/kernel/src/syscall/invocation/decode/decode_sched_invocation.rs index 8747a88..4987d27 100644 --- a/kernel/src/syscall/invocation/decode/decode_sched_invocation.rs +++ b/kernel/src/syscall/invocation/decode/decode_sched_invocation.rs @@ -1,29 +1,269 @@ +use core::intrinsics::unlikely; + use log::debug; -use sel4_common::{structures::exception_t, utils::global_ops}; -use sel4_task::sched_context::sched_context; +use sel4_common::{ + arch::{usToTicks, MessageLabel}, + platform::time_def::time_t, + println, + sel4_config::{ + seL4_IllegalOperation, seL4_InvalidCapability, seL4_RangeError, seL4_TruncatedMessage, + TIME_ARG_SIZE, + }, + structures::{exception_t, seL4_IPCBuffer}, + structures_gen::{ + cap, cap_Splayed, cap_sched_context_cap, 
cap_sched_control_cap, cap_tag, notification_t, + }, + utils::{convert_to_mut_type_ref, global_ops}, +}; +use sel4_cspace::interface::cte_t; +use sel4_task::{ + get_currenct_thread, ksCurThread, + sched_context::{ + refill_absolute_max, sched_context, sched_context_t, MAX_PERIOD_US, MIN_BUDGET, + MIN_BUDGET_US, MIN_REFILLS, + }, + set_thread_state, tcb_t, ThreadState, +}; -use crate::kernel::boot::current_extra_caps; +use crate::{ + kernel::boot::{current_extra_caps, current_syscall_error, get_extra_cap_by_index}, + syscall::{ + get_syscall_arg, + invocation::invoke_sched::{ + invokeSchedContext_Bind, invokeSchedContext_Consumed, invokeSchedContext_Unbind, + invokeSchedControl_ConfigureFlags, + }, + }, +}; -pub fn decode_sched_context_invocation() -> exception_t { - exception_t::EXCEPTION_NONE +pub fn decode_sched_context_invocation( + inv_label: MessageLabel, + capability: &cap_sched_context_cap, + buffer: &seL4_IPCBuffer, +) -> exception_t { + // println!("go into decode sched context invocation"); + let sc = convert_to_mut_type_ref::(capability.get_capSCPtr() as usize); + match inv_label { + MessageLabel::SchedContextConsumed => { + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + invokeSchedContext_Consumed(sc, buffer) + } + MessageLabel::SchedContextBind => decodeSchedContext_Bind(sc), + MessageLabel::SchedContextUnbindObject => decodeSchedContext_UnbindObject(sc), + MessageLabel::SchedContextUnbind => { + if sc.scTcb == unsafe { ksCurThread } { + debug!("SchedContext UnbindObject: cannot unbind sc of current thread"); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + invokeSchedContext_Unbind(sc) + } + MessageLabel::SchedContextYieldTo => decodeSchedContext_YieldTo(sc, buffer), + _ => { + debug!("SchedContext invocation: Illegal operation attempted."); + unsafe { + 
current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + } } -pub fn decode_sched_control_invocation() -> exception_t { +pub fn decode_sched_control_invocation( + inv_label: MessageLabel, + length: usize, + capability: &cap_sched_control_cap, + buffer: &seL4_IPCBuffer, +) -> exception_t { + match inv_label { + MessageLabel::SchedControlConfigureFlags => { + if global_ops!(current_extra_caps.excaprefs[0] == 0) { + debug!("SchedControl_ConfigureFlags: Truncated message."); + unsafe { + current_syscall_error._type = seL4_TruncatedMessage; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if length < (TIME_ARG_SIZE * 2) + 3 { + debug!("SchedControl_configureFlags: truncated message."); + unsafe { + current_syscall_error._type = seL4_TruncatedMessage; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + let budget_us: time_t = get_syscall_arg(0, buffer); + let budget_ticks = usToTicks(budget_us); + let period_us = get_syscall_arg(TIME_ARG_SIZE, buffer); + let period_ticks = usToTicks(period_us); + let extra_refills = get_syscall_arg(TIME_ARG_SIZE * 2, buffer); + let badge = get_syscall_arg(TIME_ARG_SIZE * 2 + 1, buffer); + let flags = get_syscall_arg(TIME_ARG_SIZE * 2 + 2, buffer); + + let targetCap = + &convert_to_mut_type_ref::(unsafe { current_extra_caps.excaprefs[0] }) + .capability; + if unlikely(targetCap.get_tag() != cap_tag::cap_sched_context_cap) { + debug!("SchedControl_ConfigureFlags: budget out of range."); + unsafe { + current_syscall_error._type = seL4_RangeError; + current_syscall_error.rangeErrorMin = MIN_BUDGET_US(); + current_syscall_error.rangeErrorMax = MAX_PERIOD_US(); + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + } + if budget_us > MAX_PERIOD_US() || budget_ticks < MIN_BUDGET() { + debug!("SchedControl_ConfigureFlags: budget out of range."); + unsafe { + current_syscall_error._type = seL4_RangeError; + current_syscall_error.rangeErrorMin = MIN_BUDGET_US(); + 
current_syscall_error.rangeErrorMax = MAX_PERIOD_US(); + } + + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if period_us > MAX_PERIOD_US() || period_ticks < MIN_BUDGET() { + debug!("SchedControl_ConfigureFlags: period out of range."); + unsafe { + current_syscall_error._type = seL4_RangeError; + current_syscall_error.rangeErrorMin = MIN_BUDGET_US(); + current_syscall_error.rangeErrorMax = MAX_PERIOD_US(); + } + + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if budget_ticks > period_ticks { + debug!("SchedControl_ConfigureFlags: budget must be <= period"); + unsafe { + current_syscall_error._type = seL4_RangeError; + current_syscall_error.rangeErrorMin = MIN_BUDGET_US(); + current_syscall_error.rangeErrorMax = period_us; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if extra_refills + MIN_REFILLS + > refill_absolute_max(cap::cap_sched_context_cap(&targetCap)) + { + unsafe { + current_syscall_error._type = seL4_RangeError; + current_syscall_error.rangeErrorMin = 0; + current_syscall_error.rangeErrorMax = + refill_absolute_max(cap::cap_sched_context_cap(&targetCap)) - MIN_REFILLS; + debug!( + "Max refills invalid, got {}, max {}", + extra_refills, current_syscall_error.rangeErrorMax + ); + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + return invokeSchedControl_ConfigureFlags( + convert_to_mut_type_ref::( + cap::cap_sched_context_cap(&targetCap).get_capSCPtr() as usize, + ), + capability.get_core() as usize, + budget_ticks, + period_ticks, + extra_refills + MIN_REFILLS, + badge, + flags, + ); + } + _ => { + debug!("SchedControl invocation: Illegal operation attempted."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + } + } exception_t::EXCEPTION_NONE } pub fn decodeSchedContext_UnbindObject(sc: &mut sched_context) -> exception_t { // TODO: MCS - unimplemented!("MCS"); + unimplemented!("MCS unbind object"); if 
global_ops!(current_extra_caps.excaprefs[0] == 0) { debug!("") } exception_t::EXCEPTION_NONE } pub fn decodeSchedContext_Bind(sc: &mut sched_context) -> exception_t { - unimplemented!("MCS"); - // TODO: MCS - exception_t::EXCEPTION_NONE + if get_extra_cap_by_index(0).is_none() { + debug!("SchedContext_Bind: Truncated Message."); + unsafe { + current_syscall_error._type = seL4_TruncatedMessage; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + let capability = &get_extra_cap_by_index(0).unwrap().capability; + match capability.clone().splay() { + cap_Splayed::thread_cap(data) => { + if sc.scTcb != 0 { + debug!("SchedContext_Bind: sched context already bound."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if convert_to_mut_type_ref::(data.get_capTCBPtr() as usize).tcbSchedContext != 0 + { + debug!("SchedContext_Bind: tcb already bound."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + if convert_to_mut_type_ref::(data.get_capTCBPtr() as usize).is_blocked() + && !sc.sc_released() + { + debug!("SchedContext_Bind: tcb blocked and scheduling context not schedulable."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + return invokeSchedContext_Bind(sc, &capability); + } + cap_Splayed::notification_cap(data) => { + if sc.scNotification != 0 { + debug!("SchedContext_Bind: sched context already bound."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + if convert_to_mut_type_ref::(data.get_capNtfnPtr() as usize) + .get_ntfnSchedContext() + != 0 + { + debug!("SchedContext_Bind: notification already bound"); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return 
exception_t::EXCEPTION_SYSCALL_ERROR; + } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + return invokeSchedContext_Bind(sc, &capability); + } + _ => { + debug!("SchedContext_Bind: invalid cap."); + unsafe { + current_syscall_error._type = seL4_InvalidCapability; + current_syscall_error.invalidCapNumber = 1; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + } } -pub fn decodeSchedContext_YieldTo(sc: &mut sched_context) { - unimplemented!("MCS"); +pub fn decodeSchedContext_YieldTo(sc: &mut sched_context, buffer: &seL4_IPCBuffer) -> exception_t { + unimplemented!("MCS yield to"); // TODO: MCS } diff --git a/kernel/src/syscall/invocation/decode/decode_tcb_invocation.rs b/kernel/src/syscall/invocation/decode/decode_tcb_invocation.rs index 9b09a3a..2cefc89 100644 --- a/kernel/src/syscall/invocation/decode/decode_tcb_invocation.rs +++ b/kernel/src/syscall/invocation/decode/decode_tcb_invocation.rs @@ -20,6 +20,10 @@ use sel4_cspace::capability::cap_func; use sel4_cspace::interface::cte_t; use sel4_task::{get_currenct_thread, set_thread_state, tcb_t, ThreadState}; +use crate::config::{ + thread_control_caps_update_fault, thread_control_caps_update_ipc_buffer, + thread_control_caps_update_space, +}; use crate::{ kernel::boot::{current_syscall_error, get_extra_cap_by_index}, syscall::utils::{check_ipc_buffer_vaild, check_prio, get_syscall_arg}, @@ -92,6 +96,7 @@ pub fn decode_tcb_invocation( call: bool, buffer: &seL4_IPCBuffer, ) -> exception_t { + // sel4_common::println!("label is {}", invLabel as usize); match invLabel { MessageLabel::TCBReadRegisters => decode_read_registers(capability, length, call, buffer), MessageLabel::TCBWriteRegisters => decode_write_registers(capability, length, buffer), @@ -111,11 +116,18 @@ pub fn decode_tcb_invocation( MessageLabel::TCBConfigure => decode_tcb_configure(capability, length, slot, buffer), MessageLabel::TCBSetPriority => decode_set_priority(capability, length, buffer), 
MessageLabel::TCBSetMCPriority => decode_set_mc_priority(capability, length, buffer), + #[cfg(not(feature = "KERNEL_MCS"))] MessageLabel::TCBSetSchedParams => decode_set_sched_params(capability, length, buffer), + #[cfg(feature = "KERNEL_MCS")] + MessageLabel::TCBSetSchedParams => { + decode_set_sched_params(capability, length, slot, buffer) + } MessageLabel::TCBSetIPCBuffer => decode_set_ipc_buffer(capability, length, slot, buffer), MessageLabel::TCBSetSpace => decode_set_space(capability, length, slot, buffer), MessageLabel::TCBBindNotification => decode_bind_notification(capability), MessageLabel::TCBUnbindNotification => decode_unbind_notification(capability), + #[cfg(feature = "KERNEL_MCS")] + MessageLabel::TCBSetTimeoutEndpoint => decode_set_timeout_endpoint(capability, slot), MessageLabel::TCBSetTLSBase => decode_set_tls_base(capability, length, buffer), _ => unsafe { debug!("TCB: Illegal operation invLabel :{:?}", invLabel); @@ -238,7 +250,11 @@ fn decode_tcb_configure( target_thread_slot: &mut cte_t, buffer: &seL4_IPCBuffer, ) -> exception_t { - if msg_length < 4 + #[cfg(not(feature = "KERNEL_MCS"))] + let TCBCONFIGURE_ARGS = 3; + #[cfg(feature = "KERNEL_MCS")] + let TCBCONFIGURE_ARGS = 4; + if msg_length < TCBCONFIGURE_ARGS || get_extra_cap_by_index(0).is_none() || get_extra_cap_by_index(1).is_none() || get_extra_cap_by_index(2).is_none() @@ -249,11 +265,22 @@ fn decode_tcb_configure( } return exception_t::EXCEPTION_SYSCALL_ERROR; } - + #[cfg(not(feature = "KERNEL_MCS"))] let fault_ep = get_syscall_arg(0, buffer); + #[cfg(not(feature = "KERNEL_MCS"))] let croot_data = get_syscall_arg(1, buffer); + #[cfg(not(feature = "KERNEL_MCS"))] let vroot_data = get_syscall_arg(2, buffer); + #[cfg(not(feature = "KERNEL_MCS"))] let new_buffer_addr = get_syscall_arg(3, buffer); + + #[cfg(feature = "KERNEL_MCS")] + let croot_data = get_syscall_arg(0, buffer); + #[cfg(feature = "KERNEL_MCS")] + let vroot_data = get_syscall_arg(1, buffer); + #[cfg(feature = "KERNEL_MCS")] 
+ let new_buffer_addr = get_syscall_arg(2, buffer); + let croot_slot = get_extra_cap_by_index(0).unwrap(); let mut croot_cap = &croot_slot.clone().capability; let vroot_slot = get_extra_cap_by_index(1).unwrap(); @@ -324,6 +351,7 @@ fn decode_tcb_configure( } set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + #[cfg(not(feature = "KERNEL_MCS"))] let status = invoke_tcb_set_space( target_thread, target_thread_slot, @@ -333,6 +361,20 @@ fn decode_tcb_configure( vroot_cap, vroot_slot, ); + #[cfg(feature = "KERNEL_MCS")] + let status = invoke_tcb_thread_control_caps( + target_thread, + target_thread_slot, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + croot_cap, + croot_slot, + vroot_cap, + vroot_slot, + thread_control_caps_update_space | thread_control_caps_update_ipc_buffer, + ); if status != exception_t::EXCEPTION_NONE { return status; } @@ -420,12 +462,62 @@ fn decode_set_mc_priority( new_mcp, ) } +#[cfg(not(feature = "KERNEL_MCS"))] +fn decode_set_sched_params( + capability: &cap_thread_cap, + length: usize, + buffer: &seL4_IPCBuffer, +) -> exception_t { + if length < 2 || get_extra_cap_by_index(0).is_some() { + debug!("TCB SetSchedParams: Truncated message."); + unsafe { + current_syscall_error._type = seL4_TruncatedMessage; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + let new_mcp = get_syscall_arg(0, buffer); + let new_prio = get_syscall_arg(1, buffer); + let auth_cap = cap::cap_thread_cap(&get_extra_cap_by_index(0).unwrap().capability); + if auth_cap.clone().unsplay().get_tag() != cap_tag::cap_thread_cap { + debug!("SetSchedParams: authority cap not a TCB."); + unsafe { + current_syscall_error._type = seL4_InvalidCapability; + current_syscall_error.invalidCapNumber = 1; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + let auth_tcb = convert_to_mut_type_ref::(auth_cap.get_capTCBPtr() as usize); + let status = 
check_prio(new_mcp, auth_tcb); + if status != exception_t::EXCEPTION_NONE { + debug!( + "TCB SetSchedParams: Requested maximum controlled priority {} too high (max {}).", + new_mcp, auth_tcb.tcbMCP + ); + return status; + } + let status = check_prio(new_prio, auth_tcb); + if status != exception_t::EXCEPTION_NONE { + debug!( + "TCB SetSchedParams: Requested priority {} too high (max {}).", + new_prio, auth_tcb.tcbMCP + ); + return status; + } + + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + let target = convert_to_mut_type_ref::(capability.get_capTCBPtr() as usize); + invoke_tcb_set_mcp(target, new_mcp); + invoke_tcb_set_priority(target, new_prio) +} +#[cfg(feature = "KERNEL_MCS")] fn decode_set_sched_params( capability: &cap_thread_cap, length: usize, + slot: &mut cte_t, buffer: &seL4_IPCBuffer, ) -> exception_t { + // TODO: MCS if length < 2 || get_extra_cap_by_index(0).is_some() { debug!("TCB SetSchedParams: Truncated message."); unsafe { @@ -513,6 +605,7 @@ fn decode_set_ipc_buffer( ) } +#[cfg(not(feature = "KERNEL_MCS"))] fn decode_set_space( capability: &cap_thread_cap, length: usize, @@ -590,6 +683,132 @@ fn decode_set_space( vroot_slot, ) } +#[cfg(feature = "KERNEL_MCS")] +pub fn validFaultHandler(capability: &cap) -> bool { + use sel4_common::structures_gen::cap_Splayed; + + match capability.clone().splay() { + cap_Splayed::endpoint_cap(data) => { + if data.get_capCanSend() == 0 + || (data.get_capCanGrant() == 0 && data.get_capCanGrantReply() == 0) + { + unsafe { + current_syscall_error._type = seL4_InvalidCapability; + } + return false; + } + return true; + } + cap_Splayed::null_cap(_) => { + return true; + } + _ => { + unsafe { + current_syscall_error._type = seL4_InvalidCapability; + } + return false; + } + } +} +#[cfg(feature = "KERNEL_MCS")] +fn decode_set_space( + capability: &cap_thread_cap, + length: usize, + slot: &mut cte_t, + buffer: &seL4_IPCBuffer, +) -> exception_t { + if length < 2 + || 
get_extra_cap_by_index(0).is_none() + || get_extra_cap_by_index(1).is_none() + || get_extra_cap_by_index(2).is_none() + { + sel4_common::println!("TCB SetSpace: Truncated message. {}", length); + unsafe { + current_syscall_error._type = seL4_TruncatedMessage; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + let croot_data = get_syscall_arg(0, buffer); + let vroot_data = get_syscall_arg(1, buffer); + + let fh_slot = get_extra_cap_by_index(0).unwrap(); + let fh_cap = &fh_slot.clone().capability; + + let croot_slot = get_extra_cap_by_index(1).unwrap(); + let mut croot_cap = &croot_slot.capability; + + let vroot_slot = get_extra_cap_by_index(2).unwrap(); + let mut vroot_cap = &vroot_slot.capability; + + let target_thread = convert_to_mut_type_ref::(capability.get_capTCBPtr() as usize); + if target_thread.get_cspace(tcbCTable).is_long_running_delete() + || target_thread.get_cspace(tcbVTable).is_long_running_delete() + { + sel4_common::println!("TCB Configure: CSpace or VSpace currently being deleted."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + let decode_croot_cap = decode_set_space_args(croot_data, croot_cap, croot_slot); + let binding = decode_croot_cap.clone().unwrap(); + match decode_croot_cap { + Ok(_) => croot_cap = &binding, + Err(status) => return status, + } + if croot_cap.get_tag() != cap_tag::cap_cnode_cap { + sel4_common::println!("TCB Configure: CSpace cap is invalid."); + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + let decode_vroot_cap_ret = decode_set_space_args(vroot_data, vroot_cap, vroot_slot); + let binding = decode_vroot_cap_ret.clone().unwrap(); + match decode_vroot_cap_ret { + Ok(_) => vroot_cap = &binding, + Err(status) => return status, + } + #[cfg(target_arch = "riscv64")] + if !is_valid_vtable_root(&vroot_cap) { + unsafe { + current_syscall_error._type = 
seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + #[cfg(target_arch = "aarch64")] + if !vroot_cap.is_valid_vtable_root() { + unsafe { + current_syscall_error._type = seL4_IllegalOperation; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + if !validFaultHandler(fh_cap) { + sel4_common::println!("TCB SetSpace: fault endpoint cap invalid."); + unsafe { + current_syscall_error.invalidCapNumber = 1; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + invoke_tcb_thread_control_caps( + target_thread, + slot, + &fh_cap, + fh_slot, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + &croot_cap, + croot_slot, + &vroot_cap, + vroot_slot, + thread_control_caps_update_space | thread_control_caps_update_fault, + ) +} fn decode_bind_notification(capability: &cap_thread_cap) -> exception_t { if get_extra_cap_by_index(0).is_none() { @@ -652,6 +871,38 @@ fn decode_unbind_notification(capability: &cap_thread_cap) -> exception_t { set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); invoke_tcb_unbind_notification(tcb) } +#[cfg(feature = "KERNEL_MCS")] +pub fn decode_set_timeout_endpoint(capability: &cap_thread_cap, slot: &mut cte_t) -> exception_t { + use crate::config::thread_control_caps_update_timeout; + + if get_extra_cap_by_index(0).is_none() { + debug!("TCB SetSchedParams: Truncated message."); + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + let mut thSlot = get_extra_cap_by_index(0).unwrap(); + let thCap = &thSlot.clone().capability; + if !validFaultHandler(&thCap) { + debug!("TCB SetTimeoutEndpoint: timeout endpoint cap invalid."); + unsafe { + current_syscall_error.invalidCapNumber = 1; + } + return exception_t::EXCEPTION_SYSCALL_ERROR; + } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + invoke_tcb_thread_control_caps( + convert_to_mut_type_ref::(capability.get_capTCBPtr() as 
usize), + slot, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + thCap, + &mut thSlot, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + &cap_null_cap::new().unsplay(), + unsafe { &mut *(0 as *mut cte_t) }, + thread_control_caps_update_timeout, + ) +} #[cfg(feature = "ENABLE_SMP")] fn decode_set_affinity(cap: &cap_t, length: usize, buffer: &seL4_IPCBuffer) -> exception_t { diff --git a/kernel/src/syscall/invocation/decode/mod.rs b/kernel/src/syscall/invocation/decode/mod.rs index 50950de..3786edb 100644 --- a/kernel/src/syscall/invocation/decode/mod.rs +++ b/kernel/src/syscall/invocation/decode/mod.rs @@ -104,29 +104,21 @@ pub fn decode_invocation( } cap_Splayed::reply_cap(data) => { - #[cfg(feature = "KERNEL_MCS")] - { - // TODO: MCS - exception_t::EXCEPTION_NONE - } - #[cfg(not(feature = "KERNEL_MCS"))] - { - if unlikely(data.get_capReplyMaster() != 0) { - debug!("Attempted to invoke an invalid reply cap {}.", cap_index); - unsafe { - current_syscall_error._type = seL4_InvalidCapability; - current_syscall_error.invalidCapNumber = 0; - return exception_t::EXCEPTION_SYSCALL_ERROR; - } + if unlikely(data.get_capReplyMaster() != 0) { + debug!("Attempted to invoke an invalid reply cap {}.", cap_index); + unsafe { + current_syscall_error._type = seL4_InvalidCapability; + current_syscall_error.invalidCapNumber = 0; + return exception_t::EXCEPTION_SYSCALL_ERROR; } - set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); - get_currenct_thread().do_reply( - convert_to_mut_type_ref::(data.get_capTCBPtr() as usize), - slot, - data.get_capReplyCanGrant() != 0, - ); - exception_t::EXCEPTION_NONE } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + get_currenct_thread().do_reply( + convert_to_mut_type_ref::(data.get_capTCBPtr() as usize), + slot, + data.get_capReplyCanGrant() != 0, + ); + exception_t::EXCEPTION_NONE } cap_Splayed::thread_cap(data) => { decode_tcb_invocation(label, 
length, &data, slot, call, buffer) @@ -159,7 +151,9 @@ pub fn decode_invocation( buffer: &seL4_IPCBuffer, ) -> exception_t { // TODO: MCS , in this function, there's lot's of mcs codes - // println!("decode invocation {}",capability.get_tag()); + // sel4_common::println!("decode invocation {}", capability.get_tag()); + + use sel4_task::reply::reply_t; match capability.clone().splay() { cap_Splayed::null_cap(_) | cap_Splayed::zombie_cap(_) => { @@ -219,29 +213,12 @@ pub fn decode_invocation( } cap_Splayed::reply_cap(data) => { - #[cfg(feature = "KERNEL_MCS")] - { - // TODO: MCS - exception_t::EXCEPTION_NONE - } - #[cfg(not(feature = "KERNEL_MCS"))] - { - if unlikely(data.get_capReplyMaster() != 0) { - debug!("Attempted to invoke an invalid reply cap {}.", cap_index); - unsafe { - current_syscall_error._type = seL4_InvalidCapability; - current_syscall_error.invalidCapNumber = 0; - return exception_t::EXCEPTION_SYSCALL_ERROR; - } - } - set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); - get_currenct_thread().do_reply( - convert_to_mut_type_ref::(data.get_capTCBPtr() as usize), - slot, - data.get_capReplyCanGrant() != 0, - ); - exception_t::EXCEPTION_NONE - } + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + get_currenct_thread().do_reply( + convert_to_mut_type_ref::(data.get_capReplyPtr() as usize), + data.get_capReplyCanGrant() != 0, + ); + exception_t::EXCEPTION_NONE } cap_Splayed::thread_cap(data) => { if unlikely(firstPhase) { @@ -275,7 +252,7 @@ pub fn decode_invocation( cap_Splayed::irq_handler_cap(data) => { decode_irq_handler_invocation(label, data.get_capIRQ() as usize) } - cap_Splayed::sched_control_cap(_) => { + cap_Splayed::sched_control_cap(data) => { if unlikely(firstPhase) { debug!( "Cannot invoke sched control capabilities in the first phase of an invocation" @@ -286,9 +263,9 @@ pub fn decode_invocation( } return exception_t::EXCEPTION_NONE; } - decode_sched_control_invocation() + 
decode_sched_control_invocation(label, length, &data, buffer) } - cap_Splayed::sched_context_cap(_) => { + cap_Splayed::sched_context_cap(data) => { if unlikely(firstPhase) { debug!( "Cannot invoke sched context capabilities in the first phase of an invocation" @@ -299,7 +276,7 @@ pub fn decode_invocation( } return exception_t::EXCEPTION_NONE; } - decode_sched_context_invocation() + decode_sched_context_invocation(label, &data, buffer) } _ => decode_mmu_invocation(label, length, slot, call, buffer), } diff --git a/kernel/src/syscall/invocation/invoke_sched.rs b/kernel/src/syscall/invocation/invoke_sched.rs index f14bc03..f175f36 100644 --- a/kernel/src/syscall/invocation/invoke_sched.rs +++ b/kernel/src/syscall/invocation/invoke_sched.rs @@ -1,9 +1,16 @@ use sel4_common::{ - structures::exception_t, - structures_gen::{cap, cap_tag}, + platform::time_def::ticks_t, + structures::{exception_t, seL4_IPCBuffer}, + structures_gen::{call_stack, cap, cap_Splayed, cap_tag, notification_t}, utils::convert_to_mut_type_ref, }; -use sel4_task::{sched_context::sched_context, tcb_t}; +use sel4_task::{ + checkBudget, commitTime, ksCurSC, ksCurThread, possible_switch_to, + reply::reply_t, + rescheduleRequired, + sched_context::{sched_context, MIN_REFILLS}, + seL4_SchedContext_Sporadic, tcb_t, +}; pub fn invokeSchedContext_UnbindObject(sc: &mut sched_context, capability: cap) -> exception_t { match capability.get_tag() { @@ -20,19 +27,88 @@ pub fn invokeSchedContext_UnbindObject(sc: &mut sched_context, capability: cap) exception_t::EXCEPTION_NONE } -pub fn invokeSchedContext_Bind(sc: &mut sched_context) -> exception_t { - // TODO: MCS +pub fn invokeSchedContext_Bind(sc: &mut sched_context, capability: &cap) -> exception_t { + match capability.clone().splay() { + cap_Splayed::thread_cap(data) => sc.schedContext_bindTCB(convert_to_mut_type_ref::( + data.get_capTCBPtr() as usize, + )), + cap_Splayed::notification_cap(data) => { + sc.schedContext_bindNtfn(convert_to_mut_type_ref::( + 
data.get_capNtfnPtr() as usize, + )) + } + _ => { + panic!("invalid cap type of invoke sched context bind") + } + } exception_t::EXCEPTION_NONE } pub fn invokeSchedContext_Unbind(sc: &mut sched_context) -> exception_t { - // TODO: MCS + sc.schedContext_unbindAllTCBs(); + sc.schedContext_unbindNtfn(); + if sc.scReply != 0 { + convert_to_mut_type_ref::(sc.scReply).replyNext = call_stack::new(0, 0); + sc.scReply = 0; + } exception_t::EXCEPTION_NONE } -pub fn invokeSchedContext_Consumed(sc: &mut sched_context) -> exception_t { +pub fn invokeSchedContext_Consumed(sc: &mut sched_context, buffer: &seL4_IPCBuffer) -> exception_t { // TODO: MCS + unimplemented!("invoke shced context consumed"); exception_t::EXCEPTION_NONE } pub fn invokeSchedContext_YieldTo(sc: &mut sched_context) -> exception_t { // TODO: MCS + unimplemented!("invoke sched context yieldto"); + exception_t::EXCEPTION_NONE +} +pub fn invokeSchedControl_ConfigureFlags( + target: &mut sched_context, + core: usize, + budget: ticks_t, + period: ticks_t, + max_refills: usize, + badge: usize, + flags: usize, +) -> exception_t { + target.scBadge = badge; + target.scSporadic = (flags & seL4_SchedContext_Sporadic) != 0; + + if target.scTcb != 0 { + /* remove from scheduler */ + convert_to_mut_type_ref::(target.scTcb).Release_Remove(); + convert_to_mut_type_ref::(target.scTcb).sched_dequeue(); + /* bill the current consumed amount before adjusting the params */ + if target.is_current() { + assert!(checkBudget()); + commitTime(); + } + } + + if budget == period { + target.refill_new(MIN_REFILLS, budget, 0); + } else if target.scRefillMax > 0 + && target.scTcb != 0 + && convert_to_mut_type_ref::(target.scTcb).is_runnable() + { + target.refill_update(period, budget, max_refills); + } else { + /* the scheduling context isn't active - it's budget is not being used, so + * we can just populate the parameters from now */ + target.refill_new(max_refills, budget, period); + } + + assert!(target.scRefillMax > 0); + if 
target.scTcb != 0 { + target.schedContext_resume(); + if convert_to_mut_type_ref::(target.scTcb).is_runnable() + && target.scTcb != unsafe { ksCurThread } + { + possible_switch_to(convert_to_mut_type_ref::(target.scTcb)); + } + if target.scTcb == unsafe { ksCurThread } { + rescheduleRequired(); + } + } exception_t::EXCEPTION_NONE } diff --git a/kernel/src/syscall/invocation/invoke_tcb.rs b/kernel/src/syscall/invocation/invoke_tcb.rs index 56d1841..3404f33 100644 --- a/kernel/src/syscall/invocation/invoke_tcb.rs +++ b/kernel/src/syscall/invocation/invoke_tcb.rs @@ -98,7 +98,9 @@ pub fn invoke_tcb_write_registers( if resumeTarget != 0 { // cancel_ipc(dest); - dest.cancel_ipc(); + if dest.is_stopped() { + dest.cancel_ipc(); + } dest.restart(); } if dest.is_current() { @@ -149,7 +151,9 @@ pub fn invoke_tcb_suspend(thread: &mut tcb_t) -> exception_t { #[inline] pub fn invoke_tcb_resume(thread: &mut tcb_t) -> exception_t { // cancel_ipc(thread); - thread.cancel_ipc(); + if thread.is_stopped() { + thread.cancel_ipc(); + } thread.restart(); exception_t::EXCEPTION_NONE } @@ -165,7 +169,7 @@ pub fn invoke_tcb_set_priority(target: &mut tcb_t, prio: usize) -> exception_t { target.set_priority(prio); exception_t::EXCEPTION_NONE } - +#[cfg(not(feature = "KERNEL_MCS"))] pub fn invoke_tcb_set_space( target: &mut tcb_t, slot: &mut cte_t, @@ -200,6 +204,122 @@ pub fn invoke_tcb_set_space( } exception_t::EXCEPTION_NONE } +#[cfg(feature = "KERNEL_MCS")] +#[no_mangle] +pub fn installTCBCap( + target: &mut tcb_t, + tCap: &cap, + slot: &mut cte_t, + index: usize, + newCap: &cap, + srcSlot: &mut cte_t, +) -> exception_t { + let mut rootSlot = target.get_cspace_mut_ref(index); + let e = rootSlot.delete_all(true); + if e != exception_t::EXCEPTION_NONE { + return e; + } + if same_object_as(newCap, &srcSlot.capability) && same_object_as(tCap, &slot.capability) { + cte_insert(newCap, srcSlot, &mut rootSlot); + } + return e; +} +#[cfg(feature = "KERNEL_MCS")] +pub fn 
invoke_tcb_thread_control_caps( + target: &mut tcb_t, + slot: &mut cte_t, + fh_newCap: &cap, + fh_srcSlot: &mut cte_t, + th_newCap: &cap, + th_srcSlot: &mut cte_t, + croot_new_cap: &cap, + croot_src_slot: &mut cte_t, + vroot_new_cap: &cap, + vroot_src_slot: &mut cte_t, + updateFlags: usize, +) -> exception_t { + use sel4_common::sel4_config::{tcbFaultHandler, tcbTimeoutHandler}; + + use crate::config::{ + thread_control_caps_update_fault, thread_control_caps_update_space, + thread_control_caps_update_timeout, + }; + let target_cap = cap_thread_cap::new(target.get_ptr() as u64).unsplay(); + if updateFlags & thread_control_caps_update_fault != 0 { + let e = installTCBCap( + target, + &target_cap, + slot, + tcbFaultHandler, + fh_newCap, + fh_srcSlot, + ); + if e != exception_t::EXCEPTION_NONE { + return e; + } + } + if updateFlags & thread_control_caps_update_timeout != 0 { + let e = installTCBCap( + target, + &target_cap, + slot, + tcbTimeoutHandler, + th_newCap, + th_srcSlot, + ); + if e != exception_t::EXCEPTION_NONE { + return e; + } + } + if updateFlags & thread_control_caps_update_space != 0 { + let e = installTCBCap( + target, + &target_cap, + slot, + tcbCTable, + croot_new_cap, + croot_src_slot, + ); + if e != exception_t::EXCEPTION_NONE { + return e; + } + let e = installTCBCap( + target, + &target_cap, + slot, + tcbVTable, + vroot_new_cap, + vroot_src_slot, + ); + if e != exception_t::EXCEPTION_NONE { + return e; + } + } + + // target.tcbFaultHandler = fault_ep; + // let root_slot = target.get_cspace_mut_ref(tcbCTable); + // let status = root_slot.delete_all(true); + // if status != exception_t::EXCEPTION_NONE { + // return status; + // } + // if same_object_as(croot_new_cap, &croot_src_slot.capability) + // && same_object_as(&target_cap, &slot.capability) + // { + // cte_insert(croot_new_cap, croot_src_slot, root_slot); + // } + + // let root_vslot = target.get_cspace_mut_ref(tcbVTable); + // let status = root_vslot.delete_all(true); + // if status != 
exception_t::EXCEPTION_NONE { + // return status; + // } + // if same_object_as(vroot_new_cap, &vroot_src_slot.capability) + // && same_object_as(&target_cap, &slot.capability) + // { + // cte_insert(vroot_new_cap, vroot_src_slot, root_vslot); + // } + exception_t::EXCEPTION_NONE +} pub fn invoke_tcb_set_ipc_buffer( target: &mut tcb_t, @@ -215,11 +335,11 @@ pub fn invoke_tcb_set_ipc_buffer( return status; } target.tcbIPCBuffer = buffer_addr; - if let Some(buffer_src_slot) = buffer_src_slot { + if let Some(mut buffer_src_slot) = buffer_src_slot { if same_object_as(&buffer_cap, &buffer_src_slot.capability) && same_object_as(&target_cap, &slot.capability) { - cte_insert(&buffer_cap, buffer_src_slot, buffer_slot); + cte_insert(&buffer_cap, &mut buffer_src_slot, buffer_slot); } } if target.is_current() { diff --git a/kernel/src/syscall/invocation/mod.rs b/kernel/src/syscall/invocation/mod.rs index 862b045..60b74b9 100644 --- a/kernel/src/syscall/invocation/mod.rs +++ b/kernel/src/syscall/invocation/mod.rs @@ -18,10 +18,10 @@ use sel4_common::structures::exception_t; use sel4_common::structures_gen::seL4_Fault_CapFault; use sel4_task::{get_currenct_thread, set_thread_state, ThreadState}; -use crate::kernel::boot::current_fault; use crate::syscall::invocation::decode::decode_invocation; use crate::syscall::syscall_reply::{reply_error_from_kernel, reply_success_from_kernel}; use crate::syscall::{handle_fault, lookup_extra_caps_with_buf}; +use sel4_common::ffi::current_fault; #[no_mangle] #[cfg(not(feature = "KERNEL_MCS"))] diff --git a/kernel/src/syscall/mod.rs b/kernel/src/syscall/mod.rs index 0e109dd..9dc4f71 100644 --- a/kernel/src/syscall/mod.rs +++ b/kernel/src/syscall/mod.rs @@ -10,6 +10,8 @@ use sel4_common::arch::ArchReg; use sel4_common::arch::ArchReg::*; #[cfg(not(feature = "KERNEL_MCS"))] use sel4_common::sel4_config::tcbCaller; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::sched_context::sched_context_t; pub const SysCall: isize = -1; pub const SYSCALL_MAX: 
isize = SysCall; @@ -60,6 +62,8 @@ pub const SysDebugNameThread: isize = SysDebugSnapshot - 1; pub const SysGetClock: isize = -30; #[cfg(feature = "KERNEL_MCS")] pub const SysGetClock: isize = -33; +#[cfg(feature = "KERNEL_MCS")] +use crate::structures::lookupCap_ret_t; use sel4_common::structures::exception_t; use sel4_common::structures_gen::{ cap, cap_Splayed, cap_tag, endpoint, lookup_fault_missing_capability, notification, @@ -71,12 +75,15 @@ use sel4_task::{ activateThread, get_currenct_thread, rescheduleRequired, schedule, set_thread_state, tcb_t, ThreadState, }; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::{chargeBudget, get_current_sc, ksConsumed, ksCurSC, mcs_preemption_point}; pub use utils::*; use crate::arch::restore_user_context; use crate::interrupt::handler::handleInterrupt; -use crate::kernel::boot::{current_fault, current_lookup_fault}; +use crate::kernel::boot::current_lookup_fault; use crate::{config::irqInvalid, interrupt::getActiveIRQ}; +use sel4_common::ffi::current_fault; use self::invocation::handleInvocation; @@ -146,86 +153,110 @@ pub fn handleSyscall(_syscall: usize) -> exception_t { #[no_mangle] #[cfg(feature = "KERNEL_MCS")] pub fn handleSyscall(_syscall: usize) -> exception_t { + use core::intrinsics::likely; + + use sel4_task::{checkBudgetRestart, updateTimestamp}; + let syscall: isize = _syscall as isize; // if hart_id() == 0 { // debug!("handle syscall: {}", syscall); // } - // TODO: MCS - // updateTimestamp(); - // if (likely(checkBudgetRestart())) { - match syscall { - SysSend => { - let ret = handleInvocation( - false, - true, - false, - false, - get_currenct_thread().tcbArch.get_register(Cap), - ); - if unlikely(ret != exception_t::EXCEPTION_NONE) { - let irq = getActiveIRQ(); - if irq != irqInvalid { - handleInterrupt(irq); + // sel4_common::println!("handle syscall {}", syscall); + updateTimestamp(); + if likely(checkBudgetRestart()) { + match syscall { + SysSend => { + let ret = handleInvocation( + false, + true, + false, 
+ false, + get_currenct_thread().tcbArch.get_register(Cap), + ); + if unlikely(ret != exception_t::EXCEPTION_NONE) { + let irq = getActiveIRQ(); + if irq != irqInvalid { + handleInterrupt(irq); + } } } - } - SysNBSend => { - let ret = handleInvocation( - false, - false, - false, - false, - get_currenct_thread().tcbArch.get_register(Cap), - ); - if unlikely(ret != exception_t::EXCEPTION_NONE) { - let irq = getActiveIRQ(); - if irq != irqInvalid { - handleInterrupt(irq); + SysNBSend => { + let ret = handleInvocation( + false, + false, + false, + false, + get_currenct_thread().tcbArch.get_register(Cap), + ); + if unlikely(ret != exception_t::EXCEPTION_NONE) { + let irq = getActiveIRQ(); + if irq != irqInvalid { + handleInterrupt(irq); + } } } - } - SysCall => { - let ret = handleInvocation( - true, - true, - true, - false, - get_currenct_thread().tcbArch.get_register(Cap), - ); - if unlikely(ret != exception_t::EXCEPTION_NONE) { - let irq = getActiveIRQ(); - if irq != irqInvalid { - handleInterrupt(irq); + SysCall => { + let ret = handleInvocation( + true, + true, + true, + false, + get_currenct_thread().tcbArch.get_register(Cap), + ); + if unlikely(ret != exception_t::EXCEPTION_NONE) { + let irq = getActiveIRQ(); + if irq != irqInvalid { + handleInterrupt(irq); + } } } + SysRecv => { + handle_recv(true, true); + } + SysWait => { + handle_recv(true, false); + } + SysNBWait => { + handle_recv(false, false); + } + SysReplyRecv => { + let reply = get_currenct_thread().tcbArch.get_register(Reply); + let ret = handleInvocation(false, false, true, true, reply); + assert!(ret == exception_t::EXCEPTION_NONE); + handle_recv(true, true); + } + SysNBSendRecv => { + // TODO: MCS + let dest = get_currenct_thread().tcbArch.get_register(nbsRecvDest); + let ret = handleInvocation(false, false, true, true, dest); + if unlikely(ret != exception_t::EXCEPTION_NONE) { + mcs_preemption_point(); + let irq = getActiveIRQ(); + if irq != irqInvalid { + handleInterrupt(irq); + } + } else { + 
handle_recv(true, true); + } + } + SysNBSendWait => { + let reply = get_currenct_thread().tcbArch.get_register(Reply); + let ret = handleInvocation(false, false, true, true, reply); + if unlikely(ret != exception_t::EXCEPTION_NONE) { + mcs_preemption_point(); + let irq = getActiveIRQ(); + if irq != irqInvalid { + handleInterrupt(irq); + } + } else { + handle_recv(true, false); + } + } + SysNBRecv => handle_recv(false, true), + SysYield => handle_yield(), + _ => panic!("Invalid syscall"), } - SysRecv => { - // TODO: MCS - handle_recv(true, true); - } - SysWait => { - handle_recv(true,false); - } - SysNBWait => { - handle_recv(false,false); - } - SysReplyRecv => { - // TODO: MCS - } - SysNBSendRecv => { - // TODO: MCS - } - SysNBSendWait => { - // TODO: MCS - } - SysNBRecv => { - // TODO: MCS - handle_recv(true, true) - } - SysYield => handle_yield(), - _ => panic!("Invalid syscall"), } - // } schedule(); activateThread(); exception_t::EXCEPTION_NONE @@ -313,6 +344,7 @@ pub fn handle_fault(thread: &mut tcb_t) { } #[inline] #[cfg(feature = "KERNEL_MCS")] +#[no_mangle] pub fn handleTimeout(tptr: &mut tcb_t) { use sel4_common::sel4_config::tcbTimeoutHandler; @@ -325,11 +357,11 @@ pub fn handleTimeout(tptr: &mut tcb_t) { #[no_mangle] pub fn endTimeslice(can_timeout_fault: bool) { use sel4_common::structures_gen::seL4_Fault_Timeout; - use sel4_task::{ksCurSC, sched_context::sched_context_t}; + use sel4_task::get_current_sc; unsafe { let thread = get_currenct_thread(); - let sched_context = convert_to_mut_type_ref::(ksCurSC); + let sched_context = get_current_sc(); if can_timeout_fault && !sched_context.is_round_robin() && thread.validTimeoutHandler() { current_fault = seL4_Fault_Timeout::new(sched_context.scBadge as u64).unsplay(); handleTimeout(thread); @@ -344,9 +376,32 @@ pub fn endTimeslice(can_timeout_fault: bool) { } } } -// #[cfg(feature="KERNEL_MCS")] -// #[inline] -// pub fn lookupReply() -> lookupCap_ret_t +#[cfg(feature = "KERNEL_MCS")] +#[inline] +pub fn 
lookupReply() -> lookupCap_ret_t { + use log::debug; + + use crate::object::lookup_cap; + + let reply_ptr = get_currenct_thread().tcbArch.get_register(ArchReg::Reply); + let mut lu_ret = lookup_cap(get_currenct_thread(), reply_ptr); + + if unlikely(lu_ret.status != exception_t::EXCEPTION_NONE) { + debug!("Reply cap lookup failed"); + unsafe { current_fault = seL4_Fault_CapFault::new(reply_ptr as u64, 1).unsplay() }; + handle_fault(get_currenct_thread()); + return lu_ret; + } + + if unlikely(lu_ret.capability.get_tag() != cap_tag::cap_reply_cap) { + debug!("Cap in reply slot is not a reply"); + unsafe { current_fault = seL4_Fault_CapFault::new(reply_ptr as u64, 1).unsplay() }; + handle_fault(get_currenct_thread()); + lu_ret.status = exception_t::EXCEPTION_FAULT; + return lu_ret; + } + lu_ret +} // TODO: MCS #[cfg(not(feature = "KERNEL_MCS"))] fn handle_reply() { @@ -363,6 +418,8 @@ fn handle_reply() { } #[cfg(feature = "KERNEL_MCS")] fn handle_recv(block: bool, canReply: bool) { + use sel4_common::structures_gen::cap_null_cap; + let current_thread = get_currenct_thread(); let ep_cptr = current_thread.tcbArch.get_register(ArchReg::Cap); let lu_ret = current_thread.lookup_slot(ep_cptr); @@ -383,6 +440,20 @@ fn handle_recv(block: bool, canReply: bool) { return handle_fault(current_thread); } // TODO: MCS + let mut reply_cap = cap_null_cap::new().unsplay(); + if canReply { + let lu_ret = lookupReply(); + if lu_ret.status != exception_t::EXCEPTION_NONE { + return; + } else { + reply_cap = lu_ret.capability; + } + } + convert_to_mut_type_ref::(data.get_capEPPtr() as usize).receive_ipc( + current_thread, + block, + cap::cap_reply_cap(&reply_cap), + ); } cap_Splayed::notification_cap(data) => { @@ -467,7 +538,11 @@ fn handle_recv(block: bool) { fn handle_yield() { #[cfg(feature = "KERNEL_MCS")] { - // TODO: MCS + unsafe { + let consumed = get_current_sc().scConsumed + ksConsumed; + chargeBudget((*get_current_sc().refill_head()).rAmount, false); + get_current_sc().scConsumed 
= consumed; + } } #[cfg(not(feature = "KERNEL_MCS"))] { diff --git a/kernel/src/syscall/utils.rs b/kernel/src/syscall/utils.rs index e2c6c24..5f6a0aa 100644 --- a/kernel/src/syscall/utils.rs +++ b/kernel/src/syscall/utils.rs @@ -1,6 +1,6 @@ use core::intrinsics::unlikely; -use crate::kernel::boot::{current_extra_caps, current_fault}; +use crate::kernel::boot::current_extra_caps; use crate::{ config::seL4_MinPrio, kernel::boot::{current_lookup_fault, current_syscall_error}, @@ -8,6 +8,7 @@ use crate::{ }; use log::debug; use sel4_common::arch::{maskVMRights, msgRegisterNum, ArchReg}; +use sel4_common::ffi::current_fault; use sel4_common::sel4_config::seL4_MinUntypedBits; use sel4_common::shared_types_bf_gen::seL4_CapRights; use sel4_common::structures_gen::{ diff --git a/sel4_common/src/arch/aarch64/registers.rs b/sel4_common/src/arch/aarch64/registers.rs index d451f7d..4293f7c 100644 --- a/sel4_common/src/arch/aarch64/registers.rs +++ b/sel4_common/src/arch/aarch64/registers.rs @@ -95,6 +95,10 @@ impl ArchReg { ArchReg::Frame(i) => frameRegisters[*i], ArchReg::GP(i) => gpRegisters[*i], ArchReg::FaultMessage(id, index) => fault_messages[*id][*index], + #[cfg(feature = "KERNEL_MCS")] + ArchReg::Reply => replyRegister, + #[cfg(feature = "KERNEL_MCS")] + ArchReg::nbsRecvDest => nbsendRecvDest, } } } diff --git a/sel4_common/src/arch/mod.rs b/sel4_common/src/arch/mod.rs index fa49e10..8f0fa58 100644 --- a/sel4_common/src/arch/mod.rs +++ b/sel4_common/src/arch/mod.rs @@ -66,6 +66,10 @@ pub enum ArchReg { GP(usize), /// Fault Message Reg, (id, index) FaultMessage(usize, usize), + #[cfg(feature = "KERNEL_MCS")] + Reply, + #[cfg(feature = "KERNEL_MCS")] + nbsRecvDest, } #[cfg(feature = "KERNEL_MCS")] diff --git a/sel4_common/src/arch/riscv64/registers.rs b/sel4_common/src/arch/riscv64/registers.rs index 0cb6c10..2654edf 100644 --- a/sel4_common/src/arch/riscv64/registers.rs +++ b/sel4_common/src/arch/riscv64/registers.rs @@ -94,6 +94,10 @@ impl ArchReg { ArchReg::Frame(i) 
=> frameRegisters[*i], ArchReg::GP(i) => gpRegisters[*i], ArchReg::FaultMessage(id, index) => fault_messages[*id][*index], + #[cfg(feature = "KERNEL_MCS")] + ArchReg::Reply => replyRegister, + #[cfg(feature = "KERNEL_MCS")] + ArchReg::nbsRecvDest => nbsendRecvDest, } } } diff --git a/sel4_common/src/ffi.rs b/sel4_common/src/ffi.rs index 1f1b6f0..6032d11 100644 --- a/sel4_common/src/ffi.rs +++ b/sel4_common/src/ffi.rs @@ -1,4 +1,6 @@ +use crate::sel4_bitfield_types::Bitfield; use crate::sel4_config::{CONFIG_KERNEL_STACK_BITS, CONFIG_MAX_NUM_NODES}; +use crate::structures_gen::seL4_Fault; use crate::BIT; #[repr(align(4096))] pub struct kernel_stack_alloc_data { @@ -13,3 +15,9 @@ pub static mut kernel_stack_alloc: kernel_stack_alloc_data = kernel_stack_alloc_ extern "C" { pub fn coreMap(); } + +#[no_mangle] +// #[link_section = ".boot.bss"] +pub static mut current_fault: seL4_Fault = seL4_Fault { + 0: Bitfield { arr: [0; 2usize] }, +}; diff --git a/sel4_common/src/lib.rs b/sel4_common/src/lib.rs index 8457c9f..24e46d6 100644 --- a/sel4_common/src/lib.rs +++ b/sel4_common/src/lib.rs @@ -15,8 +15,6 @@ pub mod logging; pub mod message_info; pub mod object; pub mod platform; -#[cfg(feature = "KERNEL_MCS")] -pub mod reply; pub mod sel4_bitfield_types; pub mod sel4_config; pub mod shared_types_bf_gen; diff --git a/sel4_common/src/reply.rs b/sel4_common/src/reply.rs deleted file mode 100644 index 9feb723..0000000 --- a/sel4_common/src/reply.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::structures_gen::call_stack; - -pub type reply_t = reply; -#[repr(C)] -#[derive(Debug, Clone)] -// TODO: MCS -pub struct reply { - /// TCB pointed to by this reply object - pub replyTCB: usize, - pub replyPrev: call_stack, - pub replyNext: call_stack, - pub padding: usize, -} diff --git a/sel4_common/src/sched_context.rs b/sel4_common/src/sched_context.rs deleted file mode 100644 index 8a5c58b..0000000 --- a/sel4_common/src/sched_context.rs +++ /dev/null @@ -1,21 +0,0 @@ -/// 时钟ticks -pub type 
ticks_t = usize; - -pub type sched_context_t = sched_context; -#[repr(C)] -#[derive(Debug, Clone)] -pub struct sched_context { - // TODO: MCS - pub scPeriod: ticks_t, - pub scConsumed: ticks_t, - pub scCore: usize, - pub scTcb: usize, - pub scReply: usize, - pub scNotification: usize, - pub scBadge: usize, - pub scYieldFrom: usize, - pub scRefillMax: usize, - pub scRefillHead: usize, - pub scRefillTail: usize, - pub scSporadic: bool, -} diff --git a/sel4_common/src/sel4_config.rs b/sel4_common/src/sel4_config.rs index c285c2f..db0cd75 100644 --- a/sel4_common/src/sel4_config.rs +++ b/sel4_common/src/sel4_config.rs @@ -146,3 +146,5 @@ pub const CONFIG_BOOT_THREAD_TIME_SLICE: usize = 5; pub const minDom: usize = 0; pub const maxDom: usize = CONFIG_NUM_DOMAINS - 1; pub const numDomains: usize = CONFIG_NUM_DOMAINS; + +pub const TIME_ARG_SIZE: usize = 1; diff --git a/sel4_cspace/src/cte.rs b/sel4_cspace/src/cte.rs index f847354..69dc1e3 100644 --- a/sel4_cspace/src/cte.rs +++ b/sel4_cspace/src/cte.rs @@ -67,6 +67,7 @@ impl cte_t { ret.capability = capability.clone(); } } + #[cfg(not(feature = "KERNEL_MCS"))] cap_tag::cap_reply_cap => { ret.capability = cap_null_cap::new().unsplay(); } diff --git a/sel4_ipc/src/endpoint.rs b/sel4_ipc/src/endpoint.rs index 90ddd03..8709799 100644 --- a/sel4_ipc/src/endpoint.rs +++ b/sel4_ipc/src/endpoint.rs @@ -2,6 +2,8 @@ use crate::transfer::Transfer; use sel4_common::arch::ArchReg; use sel4_common::structures_gen::endpoint; use sel4_common::utils::{convert_to_mut_type_ref, convert_to_option_mut_type_ref}; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::reply::reply_t; use sel4_task::{ possible_switch_to, rescheduleRequired, schedule_tcb, set_thread_state, tcb_queue_t, tcb_t, ThreadState, @@ -19,6 +21,8 @@ pub enum EPState { Send = 1, Recv = 2, } +#[cfg(feature = "KERNEL_MCS")] +use sel4_common::structures_gen::cap_reply_cap; pub trait endpoint_func { fn get_ptr(&self) -> pptr_t; @@ -49,8 +53,11 @@ pub trait endpoint_func { 
can_grant_reply: bool, canDonate: bool, ); + #[cfg(not(feature = "KERNEL_MCS"))] fn receive_ipc(&mut self, thread: &mut tcb_t, is_blocking: bool, grant: bool); #[cfg(feature = "KERNEL_MCS")] + fn receive_ipc(&mut self, thread: &mut tcb_t, is_blocking: bool, reply_cap: &mut cap_reply_cap); + #[cfg(feature = "KERNEL_MCS")] fn reorder_EP(&mut self, thread: &mut tcb_t); } impl endpoint_func for endpoint { @@ -93,6 +100,14 @@ impl endpoint_func for endpoint { if queue.head == 0 { self.set_state(EPState::Idle as u64); } + #[cfg(feature = "KERNEL_MCS")] + { + if let Some(reply) = + convert_to_option_mut_type_ref::(tcb.tcbState.get_replyObject() as usize) + { + reply.unlink(tcb); + } + } set_thread_state(tcb, ThreadState::ThreadStateInactive); } @@ -227,6 +242,9 @@ impl endpoint_func for endpoint { can_grant_reply: bool, canDonate: bool, ) { + use sel4_common::structures_gen::seL4_Fault_tag; + use sel4_task::{ksCurSC, reply::reply_t, sched_context::sched_context_t}; + match self.get_ep_state() { EPState::Idle | EPState::Send => { if blocking { @@ -265,7 +283,48 @@ impl endpoint_func for endpoint { } src_thread.do_ipc_transfer(dest_thread, Some(self), badge, can_grant); - // TODO: MCS + if let Some(reply) = convert_to_option_mut_type_ref::( + dest_thread.tcbState.get_replyObject() as usize, + ) { + reply.unlink(dest_thread); + } + if do_call || src_thread.tcbFault.get_tag() != seL4_Fault_tag::seL4_Fault_NullFault + { + if let Some(reply) = convert_to_option_mut_type_ref::( + dest_thread.tcbState.get_replyObject() as usize, + ) { + if can_grant || can_grant_reply { + reply.push(src_thread, dest_thread, canDonate); + } else { + set_thread_state(dest_thread, ThreadState::ThreadStateInactive); + } + } else { + set_thread_state(dest_thread, ThreadState::ThreadStateInactive); + } + } else if canDonate && dest_thread.tcbSchedContext == 0 { + convert_to_mut_type_ref::(src_thread.tcbSchedContext) + .schedContext_donate(dest_thread); + } + + assert!( + dest_thread.tcbSchedContext == 
0 + || convert_to_mut_type_ref::(dest_thread.tcbSchedContext) + .refill_sufficient(0) + ); + assert!( + dest_thread.tcbSchedContext == 0 + || convert_to_mut_type_ref::(dest_thread.tcbSchedContext) + .refill_ready() + ); + set_thread_state(dest_thread, ThreadState::ThreadStateRunning); + if convert_to_mut_type_ref::(dest_thread.tcbSchedContext) + .sc_sporadic() + && dest_thread.tcbSchedContext != unsafe { ksCurSC } + { + convert_to_mut_type_ref::(dest_thread.tcbSchedContext) + .refill_unblock_check(); + } + possible_switch_to(dest_thread); } } } @@ -325,16 +384,44 @@ impl endpoint_func for endpoint { } //TODO: MCS #[cfg(feature = "KERNEL_MCS")] - fn receive_ipc(&mut self, thread: &mut tcb_t, is_blocking: bool, grant: bool) { - //TODO: MCS + fn receive_ipc( + &mut self, + thread: &mut tcb_t, + is_blocking: bool, + reply_cap: &mut cap_reply_cap, + ) { + use core::intrinsics::unlikely; + use log::debug; + use sel4_common::structures_gen::{cap_tag::cap_reply_cap, notification_t, seL4_Fault_tag}; + use sel4_task::{ksCurSC, sched_context::sched_context_t}; + + use crate::notification_func; + + let mut replyptr: usize = 0; + if reply_cap.clone().unsplay().get_tag() == cap_reply_cap { + replyptr = reply_cap.get_capReplyPtr() as usize; + let reply = convert_to_mut_type_ref::(replyptr); + if unlikely(reply.replyTCB != 0 && reply.replyTCB != thread.get_ptr()) { + debug!("Reply object already has unexecuted reply!"); + convert_to_mut_type_ref::(reply.replyTCB as usize).cancel_ipc(); + } + } if thread.complete_signal() { return; } + if thread.tcbBoundNotification != 0 && is_blocking { + convert_to_mut_type_ref::(thread.tcbBoundNotification) + .maybeReturnSchedContext(thread); + } match self.get_ep_state() { EPState::Idle | EPState::Recv => { if is_blocking { thread.tcbState.set_blockingObject(self.get_ptr() as u64); - //TODO: MCS + // MCS + thread.tcbState.set_replyObject(replyptr as u64); + if replyptr != 0 { + convert_to_mut_type_ref::(replyptr).replyTCB = thread.get_ptr(); + 
} set_thread_state(thread, ThreadState::ThreadStateBlockedOnReceive); let mut queue = self.get_queue(); queue.ep_append(thread); @@ -359,7 +446,35 @@ impl endpoint_func for endpoint { let can_grant_reply = sender.tcbState.get_blockingIPCCanGrantReply() != 0; sender.do_ipc_transfer(thread, Some(self), badge, can_grant); let do_call = sender.tcbState.get_blockingIPCIsCall() != 0; - // TODO: MCS + // MCS + if convert_to_mut_type_ref::(sender.tcbSchedContext).sc_sporadic() + { + unsafe { + assert!(sender.tcbSchedContext != ksCurSC); + if sender.tcbSchedContext != ksCurSC { + convert_to_mut_type_ref::(sender.tcbSchedContext) + .refill_unblock_check(); + } + } + } + if do_call || sender.tcbFault.get_tag() != seL4_Fault_tag::seL4_Fault_NullFault { + if can_grant || can_grant_reply && replyptr != 0 { + let canDonate = sender.tcbSchedContext != 0 + && sender.tcbFault.get_tag() != seL4_Fault_tag::seL4_Fault_Timeout; + convert_to_mut_type_ref::(replyptr) + .push(sender, thread, canDonate); + } else { + set_thread_state(sender, ThreadState::ThreadStateInactive); + } + } else { + set_thread_state(sender, ThreadState::ThreadStateRunning); + possible_switch_to(sender); + assert!( + sender.tcbSchedContext == 0 + || convert_to_mut_type_ref::(sender.tcbSchedContext) + .refill_sufficient(0) + ); + } } } } diff --git a/sel4_ipc/src/notification.rs b/sel4_ipc/src/notification.rs index e3126dd..4d92907 100644 --- a/sel4_ipc/src/notification.rs +++ b/sel4_ipc/src/notification.rs @@ -1,7 +1,9 @@ use crate::transfer::Transfer; use sel4_common::arch::ArchReg; -use sel4_common::structures_gen::notification; +use sel4_common::structures_gen::{notification, notification_t}; use sel4_common::utils::{convert_to_mut_type_ref, convert_to_option_mut_type_ref}; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::sched_context::sched_context_t; use sel4_task::{ possible_switch_to, rescheduleRequired, set_thread_state, tcb_queue_t, tcb_t, ThreadState, }; @@ -29,6 +31,8 @@ pub trait notification_func { fn 
receive_signal(&mut self, recv_thread: &mut tcb_t, is_blocking: bool); #[cfg(feature = "KERNEL_MCS")] fn reorder_NTFN(&mut self, thread: &mut tcb_t); + #[cfg(feature = "KERNEL_MCS")] + fn maybeReturnSchedContext(&mut self, thread: &mut tcb_t); } impl notification_func for notification { #[inline] @@ -139,7 +143,32 @@ impl notification_func for notification { tcb.cancel_ipc(); set_thread_state(tcb, ThreadState::ThreadStateRunning); tcb.tcbArch.set_register(ArchReg::Badge, badge); + #[cfg(feature = "KERNEL_MCS")] + { + maybeDonateSchedContext(tcb, self); + if tcb.is_schedulable() { + possible_switch_to(tcb); + } + } + #[cfg(not(feature = "KERNEL_MCS"))] possible_switch_to(tcb); + #[cfg(feature = "KERNEL_MCS")] + if let Some(tcbsc) = + convert_to_option_mut_type_ref::(tcb.tcbSchedContext) + { + if tcbsc.sc_active() { + if let Some(sc) = convert_to_option_mut_type_ref::( + self.get_ntfnSchedContext() as usize, + ) { + if tcbsc.get_ptr() == sc.get_ptr() + && sc.sc_sporadic() + && !tcbsc.is_current() + { + tcbsc.refill_unblock_check(); + } + } + } + } } else { self.active(badge); } @@ -157,7 +186,26 @@ impl notification_func for notification { } set_thread_state(dest, ThreadState::ThreadStateRunning); dest.tcbArch.set_register(ArchReg::Badge, badge); + #[cfg(feature = "KERNEL_MCS")] + { + maybeDonateSchedContext(dest, self); + if dest.is_schedulable() { + possible_switch_to(dest); + } + } + #[cfg(not(feature = "KERNEL_MCS"))] possible_switch_to(dest); + #[cfg(feature = "KERNEL_MCS")] + if let Some(sc) = + convert_to_option_mut_type_ref::(dest.tcbSchedContext) + { + if sc.sc_sporadic() { + assert!(!sc.is_current()); + if !sc.is_current() { + sc.refill_unblock_check(); + } + } + } } else { panic!("queue is empty!") } @@ -209,4 +257,32 @@ impl notification_func for notification { queue.ep_append(thread); self.set_queue(&queue); } + #[cfg(feature = "KERNEL_MCS")] + #[inline] + fn maybeReturnSchedContext(&mut self, thread: &mut tcb_t) { + if let Some(sc) = + 
convert_to_option_mut_type_ref::(self.get_ntfnSchedContext() as usize) + { + if sc.get_ptr() == thread.tcbSchedContext { + thread.tcbSchedContext = 0; + sc.scTcb = 0; + if thread.is_current() { + rescheduleRequired(); + } + } + } + } +} +#[cfg(feature = "KERNEL_MCS")] +pub fn maybeDonateSchedContext(tcb: &mut tcb_t, ntfnptr: ¬ification_t) { + if tcb.tcbSchedContext == 0 { + if let Some(sc) = convert_to_option_mut_type_ref::( + ntfnptr.get_ntfnSchedContext() as usize, + ) { + if sc.scTcb == 0 { + sc.schedContext_donate(tcb); + sc.schedContext_resume(); + } + } + } } diff --git a/sel4_ipc/src/transfer.rs b/sel4_ipc/src/transfer.rs index 4c8fe4a..42d7dd4 100644 --- a/sel4_ipc/src/transfer.rs +++ b/sel4_ipc/src/transfer.rs @@ -19,6 +19,10 @@ use sel4_common::structures_gen::seL4_Fault_NullFault; use sel4_common::structures_gen::seL4_Fault_tag; use sel4_common::utils::*; use sel4_cspace::interface::*; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::reply::reply_t; +#[cfg(feature = "KERNEL_MCS")] +use sel4_task::reply_remove_tcb; use sel4_task::{possible_switch_to, set_thread_state, tcb_t, ThreadState}; use sel4_vspace::pptr_t; @@ -62,13 +66,19 @@ pub trait Transfer { badge: usize, grant: bool, ); - + #[cfg(feature = "KERNEL_MCS")] + fn do_reply(&mut self, reply: &mut reply_t, grant: bool); + #[cfg(not(feature = "KERNEL_MCS"))] fn do_reply(&mut self, receiver: &mut tcb_t, slot: &mut cte_t, grant: bool); } impl Transfer for tcb_t { fn cancel_ipc(&mut self) { let state = &self.tcbState; + #[cfg(feature = "KERNEL_MCS")] + { + seL4_Fault_NullFault::new(); + } match self.get_state() { ThreadState::ThreadStateBlockedOnSend | ThreadState::ThreadStateBlockedOnReceive => { let ep = convert_to_mut_type_ref::(state.get_blockingObject() as usize); @@ -85,6 +95,7 @@ impl Transfer for tcb_t { #[cfg(feature = "KERNEL_MCS")] { //TODO + reply_remove_tcb(self); } #[cfg(not(feature = "KERNEL_MCS"))] { @@ -326,7 +337,61 @@ impl Transfer for tcb_t { self.do_fault_transfer(receiver, badge) } 
} + #[cfg(feature = "KERNEL_MCS")] + fn do_reply(&mut self, reply: &mut reply_t, grant: bool) { + use sel4_common::{ffi::current_fault, structures_gen::seL4_Fault_Timeout}; + use sel4_task::{handleTimeout, sched_context::sched_context_t}; + + if reply.replyTCB == 0 + || convert_to_mut_type_ref::(reply.replyTCB) + .tcbState + .get_tsType() + != ThreadState::ThreadStateBlockedOnReply as u64 + { + /* nothing to do */ + return; + } + + let receiver = convert_to_mut_type_ref::(reply.replyTCB); + reply.remove(receiver); + assert!(receiver.tcbState.get_replyObject() == 0); + assert!(reply.replyTCB == 0); + let sc = convert_to_mut_type_ref::(receiver.tcbSchedContext); + if sc.sc_sporadic() && !sc.is_current() { + sc.refill_unblock_check(); + } + assert_eq!(receiver.get_state(), ThreadState::ThreadStateBlockedOnReply); + + let fault_type = receiver.tcbFault.get_tag(); + if likely(fault_type == seL4_Fault_tag::seL4_Fault_NullFault) { + self.do_ipc_transfer(receiver, None, 0, grant); + set_thread_state(receiver, ThreadState::ThreadStateRunning); + } else { + if self.do_fault_reply_transfer(receiver) { + set_thread_state(receiver, ThreadState::ThreadStateRestart); + } else { + set_thread_state(receiver, ThreadState::ThreadStateInactive); + } + } + if receiver.tcbSchedContext != 0 && receiver.is_runnable() { + if sc.refill_ready() && sc.refill_sufficient(0) { + possible_switch_to(receiver); + } else { + if receiver.validTimeoutHandler() + && fault_type != seL4_Fault_tag::seL4_Fault_Timeout as u64 + { + unsafe { + current_fault = seL4_Fault_Timeout::new(sc.scBadge as u64).unsplay(); + handleTimeout(receiver) + }; + } else { + sc.postpone(); + } + } + } + } + #[cfg(not(feature = "KERNEL_MCS"))] fn do_reply(&mut self, receiver: &mut tcb_t, slot: &mut cte_t, grant: bool) { assert_eq!(receiver.get_state(), ThreadState::ThreadStateBlockedOnReply); let fault_type = receiver.tcbFault.get_tag(); diff --git a/sel4_task/src/ffi.rs b/sel4_task/src/ffi.rs index 85c5bb1..4593819 100644 --- 
a/sel4_task/src/ffi.rs +++ b/sel4_task/src/ffi.rs @@ -9,10 +9,6 @@ pub extern "C" fn sendIPC() { unimplemented!("MCS"); } #[no_mangle] -pub extern "C" fn installTCBCap() { - unimplemented!("MCS"); -} -#[no_mangle] pub extern "C" fn tcbSchedDequeue(tcb: &mut tcb_t) { (*tcb).sched_dequeue(); } @@ -42,4 +38,5 @@ extern "C" { pub fn reorder_EP(ep: &mut endpoint, thread: &mut tcb_t); pub fn reorder_NTFN(ntfn: &mut notification, thread: &mut tcb_t); pub fn endTimeslice(can_timeout_fault: bool); + pub fn handleTimeout(tptr: &mut tcb_t); } diff --git a/sel4_task/src/lib.rs b/sel4_task/src/lib.rs index 4b6e944..dc2550b 100644 --- a/sel4_task/src/lib.rs +++ b/sel4_task/src/lib.rs @@ -19,9 +19,10 @@ pub mod tcb; mod tcb_queue; mod thread_state; pub use ffi::*; +#[cfg(feature = "KERNEL_MCS")] +pub mod reply; pub use scheduler::*; pub use structures::*; pub use tcb::*; pub use tcb_queue::*; pub use thread_state::*; -pub use ffi::*; \ No newline at end of file diff --git a/sel4_task/src/reply.rs b/sel4_task/src/reply.rs new file mode 100644 index 0000000..feb9214 --- /dev/null +++ b/sel4_task/src/reply.rs @@ -0,0 +1,134 @@ +use core::intrinsics::likely; + +use sel4_common::{structures_gen::call_stack, utils::convert_to_mut_type_ref}; + +use crate::{sched_context::sched_context_t, set_thread_state, tcb_t, ThreadState}; + +pub type reply_t = reply; +#[repr(C)] +#[derive(Debug, Clone)] +// TODO: MCS +pub struct reply { + /// TCB pointed to by this reply object + pub replyTCB: usize, + pub replyPrev: call_stack, + pub replyNext: call_stack, + pub padding: usize, +} +impl reply { + pub fn get_ptr(&mut self) -> usize { + self as *const _ as usize + } + pub fn unlink(&mut self, tcb: &mut tcb_t) { + assert!(self.replyTCB == tcb.get_ptr()); + assert!(tcb.tcbState.get_replyObject() as usize == self.get_ptr()); + tcb.tcbState.set_replyObject(0); + self.replyTCB = 0; + set_thread_state(tcb, ThreadState::ThreadStateInactive); + } + pub fn push(&mut self, tcb_caller: &mut tcb_t, tcb_callee: 
&mut tcb_t, canDonate: bool) { + let sc_donated = convert_to_mut_type_ref::(tcb_caller.tcbSchedContext); + + assert!(tcb_caller.get_ptr() != 0); + assert!(self.get_ptr() != 0); + assert!(self.replyTCB == 0); + + assert!(self.replyPrev.get_callStackPtr() == 0); + assert!(self.replyNext.get_callStackPtr() == 0); + + /* tcb caller should not be in an existing call stack */ + assert!(tcb_caller.tcbState.get_replyObject() == 0); + + /* unlink callee and reply - they may not have been linked already, + * if this rendezvous is occurring when seL4_Recv is called, + * however, no harm in overwriting 0 with 0 */ + tcb_callee.tcbState.set_replyObject(0); + + /* link caller and reply */ + self.replyTCB = tcb_caller.get_ptr(); + tcb_caller.tcbState.set_replyObject(self.get_ptr() as u64); + set_thread_state(tcb_caller, ThreadState::ThreadStateBlockedOnReply); + + if sc_donated.get_ptr() != 0 && tcb_callee.tcbSchedContext == 0 && canDonate { + let old_caller = convert_to_mut_type_ref::(sc_donated.scReply); + + /* check stack integrity */ + assert!( + old_caller.get_ptr() == 0 + || old_caller.replyNext.get_callStackPtr() == sc_donated.get_ptr() as u64 + ); + + /* push on to stack */ + self.replyPrev = call_stack::new(old_caller.get_ptr() as u64, 0); + if old_caller.get_ptr() != 0 { + old_caller.replyNext = call_stack::new(self.get_ptr() as u64, 0); + } + self.replyNext = call_stack::new(sc_donated.get_ptr() as u64, 1); + sc_donated.scReply = self.get_ptr(); + + /* now do the actual donation */ + sc_donated.schedContext_donate(tcb_callee); + } + } + pub fn pop(&mut self, tcb: &mut tcb_t) { + assert!(self.get_ptr() != 0); + assert!(self.replyTCB == tcb.get_ptr()); + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + assert!(tcb.tcbState.get_replyObject() as usize == self.get_ptr()); + + let next_ptr = self.replyNext.get_callStackPtr() as usize; + let prev_ptr = self.replyPrev.get_callStackPtr() as usize; + + if likely(next_ptr != 0) { +
assert!(self.replyNext.get_isHead() != 0); + + convert_to_mut_type_ref::(next_ptr).scReply = prev_ptr; + if prev_ptr != 0 { + convert_to_mut_type_ref::(prev_ptr).replyNext = self.replyNext.clone(); + assert!( + convert_to_mut_type_ref::(prev_ptr) + .replyNext + .get_isHead() + != 0 + ); + } + + /* give it back */ + if tcb.tcbSchedContext == 0 { + /* only give the SC back if our SC is NULL. This prevents + * strange behaviour when a thread is bound to an sc while it is + * in the BlockedOnReply state. The semantics in this case are that the + * SC cannot go back to the caller if the caller has received another one */ + convert_to_mut_type_ref::(next_ptr).schedContext_donate(tcb); + } + } + + self.replyPrev = call_stack::new(0, 0); + self.replyNext = call_stack::new(0, 0); + self.unlink(tcb); + } + pub fn remove(&mut self, tcb: &mut tcb_t) { + assert!(self.replyTCB == tcb.get_ptr()); + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + assert!(tcb.tcbState.get_replyObject() == self.get_ptr() as u64); + + let next_ptr = self.replyNext.get_callStackPtr() as usize; + let prev_ptr = self.replyPrev.get_callStackPtr() as usize; + + if likely(next_ptr != 0 && self.replyNext.get_isHead() != 0) { + /* head of the call stack -> just pop */ + self.pop(tcb); + } else { + if next_ptr != 0 { + /* not the head, remove from middle - break the chain */ + convert_to_mut_type_ref::(next_ptr).replyPrev = call_stack::new(0, 0); + } + if prev_ptr != 0 { + convert_to_mut_type_ref::(prev_ptr).replyNext = call_stack::new(0, 0); + } + self.replyPrev = call_stack::new(0, 0); + self.replyNext = call_stack::new(0, 0); + self.unlink(tcb); + } + } +} diff --git a/sel4_task/src/sched_context.rs b/sel4_task/src/sched_context.rs index 0f43fa8..3b81434 100644 --- a/sel4_task/src/sched_context.rs +++ b/sel4_task/src/sched_context.rs @@ -13,13 +13,14 @@ use sel4_common::{ println, sel4_config::{CONFIG_KERNEL_WCET_SCALE, UINT64_MAX}, 
shared_types_bf_gen::seL4_MessageInfo, - structures_gen::{notification, notification_t}, + structures_gen::{cap_sched_context_cap, notification, notification_t}, utils::convert_to_mut_type_ref, + BIT, }; use crate::{ - get_currenct_thread, ksCurSC, ksCurTime, ksReprogram, ksSchedulerAction, rescheduleRequired, - tcb_t, + get_currenct_thread, get_current_sc, ksCurSC, ksCurTime, ksReprogram, ksSchedulerAction, + rescheduleRequired, tcb_t, }; pub type sched_context_t = sched_context; @@ -40,7 +41,7 @@ pub struct sched_context { pub scRefillTail: usize, pub scSporadic: bool, } -pub(crate) const MIN_REFILLS: usize = 2; +pub const MIN_REFILLS: usize = 2; pub(crate) type refill_t = refill; #[repr(C)] #[derive(Debug, Clone)] @@ -60,6 +61,10 @@ pub fn MAX_PERIOD_US() -> time_t { pub fn MAX_RELEASE_TIME() -> time_t { UINT64_MAX - 5 * usToTicks(MAX_PERIOD_US()) } +pub fn refill_absolute_max(sc_cap: &cap_sched_context_cap) -> usize { + return (BIT!(sc_cap.get_capSCSizeBits() as usize) - size_of::()) + / size_of::(); +} impl sched_context { #[inline] @@ -71,6 +76,19 @@ impl sched_context { self.scPeriod == 0 } #[inline] + pub fn is_current(&self) -> bool { + self.get_ptr() == unsafe { ksCurSC } + } + #[inline] + pub fn sc_released(&mut self) -> bool { + if self.sc_active() { + assert!(self.refill_sufficient(0)); + return self.refill_ready(); + } else { + return false; + } + } + #[inline] pub fn sc_active(&self) -> bool { self.scRefillMax > 0 } @@ -208,6 +226,44 @@ impl sched_context { self.refill_capacity(usage) >= MIN_BUDGET() } #[inline] + pub fn refill_update( + &mut self, + new_period: ticks_t, + new_budget: ticks_t, + new_max_refills: usize, + ) { + /* refill must be initialised in order to be updated - otherwise refill_new should be used */ + assert!(self.scRefillMax > 0); + + unsafe { + (*self.refill_index(0)).rAmount = (*self.refill_head()).rAmount; + (*self.refill_index(0)).rTime = (*self.refill_head()).rTime; + self.scRefillHead = 0; + /* truncate refill list to size 
1 */ + self.scRefillTail = self.scRefillHead; + /* update max refills */ + self.scRefillMax = new_max_refills; + /* update period */ + self.scPeriod = new_period; + + if self.refill_ready() { + (*self.refill_head()).rTime = ksCurTime; + } + + if (*self.refill_head()).rAmount >= new_budget { + /* if the heads budget exceeds the new budget just trim it */ + (*self.refill_head()).rAmount = new_budget; + self.maybe_add_empty_tail(); + } else { + /* otherwise schedule the rest for the next period */ + self.refill_add_tail( + (*self.refill_head()).rTime + new_period, + new_budget - (*self.refill_head()).rAmount, + ); + } + } + } + #[inline] pub fn schedule_used(&mut self, new_rTime: ticks_t, new_rAmount: ticks_t) { // TODO: MCS unsafe { @@ -243,7 +299,7 @@ impl sched_context { assert!(tcb.tcbSchedContext == 0); tcb.tcbSchedContext = self.get_ptr(); self.scTcb = tcb.get_ptr(); - if self.sc_sporadic() && self.sc_active() && self.get_ptr() != unsafe { ksCurSC } { + if self.sc_sporadic() && self.sc_active() && !self.is_current() { self.refill_unblock_check() } self.schedContext_resume(); @@ -283,7 +339,7 @@ impl sched_context { to.tcbSchedContext = self.get_ptr() } pub fn schedContext_bindNtfn(&mut self, ntfn: &mut notification_t) { - ntfn.set_ntfnSchedContext(self as *mut _ as u64); + ntfn.set_ntfnSchedContext(self.get_ptr() as u64); self.scNotification = ntfn as *mut _ as usize; } pub fn schedContext_unbindNtfn(&mut self) { @@ -314,7 +370,7 @@ impl sched_context { pub fn refill_budget_check(_usage: ticks_t) { unsafe { let mut usage = _usage; - let sc = convert_to_mut_type_ref::(ksCurSC); + let sc = get_current_sc(); assert!(!sc.is_round_robin()); while (*sc.refill_head()).rAmount <= usage && (*sc.refill_head()).rTime < MAX_RELEASE_TIME() diff --git a/sel4_task/src/scheduler.rs b/sel4_task/src/scheduler.rs index e4cbbbb..442b1ce 100644 --- a/sel4_task/src/scheduler.rs +++ b/sel4_task/src/scheduler.rs @@ -89,6 +89,8 @@ pub const SchedulerAction_ResumeCurrentThread: usize = 
0; pub const SchedulerAction_ChooseNewThread: usize = 1; pub const ksDomScheduleLength: usize = 1; +pub const seL4_SchedContext_NoFlag: usize = 0; +pub const seL4_SchedContext_Sporadic: usize = 1; #[no_mangle] pub static mut ksDomainTime: usize = 0; @@ -270,6 +272,21 @@ pub fn get_current_domain() -> usize { unsafe { ksCurDomain } } +#[inline] +#[cfg(feature = "KERNEL_MCS")] +pub fn get_current_sc() -> &'static mut sched_context_t { + unsafe { + #[cfg(feature = "ENABLE_SMP")] + { + //TODO: SMP + } + #[cfg(not(feature = "ENABLE_SMP"))] + { + convert_to_mut_type_ref_unsafe::(ksCurSC) + } + } +} + #[inline] /// Get the index of the ready queue for the given domain and priority level. pub fn ready_queues_index(dom: usize, prio: usize) -> usize { @@ -434,6 +451,18 @@ fn chooseThread() { } }; assert_ne!(thread, 0); + assert!(convert_to_mut_type_ref::(thread).is_schedulable()); + #[cfg(feature = "KERNEL_MCS")] + { + assert!(convert_to_mut_type_ref::( + convert_to_mut_type_ref::(thread).tcbSchedContext + ) + .refill_sufficient(0)); + assert!(convert_to_mut_type_ref::( + convert_to_mut_type_ref::(thread).tcbSchedContext + ) + .refill_ready()); + } convert_to_mut_type_ref::(thread).switch_to_this(); } else { #[cfg(target_arch = "aarch64")] @@ -549,7 +578,7 @@ pub fn checkDomainTime() { #[cfg(feature = "KERNEL_MCS")] pub fn checkBudget() -> bool { unsafe { - let current_sched_context = convert_to_mut_type_ref::(ksCurSC); + let current_sched_context = get_current_sc(); assert!(current_sched_context.refill_ready()); if likely(current_sched_context.refill_sufficient(ksConsumed)) { if unlikely(isCurDomainExpired()) { @@ -562,6 +591,28 @@ pub fn checkBudget() -> bool { false } #[cfg(feature = "KERNEL_MCS")] +pub fn checkBudgetRestart() -> bool { + assert!(get_currenct_thread().is_runnable()); + let result = checkBudget(); + if !result && get_currenct_thread().is_runnable() { + set_thread_state(get_currenct_thread(), ThreadState::ThreadStateRestart); + } + result +} +#[inline] +pub 
fn mcs_preemption_point() { + #[cfg(feature = "KERNEL_MCS")] + unsafe { + if get_currenct_thread().is_schedulable() { + checkBudget(); + } else if get_current_sc().scRefillMax != 0 { + chargeBudget(ksConsumed, false); + } else { + ksConsumed = 0; + } + } +} +#[cfg(feature = "KERNEL_MCS")] pub fn setNextInterrupt() { use sel4_common::{ arch::getTimerPrecision, @@ -598,7 +649,7 @@ pub fn chargeBudget(consumed: ticks_t, canTimeoutFault: bool) { unsafe { if likely(ksCurSC != ksIdleSC) { - let current_sched_context = convert_to_mut_type_ref::(ksCurSC); + let current_sched_context = get_current_sc(); if current_sched_context.is_round_robin() { assert!(current_sched_context.refill_size() == MIN_REFILLS); (*current_sched_context.refill_head()).rAmount += @@ -624,13 +675,13 @@ pub fn chargeBudget(consumed: ticks_t, canTimeoutFault: bool) { #[cfg(feature = "KERNEL_MCS")] pub fn commitTime() { unsafe { - let current_sched_context = convert_to_mut_type_ref::(ksCurSC); + let current_sched_context = get_current_sc(); if likely(current_sched_context.scRefillMax != 0 && ksCurSC != ksIdleSC) { if likely(ksConsumed > 0) { assert!(current_sched_context.refill_sufficient(ksConsumed)); assert!(current_sched_context.refill_ready()); - if (current_sched_context.is_round_robin()) { + if current_sched_context.is_round_robin() { assert!(current_sched_context.refill_size() == MIN_REFILLS); (*current_sched_context.refill_head()).rAmount -= ksConsumed; (*current_sched_context.refill_tail()).rAmount += ksConsumed; @@ -838,6 +889,7 @@ pub fn activateThread() { ThreadState::ThreadStateRestart => { let pc = thread.tcbArch.get_register(ArchReg::FaultIP); // setNextPC(thread, pc); + // sel4_common::println!("restart pc is {:x}",pc); thread.tcbArch.set_register(ArchReg::NextIP, pc); // setThreadState(thread, ThreadStateRunning); set_thread_state(thread, ThreadState::ThreadStateRunning); diff --git a/sel4_task/src/tcb.rs b/sel4_task/src/tcb.rs index 8158e35..de5af8e 100644 --- a/sel4_task/src/tcb.rs 
+++ b/sel4_task/src/tcb.rs @@ -1,4 +1,8 @@ #[cfg(feature = "KERNEL_MCS")] +use crate::ksCurSC; +use crate::prio_t; +use crate::tcb_queue::tcb_queue_t; +#[cfg(feature = "KERNEL_MCS")] use crate::{ksReleaseHead, sched_context::sched_context_t}; use core::intrinsics::{likely, unlikely}; use sel4_common::arch::{ @@ -6,7 +10,9 @@ use sel4_common::arch::{ }; use sel4_common::fault::*; use sel4_common::message_info::seL4_MessageInfo_func; +use sel4_common::sel4_config::*; use sel4_common::shared_types_bf_gen::seL4_MessageInfo; +use sel4_common::structures::{exception_t, seL4_IPCBuffer}; use sel4_common::structures_gen::{ cap, cap_reply_cap, cap_tag, lookup_fault, lookup_fault_Splayed, mdb_node, seL4_Fault, seL4_Fault_CapFault, seL4_Fault_tag, thread_state, @@ -25,11 +31,6 @@ use sel4_vspace::{ }; use sel4_vspace::{pptr_t, set_vm_root}; -use crate::prio_t; -use crate::tcb_queue::tcb_queue_t; -use sel4_common::sel4_config::*; -use sel4_common::structures::{exception_t, seL4_IPCBuffer}; - use super::scheduler::{ addToBitmap, get_currenct_thread, possible_switch_to, ready_queues_index, removeFromBitmap, rescheduleRequired, schedule_tcb, set_current_thread, @@ -137,6 +138,16 @@ impl tcb_t { } } #[inline] + pub fn is_blocked(&self) -> bool { + match self.get_state() { + ThreadState::ThreadStateBlockedOnReceive + | ThreadState::ThreadStateBlockedOnSend + | ThreadState::ThreadStateBlockedOnNotification + | ThreadState::ThreadStateBlockedOnReply => true, + _ => false, + } + } + #[inline] #[cfg(not(feature = "KERNEL_MCS"))] pub fn is_schedulable(&self) -> bool { self.is_runnable() @@ -529,10 +540,19 @@ impl tcb_t { if self.is_stopped() { #[cfg(feature = "KERNEL_MCS")] { - // TODO: MCS - // #ifdef CONFIG_KERNEL_MCS - // reply_remove_tcb(tptr); - // #else + // MCS + set_thread_state(self, ThreadState::ThreadStateRestart); + if convert_to_mut_type_ref::(self.tcbSchedContext).sc_sporadic() + && self.tcbSchedContext != unsafe { ksCurSC } + { + 
convert_to_mut_type_ref::(self.tcbSchedContext) + .refill_unblock_check(); + } + convert_to_mut_type_ref::(self.tcbSchedContext) + .schedContext_resume(); + if self.is_schedulable() { + possible_switch_to(self); + } } #[cfg(not(feature = "KERNEL_MCS"))] { @@ -936,12 +956,72 @@ impl tcb_t { #[inline] #[cfg(feature = "KERNEL_MCS")] pub fn Release_Remove(&mut self) { - unimplemented!("MCS"); + use crate::ksReprogram; + + if likely(self.tcbState.get_tcbInReleaseQueue() != 0) { + if self.tcbSchedPrev != 0 { + convert_to_mut_type_ref::(self.tcbSchedPrev).tcbSchedNext = + self.tcbSchedNext; + } else { + unsafe { + ksReleaseHead = self.tcbSchedNext; + ksReprogram = true; + } + } + + if self.tcbSchedNext != 0 { + convert_to_mut_type_ref::(self.tcbSchedNext).tcbSchedPrev = + self.tcbSchedPrev; + } + + self.tcbSchedNext = 0; + self.tcbSchedPrev = 0; + self.tcbState.set_tcbInReleaseQueue(0); + } } #[inline] #[cfg(feature = "KERNEL_MCS")] pub fn Release_Enqueue(&mut self) { - unimplemented!("MCS") + use crate::ksReprogram; + + assert!(self.tcbState.get_tcbInReleaseQueue() == 0); + assert!(self.tcbState.get_tcbQueued() == 0); + + unsafe { + let mut before_ptr: usize = 0; + let mut after_ptr: usize = ksReleaseHead; + + /* find our place in the ordered queue */ + while after_ptr != 0 + && (*convert_to_mut_type_ref::(self.tcbSchedContext).refill_head()) + .rTime + >= (*convert_to_mut_type_ref::( + convert_to_mut_type_ref::(after_ptr).tcbSchedContext, + ) + .refill_head()) + .rTime + { + before_ptr = after_ptr; + after_ptr = convert_to_mut_type_ref::(after_ptr).tcbSchedNext; + } + + if before_ptr == 0 { + /* insert at head */ + ksReleaseHead = self.get_ptr(); + ksReprogram = true; + } else { + convert_to_mut_type_ref::(before_ptr).tcbSchedNext = self.get_ptr(); + } + + if after_ptr != 0 { + convert_to_mut_type_ref::(after_ptr).tcbSchedPrev = self.get_ptr(); + } + + self.tcbSchedNext = after_ptr; + self.tcbSchedPrev = before_ptr; + } + + self.tcbState.set_tcbInReleaseQueue(1); } 
#[inline] #[cfg(feature = "KERNEL_MCS")] @@ -985,25 +1065,6 @@ pub fn tcb_Release_Dequeue() -> *mut tcb_t { assert!(ksReleaseHead != 0); assert!(convert_to_mut_type_ref::(ksReleaseHead).tcbSchedPrev != 0); - // tcb_t *detached_head = NODE_STATE(ksReleaseHead); - // NODE_STATE(ksReleaseHead) = NODE_STATE(ksReleaseHead)->tcbSchedNext; - - // if (NODE_STATE(ksReleaseHead)) - // { - // NODE_STATE(ksReleaseHead)->tcbSchedPrev = NULL; - // } - - // if (detached_head->tcbSchedNext) - // { - // detached_head->tcbSchedNext->tcbSchedPrev = NULL; - // detached_head->tcbSchedNext = NULL; - // } - - // thread_state_ptr_set_tcbInReleaseQueue(&detached_head->tcbState, false); - // NODE_STATE(ksReprogram) = true; - - // return detached_head; - let detached_head = ksReleaseHead as *mut tcb_t; ksReleaseHead = (*detached_head).tcbSchedNext; @@ -1021,3 +1082,32 @@ pub fn tcb_Release_Dequeue() -> *mut tcb_t { return detached_head; } } +#[cfg(feature = "KERNEL_MCS")] +pub fn reply_remove_tcb(tcb: &mut tcb_t) { + // TODO: MCS + + use sel4_common::structures_gen::call_stack; + + use crate::reply::reply_t; + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + let reply = convert_to_mut_type_ref::(tcb.tcbState.get_replyObject() as usize); + + let next_ptr = reply.replyNext.get_callStackPtr() as usize; + let prev_ptr = reply.replyPrev.get_callStackPtr() as usize; + + if next_ptr != 0 { + if reply.replyNext.get_isHead() != 0 { + convert_to_mut_type_ref::(next_ptr).scReply = 0; + } else { + convert_to_mut_type_ref::(next_ptr).replyPrev = call_stack::new(0, 0); + } + } + + if prev_ptr != 0 { + convert_to_mut_type_ref::(prev_ptr).replyNext = call_stack::new(0, 0); + } + + reply.replyPrev = call_stack::new(0, 0); + reply.replyNext = call_stack::new(0, 0); + reply.unlink(tcb); +}