diff --git a/homework/src/boc.rs b/homework/src/boc.rs
index a6677f72cbb..3e5ddc8ba07 100644
--- a/homework/src/boc.rs
+++ b/homework/src/boc.rs
@@ -241,7 +241,8 @@ pub unsafe trait CownPtrs {
 }
 
 unsafe impl CownPtrs for () {
-    type CownRefs<'l> = ()
+    type CownRefs<'l>
+        = ()
     where
         Self: 'l;
 
@@ -253,7 +254,8 @@ unsafe impl CownPtrs for () {
 }
 
 unsafe impl<T: Send + 'static, Ts: CownPtrs> CownPtrs for (CownPtr<T>, Ts) {
-    type CownRefs<'l> = (&'l mut T, Ts::CownRefs<'l>)
+    type CownRefs<'l>
+        = (&'l mut T, Ts::CownRefs<'l>)
     where
         Self: 'l;
 
@@ -270,7 +272,8 @@ unsafe impl<T: Send + 'static, Ts: CownPtrs> CownPtrs for (CownPtr<T>, Ts) {
 }
 
 unsafe impl<T: Send + 'static> CownPtrs for Vec<CownPtr<T>> {
-    type CownRefs<'l> = Vec<&'l mut T>
+    type CownRefs<'l>
+        = Vec<&'l mut T>
     where
         Self: 'l;
 
diff --git a/homework/src/elim_stack/base.rs b/homework/src/elim_stack/base.rs
index c190ed6ad88..7bd87b60a34 100644
--- a/homework/src/elim_stack/base.rs
+++ b/homework/src/elim_stack/base.rs
@@ -68,7 +68,6 @@ pub struct ElimStack<T, S: Stack<T>> {
     // - 2: pop request
     // - 3: request acknowledged
     pub(crate) slots: [Atomic<S::PushReq>; ELIM_SIZE],
-    _marker: PhantomData<T>,
 }
 
 impl<T, S: Stack<T>> Default for ElimStack<T, S> {
@@ -76,7 +75,6 @@ impl<T, S: Stack<T>> Default for ElimStack<T, S> {
     fn default() -> Self {
         Self {
             inner: Default::default(),
             slots: Default::default(),
-            _marker: PhantomData,
         }
     }
 }
diff --git a/homework/src/hazard_pointer/hazard.rs b/homework/src/hazard_pointer/hazard.rs
index 10898fc3518..6678043bd60 100644
--- a/homework/src/hazard_pointer/hazard.rs
+++ b/homework/src/hazard_pointer/hazard.rs
@@ -45,10 +45,7 @@ impl<T> Shield<T> {
     /// means that this shield is validated.
     pub fn try_protect(&self, pointer: *mut T, src: &AtomicPtr<T>) -> Result<(), *mut T> {
         self.set(pointer);
-        Self::validate(pointer, src).map_err(|new| {
-            self.clear();
-            new
-        })
+        Self::validate(pointer, src).inspect_err(|_| self.clear())
     }
 
     /// Get a protected pointer from `src`.
diff --git a/homework/src/list_set/optimistic_fine_grained.rs b/homework/src/list_set/optimistic_fine_grained.rs
index 9c853814277..0696ad93d9d 100644
--- a/homework/src/list_set/optimistic_fine_grained.rs
+++ b/homework/src/list_set/optimistic_fine_grained.rs
@@ -2,10 +2,11 @@ use std::cmp::Ordering::*;
 use std::mem::{self, ManuallyDrop};
 use std::sync::atomic::Ordering;
 
-use crate::ConcurrentSet;
 use crossbeam_epoch::{pin, Atomic, Guard, Owned, Shared};
 use cs431::lock::seqlock::{ReadGuard, SeqLock};
 
+use crate::ConcurrentSet;
+
 #[derive(Debug)]
 struct Node<T> {
     data: T,
@@ -92,7 +93,7 @@ pub struct Iter<'g, T> {
 impl<T> OptimisticFineGrainedListSet<T> {
     /// An iterator visiting all elements. `next()` returns `Some(Err(()))` when validation fails.
     /// In that case, the user must restart the iteration.
-    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'_, T> {
+    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T> {
         Iter {
             cursor: ManuallyDrop::new(self.head(guard)),
             guard,
diff --git a/homework/tests/growable_array.rs b/homework/tests/growable_array.rs
index 7518f6af0e9..e221ff1fc88 100644
--- a/homework/tests/growable_array.rs
+++ b/homework/tests/growable_array.rs
@@ -63,7 +63,7 @@ impl<V> ConcurrentMap<usize, V> for ArrayMap<V> {
             return Err(());
         }
         match slot.compare_exchange(curr, Shared::null(), AcqRel, Acquire, guard) {
-            Ok(_) => Ok(unsafe { curr.deref() }.deref()),
+            Ok(_) => Ok(unsafe { curr.deref() }),
             Err(_) => Err(()), // already removed
         }
     }
@@ -132,8 +132,11 @@ mod stack {
 
         pub(super) unsafe fn push_node<'g>(&self, n: Shared<'g, Node<T>>, guard: &'g Guard) {
             let mut head = self.head.load(Relaxed, guard);
             loop {
+                // SAFETY: As `n` is pushed only once, and after the push `n` is not used again,
+                // we are the unique accessor of `n.next`. Hence a non-atomic write is safe.
                 unsafe { *n.deref().next.get() = head.as_raw() };
+                // TODO: Is `Relaxed` fine here? Might need `Release` so that it syncs with `drop`?
                 match self.head.compare_exchange(head, n, Relaxed, Relaxed, guard) {
                     Ok(_) => break,
                     Err(e) => head = e.current,
diff --git a/src/lock/api.rs b/src/lock/api.rs
index 65827bc8858..484d5304a69 100644
--- a/src/lock/api.rs
+++ b/src/lock/api.rs
@@ -12,8 +12,9 @@ use core::ops::{Deref, DerefMut};
 pub unsafe trait RawLock: Default + Send + Sync {
     /// Raw lock's token type.
     ///
-    /// Send + Sync is needed to make LockGuard Send + Sync.
-    type Token: Send + Sync;
+    /// We don't enforce Send + Sync, as some locks may not satisfy it. Necessary bounds will be
+    /// auto-derived.
+    type Token;
 
     /// Acquires the raw lock.
     fn lock(&self) -> Self::Token;
@@ -49,6 +50,8 @@ pub struct Lock<L: RawLock, T> {
 }
 
 // Send is automatically implemented for Lock.
+
+// SAFETY: Threads can only access `&mut T` via the lock, and `L` is `Sync`.
 unsafe impl<L: RawLock, T: Send> Sync for Lock<L, T> {}
 
 impl<L: RawLock, T> Default for Lock<L, T>
@@ -99,7 +102,7 @@ impl<L: RawLock, T> Lock<L, T> {
 }
 
 /// A guard that holds the lock and dereferences the inner value.
-// Send/Sync are automatically implemented.
+// `Send` and `Sync` are automatically derived.
 #[derive(Debug)]
 pub struct LockGuard<'s, L: RawLock, T> {
     lock: &'s Lock<L, T>,
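
Reviewer note on the `elim_stack` change: dropping `_marker: PhantomData<T>` is sound because `T` still occurs in the type of `slots` through the `S::PushReq` projection, which elaborates to `<S as Stack<T>>::PushReq`, so rustc's E0392 ("parameter is never used") does not fire. Below is a minimal compile-checkable sketch of that rule; the `Stack`/`ElimStack` definitions, the associated type, and the array length are toy stand-ins for illustration, not the homework's actual code.

```rust
// Toy trait standing in for the homework's `Stack<T>`.
#[allow(dead_code)]
trait Stack<T> {
    type PushReq;
}

#[allow(dead_code)]
struct ElimStack<T, S: Stack<T>> {
    inner: S,
    // `T` is used here: `S::PushReq` resolves to `<S as Stack<T>>::PushReq`
    // via the `S: Stack<T>` bound, so no `PhantomData<T>` field is needed.
    slots: [Option<S::PushReq>; 16],
}

fn main() {} // Compile-time check only; nothing to run.
```

If `slots` did not mention `S::PushReq` (say its element type were plain `usize`), `T` would be genuinely unused in the fields and the `PhantomData<T>` marker would have to stay.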