```rust
fn bernoulli_sample() -> bool {
    use rand::distributions::{Bernoulli, Distribution};
    let d = Bernoulli::new(0.5).unwrap();
    let v = d.sample(&mut rand::thread_rng());
    v
}
```
lifetime limits
Improperly reduced borrows: the borrow checker sometimes forces a borrow to live longer than it actually needs to, rejecting programs that are sound (see the sketch below).
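A sketch of the classic case (the `get_default` pattern from the Nomicon's "Limits of Lifetimes" chapter): the borrow taken in the `Some` arm may be returned, so it is extended over the whole function body, and the `None` arm can no longer touch the map even though doing so would be sound.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Sound, but rejected by the current borrow checker: the `&'m mut` borrow
// from `get_mut` is forced to last for the whole body because it may be
// returned, so `map` is still considered borrowed in the `None` arm.
fn get_default<'m, K, V>(map: &'m mut HashMap<K, V>, key: K) -> &'m mut V
where
    K: Clone + Eq + Hash,
    V: Default,
{
    match map.get_mut(&key) {
        Some(value) => value,
        None => {
            map.insert(key.clone(), V::default()); // error: `*map` is still mutably borrowed
            map.get_mut(&key).unwrap()
        }
    }
}
```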
Elision rules:
- Each elided lifetime in input position becomes a distinct lifetime parameter.
- If there is exactly one input lifetime position (elided or not), that lifetime is assigned to all elided output lifetimes.
- If there are multiple input lifetime positions, but one of them is `&self` or `&mut self`, the lifetime of `self` is assigned to all elided output lifetimes.
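Signature-only sketches of how the rules expand (following the Nomicon's elision chapter):

```rust
fn print(s: &str);                                   // elided
fn print<'a>(s: &'a str);                            // expanded (rule 1)

fn substr(s: &str, until: usize) -> &str;            // elided
fn substr<'a>(s: &'a str, until: usize) -> &'a str;  // expanded (rule 2)

fn get_mut(&mut self) -> &mut T;                     // elided
fn get_mut<'a>(&'a mut self) -> &'a mut T;           // expanded (rule 3)

fn frob(s: &str, t: &str) -> &str;                   // ILLEGAL: two inputs, no self
```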
```rust
use std::cell::RefCell;

thread_local! {
    pub static STATIC_VECS: RefCell<Vec<&'static str>> = RefCell::new(Vec::new());
}

/// saves the input given into a thread local `Vec<&'static str>`
fn store(input: &'static str) {
    STATIC_VECS.with_borrow_mut(|v| v.push(input));
}

// ...
STATIC_VECS.with_borrow(|v| println!("{v:?}"));
```
```rust
fn debug<'a>(a: &'a str, b: &'a str) {
    // immutable -> covariant arg to param (can pass a longer-lived reference)
    println!("a = {a:?} b = {b:?}");
}

fn assign<T>(input: &mut T, val: T) {
    // mutable -> invariant arg to param (must be exactly the same type)
    *input = val;
}
```
|                 | `'a`      | `T`           | `U`       |
|-----------------|-----------|---------------|-----------|
| `&'a T`         | covariant | covariant     |           |
| `&'a mut T`     | covariant | invariant     |           |
| `Box<T>`        |           | covariant     |           |
| `Vec<T>`        |           | covariant     |           |
| `UnsafeCell<T>` |           | invariant     |           |
| `Cell<T>`       |           | invariant     |           |
| `fn(T) -> U`    |           | contravariant | covariant |
| `*const T`      |           | covariant     |           |
| `*mut T`        |           | invariant     |           |
A struct "inherits" the variance of its fields
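A small sketch (types are illustrative) of what "inheriting" variance means in practice:

```rust
struct Shared<'a, T> {
    reference: &'a T, // covariant in 'a and in T, like &'a T itself
}

struct Exclusive<'a, T> {
    reference: &'a mut T, // covariant in 'a, but invariant in T (like &'a mut T)
}

// Covariance in 'a lets a longer-lived Shared be used where a shorter-lived
// one is expected; the analogous shrink of Exclusive's T would be rejected.
fn shrink<'long: 'short, 'short>(s: Shared<'long, u32>) -> Shared<'short, u32> {
    s
}
```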
Drop Check
```rust
#![feature(dropck_eyepatch)]
#![allow(unused_attributes)]

struct Inspector<'a>(&'a u8, &'static str);

// NIGHTLY ONLY: #[may_dangle] - escape hatch from the drop checker
unsafe impl<#[may_dangle] 'a> Drop for Inspector<'a> {
    fn drop(&mut self) {
        println!("Inspector(_, {}) knows when *not* to inspect.", self.1);
    }
}
```
Sound generic drop is enforced by the drop checker
For a generic type to soundly implement drop, its generic arguments must strictly outlive it.
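The classic failing case (from the Nomicon's drop-check chapter): without `#[may_dangle]` as above, the compiler must assume `drop` can read the borrowed data, so the borrowed value has to strictly outlive the `Inspector`:

```rust
struct Inspector<'a>(&'a u8);

impl<'a> Drop for Inspector<'a> {
    fn drop(&mut self) {
        println!("I was only {} days from retirement!", self.0);
    }
}

fn main() {
    let (inspector, days);
    days = Box::new(1);
    inspector = Inspector(&days);
    // rejected: `days` does not live strictly longer than `inspector`,
    // and Inspector's Drop impl might observe the dangling `&days`.
}
```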
phantom data
marker type for bounding lifetimes for the purpose of static analysis
```rust
use std::marker;

struct Iter<'a, T: 'a> {
    ptr: *const T,
    end: *const T,
    _marker: marker::PhantomData<&'a T>,
}
```
Generic parameters and drop-checking
| Phantom type                | variance of `'a` | variance of `T` | `Send`/`Sync` (or lack thereof)  | dangling `'a` or `T` in drop glue (e.g., `#[may_dangle] Drop`) |
|-----------------------------|------------------|-----------------|----------------------------------|--------------------------|
| `PhantomData<T>`            | -                | covariant       | inherited                        | disallowed ("owns `T`")  |
| `PhantomData<&'a T>`        | covariant        | covariant       | `Send + Sync` requires `T: Sync` | allowed                  |
| `PhantomData<&'a mut T>`    | covariant        | invariant       | inherited                        | allowed                  |
| `PhantomData<*const T>`     | -                | covariant       | `!Send + !Sync`                  | allowed                  |
| `PhantomData<*mut T>`       | -                | invariant       | `!Send + !Sync`                  | allowed                  |
| `PhantomData<fn(T)>`        | -                | contravariant   | `Send + Sync`                    | allowed                  |
| `PhantomData<fn() -> T>`    | -                | covariant       | `Send + Sync`                    | allowed                  |
| `PhantomData<fn(T) -> T>`   | -                | invariant       | `Send + Sync`                    | allowed                  |
| `PhantomData<Cell<&'a ()>>` | invariant        | -               | `Send + !Sync`                   | allowed                  |
`Unique<T>`:
- wraps a `*const T` for variance
- includes a `PhantomData<T>`
- auto-derives `Send`/`Sync` as if `T` was contained
- marks the pointer as `NonZero` for the null-pointer optimization
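A minimal sketch of such a wrapper built from `NonNull` plus `PhantomData` (illustrative, not the standard library's actual `Unique`):

```rust
use std::marker::PhantomData;
use std::ptr::NonNull;

pub struct Unique<T> {
    ptr: NonNull<T>,         // wraps the pointer covariantly; non-null gives the niche optimization
    _marker: PhantomData<T>, // tells the drop checker we may own a T
}

// Send/Sync as if a T were contained.
unsafe impl<T: Send> Send for Unique<T> {}
unsafe impl<T: Sync> Sync for Unique<T> {}

impl<T> Unique<T> {
    pub fn new(ptr: *mut T) -> Option<Self> {
        NonNull::new(ptr).map(|ptr| Unique { ptr, _marker: PhantomData })
    }

    pub fn as_ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }
}
```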
Splitting Borrows
mutable slices expose a split_at_mut function that consumes the slice and returns two mutable slices.
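Usage sketch: the two halves can be mutated independently because they are guaranteed to be disjoint.

```rust
let mut v = [1, 2, 3, 4, 5];
let (left, right) = v.split_at_mut(2); // left = [1, 2], right = [3, 4, 5]
left[0] = 10;
right[0] = 30;
assert_eq!(v, [10, 2, 30, 4, 5]);
```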
```rust
use std::mem::{self, MaybeUninit};
use std::ptr;

fn main() {
    check_unitialized_memory();
    drop_flags();
    unchecked_uninitialized_memory();
}
```
MaybeUninit
- `ptr::write(ptr, val)` - takes a `val` and moves it into the address pointed to by `ptr`.
- `ptr::copy(src, dest, count)` - copies the bits that `count` `T` items would occupy from `src` to `dest` (equivalent to C's `memmove`).
- `ptr::copy_nonoverlapping(src, dest, count)` - does what `copy` does, but a little faster on the assumption that the two ranges of memory don't overlap (equivalent to C's `memcpy`).
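A small sketch of the first and last of these, using a `MaybeUninit` slot so nothing is dropped twice:

```rust
use std::mem::MaybeUninit;
use std::ptr;

fn main() {
    // ptr::write: move a value into raw memory without dropping whatever
    // (possibly garbage) bits were there before.
    let mut slot = MaybeUninit::<String>::uninit();
    unsafe { ptr::write(slot.as_mut_ptr(), String::from("hello")) };
    let s = unsafe { slot.assume_init() };
    assert_eq!(s, "hello");

    // ptr::copy_nonoverlapping: bitwise copy of `count` items between
    // buffers known not to overlap (like memcpy).
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];
    unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
    assert_eq!(dst, src);
}
```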
```rust
let x = {
    let mut x: [MaybeUninit<Box<u32>>; SIZE] = unsafe { MaybeUninit::uninit().assume_init() };
    for i in 0..SIZE {
        x[i] = MaybeUninit::new(Box::new(i as u32));
    }
    unsafe { mem::transmute::<_, [Box<u32>; SIZE]>(x) }
};
dbg!(x);

let mut uninit = MaybeUninit::<Demo>::uninit();
let f1_ptr = unsafe { ptr::addr_of_mut!((*uninit.as_mut_ptr()).field) };
unsafe { f1_ptr.write(true); }
let _init = unsafe { uninit.assume_init() };
```
```rust
use std::mem::{align_of, size_of};
use std::ops::{Deref, DerefMut};
use std::ptr;

pub struct Carton<T>(ptr::NonNull<T>);

impl<T> Carton<T> {
    pub fn new(value: T) -> Self {
        assert_ne!(size_of::<T>(), 0, "Zero-sized types are out of the scope of this example");
        let mut memptr: *mut T = ptr::null_mut();
        unsafe {
            let ret = libc::posix_memalign(
                (&mut memptr as *mut *mut T).cast(),
                align_of::<T>(),
                size_of::<T>(),
            );
            assert_eq!(ret, 0, "Failed to allocate or invalid alignment");
        };
        let ptr = {
            ptr::NonNull::new(memptr).expect("Guaranteed non-null if posix_memalign returns 0")
        };
        unsafe {
            ptr.as_ptr().write(value);
        }
        Self(ptr)
    }
}

impl<T> Deref for Carton<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        unsafe { self.0.as_ref() }
    }
}

impl<T> DerefMut for Carton<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { self.0.as_mut() }
    }
}

impl<T> Drop for Carton<T> {
    fn drop(&mut self) {
        unsafe {
            libc::free(self.0.as_ptr().cast());
        }
    }
}

unsafe impl<T> Send for Carton<T> where T: Send {}
unsafe impl<T> Sync for Carton<T> where T: Sync {}
```
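Usage sketch for the `Carton` above (the allocation is released via `libc::free` when it goes out of scope):

```rust
let mut carton = Carton::new(42_u32);
assert_eq!(*carton, 42); // Deref
*carton += 1;            // DerefMut
assert_eq!(*carton, 43);
// freed here when `carton` is dropped
```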
atomic spin_locking
```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
// use std::thread;

let lock = Arc::new(AtomicBool::new(false)); // value answers "am I locked?"

// ... distribute lock to threads somehow ...

// Try to acquire the lock by setting it to true
while lock
    .compare_exchange(false, true, Ordering::Acquire, Ordering::Acquire)
    .is_err()
{}
// broke out of the loop, so we successfully acquired the lock!

// ... scary data accesses ...

// ok we're done, release the lock
lock.store(false, Ordering::Release);
```
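A self-contained sketch of the same spin lock shared across threads (the thread count and the print in the critical section are illustrative):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn main() {
    let lock = Arc::new(AtomicBool::new(false));
    let handles: Vec<_> = (0..4)
        .map(|id| {
            let lock = Arc::clone(&lock);
            thread::spawn(move || {
                // spin until we win the false -> true swap
                while lock
                    .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                    .is_err()
                {
                    std::hint::spin_loop();
                }
                println!("thread {id} is in the critical section");
                lock.store(false, Ordering::Release); // unlock
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
}
```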
```rust
let new_ptr = if self.cap == 0 {
    unsafe { alloc::alloc(new_layout) }
} else {
    let old_layout = Layout::array::<T>(self.cap).unwrap();
    let old_ptr = self.ptr.as_ptr() as *mut u8;
    unsafe { alloc::realloc(old_ptr, old_layout, new_layout.size()) }
};

// If allocation fails, `new_ptr` will be null, in which case we abort.
self.ptr = match NonNull::new(new_ptr as *mut T) {
    Some(p) => p,
    None => alloc::handle_alloc_error(new_layout),
};
```
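For context, a sketch of how `new_layout` and the capacity update typically surround this snippet (following the Nomicon's Vec walkthrough; `self.cap` and `self.ptr` are the fields assumed above):

```rust
// inside fn grow(&mut self), before the snippet above:
let (new_cap, new_layout) = if self.cap == 0 {
    (1, Layout::array::<T>(1).unwrap())
} else {
    let new_cap = 2 * self.cap;
    (new_cap, Layout::array::<T>(new_cap).unwrap())
};

// Ensure the new allocation doesn't exceed isize::MAX bytes.
assert!(new_layout.size() <= isize::MAX as usize, "Allocation too large");

// ... allocate or reallocate as in the snippet above ...

// after the snippet above:
self.cap = new_cap;
```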