
Skip areas with flags unchanged
tkf2019 committed Aug 4, 2024
1 parent b84d859 commit 52c31d7
Showing 4 changed files with 129 additions and 70 deletions.
18 changes: 17 additions & 1 deletion README.md
@@ -6,9 +6,10 @@

Data structures and operations for managing memory mappings.

It is useful to implement [`mmap`][1] and [`munmap`][1].
It is useful to implement [`mmap`][1], [`munmap`][1] and [`mprotect`][2].

[1]: https://man7.org/linux/man-pages/man2/mmap.2.html
[2]: https://man7.org/linux/man-pages/man2/mprotect.2.html

## Examples

@@ -65,5 +66,20 @@ impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
}
true
}

fn protect(
&self,
start: VirtAddr,
size: usize,
old_flags: MockFlags,
new_flags: MockFlags,
pt: &mut MockPageTable,
) -> Option<MockFlags> {
let flags = (new_flags & 0x7) | (old_flags & !0x7);
for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
*entry = flags;
}
Some(flags)
}
}
```
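
A minimal usage sketch of the new `protect` path (assuming the `MemorySet` setup and the `MAX_ADDR`/`MockFlags`/`MockPageTable` definitions from the rest of the README example, which this hunk elides):

```rust
let mut set = MemorySet::<MockFlags, MockPageTable, MockBackend>::new();
let mut pt = [0; MAX_ADDR];

// Map [0x1000, 0x5000) with full permissions (rwx = 0x7).
set.map(
    MemoryArea::new(0x1000.into(), 0x4000, 0x7, MockBackend),
    &mut pt,
    false,
)
.unwrap();

// Restrict the middle [0x2000, 0x4000) to read-only, as `mprotect` would.
// The original area is split into left, protected middle, and right parts.
set.protect(0x2000.into(), 0x2000, 0x1, &mut pt).unwrap();
assert_eq!(set.len(), 3);
```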
31 changes: 14 additions & 17 deletions src/area.rs
@@ -12,12 +12,15 @@ use crate::{MappingError, MappingResult};
/// mappings, the target physical address is known when it is added to the page
/// table. For lazy mappings, an empty mapping needs to be added to the page table
/// to trigger a page fault.
pub trait MappingBackend<F: Copy + PartialEq, P>: Clone {
pub trait MappingBackend<F: Copy, P>: Clone {
/// What to do when mapping a region within the area with the given flags.
fn map(&self, start: VirtAddr, size: usize, flags: F, page_table: &mut P) -> bool;
/// What to do when unmapping a memory region within the area.
fn unmap(&self, start: VirtAddr, size: usize, page_table: &mut P) -> bool;
/// What to do when changing access flags.
/// What to do when changing access flags. Both the old and the new flags
/// are passed so the backend can decide how to combine them (e.g., some
/// flags cannot be changed through this interface). Returns the flags that
/// should replace the old ones, or [`None`] if this area should be skipped
/// because no flags would change.
fn protect(
&self,
start: VirtAddr,
@@ -34,14 +37,14 @@ pub trait MappingBackend<F: Copy + PartialEq, P>: Clone {
/// The target physical memory frames are determined by [`MappingBackend`] and
/// may not be contiguous.
#[derive(Clone)]
pub struct MemoryArea<F: Copy + PartialEq, P, B: MappingBackend<F, P>> {
pub struct MemoryArea<F: Copy, P, B: MappingBackend<F, P>> {
va_range: VirtAddrRange,
flags: F,
backend: B,
_phantom: PhantomData<(F, P)>,
}

impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
/// Creates a new memory area.
pub const fn new(start: VirtAddr, size: usize, flags: F, backend: B) -> Self {
Self {
@@ -81,19 +84,19 @@ impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
pub const fn backend(&self) -> &B {
&self.backend
}
}

impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
/// Changes the flags.
pub fn set_flags(&mut self, new_flags: F) {
pub(crate) fn set_flags(&mut self, new_flags: F) {
self.flags = new_flags;
}

/// Changes the end address of the memory area.
pub fn set_end(&mut self, new_end: VirtAddr) {
pub(crate) fn set_end(&mut self, new_end: VirtAddr) {
self.va_range.end = new_end;
}
}

impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
/// Maps the whole memory area in the page table.
pub(crate) fn map_area(&self, page_table: &mut P) -> MappingResult {
self.backend
@@ -111,15 +114,9 @@ impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
}

/// Changes the flags in the page table.
pub(crate) fn protect_area(
&mut self,
old_flags: F,
new_flags: F,
page_table: &mut P,
) -> MappingResult<F> {
pub(crate) fn protect_area(&mut self, new_flags: F, page_table: &mut P) -> Option<F> {
self.backend
.protect(self.start(), self.size(), old_flags, new_flags, page_table)
.ok_or(MappingError::BadState)
.protect(self.start(), self.size(), self.flags, new_flags, page_table)
}

/// Shrinks the memory area at the left side.
@@ -178,7 +175,7 @@ impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {

impl<F, P, B: MappingBackend<F, P>> fmt::Debug for MemoryArea<F, P, B>
where
F: fmt::Debug + Copy + PartialEq,
F: fmt::Debug + Copy,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemoryArea")
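
The core change in `src/area.rs` is that `protect_area` now returns `Option<F>` instead of `MappingResult<F>`. A short sketch of the contract from the caller's side (illustrative variable names; this mirrors what `MemorySet::protect` does in `src/set.rs` below):

```rust
// Apply `new_flags` to a single area, honoring the Option-based contract.
match area.protect_area(new_flags, page_table) {
    // The backend applied (a possibly masked variant of) the new flags;
    // record what was actually set.
    Some(applied) => area.set_flags(applied),
    // No flag would change: leave the area untouched and skip any splitting.
    None => {}
}
```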
92 changes: 42 additions & 50 deletions src/set.rs
@@ -6,11 +6,11 @@ use memory_addr::{VirtAddr, VirtAddrRange};
use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};

/// A container that maintains memory mappings ([`MemoryArea`]).
pub struct MemorySet<F: Copy + PartialEq, P, B: MappingBackend<F, P>> {
pub struct MemorySet<F: Copy, P, B: MappingBackend<F, P>> {
areas: BTreeMap<VirtAddr, MemoryArea<F, P, B>>,
}

impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
/// Creates a new memory set.
pub const fn new() -> Self {
Self {
@@ -193,71 +193,63 @@ impl<F: Copy + PartialEq, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
let mut to_insert = Vec::new();
for (_, area) in self.areas.iter_mut() {
if area.start() >= end {
/*
* [ prot ]
* [ area ]
*/
// [ prot ]
// [ area ]
break;
} else if area.end() <= start {
/*
* [ prot ]
* [ area ]
*/
// [ prot ]
// [ area ]
// Do nothing
} else if area.start() >= start && area.end() <= end {
/*
* [ prot ]
* [ area ]
*/
let new_flags = area.protect_area(area.flags(), new_flags, page_table)?;
area.set_flags(new_flags);
// [ prot ]
// [ area ]
if let Some(new_flags) = area.protect_area(new_flags, page_table) {
area.set_flags(new_flags);
}
} else if area.start() < start && area.end() > end {
/*
* [ prot ]
* [ left | area | right ]
*/
let right_part = area.split(end).unwrap();
area.set_end(start);

// [ prot ]
// [ left | area | right ]
let mut middle_part =
MemoryArea::new(start, size, area.flags(), area.backend().clone());
let new_flags = middle_part.protect_area(area.flags(), new_flags, page_table)?;
middle_part.set_flags(new_flags);

to_insert.push((right_part.start(), right_part));
to_insert.push((middle_part.start(), middle_part));
if let Some(new_flags) = middle_part.protect_area(new_flags, page_table) {
middle_part.set_flags(new_flags);
let right_part = area.split(end).unwrap();
area.set_end(start);
to_insert.push((right_part.start(), right_part));
to_insert.push((middle_part.start(), middle_part));
}
} else if area.end() > end {
/*
* [ prot ]
* [ area | right ]
*/
let right_part = area.split(end).unwrap();

let new_flags = area.protect_area(area.flags(), new_flags, page_table)?;
area.set_flags(new_flags);

to_insert.push((right_part.start(), right_part));
// [ prot ]
// [ area | right ]
let mut left_part = MemoryArea::new(
area.start(),
end.as_usize() - area.start().as_usize(),
area.flags(),
area.backend().clone(),
);
if let Some(new_flags) = left_part.protect_area(new_flags, page_table) {
let right_part = area.split(end).unwrap();
area.set_flags(new_flags);
to_insert.push((right_part.start(), right_part));
}
} else {
/*
* [ prot ]
* [ left | area ]
*/
// [ prot ]
// [ left | area ]
let mut right_part = area.split(start).unwrap();

let new_flags = right_part.protect_area(area.flags(), new_flags, page_table)?;
right_part.set_flags(new_flags);

to_insert.push((right_part.start(), right_part));
if let Some(new_flags) = right_part.protect_area(new_flags, page_table) {
right_part.set_flags(new_flags);
to_insert.push((right_part.start(), right_part));
} else {
area.set_end(right_part.end()); // roll back the split
}
}
}
self.areas.extend(to_insert.into_iter());
Ok(())
}
}

impl<F: Copy + PartialEq + fmt::Debug, P, B: MappingBackend<F, P>> fmt::Debug
for MemorySet<F, P, B>
{
impl<F: Copy + fmt::Debug, P, B: MappingBackend<F, P>> fmt::Debug for MemorySet<F, P, B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.areas.values()).finish()
}
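
For illustration, a sketch of the skip behavior this commit introduces (assuming the Mock types from the README example and a backend that, like the test backend in `src/tests.rs` below, returns `None` when the low permission bits are unchanged):

```rust
let mut set = MemorySet::<MockFlags, MockPageTable, MockBackend>::new();
let mut pt = [0; MAX_ADDR];
set.map(MemoryArea::new(0.into(), 0x4000, 0x7, MockBackend), &mut pt, false).unwrap();

// The first protect splits the area into three:
// [0, 0x1000) rwx | [0x1000, 0x3000) r | [0x3000, 0x4000) rwx.
set.protect(0x1000.into(), 0x2000, 0x1, &mut pt).unwrap();
assert_eq!(set.len(), 3);

// Protecting again with the same low bits (0x1): the backend returns `None`,
// the middle area is skipped, and no further splitting occurs.
set.protect(0x1000.into(), 0x2000, 0x1 | 0x8, &mut pt).unwrap();
assert_eq!(set.len(), 3);
```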
58 changes: 56 additions & 2 deletions src/tests.rs
@@ -41,6 +41,9 @@ impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
new_flags: MockFlags,
pt: &mut MockPageTable,
) -> Option<MockFlags> {
if (old_flags & 0x7) == (new_flags & 0x7) {
return None;
}
let flags = (new_flags & 0x7) | (old_flags & !0x7);
for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
*entry = flags;
@@ -240,21 +243,72 @@ fn test_protect() {
assert_eq!(set.len(), 8);

// Protect [0xc00, 0x2400), [0x2c00, 0x4400), [0x4c00, 0x6400), ...
// The areas are shrunk at the left and right boundaries.
// Each affected area is split into two.
for start in (0..MAX_ADDR).step_by(0x2000) {
assert_ok!(set.protect((start + 0xc00).into(), 0x1800, 0x1, &mut pt));
}
dump_memory_set(&set);
assert_eq!(set.len(), 23);

for area in set.iter() {
let off = area.start().align_offset_4k();
if area.start().as_usize() == 0 {
assert_eq!(area.size(), 0xc00);
assert_eq!(area.flags(), 0x7);
} else {
if off == 0 {
assert_eq!(area.size(), 0x400);
assert_eq!(area.flags(), 0x1);
} else if off == 0x400 {
assert_eq!(area.size(), 0x800);
assert_eq!(area.flags(), 0x7);
} else if off == 0xc00 {
assert_eq!(area.size(), 0x400);
assert_eq!(area.flags(), 0x1);
}
}
}

// Protect [0x800, 0x900), [0x2800, 0x2900), [0x4800, 0x4900), ...
// Each affected area is split into two.
// Each affected area is split into three.
for start in (0..MAX_ADDR).step_by(0x2000) {
assert_ok!(set.protect((start + 0x800).into(), 0x100, 0x13, &mut pt));
}
dump_memory_set(&set);
assert_eq!(set.len(), 39);

for area in set.iter() {
let off = area.start().align_offset_4k();
if area.start().as_usize() == 0 {
assert_eq!(area.size(), 0x800);
assert_eq!(area.flags(), 0x7);
} else {
if off == 0 {
assert_eq!(area.size(), 0x400);
assert_eq!(area.flags(), 0x1);
} else if off == 0x400 {
assert_eq!(area.size(), 0x400);
assert_eq!(area.flags(), 0x7);
} else if off == 0x800 {
assert_eq!(area.size(), 0x100);
assert_eq!(area.flags(), 0x3);
} else if off == 0x900 {
assert_eq!(area.size(), 0x300);
assert_eq!(area.flags(), 0x7);
} else if off == 0xc00 {
assert_eq!(area.size(), 0x400);
assert_eq!(area.flags(), 0x1);
}
}
}

// Test skipping [0x880, 0x900), [0x2880, 0x2900), [0x4880, 0x4900), ...
// The low flag bits are unchanged, so the backend returns None and no areas are split.
for start in (0..MAX_ADDR).step_by(0x2000) {
assert_ok!(set.protect((start + 0x880).into(), 0x80, 0x3, &mut pt));
}
dump_memory_set(&set);
assert_eq!(set.len(), 39);

// Unmap all areas.
assert_ok!(set.unmap(0.into(), MAX_ADDR, &mut pt));
assert_eq!(set.len(), 0);
