diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..531ddd1
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,55 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+  ci:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        rust-toolchain: [nightly]
+        targets: [x86_64-unknown-linux-gnu, x86_64-unknown-none, riscv64gc-unknown-none-elf, aarch64-unknown-none-softfloat]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@nightly
+        with:
+          toolchain: ${{ matrix.rust-toolchain }}
+          components: rust-src, clippy, rustfmt
+          targets: ${{ matrix.targets }}
+      - name: Check rust version
+        run: rustc --version --verbose
+      - name: Check code format
+        run: cargo fmt --all -- --check
+      - name: Clippy
+        run: cargo clippy --target ${{ matrix.targets }} --all-features -- -A clippy::new_without_default
+      - name: Build
+        run: cargo build --target ${{ matrix.targets }} --all-features
+      - name: Unit test
+        if: ${{ matrix.targets == 'x86_64-unknown-linux-gnu' }}
+        run: cargo test --target ${{ matrix.targets }} -- --nocapture
+
+  doc:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    permissions:
+      contents: write
+    env:
+      default-branch: ${{ format('refs/heads/{0}', github.event.repository.default_branch) }}
+      RUSTDOCFLAGS: -D rustdoc::broken_intra_doc_links -D missing-docs
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@nightly
+      - name: Build docs
+        continue-on-error: ${{ github.ref != env.default-branch && github.event_name != 'pull_request' }}
+        run: |
+          cargo doc --no-deps --all-features
+          printf '<meta http-equiv="refresh" content="0; url=%s/index.html">' $(cargo tree | head -1 | cut -d' ' -f1) > target/doc/index.html
+      - name: Deploy to GitHub Pages
+        if: ${{ github.ref == env.default-branch }}
+        uses: JamesIves/github-pages-deploy-action@v4
+        with:
+          single-commit: true
+          branch: gh-pages
+          folder: target/doc
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ff78c42
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+/target
+/.vscode
+.DS_Store
+Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..46ae2dd
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "memory_set"
+version = "0.1.0"
+edition = "2021"
+authors = ["Yuekai Jia "]
+description = "Data structures and operations for managing memory mappings"
+license = "GPL-3.0-or-later OR Apache-2.0 OR MulanPSL-2.0"
+homepage = "https://github.com/rcore-os/arceos"
+repository = "https://github.com/arceos-org/memory_set"
+documentation = "https://docs.rs/memory_set"
+keywords = ["arceos", "virtual-memory", "memory-area", "mmap"]
+categories = ["os", "memory-management", "no-std"]
+
+[dependencies]
+memory_addr = "0.2"
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3d5deec
--- /dev/null
+++ b/README.md
@@ -0,0 +1,69 @@
+# memory_set
+
+[![Crates.io](https://img.shields.io/crates/v/memory_set)](https://crates.io/crates/memory_set)
+[![Docs.rs](https://docs.rs/memory_set/badge.svg)](https://docs.rs/memory_set)
+[![CI](https://github.com/arceos-org/memory_set/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/arceos-org/memory_set/actions/workflows/ci.yml)
+
+Data structures and operations for managing memory mappings.
+
+It is useful for implementing [`mmap`][1] and [`munmap`][1].
+
+[1]: https://man7.org/linux/man-pages/man2/mmap.2.html
+
+## Examples
+
+```rust
+use memory_addr::{va, va_range, VirtAddr};
+use memory_set::{MappingBackend, MemoryArea, MemorySet};
+
+const MAX_ADDR: usize = 0x10000;
+
+/// Mock memory flags.
+type MockFlags = u8;
+/// A mock page table, which is a simple array that maps addresses to flags.
+type MockPageTable = [MockFlags; MAX_ADDR];
+
+/// A mock mapping backend that manipulates the page table on `map` and `unmap`.
+#[derive(Clone)]
+struct MockBackend;
+
+let mut pt = [0; MAX_ADDR];
+let mut memory_set = MemorySet::<MockFlags, MockPageTable, MockBackend>::new();
+
+// Map [0x1000..0x5000).
+memory_set.map(
+    /* area: */ MemoryArea::new(va!(0x1000), 0x4000, 1, MockBackend),
+    /* page_table: */ &mut pt,
+    /* unmap_overlap: */ false,
+).unwrap();
+// Unmap [0x2000..0x4000), which splits the area into two parts.
+memory_set.unmap(va!(0x2000), 0x2000, &mut pt).unwrap();
+
+let areas = memory_set.iter().collect::<Vec<_>>();
+assert_eq!(areas.len(), 2);
+assert_eq!(areas[0].va_range(), va_range!(0x1000..0x2000));
+assert_eq!(areas[1].va_range(), va_range!(0x4000..0x5000));
+
+// Underlying operations to do when manipulating mappings.
+impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
+    fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry != 0 {
+                return false;
+            }
+            *entry = flags;
+        }
+        true
+    }
+
+    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut MockPageTable) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry == 0 {
+                return false;
+            }
+            *entry = 0;
+        }
+        true
+    }
+}
+```
diff --git a/src/area.rs b/src/area.rs
new file mode 100644
index 0000000..f536f62
--- /dev/null
+++ b/src/area.rs
@@ -0,0 +1,189 @@
+use core::fmt;
+use core::marker::PhantomData;
+
+use memory_addr::{VirtAddr, VirtAddrRange};
+
+use crate::{MappingError, MappingResult};
+
+/// Underlying operations to do when manipulating mappings within the specific
+/// memory area.
+///
+/// The backend can be different for different memory areas. e.g., for linear
+/// mappings, the target physical address is known when it is added to the page
+/// table. For lazy mappings, an empty mapping needs to be added to the page table
+/// to trigger a page fault.
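+///
+/// For illustration, a sketch of a trivial backend over a flat table of flag
+/// bytes (the `FlatBackend` struct and `FlatPageTable` alias are hypothetical,
+/// in the spirit of the mock backend in the crate-level example):
+///
+/// ```ignore
+/// #[derive(Clone)]
+/// struct FlatBackend;
+///
+/// type Flags = u8;
+/// type FlatPageTable = [Flags; 0x1000];
+///
+/// impl MappingBackend<Flags, FlatPageTable> for FlatBackend {
+///     fn map(&self, start: VirtAddr, size: usize, flags: Flags, pt: &mut FlatPageTable) -> bool {
+///         pt[start.as_usize()..start.as_usize() + size].fill(flags);
+///         true
+///     }
+///     fn unmap(&self, start: VirtAddr, size: usize, pt: &mut FlatPageTable) -> bool {
+///         pt[start.as_usize()..start.as_usize() + size].fill(0);
+///         true
+///     }
+/// }
+/// ```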
+pub trait MappingBackend<F: Copy, P>: Clone {
+    /// What to do when mapping a region within the area with the given flags.
+    fn map(&self, start: VirtAddr, size: usize, flags: F, page_table: &mut P) -> bool;
+    /// What to do when unmapping a memory region within the area.
+    fn unmap(&self, start: VirtAddr, size: usize, page_table: &mut P) -> bool;
+}
+
+/// A memory area represents a continuous range of virtual memory with the same
+/// flags.
+///
+/// The target physical memory frames are determined by [`MappingBackend`] and
+/// may not be contiguous.
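+///
+/// For illustration, a sketch reusing the mock types from the crate-level
+/// example:
+///
+/// ```ignore
+/// let area = MemoryArea::new(va!(0x1000), 0x4000, 1, MockBackend);
+/// assert_eq!(area.va_range(), va_range!(0x1000..0x5000));
+/// assert_eq!(area.size(), 0x4000);
+/// ```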
+pub struct MemoryArea<F: Copy, P, B: MappingBackend<F, P>> {
+    va_range: VirtAddrRange,
+    flags: F,
+    backend: B,
+    _phantom: PhantomData<(F, P)>,
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+    /// Creates a new memory area.
+    pub const fn new(start: VirtAddr, size: usize, flags: F, backend: B) -> Self {
+        Self {
+            va_range: VirtAddrRange::from_start_size(start, size),
+            flags,
+            backend,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Returns the virtual address range.
+    pub const fn va_range(&self) -> VirtAddrRange {
+        self.va_range
+    }
+
+    /// Returns the memory flags, e.g., the permission bits.
+    pub const fn flags(&self) -> F {
+        self.flags
+    }
+
+    /// Returns the start address of the memory area.
+    pub const fn start(&self) -> VirtAddr {
+        self.va_range.start
+    }
+
+    /// Returns the end address of the memory area.
+    pub const fn end(&self) -> VirtAddr {
+        self.va_range.end
+    }
+
+    /// Returns the size of the memory area.
+    pub const fn size(&self) -> usize {
+        self.va_range.size()
+    }
+
+    /// Returns the mapping backend of the memory area.
+    pub const fn backend(&self) -> &B {
+        &self.backend
+    }
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+    /// Maps the whole memory area in the page table.
+    pub(crate) fn map_area(&self, page_table: &mut P) -> MappingResult {
+        self.backend
+            .map(self.start(), self.size(), self.flags, page_table)
+            .then_some(())
+            .ok_or(MappingError::BadState)
+    }
+
+    /// Unmaps the whole memory area in the page table.
+    pub(crate) fn unmap_area(&self, page_table: &mut P) -> MappingResult {
+        self.backend
+            .unmap(self.start(), self.size(), page_table)
+            .then_some(())
+            .ok_or(MappingError::BadState)
+    }
+
+    /// Shrinks the memory area at the left side.
+    ///
+    /// The start address of the memory area is increased so that its size
+    /// becomes `new_size`. The shrunk part is unmapped.
+    pub(crate) fn shrink_left(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+        let unmap_size = self.size() - new_size;
+        if !self.backend.unmap(self.start(), unmap_size, page_table) {
+            return Err(MappingError::BadState);
+        }
+        self.va_range.start += unmap_size;
+        Ok(())
+    }
+
+    /// Shrinks the memory area at the right side.
+    ///
+    /// The end address of the memory area is decreased so that its size
+    /// becomes `new_size`. The shrunk part is unmapped.
+    pub(crate) fn shrink_right(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+        let unmap_size = self.size() - new_size;
+        if !self
+            .backend
+            .unmap(self.start() + new_size, unmap_size, page_table)
+        {
+            return Err(MappingError::BadState);
+        }
+        self.va_range.end -= unmap_size;
+        Ok(())
+    }
+
+    /// Splits the memory area at the given position.
+    ///
+    /// The original memory area is shrunk to the left part, and the right part
+    /// is returned.
+    ///
+    /// Returns `None` if the given position is not in the memory area, or one
+    /// of the parts is empty after splitting.
+    pub(crate) fn split(&mut self, pos: VirtAddr) -> Option<Self> {
+        let start = self.start();
+        let end = self.end();
+        if start < pos && pos < end {
+            let new_area = Self::new(
+                pos,
+                end.as_usize() - pos.as_usize(),
+                self.flags,
+                self.backend.clone(),
+            );
+            self.va_range.end = pos;
+            Some(new_area)
+        } else {
+            None
+        }
+    }
+}
+
+impl<F, P, B: MappingBackend<F, P>> fmt::Debug for MemoryArea<F, P, B>
+where
+    F: fmt::Debug + Copy,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("MemoryArea")
+            .field("va_range", &self.va_range)
+            .field("flags", &self.flags)
+            .finish()
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..4b2ce97
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,27 @@
+#![cfg_attr(not(test), no_std)]
+#![doc = include_str!("../README.md")]
+
+extern crate alloc;
+
+mod area;
+mod set;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::area::{MappingBackend, MemoryArea};
+pub use self::set::MemorySet;
+
+/// Error type for memory mapping operations.
+#[derive(Debug, Eq, PartialEq)]
+pub enum MappingError {
+    /// Invalid parameter (e.g., `addr`, `size`, `flags`, etc.)
+    InvalidParam,
+    /// The given range overlaps with an existing mapping.
+    AlreadyExists,
+    /// The backend page table is in a bad state.
+    BadState,
+}
+
+/// A [`Result`] type with [`MappingError`] as the error type.
+pub type MappingResult<T = ()> = Result<T, MappingError>;
diff --git a/src/set.rs b/src/set.rs
new file mode 100644
index 0000000..1da2e81
--- /dev/null
+++ b/src/set.rs
@@ -0,0 +1,216 @@
+use alloc::collections::BTreeMap;
+use core::fmt;
+
+use memory_addr::{VirtAddr, VirtAddrRange};
+
+use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
+
+/// A container that maintains memory mappings ([`MemoryArea`]).
+pub struct MemorySet<F: Copy, P, B: MappingBackend<F, P>> {
+    areas: BTreeMap<VirtAddr, MemoryArea<F, P, B>>,
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
+    /// Creates a new memory set.
+    pub const fn new() -> Self {
+        Self {
+            areas: BTreeMap::new(),
+        }
+    }
+
+    /// Returns the number of memory areas in the memory set.
+    pub fn len(&self) -> usize {
+        self.areas.len()
+    }
+
+    /// Returns `true` if the memory set contains no memory areas.
+    pub fn is_empty(&self) -> bool {
+        self.areas.is_empty()
+    }
+
+    /// Returns an iterator over all memory areas.
+    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<F, P, B>> {
+        self.areas.values()
+    }
+
+    /// Returns whether the given address range overlaps with any existing area.
+    pub fn overlaps(&self, range: VirtAddrRange) -> bool {
+        if let Some((_, before)) = self.areas.range(..range.start).last() {
+            if before.va_range().overlaps(range) {
+                return true;
+            }
+        }
+        if let Some((_, after)) = self.areas.range(range.start..).next() {
+            if after.va_range().overlaps(range) {
+                return true;
+            }
+        }
+        false
+    }
+
+    /// Finds the memory area that contains the given address.
+    pub fn find(&self, addr: VirtAddr) -> Option<&MemoryArea<F, P, B>> {
+        let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
+        candidate.filter(|a| a.va_range().contains(addr))
+    }
+
+    /// Finds a free area that can accommodate the given size.
+    ///
+    /// The search starts from the given `hint` address, and the area should be
+    /// within the given `limit` range.
+    ///
+    /// Returns the start address of the free area. Returns `None` if no such
+    /// area is found.
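+    ///
+    /// For illustration, a sketch of typical usage, assuming `set` already
+    /// has two areas mapped at [0x1000, 0x2000) and [0x3000, 0x4000):
+    ///
+    /// ```ignore
+    /// // The first gap at or after the hint that fits 0x1000 bytes starts at 0x2000.
+    /// let start = set.find_free_area(va!(0x1800), 0x1000, va_range!(0x0..0x10000));
+    /// assert_eq!(start, Some(va!(0x2000)));
+    /// ```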
+    pub fn find_free_area(
+        &self,
+        hint: VirtAddr,
+        size: usize,
+        limit: VirtAddrRange,
+    ) -> Option<VirtAddr> {
+        // Brute force: try the gap after each area's end address as the start.
+        let mut last_end = hint.max(limit.start);
+        for (addr, area) in self.areas.iter() {
+            if last_end + size <= *addr {
+                return Some(last_end);
+            }
+            last_end = last_end.max(area.end()); // never move the cursor backwards
+        }
+        if last_end + size <= limit.end {
+            Some(last_end)
+        } else {
+            None
+        }
+    }
+
+    /// Add a new memory mapping.
+    ///
+    /// The mapping is represented by a [`MemoryArea`].
+    ///
+    /// If the new area overlaps with any existing area, the behavior is
+    /// determined by the `unmap_overlap` parameter. If it is `true`, the
+    /// overlapped regions will be unmapped first. Otherwise, it returns an
+    /// error.
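+    ///
+    /// For illustration, a sketch assuming `set`, `pt`, and `MockBackend` are
+    /// set up as in the crate-level example, with [0x1000, 0x5000) already
+    /// mapped:
+    ///
+    /// ```ignore
+    /// // The new range clashes with the existing area, so this fails...
+    /// let area = MemoryArea::new(va!(0x1000), 0x1000, 1, MockBackend);
+    /// assert_eq!(set.map(area, &mut pt, false), Err(MappingError::AlreadyExists));
+    /// // ...while `unmap_overlap = true` unmaps the overlapped part first.
+    /// let area = MemoryArea::new(va!(0x1000), 0x1000, 1, MockBackend);
+    /// set.map(area, &mut pt, true).unwrap();
+    /// ```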
+    pub fn map(
+        &mut self,
+        area: MemoryArea<F, P, B>,
+        page_table: &mut P,
+        unmap_overlap: bool,
+    ) -> MappingResult {
+        if area.va_range().is_empty() {
+            return Err(MappingError::InvalidParam);
+        }
+
+        if self.overlaps(area.va_range()) {
+            if unmap_overlap {
+                self.unmap(area.start(), area.size(), page_table)?;
+            } else {
+                return Err(MappingError::AlreadyExists);
+            }
+        }
+
+        area.map_area(page_table)?;
+        assert!(self.areas.insert(area.start(), area).is_none());
+        Ok(())
+    }
+
+    /// Remove memory mappings within the given address range.
+    ///
+    /// All memory areas that are fully contained in the range will be removed
+    /// directly. If an area intersects with the boundary, it will be shrunk.
+    /// If the unmapped range is in the middle of an existing area, it will be
+    /// split into two areas.
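+    ///
+    /// For illustration, a sketch continuing the crate-level example, where
+    /// [0x1000, 0x5000) is mapped as a single area:
+    ///
+    /// ```ignore
+    /// // Unmapping the middle splits it into [0x1000, 0x2000) and [0x4000, 0x5000).
+    /// set.unmap(va!(0x2000), 0x2000, &mut pt).unwrap();
+    /// assert_eq!(set.len(), 2);
+    /// ```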
+    pub fn unmap(&mut self, start: VirtAddr, size: usize, page_table: &mut P) -> MappingResult {
+        let range = VirtAddrRange::from_start_size(start, size);
+        let end = range.end;
+        if range.is_empty() {
+            return Ok(());
+        }
+
+        // Unmap entire areas that are contained by the range.
+        self.areas.retain(|_, area| {
+            if area.va_range().contained_in(range) {
+                area.unmap_area(page_table).unwrap();
+                false
+            } else {
+                true
+            }
+        });
+
+        // Shrink right if the area intersects with the left boundary.
+        if let Some((before_start, before)) = self.areas.range_mut(..start).last() {
+            let before_end = before.end();
+            if before_end > start {
+                if before_end <= end {
+                    // the unmapped area is at the end of `before`.
+                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
+                } else {
+                    // the unmapped area is in the middle of `before`, need to split.
+                    let right_part = before.split(end).unwrap();
+                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
+                    assert_eq!(right_part.start(), end);
+                    self.areas.insert(end, right_part);
+                }
+            }
+        }
+
+        // Shrink left if the area intersects with the right boundary.
+        if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
+            let after_end = after.end();
+            if after_start < end {
+                // the unmapped area is at the start of `after`.
+                let mut new_area = self.areas.remove(&after_start).unwrap();
+                new_area.shrink_left(after_end.as_usize() - end.as_usize(), page_table)?;
+                assert_eq!(new_area.start(), end);
+                self.areas.insert(end, new_area);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Remove all memory areas and the underlying mappings.
+    pub fn clear(&mut self, page_table: &mut P) -> MappingResult {
+        for (_, area) in self.areas.iter() {
+            area.unmap_area(page_table)?;
+        }
+        self.areas.clear();
+        Ok(())
+    }
+}
+
+impl<F: Copy + fmt::Debug, P, B: MappingBackend<F, P>> fmt::Debug for MemorySet<F, P, B> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_list().entries(self.areas.values()).finish()
+    }
+}
diff --git a/src/tests.rs b/src/tests.rs
new file mode 100644
index 0000000..f8fc586
--- /dev/null
+++ b/src/tests.rs
@@ -0,0 +1,210 @@
+use memory_addr::VirtAddr;
+
+use crate::{MappingBackend, MappingError, MemoryArea, MemorySet};
+
+const MAX_ADDR: usize = 0x10000;
+
+type MockFlags = u8;
+type MockPageTable = [MockFlags; MAX_ADDR];
+
+#[derive(Clone)]
+struct MockBackend;
+
+type MockMemorySet = MemorySet<MockFlags, MockPageTable, MockBackend>;
+
+impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
+    fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry != 0 {
+                return false;
+            }
+            *entry = flags;
+        }
+        true
+    }
+
+    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut MockPageTable) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry == 0 {
+                return false;
+            }
+            *entry = 0;
+        }
+        true
+    }
+}
+
+macro_rules! assert_ok {
+    ($expr: expr) => {
+        assert!(($expr).is_ok())
+    };
+}
+
+macro_rules! assert_err {
+    ($expr: expr) => {
+        assert!(($expr).is_err())
+    };
+    ($expr: expr, $err: ident) => {
+        assert_eq!(($expr).err(), Some(MappingError::$err))
+    };
+}
+
+fn dump_memory_set(set: &MockMemorySet) {
+    use std::sync::Mutex;
+    static DUMP_LOCK: Mutex<()> = Mutex::new(());
+
+    let _lock = DUMP_LOCK.lock().unwrap();
+    println!("Number of areas: {}", set.len());
+    for area in set.iter() {
+        println!("{:?}", area);
+    }
+}
+
+#[test]
+fn test_map_unmap() {
+    let mut set = MockMemorySet::new();
+    let mut pt = [0; MAX_ADDR];
+
+    // Map [0, 0x1000), [0x2000, 0x3000), [0x4000, 0x5000), ...
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 1, MockBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    // Map [0x1000, 0x2000), [0x3000, 0x4000), [0x5000, 0x6000), ...
+    for start in (0x1000..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 2, MockBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 16);
+    for addr in 0..MAX_ADDR {
+        assert!(pt[addr] == 1 || pt[addr] == 2);
+    }
+
+    // Found [0x4000, 0x5000), flags = 1.
+    let area = set.find(0x4100.into()).unwrap();
+    assert_eq!(area.start(), 0x4000.into());
+    assert_eq!(area.end(), 0x5000.into());
+    assert_eq!(area.flags(), 1);
+    assert_eq!(pt[0x4200], 1);
+
+    // The area [0x4000, 0x8000) is already mapped, map returns an error.
+    assert_err!(
+        set.map(
+            MemoryArea::new(0x4000.into(), 0x4000, 3, MockBackend),
+            &mut pt,
+            false
+        ),
+        AlreadyExists
+    );
+    // Unmap overlapped areas before adding the new mapping [0x4000, 0x8000).
+    assert_ok!(set.map(
+        MemoryArea::new(0x4000.into(), 0x4000, 3, MockBackend),
+        &mut pt,
+        true
+    ));
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 13);
+
+    // Found [0x4000, 0x8000), flags = 3.
+    let area = set.find(0x4100.into()).unwrap();
+    assert_eq!(area.start(), 0x4000.into());
+    assert_eq!(area.end(), 0x8000.into());
+    assert_eq!(area.flags(), 3);
+    for addr in 0x4000..0x8000 {
+        assert_eq!(pt[addr], 3);
+    }
+
+    // Unmap areas in the middle.
+    assert_ok!(set.unmap(0x4000.into(), 0x8000, &mut pt));
+    assert_eq!(set.len(), 8);
+    // Unmap the remaining areas, including the unmapped ranges.
+    assert_ok!(set.unmap(0.into(), MAX_ADDR * 2, &mut pt));
+    assert_eq!(set.len(), 0);
+    for addr in 0..MAX_ADDR {
+        assert_eq!(pt[addr], 0);
+    }
+}
+
+#[test]
+fn test_unmap_split() {
+    let mut set = MockMemorySet::new();
+    let mut pt = [0; MAX_ADDR];
+
+    // Map [0, 0x1000), [0x2000, 0x3000), [0x4000, 0x5000), ...
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 1, MockBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    assert_eq!(set.len(), 8);
+
+    // Unmap [0xc00, 0x2400), [0x2c00, 0x4400), [0x4c00, 0x6400), ...
+    // The areas are shrunk at the left and right boundaries.
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.unmap((start + 0xc00).into(), 0x1800, &mut pt));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 8);
+
+    for area in set.iter() {
+        if area.start().as_usize() == 0 {
+            assert_eq!(area.size(), 0xc00);
+        } else {
+            assert_eq!(area.start().align_offset_4k(), 0x400);
+            assert_eq!(area.end().align_offset_4k(), 0xc00);
+            assert_eq!(area.size(), 0x800);
+        }
+        for addr in area.start().as_usize()..area.end().as_usize() {
+            assert_eq!(pt[addr], 1);
+        }
+    }
+
+    // Unmap [0x800, 0x900), [0x2800, 0x2900), [0x4800, 0x4900), ...
+    // The areas are split into two areas.
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.unmap((start + 0x800).into(), 0x100, &mut pt));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 16);
+
+    for area in set.iter() {
+        let off = area.start().align_offset_4k();
+        if off == 0 {
+            assert_eq!(area.size(), 0x800);
+        } else if off == 0x400 {
+            assert_eq!(area.size(), 0x400);
+        } else if off == 0x900 {
+            assert_eq!(area.size(), 0x300);
+        } else {
+            unreachable!();
+        }
+        for addr in area.start().as_usize()..area.end().as_usize() {
+            assert_eq!(pt[addr], 1);
+        }
+    }
+    let mut iter = set.iter();
+    while let Some(area) = iter.next() {
+        if let Some(next) = iter.next() {
+            for addr in area.end().as_usize()..next.start().as_usize() {
+                assert_eq!(pt[addr], 0);
+            }
+        }
+    }
+    drop(iter);
+
+    // Unmap all areas.
+    assert_ok!(set.unmap(0.into(), MAX_ADDR, &mut pt));
+    assert_eq!(set.len(), 0);
+    for addr in 0..MAX_ADDR {
+        assert_eq!(pt[addr], 0);
+    }
+}