mempool: do not use ksize() for poisoning
Nothing appears to be using ksize() within the kmalloc-backed mempools
except the mempool poisoning logic.  Use the actual pool size instead of
ksize() to avoid needing any special handling of the memory, as would be
required by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.

[[email protected]: for slab mempools pool_data is not object size]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
Suggested-by: Vlastimil Babka <[email protected]>
  Link: https://lore.kernel.org/lkml/[email protected]/
Acked-by: Vlastimil Babka <[email protected]>
Reviewed-by: Andrey Konovalov <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Marco Elver <[email protected]>
Cc: Vincenzo Frascino <[email protected]>
Reported-by: Anders Roxell <[email protected]>
  Link: https://lore.kernel.org/all/20221031105514.GB69385@mutt/
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
kees authored and akpm00 committed Nov 30, 2022
1 parent 6e7ba8b commit b2b23ba
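
Background for the new branches in the diff below: for a kmalloc-backed
mempool, pool->pool_data is the element size itself (cast to a pointer at
pool creation), while for a slab-backed mempool it is the kmem_cache, whose
object size kmem_cache_size() reports.  A minimal sketch of the two
allocation helpers, condensed from the kernel's mm/mempool.c (the
VM_BUG_ON() assertion in the slab helper is omitted here):

#include <linux/mempool.h>
#include <linux/slab.h>

/*
 * kmalloc-backed pools: pool_data carries the element size, so the
 * poisoning code can recover it with a (size_t) cast.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}

/*
 * slab-backed pools: pool_data carries the kmem_cache, so the
 * poisoning code asks kmem_cache_size() for the object size.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}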
mm/mempool.c: 12 additions & 6 deletions (1 changed file)
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -57,8 +57,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
-		__check_element(pool, element, ksize(element));
+	if (pool->free == mempool_kfree) {
+		__check_element(pool, element, (size_t)pool->pool_data);
+	} else if (pool->free == mempool_free_slab) {
+		__check_element(pool, element, kmem_cache_size(pool->pool_data));
 	} else if (pool->free == mempool_free_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -80,8 +82,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
-		__poison_element(element, ksize(element));
+	if (pool->alloc == mempool_kmalloc) {
+		__poison_element(element, (size_t)pool->pool_data);
+	} else if (pool->alloc == mempool_alloc_slab) {
+		__poison_element(element, kmem_cache_size(pool->pool_data));
 	} else if (pool->alloc == mempool_alloc_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -111,8 +115,10 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_range(element, __ksize(element));
+	if (pool->alloc == mempool_kmalloc)
+		kasan_unpoison_range(element, (size_t)pool->pool_data);
+	else if (pool->alloc == mempool_alloc_slab)
+		kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
 				     false);
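
For orientation, a hedged sketch of how each branch above gets exercised,
using the real mempool constructors; the min_nr/size values and the cache
name are illustrative, not taken from the patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static int __init mempool_example_init(void)
{
	struct kmem_cache *cache;
	mempool_t *kmalloc_pool, *slab_pool;

	/* pool_data becomes (void *)128, so the mempool_kmalloc/
	 * mempool_kfree branches poison and check exactly 128 bytes. */
	kmalloc_pool = mempool_create_kmalloc_pool(16, 128);

	/* pool_data becomes the cache, so the mempool_alloc_slab/
	 * mempool_free_slab branches use kmem_cache_size(cache). */
	cache = kmem_cache_create("example-cache", 128, 0, 0, NULL);
	slab_pool = mempool_create_slab_pool(16, cache);

	/* Error handling elided for brevity in this sketch. */
	return (kmalloc_pool && cache && slab_pool) ? 0 : -ENOMEM;
}

Note that element poisoning itself only kicks in on debug builds
(CONFIG_DEBUG_SLAB or CONFIG_SLUB_DEBUG_ON), which is where the old
ksize() calls tripped over KASAN and friends.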
