From dd411433129c07a3bea43a0cc5ad9587bac1736a Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 8 Mar 2023 16:33:09 +0800 Subject: [PATCH 1/8] mm/damon/paddr: minor refactor of damon_pa_pageout() Patch series "mm/damon/paddr: minor code improvement", v3. Unify the folio_put() calls to make the code clearer, and also fix a minor issue in damon_pa_young(). This patch (of 3): Omit three lines by unifying the folio_put() calls, and make the code clearer. Link: https://lkml.kernel.org/r/20230308083311.120951-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20230308083311.120951-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index dd9c33fbe80527..0db724aec5cb9a 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -238,21 +238,18 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) if (!folio) continue; - if (damos_pa_filter_out(s, folio)) { - folio_put(folio); - continue; - } + if (damos_pa_filter_out(s, folio)) + goto put_folio; folio_clear_referenced(folio); folio_test_clear_young(folio); - if (!folio_isolate_lru(folio)) { - folio_put(folio); - continue; - } + if (!folio_isolate_lru(folio)) + goto put_folio; if (folio_test_unevictable(folio)) folio_putback_lru(folio); else list_add(&folio->lru, &folio_list); +put_folio: folio_put(folio); } applied = reclaim_pages(&folio_list); From b6993be23601c8bc992dc9743fbf78c1ff5d6b6a Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 8 Mar 2023 16:33:10 +0800 Subject: [PATCH 2/8] mm/damon/paddr: minor refactor of damon_pa_mark_accessed_or_deactivate() Omit one line by unifying the folio_put() calls, and make the code clearer. Link: https://lkml.kernel.org/r/20230308083311.120951-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 0db724aec5cb9a..b22f6fbb5816e5 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -268,16 +268,15 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( if (!folio) continue; - if (damos_pa_filter_out(s, folio)) { - folio_put(folio); - continue; - } + if (damos_pa_filter_out(s, folio)) + goto put_folio; if (mark_accessed) folio_mark_accessed(folio); else folio_deactivate(folio); applied += folio_nr_pages(folio); +put_folio: folio_put(folio); } return applied * PAGE_SIZE; From 70307b0e297a6634be855c49525ccbea50922a90 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 8 Mar 2023 16:33:11 +0800 Subject: [PATCH 3/8] mm/damon/paddr: fix missing folio_sz update in damon_pa_young() The *folio_sz in damon_pa_young() will be used (as last_folio_sz) by __damon_pa_check_access(), so it needs to be updated; fix the branch that was missing the update.
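For illustration, here is a minimal, self-contained C sketch (not kernel code; all names are stand-ins) of the pattern these three patches apply: every early-exit path jumps to a single label, so the reference drop -- and, as in the damon_pa_young() fix, the output-size update -- cannot be skipped on one branch.

#include <stdbool.h>

struct obj {
	unsigned long size;
	int refcount;
};

static bool filtered_out(const struct obj *o) { return o->size == 0; }	/* stand-in filter check */
static bool try_lock(struct obj *o) { return o->refcount == 1; }	/* stand-in trylock */
static void put_obj(struct obj *o) { o->refcount--; }			/* drop the reference */

static bool check_access(struct obj *o, unsigned long *obj_sz)
{
	bool accessed = false;

	if (filtered_out(o))
		goto out;		/* no duplicated put_obj() + early return */
	if (!try_lock(o))
		goto out;		/* the kind of branch that used to skip the *obj_sz update */

	accessed = true;		/* ... the real access check would happen here ... */
out:
	*obj_sz = o->size;		/* updated on every exit path */
	put_obj(o);
	return accessed;
}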
Link: https://lkml.kernel.org/r/20230308083311.120951-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index b22f6fbb5816e5..467b99166b4378 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -134,10 +134,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz) } need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); - if (need_lock && !folio_trylock(folio)) { - folio_put(folio); - return false; - } + if (need_lock && !folio_trylock(folio)) + goto out; rmap_walk(folio, &rwc); From 24139c07f413ef4b555482c758343d71392a19bc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Sat, 22 Apr 2023 22:54:18 +0200 Subject: [PATCH 4/8] mm/ksm: unmerge and clear VM_MERGEABLE when setting PR_SET_MEMORY_MERGE=0 Patch series "mm/ksm: improve PR_SET_MEMORY_MERGE=0 handling and cleanup disabling KSM", v2. (1) Make PR_SET_MEMORY_MERGE=0 unmerge pages like setting MADV_UNMERGEABLE does, (2) add a selftest for it and (3) factor out disabling of KSM from s390/gmap code. This patch (of 3): Let's unmerge any KSM pages when setting PR_SET_MEMORY_MERGE=0, and clear the VM_MERGEABLE flag from all VMAs -- just like KSM would. Of course, only do that if we previously set PR_SET_MEMORY_MERGE=1. Link: https://lkml.kernel.org/r/20230422205420.30372-1-david@redhat.com Link: https://lkml.kernel.org/r/20230422205420.30372-2-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Stefan Roesch Cc: Christian Borntraeger Cc: Claudio Imbrenda Cc: Heiko Carstens Cc: Janosch Frank Cc: Johannes Weiner Cc: Michal Hocko Cc: Rik van Riel Cc: Shuah Khan Cc: Sven Schnelle Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- include/linux/ksm.h | 1 + kernel/sys.c | 12 +++------ mm/ksm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7a9b76fb6c3f72..429efa6ff4ae85 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -21,6 +21,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, void ksm_add_vma(struct vm_area_struct *vma); int ksm_enable_merge_any(struct mm_struct *mm); +int ksm_disable_merge_any(struct mm_struct *mm); int __ksm_enter(struct mm_struct *mm); void __ksm_exit(struct mm_struct *mm); diff --git a/kernel/sys.c b/kernel/sys.c index 72cdb16e26366b..339fee3eff6a2b 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2695,16 +2695,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, if (mmap_write_lock_killable(me->mm)) return -EINTR; - if (arg2) { + if (arg2) error = ksm_enable_merge_any(me->mm); - } else { - /* - * TODO: we might want disable KSM on all VMAs and - * trigger unsharing to completely disable KSM. 
- */ - clear_bit(MMF_VM_MERGE_ANY, &me->mm->flags); - error = 0; - } + else + error = ksm_disable_merge_any(me->mm); mmap_write_unlock(me->mm); break; case PR_GET_MEMORY_MERGE: diff --git a/mm/ksm.c b/mm/ksm.c index 9e48258985d2f4..823bb3475a680e 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2520,6 +2520,22 @@ static void __ksm_add_vma(struct vm_area_struct *vma) vm_flags_set(vma, VM_MERGEABLE); } +static int __ksm_del_vma(struct vm_area_struct *vma) +{ + int err; + + if (!(vma->vm_flags & VM_MERGEABLE)) + return 0; + + if (vma->anon_vma) { + err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); + if (err) + return err; + } + + vm_flags_clear(vma, VM_MERGEABLE); + return 0; +} /** * ksm_add_vma - Mark vma as mergeable if compatible * @@ -2542,6 +2558,20 @@ static void ksm_add_vmas(struct mm_struct *mm) __ksm_add_vma(vma); } +static int ksm_del_vmas(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + int err; + + VMA_ITERATOR(vmi, mm, 0); + for_each_vma(vmi, vma) { + err = __ksm_del_vma(vma); + if (err) + return err; + } + return 0; +} + /** * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all * compatible VMA's @@ -2569,6 +2599,35 @@ int ksm_enable_merge_any(struct mm_struct *mm) return 0; } +/** + * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, + * previously enabled via ksm_enable_merge_any(). + * + * Disabling merging implies unmerging any merged pages, like setting + * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and + * merging on all compatible VMA's remains enabled. + * + * @mm: Pointer to mm + * + * Returns 0 on success, otherwise error code + */ +int ksm_disable_merge_any(struct mm_struct *mm) +{ + int err; + + if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) + return 0; + + err = ksm_del_vmas(mm); + if (err) { + ksm_add_vmas(mm); + return err; + } + + clear_bit(MMF_VM_MERGE_ANY, &mm->flags); + return 0; +} + int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { From 1150ea9338554ee6685c9f8939cf7202bf5b27cf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Sat, 22 Apr 2023 22:54:19 +0200 Subject: [PATCH 5/8] selftests/ksm: ksm_functional_tests: add prctl unmerge test Let's test whether setting PR_SET_MEMORY_MERGE to 0 after setting it to 1 will unmerge pages, similar to how setting MADV_UNMERGEABLE after setting MADV_MERGEABLE would. 
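For reference, a minimal userspace sketch of the interface the new test exercises: enable process-wide merging, then disable it again, which with this series also unmerges already-merged pages. The fallback prctl number is an assumption for older headers, and error handling is reduced to perror().

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67		/* assumed value for pre-6.4 headers */
#endif

int main(void)
{
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
		perror("PR_SET_MEMORY_MERGE=1");	/* e.g. EINVAL on kernels without KSM prctl support */
		return 1;
	}
	/* ... fill memory with duplicate pages and give ksmd time to merge them ... */
	if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0)) {
		perror("PR_SET_MEMORY_MERGE=0");	/* with this series: fails if unmerging fails */
		return 1;
	}
	return 0;
}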
Link: https://lkml.kernel.org/r/20230422205420.30372-3-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Stefan Roesch Cc: Christian Borntraeger Cc: Claudio Imbrenda Cc: Heiko Carstens Cc: Janosch Frank Cc: Johannes Weiner Cc: Michal Hocko Cc: Rik van Riel Cc: Shuah Khan Cc: Sven Schnelle Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- .../selftests/mm/ksm_functional_tests.c | 46 ++++++++++++++++--- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 7bc9fc17c9f0af..26853badae7056 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -91,9 +91,10 @@ static int ksm_merge(void) return 0; } -static char *mmap_and_merge_range(char val, unsigned long size) +static char *mmap_and_merge_range(char val, unsigned long size, bool use_prctl) { char *map; + int ret; map = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); @@ -110,7 +111,17 @@ static char *mmap_and_merge_range(char val, unsigned long size) /* Make sure each page contains the same values to merge them. */ memset(map, val, size); - if (madvise(map, size, MADV_MERGEABLE)) { + + if (use_prctl) { + ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); + if (ret < 0 && errno == EINVAL) { + ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n"); + goto unmap; + } else if (ret) { + ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 failed\n"); + goto unmap; + } + } else if (madvise(map, size, MADV_MERGEABLE)) { ksft_test_result_fail("MADV_MERGEABLE failed\n"); goto unmap; } @@ -133,7 +144,7 @@ static void test_unmerge(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size); + map = mmap_and_merge_range(0xcf, size, false); if (map == MAP_FAILED) return; @@ -155,7 +166,7 @@ static void test_unmerge_discarded(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size); + map = mmap_and_merge_range(0xcf, size, false); if (map == MAP_FAILED) return; @@ -187,7 +198,7 @@ static void test_unmerge_uffd_wp(void) ksft_print_msg("[RUN] %s\n", __func__); - map = mmap_and_merge_range(0xcf, size); + map = mmap_and_merge_range(0xcf, size, false); if (map == MAP_FAILED) return; @@ -323,9 +334,31 @@ static void test_prctl_fork(void) ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n"); } +static void test_prctl_unmerge(void) +{ + const unsigned int size = 2 * MiB; + char *map; + + ksft_print_msg("[RUN] %s\n", __func__); + + map = mmap_and_merge_range(0xcf, size, true); + if (map == MAP_FAILED) + return; + + if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0)) { + ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 failed\n"); + goto unmap; + } + + ksft_test_result(!range_maps_duplicates(map, size), + "Pages were unmerged\n"); +unmap: + munmap(map, size); +} + int main(int argc, char **argv) { - unsigned int tests = 4; + unsigned int tests = 5; int err; #ifdef __NR_userfaultfd @@ -355,6 +388,7 @@ int main(int argc, char **argv) test_prctl(); test_prctl_fork(); + test_prctl_unmerge(); err = ksft_get_fail_cnt(); if (err) From 2c281f54f556e1f3266c8cb104adf9eea7a7b742 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Sat, 22 Apr 2023 23:01:56 +0200 Subject: [PATCH 6/8] mm/ksm: move disabling KSM from s390/gmap code to KSM code Let's factor out actual disabling of KSM. 
The existing "mm->def_flags &= ~VM_MERGEABLE;" was essentially a NOP and can be dropped, because def_flags should never include VM_MERGEABLE. Note that we don't currently prevent re-enabling KSM. This should now be faster in case KSM was never enabled, because we only conditionally iterate all VMAs. Further, it certainly looks cleaner. Link: https://lkml.kernel.org/r/20230422210156.33630-1-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Janosch Frank Acked-by: Stefan Roesch Cc: Christian Borntraeger Cc: Claudio Imbrenda Cc: Heiko Carstens Cc: Johannes Weiner Cc: Michal Hocko Cc: Rik van Riel Cc: Shuah Khan Cc: Sven Schnelle Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- arch/s390/mm/gmap.c | 20 +------------------- include/linux/ksm.h | 6 ++++++ mm/ksm.c | 11 +++++++++++ 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 0949811761e6ce..dfe905c7bd8e0b 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2585,30 +2585,12 @@ EXPORT_SYMBOL_GPL(s390_enable_sie); int gmap_mark_unmergeable(void) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long vm_flags; - int ret; - VMA_ITERATOR(vmi, mm, 0); - /* * Make sure to disable KSM (if enabled for the whole process or * individual VMAs). Note that nothing currently hinders user space * from re-enabling it. */ - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); - - for_each_vma(vmi, vma) { - /* Copy vm_flags to avoid partial modifications in ksm_madvise */ - vm_flags = vma->vm_flags; - ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, - MADV_UNMERGEABLE, &vm_flags); - if (ret) - return ret; - vm_flags_reset(vma, vm_flags); - } - mm->def_flags &= ~VM_MERGEABLE; - return 0; + return ksm_disable(current->mm); } EXPORT_SYMBOL_GPL(gmap_mark_unmergeable); diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 429efa6ff4ae85..899a314bc48720 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -22,6 +22,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, void ksm_add_vma(struct vm_area_struct *vma); int ksm_enable_merge_any(struct mm_struct *mm); int ksm_disable_merge_any(struct mm_struct *mm); +int ksm_disable(struct mm_struct *mm); int __ksm_enter(struct mm_struct *mm); void __ksm_exit(struct mm_struct *mm); @@ -80,6 +81,11 @@ static inline void ksm_add_vma(struct vm_area_struct *vma) { } +static inline int ksm_disable(struct mm_struct *mm) +{ + return 0; +} + static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { return 0; diff --git a/mm/ksm.c b/mm/ksm.c index 823bb3475a680e..0156bded3a66c5 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2628,6 +2628,17 @@ int ksm_disable_merge_any(struct mm_struct *mm) return 0; } +int ksm_disable(struct mm_struct *mm) +{ + mmap_assert_write_locked(mm); + + if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) + return 0; + if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) + return ksm_disable_merge_any(mm); + return ksm_del_vmas(mm); +} + int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { From 65f67a3e002f0e5ae84a65be915248d57d480060 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 25 Apr 2023 20:44:53 +0800 Subject: [PATCH 7/8] mm/page_alloc: add some comments to explain the possible hole in __pageblock_pfn_to_page() Now the __pageblock_pfn_to_page() is used by set_zone_contiguous(), which checks whether the given zone contains holes, and uses pfn_to_online_page() to validate if the start pfn is online 
and valid, as well as using pfn_valid() to validate the end pfn. However, the __pageblock_pfn_to_page() function may return non-NULL even if the end pfn of a pageblock is in a memory hole in some situations. For example, if the pageblock order is MAX_ORDER, which will fall into 2 sub-sections, and the end pfn of the pageblock may be hole even though the start pfn is online and valid. See below memory layout as an example and suppose the pageblock order is MAX_ORDER. [ 0.000000] Zone ranges: [ 0.000000] DMA [mem 0x0000000040000000-0x00000000ffffffff] [ 0.000000] DMA32 empty [ 0.000000] Normal [mem 0x0000000100000000-0x0000001fa7ffffff] [ 0.000000] Movable zone start for each node [ 0.000000] Early memory node ranges [ 0.000000] node 0: [mem 0x0000000040000000-0x0000001fa3c7ffff] [ 0.000000] node 0: [mem 0x0000001fa3c80000-0x0000001fa3ffffff] [ 0.000000] node 0: [mem 0x0000001fa4000000-0x0000001fa402ffff] [ 0.000000] node 0: [mem 0x0000001fa4030000-0x0000001fa40effff] [ 0.000000] node 0: [mem 0x0000001fa40f0000-0x0000001fa73cffff] [ 0.000000] node 0: [mem 0x0000001fa73d0000-0x0000001fa745ffff] [ 0.000000] node 0: [mem 0x0000001fa7460000-0x0000001fa746ffff] [ 0.000000] node 0: [mem 0x0000001fa7470000-0x0000001fa758ffff] [ 0.000000] node 0: [mem 0x0000001fa7590000-0x0000001fa7dfffff] Focus on the last memory range, and there is a hole for the range [mem 0x0000001fa7590000-0x0000001fa7dfffff]. That means the last pageblock will contain the range from 0x1fa7c00000 to 0x1fa7ffffff, since the pageblock must be 4M aligned. And in this pageblock, these pfns will fall into 2 sub-section (the sub-section size is 2M aligned). So, the 1st sub-section (indicates pfn range: 0x1fa7c00000 - 0x1fa7dfffff ) in this pageblock is valid by calling subsection_map_init() in free_area_init(), but the 2nd sub-section (indicates pfn range: 0x1fa7e00000 - 0x1fa7ffffff ) in this pageblock is not valid. This did not break anything until now, but the zone continuous is fragile in this possible scenario. So as previous discussion[1], it is better to add some comments to explain this possible issue in case there are some future pfn walkers that rely on this. [1] https://lore.kernel.org/all/87r0sdsmr6.fsf@yhuang6-desk2.ccr.corp.intel.com/ Link: https://lkml.kernel.org/r/5c26368865e79c743a453dea48d30670b19d2e4f.1682425534.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/5c26368865e79c743a453dea48d30670b19d2e4f.1682425534.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Michal Hocko Reviewed-by: "Huang, Ying" Cc: Baolin Wang Cc: David Hildenbrand Cc: Mel Gorman Cc: Mike Rapoport (IBM) Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6da423ec356f9e..d71c80f299461e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1502,6 +1502,15 @@ void __free_pages_core(struct page *page, unsigned int order) * interleaving within a single pageblock. It is therefore sufficient to check * the first and last page of a pageblock and avoid checking each individual * page in a pageblock. + * + * Note: the function may return non-NULL struct page even for a page block + * which contains a memory hole (i.e. there is no physical memory for a subset + * of the pfn range). For example, if the pageblock order is MAX_ORDER, which + * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole + * even though the start pfn is online and valid. 
This should be safe most of + * the time because struct pages are still initialized via init_unavailable_range() + * and pfn walkers shouldn't touch any physical memory range for which they do + * not recognize any specific metadata in struct pages. */ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone) From 245f0922689364b21163af4937a05ea0ba576fae Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 17 Apr 2023 12:53:23 +0800 Subject: [PATCH 8/8] mm: hwpoison: coredump: support recovery from dump_user_range() dump_user_range() is used to copy the user page to a coredump file, but if a hardware memory error occurred during copy, which called from __kernel_write_iter() in dump_user_range(), it crashes, CPU: 112 PID: 7014 Comm: mca-recover Not tainted 6.3.0-rc2 #425 pc : __memcpy+0x110/0x260 lr : _copy_from_iter+0x3bc/0x4c8 ... Call trace: __memcpy+0x110/0x260 copy_page_from_iter+0xcc/0x130 pipe_write+0x164/0x6d8 __kernel_write_iter+0x9c/0x210 dump_user_range+0xc8/0x1d8 elf_core_dump+0x308/0x368 do_coredump+0x2e8/0xa40 get_signal+0x59c/0x788 do_signal+0x118/0x1f8 do_notify_resume+0xf0/0x280 el0_da+0x130/0x138 el0t_64_sync_handler+0x68/0xc0 el0t_64_sync+0x188/0x190 Generally, the '->write_iter' of file ops will use copy_page_from_iter() and copy_page_from_iter_atomic(), change memcpy() to copy_mc_to_kernel() in both of them to handle #MC during source read, which stop coredump processing and kill the task instead of kernel panic, but the source address may not always a user address, so introduce a new copy_mc flag in struct iov_iter{} to indicate that the iter could do a safe memory copy, also introduce the helpers to set/cleck the flag, for now, it's only used in coredump's dump_user_range(), but it could expand to any other scenarios to fix the similar issue. 
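As a hedged sketch of how another in-kernel user could opt in to the new flag, mirroring the fs/coredump.c hunk below: build a source iterator over the page, mark it machine-check aware, and treat a short write as a possibly poisoned source. The wrapper name is illustrative, and __kernel_write_iter() is internal to fs/ (declared in fs/internal.h), so this only applies to code that can reach it.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bvec.h>

static ssize_t write_page_mc_safe(struct file *file, struct page *page, loff_t *pos)
{
	struct bio_vec bvec;
	struct iov_iter iter;

	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	iov_iter_set_copy_mc(&iter);	/* new helper: copy the source via copy_mc_to_kernel() */

	/* A return value shorter than PAGE_SIZE may now mean the source page is poisoned. */
	return __kernel_write_iter(file, &iter, pos);
}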
Link: https://lkml.kernel.org/r/20230417045323.11054-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: Alexander Viro Cc: Christian Brauner Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Tong Tiangen Cc: Jens Axboe Signed-off-by: Andrew Morton --- fs/coredump.c | 1 + include/linux/uio.h | 16 ++++++++++++++++ lib/iov_iter.c | 17 +++++++++++++++-- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/fs/coredump.c b/fs/coredump.c index 5df1e6e1eb2bea..ece7badf701bc2 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -882,6 +882,7 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page) pos = file->f_pos; bvec_set_page(&bvec, page, PAGE_SIZE, 0); iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE); + iov_iter_set_copy_mc(&iter); n = __kernel_write_iter(cprm->file, &iter, &pos); if (n != PAGE_SIZE) return 0; diff --git a/include/linux/uio.h b/include/linux/uio.h index 3d386849a75849..044c1d8c230cfa 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -42,6 +42,7 @@ struct iov_iter_state { struct iov_iter { u8 iter_type; + bool copy_mc; bool nofault; bool data_source; bool user_backed; @@ -256,8 +257,22 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); #ifdef CONFIG_ARCH_HAS_COPY_MC size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i); +static inline void iov_iter_set_copy_mc(struct iov_iter *i) +{ + i->copy_mc = true; +} + +static inline bool iov_iter_is_copy_mc(const struct iov_iter *i) +{ + return i->copy_mc; +} #else #define _copy_mc_to_iter _copy_to_iter +static inline void iov_iter_set_copy_mc(struct iov_iter *i) { } +static inline bool iov_iter_is_copy_mc(const struct iov_iter *i) +{ + return false; +} #endif size_t iov_iter_zero(size_t bytes, struct iov_iter *); @@ -380,6 +395,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_UBUF, + .copy_mc = false, .user_backed = true, .data_source = direction, .ubuf = buf, diff --git a/lib/iov_iter.c b/lib/iov_iter.c index c3dbe994112c67..960223ed91991a 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -434,6 +434,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_IOVEC, + .copy_mc = false, .nofault = false, .user_backed = true, .data_source = direction, @@ -630,6 +631,14 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ +static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from, + size_t size) +{ + if (iov_iter_is_copy_mc(i)) + return (void *)copy_mc_to_kernel(to, from, size); + return memcpy(to, from, size); +} + size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) @@ -639,7 +648,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) might_fault(); iterate_and_advance(i, bytes, base, len, off, copyin(addr + off, base, len), - memcpy(addr + off, base, len) + memcpy_from_iter(i, addr + off, base, len) ) return bytes; @@ -862,7 +871,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt } iterate_and_advance(i, bytes, base, len, off, copyin(p + off, base, len), - memcpy(p + off, base, len) + memcpy_from_iter(i, p + off, base, len) ) kunmap_atomic(kaddr); return bytes; @@ -1043,6 +1052,7 @@ void iov_iter_kvec(struct 
iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_KVEC, + .copy_mc = false, .data_source = direction, .kvec = kvec, .nr_segs = nr_segs, @@ -1059,6 +1069,7 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_BVEC, + .copy_mc = false, .data_source = direction, .bvec = bvec, .nr_segs = nr_segs, @@ -1105,6 +1116,7 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, BUG_ON(direction & ~1); *i = (struct iov_iter) { .iter_type = ITER_XARRAY, + .copy_mc = false, .data_source = direction, .xarray = xarray, .xarray_start = start, @@ -1128,6 +1140,7 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) BUG_ON(direction != READ); *i = (struct iov_iter){ .iter_type = ITER_DISCARD, + .copy_mc = false, .data_source = false, .count = count, .iov_offset = 0