author    Stephen Rothwell <sfr@canb.auug.org.au>  2022-12-15 10:58:01 +1100
committer Stephen Rothwell <sfr@canb.auug.org.au>  2022-12-15 10:58:01 +1100
commit    f3e7252e895d7bae3e914832142c3ab1d06b18d4 (patch)
tree      4216e990f29bc75dfd1e71fd31d15e868a6558ce
parent    181bd4df65a0c1e277079d2b55cf6ef6212f859e (diff)
parent    51b0cdaec12945046d480d6d7143ecc403e25e06 (diff)
Merge branch 'mm-everything' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 Documentation/mm/highmem.rst     | 41
 lib/maple_tree.c                 |  2
 mm/gup_test.c                    |  4
 mm/ksm.c                         |  8
 mm/memory.c                      |  3
 mm/swapfile.c                    |  2
 tools/testing/radix-tree/maple.c |  5
 7 files changed, 47 insertions(+), 18 deletions(-)
diff --git a/Documentation/mm/highmem.rst b/Documentation/mm/highmem.rst
index 0f731d9196b0..59d1078f53df 100644
--- a/Documentation/mm/highmem.rst
+++ b/Documentation/mm/highmem.rst
@@ -57,7 +57,8 @@ list shows them in order of preference of use.
It can be invoked from any context (including interrupts) but the mappings
can only be used in the context which acquired them.
- This function should be preferred, where feasible, over all the others.
+ This function should always be used. kmap_atomic() and kmap() have been
+ deprecated.
These mappings are thread-local and CPU-local, meaning that the mapping
can only be accessed from within this thread and the thread is bound to the
@@ -100,10 +101,21 @@ list shows them in order of preference of use.
(included in the "Functions" section) for details on how to manage nested
mappings.
-* kmap_atomic(). This permits a very short duration mapping of a single
- page. Since the mapping is restricted to the CPU that issued it, it
- performs well, but the issuing task is therefore required to stay on that
- CPU until it has finished, lest some other task displace its mappings.
+* kmap_atomic(). This function has been deprecated; use kmap_local_page().
+
+ NOTE: Conversions to kmap_local_page() must take care to follow the mapping
+ restrictions imposed on kmap_local_page(). Furthermore, the code between
+ calls to kmap_atomic() and kunmap_atomic() may implicitly depend on the side
+ effects of atomic mappings, i.e. disabling page faults or preemption, or both.
+ In that case, explicit calls to pagefault_disable() or preempt_disable() or
+ both must be made in conjunction with the use of kmap_local_page().
+
+ [Legacy documentation]
+
+ This permits a very short duration mapping of a single page. Since the
+ mapping is restricted to the CPU that issued it, it performs well, but
+ the issuing task is therefore required to stay on that CPU until it has
+ finished, lest some other task displace its mappings.
kmap_atomic() may also be used by interrupt contexts, since it does not
sleep and the callers too may not sleep until after kunmap_atomic() is
@@ -115,11 +127,20 @@ list shows them in order of preference of use.
It is assumed that k[un]map_atomic() won't fail.
-* kmap(). This should be used to make short duration mapping of a single
- page with no restrictions on preemption or migration. It comes with an
- overhead as mapping space is restricted and protected by a global lock
- for synchronization. When mapping is no longer needed, the address that
- the page was mapped to must be released with kunmap().
+* kmap(). This function has been deprecated; use kmap_local_page().
+
+ NOTE: Conversions to kmap_local_page() must take care to follow the mapping
+ restrictions imposed on kmap_local_page(). In particular, it is necessary to
+ make sure that the kernel virtual memory pointer is only valid in the thread
+ that obtained it.
+
+ [Legacy documentation]
+
+ This should be used to make short duration mapping of a single page with no
+ restrictions on preemption or migration. It comes with an overhead as mapping
+ space is restricted and protected by a global lock for synchronization. When
+ mapping is no longer needed, the address that the page was mapped to must be
+ released with kunmap().
Mapping changes must be propagated across all the CPUs. kmap() also
requires global TLB invalidation when the kmap's pool wraps and it might
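
As a worked illustration of the conversion note above (a minimal sketch, not
part of this patch; copy_into_page() is a hypothetical helper): code that
implicitly relied on kmap_atomic() disabling page faults must make that
explicit when switching to kmap_local_page():

    #include <linux/highmem.h>  /* kmap_local_page(), kunmap_local() */
    #include <linux/uaccess.h>  /* pagefault_disable()/pagefault_enable() */
    #include <linux/string.h>   /* memcpy() */

    static void copy_into_page(struct page *page, const void *src, size_t len)
    {
            void *vaddr;

            /*
             * Deprecated form:
             *     vaddr = kmap_atomic(page);  (implicitly disabled page
             *     memcpy(vaddr, src, len);     faults and preemption)
             *     kunmap_atomic(vaddr);
             */
            pagefault_disable();    /* only if the section depends on it */
            vaddr = kmap_local_page(page);
            memcpy(vaddr, src, len);
            kunmap_local(vaddr);
            pagefault_enable();
    }
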
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3fe1491d2bf9..fe3947b80069 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -6062,7 +6062,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
if (mas->index < min)
return NULL;
- /* Retries on dead nodes handled by mas_next_entry */
+ /* Retries on dead nodes handled by mas_prev_entry */
return mas_prev_entry(mas, min);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
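
For context on the function this comment fix lands in, a hedged sketch of a
reverse walk driven by mas_find_rev(); walk_backwards() and process() are
illustrative names, not from this patch:

    #include <linux/maple_tree.h>
    #include <linux/rcupdate.h>

    extern void process(void *entry);   /* hypothetical consumer */

    static void walk_backwards(struct maple_tree *mt)
    {
            /* start the state at the highest possible index */
            MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
            void *entry;

            rcu_read_lock();
            /* mas_find_rev() returns NULL once mas.index drops below min */
            while ((entry = mas_find_rev(&mas, 0)) != NULL)
                    process(entry);
            rcu_read_unlock();
    }
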
diff --git a/mm/gup_test.c b/mm/gup_test.c
index 33f431e0da60..8ae7307a1bb6 100644
--- a/mm/gup_test.c
+++ b/mm/gup_test.c
@@ -214,7 +214,7 @@ static inline void pin_longterm_test_stop(void)
if (pin_longterm_test_nr_pages)
unpin_user_pages(pin_longterm_test_pages,
pin_longterm_test_nr_pages);
- kfree(pin_longterm_test_pages);
+ kvfree(pin_longterm_test_pages);
pin_longterm_test_pages = NULL;
pin_longterm_test_nr_pages = 0;
}
@@ -255,7 +255,7 @@ static inline int pin_longterm_test_start(unsigned long arg)
fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST);
if (!fast && mmap_read_lock_killable(current->mm)) {
- kfree(pages);
+ kvfree(pages);
return -EINTR;
}
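
The kvfree() fixes above matter because the pages array is sized by a
user-supplied page count; a minimal sketch of the alloc/free pairing,
assuming a kvcalloc() allocation (which the kfree()-to-kvfree() change
implies):

    #include <linux/slab.h>     /* kvcalloc(), kvfree() */

    static int alloc_and_free_pages_array(unsigned long nr_pages)
    {
            struct page **pages;

            /* nr_pages is user-controlled, so the array can be large and
             * kvcalloc() may silently fall back to vmalloc ... */
            pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* ... which is why teardown must use kvfree(): it handles both
             * kmalloc- and vmalloc-backed buffers, while kfree() is wrong
             * for a vmalloc address. */
            kvfree(pages);
            return 0;
    }
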
diff --git a/mm/ksm.c b/mm/ksm.c
index dd02780c387f..83e2f74ae7da 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2629,8 +2629,12 @@ struct page *ksm_might_need_to_copy(struct page *page,
new_page = NULL;
}
if (new_page) {
- copy_user_highpage(new_page, page, address, vma);
-
+ if (copy_mc_user_highpage(new_page, page, address, vma)) {
+ put_page(new_page);
+ new_page = ERR_PTR(-EHWPOISON);
+ memory_failure_queue(page_to_pfn(page), 0);
+ return new_page;
+ }
SetPageDirty(new_page);
__SetPageUptodate(new_page);
__SetPageLocked(new_page);
diff --git a/mm/memory.c b/mm/memory.c
index aad226daf41b..5b2c137dfb2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (unlikely(!page)) {
ret = VM_FAULT_OOM;
goto out_page;
+ } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+ ret = VM_FAULT_HWPOISON;
+ goto out_page;
}
folio = page_folio(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 908a529bca12..d479811bc311 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1767,7 +1767,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
swapcache = page;
page = ksm_might_need_to_copy(page, vma, addr);
- if (unlikely(!page))
+ if (IS_ERR_OR_NULL(page))
return -ENOMEM;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
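
Taken together, the ksm.c, memory.c, and swapfile.c hunks give
ksm_might_need_to_copy() a three-way contract: a usable page, NULL on
allocation failure, or ERR_PTR(-EHWPOISON) when the copy trips a machine
check. A caller-side sketch (hypothetical fault-path caller, not from this
patch):

    #include <linux/err.h>      /* IS_ERR_OR_NULL(), PTR_ERR() */

    page = ksm_might_need_to_copy(page, vma, addr);
    if (IS_ERR_OR_NULL(page)) {
            /* PTR_ERR(NULL) is 0, so this check is safe for both cases */
            if (PTR_ERR(page) == -EHWPOISON)    /* copy hit a machine check */
                    return VM_FAULT_HWPOISON;
            return VM_FAULT_OOM;                /* NULL: allocation failed */
    }
    /* success: either the original page or a good private copy */
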
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 2e91973fbaa6..81fa7ec2e66a 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * maple_tree.c: Userspace shim for maple tree test-suite
- * Copyright (c) 2018 Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * maple_tree.c: Userspace testing for maple tree test-suite
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
*
* Any tests that require internal knowledge of the tree or threads and other
* difficult to handle in kernel tests.