highmem: add folio_test_partial_kmap()

commit 97dfbbd135 upstream.

In commit c749d9b7eb ("iov_iter: fix copy_page_from_iter_atomic() if
KMAP_LOCAL_FORCE_MAP"), Hugh correctly noted that if KMAP_LOCAL_FORCE_MAP
is enabled, we must limit ourselves to PAGE_SIZE bytes per call to
kmap_local().  The same problem exists in memcpy_from_folio(),
memcpy_to_folio(), folio_zero_tail(), folio_fill_tail() and
memcpy_from_file_folio(), so add folio_test_partial_kmap() to do this more
succinctly.
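
For illustration (not part of this patch), a hypothetical helper
copy_from_folio_sketch() shows the pattern the new predicate supports:
copy out of a folio one mapping at a time, clamping each chunk to a
single page whenever kmap_local_folio() cannot map the whole folio:

        /* Illustrative sketch only; copy_from_folio_sketch() is hypothetical. */
        static void copy_from_folio_sketch(char *to, struct folio *folio,
                                           size_t offset, size_t len)
        {
                do {
                        const char *from = kmap_local_folio(folio, offset);
                        size_t chunk = len;

                        /* Only one page is mapped: clamp the copy to it. */
                        if (folio_test_partial_kmap(folio) &&
                            chunk > PAGE_SIZE - offset_in_page(offset))
                                chunk = PAGE_SIZE - offset_in_page(offset);
                        memcpy(to, from, chunk);
                        kunmap_local(from);

                        to += chunk;
                        offset += chunk;
                        len -= chunk;
                } while (len > 0);
        }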

Link: https://lkml.kernel.org/r/20250514170607.3000994-2-willy@infradead.org
Fixes: 00cdf76012 ("mm: add memcpy_from_file_folio()")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
 		const char *from = kmap_local_folio(folio, offset);
 		size_t chunk = len;
 
-		if (folio_test_highmem(folio) &&
+		if (folio_test_partial_kmap(folio) &&
 		    chunk > PAGE_SIZE - offset_in_page(offset))
 			chunk = PAGE_SIZE - offset_in_page(offset);
 		memcpy(to, from, chunk);
@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
 		char *to = kmap_local_folio(folio, offset);
 		size_t chunk = len;
 
-		if (folio_test_highmem(folio) &&
+		if (folio_test_partial_kmap(folio) &&
 		    chunk > PAGE_SIZE - offset_in_page(offset))
 			chunk = PAGE_SIZE - offset_in_page(offset);
 		memcpy(to, from, chunk);
@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
 {
 	size_t len = folio_size(folio) - offset;
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		size_t max = PAGE_SIZE - offset_in_page(offset);
 
 		while (len > max) {
@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
 
 	VM_BUG_ON(offset + len > folio_size(folio));
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		size_t max = PAGE_SIZE - offset_in_page(offset);
 
 		while (len > max) {
@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
 	size_t offset = offset_in_folio(folio, pos);
 	char *from = kmap_local_folio(folio, offset);
 
-	if (folio_test_highmem(folio)) {
+	if (folio_test_partial_kmap(folio)) {
 		offset = offset_in_page(offset);
 		len = min_t(size_t, len, PAGE_SIZE - offset);
 	} else
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -573,6 +573,13 @@ FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
 PAGEFLAG_FALSE(HighMem, highmem)
 #endif
 
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f)	true
+#else
+#define folio_test_partial_kmap(f)	folio_test_highmem(f)
+#endif
+
 #ifdef CONFIG_SWAP
 static __always_inline bool folio_test_swapcache(const struct folio *folio)
 {