mm: fix accounting of memmap pages
For !CONFIG_SPARSEMEM_VMEMMAP, memmap page accounting is currently done
upfront in sparse_buffer_init(). However, sparse_buffer_alloc() may
return NULL on failure, in which case pages have been accounted that
were never actually handed out.
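In other words, the counter is bumped for the whole buffer before any
per-section allocation is known to succeed. Condensed from the pre-fix
sparse_buffer_init() (the first mm/sparse.c hunk below removes exactly
these lines; surrounding code elided):

	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
	/* accounted up front; a later sparse_buffer_alloc() may still
	 * return NULL, leaving pages counted that were never handed out */
	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
#endif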
Also, memmap pages may be allocated either from the memblock allocator
during early boot or from the buddy allocator. When memmap pages are
removed via arch_remove_memory(), the accounting must reflect the
original allocation source.
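Concretely: while system_state == SYSTEM_BOOTING, memmap pages come from
memblock and are tracked via memmap_boot_pages_add(); afterwards they
come from the buddy allocator and are tracked via memmap_pages_add().
A removal path must decrement whichever counter matched the original
allocation, roughly (a condensed sketch of the section_deactivate()
hunk below; the real code also checks the memmap pointer):

	if (!section_is_early)
		/* hot-added section: memmap came from the buddy allocator */
		memmap_pages_add(-1L * DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
	else
		/* early section: memmap came from memblock during boot */
		memmap_boot_pages_add(-1L * DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));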
To ensure correctness:
* Account memmap pages after successful allocation in sparse_init_nid()
  and section_activate() (see the sketch after this list).
* Account memmap pages in section_deactivate() based on allocation
source.
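On the activation side this means: allocate first, account only on
success. A condensed sketch of the section_activate() hunk below (call
site abridged from mm/sparse.c):

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}
	/* the allocation is known to have succeeded; account it now */
	memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));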
Link: https://lkml.kernel.org/r/20250807183545.1424509-1-sumanthk@linux.ibm.com
Fixes: 15995a3524 ("mm: report per-page metadata information")
Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit c3576889d8 (parent 9f68eabab9)
mm/sparse-vmemmap.c: 5 deletions

@@ -578,11 +578,6 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
 	if (r < 0)
 		return NULL;
 
-	if (system_state == SYSTEM_BOOTING)
-		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
-	else
-		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
-
 	return pfn_to_page(pfn);
 }
 
mm/sparse.c: 15 changed lines
@@ -454,9 +454,6 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 	 */
 	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
 	sparsemap_buf_end = sparsemap_buf + size;
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
-#endif
 }
 
 static void __init sparse_buffer_fini(void)
@@ -567,6 +564,8 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
 			sparse_buffer_fini();
 			goto failed;
 		}
+		memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
+						   PAGE_SIZE));
 		sparse_init_early_section(nid, map, pnum, 0);
 	}
 }
@@ -680,7 +679,6 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 	unsigned long start = (unsigned long) pfn_to_page(pfn);
 	unsigned long end = start + nr_pages * sizeof(struct page);
 
-	memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
 	vmemmap_free(start, end, altmap);
 }
 
 static void free_map_bootmem(struct page *memmap)
@@ -856,10 +854,14 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	 * The memmap of early sections is always fully populated. See
 	 * section_activate() and pfn_valid() .
 	 */
-	if (!section_is_early)
+	if (!section_is_early) {
+		memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
 		depopulate_section_memmap(pfn, nr_pages, altmap);
-	else if (memmap)
+	} else if (memmap) {
+		memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
+							  PAGE_SIZE)));
 		free_map_bootmem(memmap);
+	}
 
 	if (empty)
 		ms->section_mem_map = (unsigned long)NULL;
@@ -904,6 +906,7 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		section_deactivate(pfn, nr_pages, altmap);
 		return ERR_PTR(-ENOMEM);
 	}
+	memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
 
 	return memmap;
 }