mmu_gather: move tlb flush for VM_PFNMAP/VM_MIXEDMAP vmas into free_pgtables()
Commit b67fbebd4c ("mmu_gather: Force tlb-flush VM_PFNMAP vmas") added a
forced TLB flush to tlb_end_vma(), which is required to avoid a race
between munmap() and unmap_mapping_range(). However, it also added
overhead to other paths where tlb_end_vma() is used but vmas are not
removed, e.g. madvise(MADV_DONTNEED).
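To make that path concrete, here is a minimal userspace sketch (not part of
this patch): madvise(MADV_DONTNEED) zaps PTEs through the mmu_gather
machinery, so tlb_start_vma()/tlb_end_vma() run, but the VMA itself stays in
place and free_pgtables() is never involved. The case the patch actually
optimizes is the same call against a VM_PFNMAP/VM_MIXEDMAP mapping (e.g. a
device mmap), which is not portable to demonstrate here, so plain anonymous
memory stands in.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0xaa, len);			/* fault in the pages */

	/* Zap the range; the VMA is kept, only its PTEs go away. */
	if (madvise(p, len, MADV_DONTNEED)) {
		perror("madvise");
		return 1;
	}

	/* Anonymous private memory reads back as zero after the zap. */
	printf("p[0] after MADV_DONTNEED: %d\n", p[0]);

	munmap(p, len);
	return 0;
}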
Fix this by moving the TLB flush out of tlb_end_vma() into the new
tlb_free_vmas(), called from free_pgtables(), somewhat similar to the
stable version of the original commit: commit 895428ee124a ("mm: Force TLB
flush for PFNMAP mappings before unlink_file_vma()").
Note that if tlb->fullmm is set, no flush is required, as the whole mm is
about to be destroyed.
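As a cross-check of that logic, the following is a small standalone model
(plain userspace C, not kernel code; the struct and helper names are invented
for illustration) of the decision tlb_free_vmas() now makes: do nothing for a
full-mm teardown, and flush only when a VM_PFNMAP/VM_MIXEDMAP vma was seen in
the gathered range.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the two mmu_gather fields the decision uses. */
struct gather_model {
	bool fullmm;	/* the whole address space is being torn down */
	bool vma_pfn;	/* a VM_PFNMAP/VM_MIXEDMAP vma was in the range */
};

/* Mirrors the check made before the VMAs are unlinked. */
static bool flush_before_unlink(const struct gather_model *tlb)
{
	if (tlb->fullmm)
		return false;	/* whole mm is about to be destroyed anyway */

	return tlb->vma_pfn;	/* flush so unmap_mapping_range() cannot race */
}

int main(void)
{
	const struct gather_model cases[] = {
		{ .fullmm = true,  .vma_pfn = true  },	/* exit_mmap(): skip */
		{ .fullmm = false, .vma_pfn = true  },	/* munmap() of a pfnmap: flush */
		{ .fullmm = false, .vma_pfn = false },	/* ordinary munmap(): no forced flush */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: flush=%d\n", i, flush_before_unlink(&cases[i]));

	return 0;
}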
Link: https://lkml.kernel.org/r/20250522012838.163876-1-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Nick Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit bfe125f1b1
parent 28615e6eed
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -58,6 +58,11 @@
  * Defaults to flushing at tlb_end_vma() to reset the range; helps when
  * there's large holes between the VMAs.
  *
+ * - tlb_free_vmas()
+ *
+ *   tlb_free_vmas() marks the start of unlinking of one or more vmas
+ *   and freeing page-tables.
+ *
  * - tlb_remove_table()
  *
  *   tlb_remove_table() is the basic primitive to free page-table directories
@@ -464,7 +469,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	 */
 	tlb->vma_huge = is_vm_hugetlb_page(vma);
 	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
-	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+
+	/*
+	 * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
+	 * in the tracked range, see tlb_free_vmas().
+	 */
+	tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -547,23 +557,39 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
 }
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
+	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+		return;
+
+	/*
+	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+	 * the ranges growing with the unused space between consecutive VMAs,
+	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+	 * this.
+	 */
+	tlb_flush_mmu_tlbonly(tlb);
+}
+
+static inline void tlb_free_vmas(struct mmu_gather *tlb)
+{
 	if (tlb->fullmm)
 		return;
 
 	/*
 	 * VM_PFNMAP is more fragile because the core mm will not track the
-	 * page mapcount -- there might not be page-frames for these PFNs after
-	 * all. Force flush TLBs for such ranges to avoid munmap() vs
-	 * unmap_mapping_range() races.
+	 * page mapcount -- there might not be page-frames for these PFNs
+	 * after all.
+	 *
+	 * Specifically() there is a race between munmap() and
+	 * unmap_mapping_range(), where munmap() will unlink the VMA, such
+	 * that unmap_mapping_range() will no longer observe the VMA and
+	 * no-op, without observing the TLBI, returning prematurely.
+	 *
+	 * So if we're about to unlink such a VMA, and we have pending
+	 * TLBI for such a vma, flush things now.
 	 */
-	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
-		/*
-		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-		 * the ranges growing with the unused space between consecutive VMAs.
-		 */
+	if (tlb->vma_pfn)
 		tlb_flush_mmu_tlbonly(tlb);
-	}
 }
 
 /*
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -358,6 +358,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 {
 	struct unlink_vma_file_batch vb;
 
+	tlb_free_vmas(tlb);
+
 	do {
 		unsigned long addr = vma->vm_start;
 		struct vm_area_struct *next;
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -424,6 +424,7 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
 #endif
+	tlb->vma_pfn = 0;
 
 	__tlb_reset_range(tlb);
 	inc_tlb_flush_pending(tlb->mm);