mm: remove redundant pXd_devmap calls
DAX was the only thing that created pmd_devmap and pud_devmap entries; it
no longer does, as DAX pages are now refcounted normally and
pXd_trans_huge() returns true for them.  Checking both pXd_devmap() and
pXd_trans_huge() is therefore redundant, and the former can be removed
without changing behaviour since it will always be false.

Link: https://lkml.kernel.org/r/d58f089dc16b7feb7c6728164f37dea65d64a0d3.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 8a6a984c2e
parent 7b2ae3c47f
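Every hunk below applies the same mechanical rewrite: a huge entry that used
to be detected as "pXd_trans_huge() || pXd_devmap()" (or rejected as
"!pXd_trans_huge() && !pXd_devmap()") now tests only the trans_huge half.
The following standalone C sketch models the invariant the commit message
relies on; struct pmd_model, is_huge_old() and is_huge_new() are
hypothetical stand-ins for illustration, not kernel code:

#include <assert.h>
#include <stdbool.h>

struct pmd_model {
	bool trans_huge;	/* what pmd_trans_huge() would report */
	bool devmap;		/* what pmd_devmap() would report */
};

/* Pre-patch style check: an entry is huge if either predicate fires. */
static bool is_huge_old(struct pmd_model p)
{
	return p.trans_huge || p.devmap;
}

/* Post-patch check: the devmap test is dropped. */
static bool is_huge_new(struct pmd_model p)
{
	return p.trans_huge;
}

int main(void)
{
	/* Invariant from the commit message: devmap now implies trans_huge,
	 * so a devmap-only entry (trans_huge=false, devmap=true) can no
	 * longer be created. */
	struct pmd_model entries[] = {
		{ .trans_huge = true,  .devmap = true  },	/* DAX  */
		{ .trans_huge = true,  .devmap = false },	/* THP  */
		{ .trans_huge = false, .devmap = false },	/* none */
	};

	for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		assert(is_huge_old(entries[i]) == is_huge_new(entries[i]));
	return 0;
}

Because devmap-only entries are unreachable, the old and new predicates
agree on every entry that can actually occur, which is why the patch can
claim no change in behaviour.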
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1937,7 +1937,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * the PTE we need to set up. If so just return and the fault will be
 	 * retried.
 	 */
-	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+	if (pmd_trans_huge(*vmf->pmd)) {
 		ret = VM_FAULT_NOPAGE;
 		goto unlock_entry;
 	}
@@ -2060,8 +2060,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * the PMD we need to set up. If so just return and the fault will be
 	 * retried.
 	 */
-	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
-			!pmd_devmap(*vmf->pmd)) {
+	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
 		ret = 0;
 		goto unlock_entry;
 	}
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -400,8 +400,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
-		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
-					|| pmd_devmap(*____pmd))	\
+		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd))	\
 			__split_huge_pmd(__vma, __pmd, __address,	\
 						false);			\
 	} while (0)
@@ -426,8 +425,7 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #define split_huge_pud(__vma, __pud, __address)				\
 	do {								\
 		pud_t *____pud = (__pud);				\
-		if (pud_trans_huge(*____pud)				\
-					|| pud_devmap(*____pud))	\
+		if (pud_trans_huge(*____pud))				\
 			__split_huge_pud(__vma, __pud, __address);	\
 	} while (0)
 
@@ -450,7 +448,7 @@ static inline int is_swap_pmd(pmd_t pmd)
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 		struct vm_area_struct *vma)
 {
-	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
 		return NULL;
@@ -458,7 +456,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 		struct vm_area_struct *vma)
 {
-	if (pud_trans_huge(*pud) || pud_devmap(*pud))
+	if (pud_trans_huge(*pud))
 		return __pud_trans_huge_lock(pud, vma);
 	else
 		return NULL;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1672,7 +1672,7 @@ static inline int pud_trans_unstable(pud_t *pud)
 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 	pud_t pudval = READ_ONCE(*pud);
 
-	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+	if (pud_none(pudval) || pud_trans_huge(pudval))
 		return 1;
 	if (unlikely(pud_bad(pudval))) {
 		pud_clear_bad(pud);
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -360,7 +360,7 @@ again:
 		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 	}
 
-	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
+	if (pmd_trans_huge(pmd)) {
 		/*
 		 * No need to take pmd_lock here, even if some other thread
 		 * is splitting the huge pmd we will get that event through
@@ -371,7 +371,7 @@ again:
 		 * values.
 		 */
 		pmd = pmdp_get_lockless(pmdp);
-		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
+		if (!pmd_trans_huge(pmd))
 			goto again;
 
 		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1459,8 +1459,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 	 * but we need to be consistent with PTEs and architectures that
 	 * can't support a 'special' bit.
 	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
 						(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1596,8 +1595,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 	 * but we need to be consistent with PTEs and architectures that
 	 * can't support a 'special' bit.
 	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
 						(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1815,7 +1813,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	ret = -EAGAIN;
 	pud = *src_pud;
-	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+	if (unlikely(!pud_trans_huge(pud)))
 		goto out_unlock;
 
 	/*
@@ -2677,8 +2675,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
 	spinlock_t *ptl;
 	ptl = pmd_lock(vma->vm_mm, pmd);
-	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-			pmd_devmap(*pmd)))
+	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)))
 		return ptl;
 	spin_unlock(ptl);
 	return NULL;
@@ -2695,7 +2692,7 @@ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
 	spinlock_t *ptl;
 
 	ptl = pud_lock(vma->vm_mm, pud);
-	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+	if (likely(pud_trans_huge(*pud)))
 		return ptl;
 	spin_unlock(ptl);
 	return NULL;
@@ -2747,7 +2744,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
-	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
+	VM_BUG_ON(!pud_trans_huge(*pud));
 
 	count_vm_event(THP_SPLIT_PUD);
 
@@ -2780,7 +2777,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	ptl = pud_lock(vma->vm_mm, pud);
-	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
+	if (unlikely(!pud_trans_huge(*pud)))
 		goto out;
 	__split_huge_pud_locked(vma, pud, range.start);
 
@@ -2853,8 +2850,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
-				&& !pmd_devmap(*pmd));
+	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
 
 	count_vm_event(THP_SPLIT_PMD);
 
@@ -3062,8 +3058,7 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze)
 {
 	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
-	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-	    is_pmd_migration_entry(*pmd))
+	if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd))
 		__split_huge_pmd_locked(vma, pmd, address, freeze);
 }
 
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -129,7 +129,7 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pmd_t pmdval = pmdp_get_lockless(pmd);
 
 	/* Do not split a huge pmd, present or migrated */
-	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
+	if (pmd_trans_huge(pmdval)) {
 		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
 		walk->action = ACTION_CONTINUE;
 	}
@@ -152,7 +152,7 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
 	pud_t pudval = READ_ONCE(*pud);
 
 	/* Do not split a huge pud */
-	if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
+	if (pud_trans_huge(pudval)) {
 		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
 		walk->action = ACTION_CONTINUE;
 	}
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -675,8 +675,6 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
-	if (pmd_devmap(pmd))
-		return NULL;
 	if (is_huge_zero_pmd(pmd))
 		return NULL;
 	if (unlikely(pfn > highest_memmap_pfn))
@@ -1240,8 +1238,7 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	src_pmd = pmd_offset(src_pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
-			|| pmd_devmap(*src_pmd)) {
+		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
 			int err;
 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -1277,7 +1274,7 @@ copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	src_pud = pud_offset(src_p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+		if (pud_trans_huge(*src_pud)) {
 			int err;
 
 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
@@ -1791,7 +1788,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				__split_huge_pmd(vma, pmd, addr, false);
 			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
@@ -1833,7 +1830,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+		if (pud_trans_huge(*pud)) {
 			if (next - addr != HPAGE_PUD_SIZE) {
 				mmap_assert_locked(tlb->mm);
 				split_huge_pud(vma, pud, addr);
@@ -6136,7 +6133,7 @@ retry_pud:
 		pud_t orig_pud = *vmf.pud;
 
 		barrier();
-		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
+		if (pud_trans_huge(orig_pud)) {
 
 			/*
 			 * TODO once we support anonymous PUDs: NUMA case and
@@ -6177,7 +6174,7 @@ retry_pud:
 			pmd_migration_entry_wait(mm, vmf.pmd);
 			return 0;
 		}
-		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
+		if (pmd_trans_huge(vmf.orig_pmd)) {
 			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
 				return do_huge_pmd_numa_page(&vmf);
 
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -615,7 +615,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	pmdp = pmd_alloc(mm, pudp, addr);
 	if (!pmdp)
 		goto abort;
-	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
+	if (pmd_trans_huge(*pmdp))
 		goto abort;
 	if (pte_alloc(mm, pmdp))
 		goto abort;
diff --git a/mm/mprotect.c b/mm/mprotect.c
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -376,7 +376,7 @@ again:
 		goto next;
 
 	_pmd = pmdp_get_lockless(pmd);
-	if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
+	if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd)) {
 		if ((next - addr != HPAGE_PMD_SIZE) ||
 		    pgtable_split_needed(vma, cp_flags)) {
 			__split_huge_pmd(vma, pmd, addr, false);
diff --git a/mm/mremap.c b/mm/mremap.c
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -820,7 +820,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc)
 		new_pud = alloc_new_pud(mm, pmc->new_addr);
 		if (!new_pud)
 			break;
-		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+		if (pud_trans_huge(*old_pud)) {
 			if (extent == HPAGE_PUD_SIZE) {
 				move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
 				/* We ignore and continue on error? */
@@ -839,8 +839,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc)
 		if (!new_pmd)
 			break;
 again:
-		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
-		    pmd_devmap(*old_pmd)) {
+		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE &&
 			    move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
 				continue;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -246,8 +246,7 @@ restart:
 		 */
 		pmde = pmdp_get_lockless(pvmw->pmd);
 
-		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
-		    (pmd_present(pmde) && pmd_devmap(pmde))) {
+		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
 			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
 			pmde = *pvmw->pmd;
 			if (!pmd_present(pmde)) {
@@ -262,7 +261,7 @@ restart:
 				return not_found(pvmw);
 			return true;
 		}
-		if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
+		if (likely(pmd_trans_huge(pmde))) {
 			if (pvmw->flags & PVMW_MIGRATION)
 				return not_found(pvmw);
 			if (!check_pmd(pmd_pfn(pmde), pvmw))
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -143,8 +143,7 @@ again:
 			 * We are ONLY installing, so avoid unnecessarily
 			 * splitting a present huge page.
 			 */
-			if (pmd_present(*pmd) &&
-			    (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+			if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
 				continue;
 		}
 
@@ -210,8 +209,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			 * We are ONLY installing, so avoid unnecessarily
 			 * splitting a present huge page.
 			 */
-			if (pud_present(*pud) &&
-			    (pud_trans_huge(*pud) || pud_devmap(*pud)))
+			if (pud_present(*pud) && pud_trans_huge(*pud))
 				continue;
 		}
 
@@ -908,7 +906,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		 * TODO: FW_MIGRATION support for PUD migration entries
 		 * once there are relevant users.
 		 */
-		if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
+		if (!pud_present(pud) || pud_special(pud)) {
 			spin_unlock(ptl);
 			goto not_found;
 		} else if (!pud_leaf(pud)) {
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -139,8 +139,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
 	pmd_t pmd;
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
-		  !pmd_devmap(*pmdp));
+	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
@@ -153,7 +152,7 @@ pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	pud_t pud;
 
 	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
-	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
+	VM_BUG_ON(!pud_trans_huge(*pudp));
 	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
 	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
 	return pud;
@@ -293,7 +292,7 @@ pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
 		*pmdvalp = pmdval;
 	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
 		goto nomap;
-	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
+	if (unlikely(pmd_trans_huge(pmdval)))
 		goto nomap;
 	if (unlikely(pmd_bad(pmdval))) {
 		pmd_clear_bad(pmd);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -795,8 +795,8 @@ retry:
 		 * (This includes the case where the PMD used to be THP and
 		 * changed back to none after __pte_alloc().)
 		 */
-		if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
-			     pmd_devmap(dst_pmdval))) {
+		if (unlikely(!pmd_present(dst_pmdval) ||
+			     pmd_trans_huge(dst_pmdval))) {
 			err = -EEXIST;
 			break;
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3450,9 +3450,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
 	if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
 		return -1;
 
-	if (WARN_ON_ONCE(pmd_devmap(pmd)))
-		return -1;
-
 	if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
 		return -1;
 