mirror of
https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
synced 2025-09-28 19:57:53 +10:00
Merge tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "19 hotfixes. A whopping 16 are cc:stable and the remainder address
  post-6.15 issues or aren't considered necessary for -stable kernels.

  14 are for MM. Three gdb-script fixes and a kallsyms build fix."

* tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  Revert "sched/numa: add statistics of numa balance task"
  mm: fix the inaccurate memory statistics issue for users
  mm/damon: fix divide by zero in damon_get_intervals_score()
  samples/damon: fix damon sample mtier for start failure
  samples/damon: fix damon sample wsse for start failure
  samples/damon: fix damon sample prcl for start failure
  kasan: remove kasan_find_vm_area() to prevent possible deadlock
  scripts: gdb: vfs: support external dentry names
  mm/migrate: fix do_pages_stat in compat mode
  mm/damon/core: handle damon_call_control as normal under kdmond deactivation
  mm/rmap: fix potential out-of-bounds page table access during batched unmap
  mm/hugetlb: don't crash when allocating a folio if there are no resv
  scripts/gdb: de-reference per-CPU MCE interrupts
  scripts/gdb: fix interrupts.py after maple tree conversion
  maple_tree: fix mt_destroy_walk() on root leaf node
  mm/vmalloc: leave lazy MMU mode on PTE mapping error
  scripts/gdb: fix interrupts display after MCP on x86
  lib/alloc_tag: do not acquire non-existent lock in alloc_tag_top_users()
  kallsyms: fix build without execinfo
commit 3f31a806a6
@@ -1732,12 +1732,6 @@ The following nested keys are defined.
       numa_hint_faults (npn)
         Number of NUMA hinting faults.
 
-      numa_task_migrated (npn)
-        Number of task migration by NUMA balancing.
-
-      numa_task_swapped (npn)
-        Number of task swap by NUMA balancing.
-
       pgdemote_kswapd
         Number of pages demoted by kswapd.
 
@@ -36,9 +36,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
     unsigned long text, lib, swap, anon, file, shmem;
     unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
 
-    anon = get_mm_counter(mm, MM_ANONPAGES);
-    file = get_mm_counter(mm, MM_FILEPAGES);
-    shmem = get_mm_counter(mm, MM_SHMEMPAGES);
+    anon = get_mm_counter_sum(mm, MM_ANONPAGES);
+    file = get_mm_counter_sum(mm, MM_FILEPAGES);
+    shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
 
     /*
      * Note: to minimize their overhead, mm maintains hiwater_vm and
@@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
     text = min(text, mm->exec_vm << PAGE_SHIFT);
     lib = (mm->exec_vm << PAGE_SHIFT) - text;
 
-    swap = get_mm_counter(mm, MM_SWAPENTS);
+    swap = get_mm_counter_sum(mm, MM_SWAPENTS);
     SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
     SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
     SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
@@ -92,12 +92,12 @@ unsigned long task_statm(struct mm_struct *mm,
             unsigned long *shared, unsigned long *text,
             unsigned long *data, unsigned long *resident)
 {
-    *shared = get_mm_counter(mm, MM_FILEPAGES) +
-            get_mm_counter(mm, MM_SHMEMPAGES);
+    *shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
+            get_mm_counter_sum(mm, MM_SHMEMPAGES);
     *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                 >> PAGE_SHIFT;
     *data = mm->data_vm + mm->stack_vm;
-    *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
+    *resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
     return mm->total_vm;
 }
 
@@ -2568,6 +2568,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
     return percpu_counter_read_positive(&mm->rss_stat[member]);
 }
 
+static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
+{
+    return percpu_counter_sum_positive(&mm->rss_stat[member]);
+}
+
 void mm_trace_rss_stat(struct mm_struct *mm, int member);
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
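For context on the hunk above: get_mm_counter() uses the batched per-CPU approximation (it can lag behind the true value by roughly the percpu batch size times the number of CPUs), while the new get_mm_counter_sum() folds in every CPU's pending delta. A minimal, illustrative sketch of that difference using the generic percpu_counter API; the helper name is made up and this is not part of the patch:

/* Illustrative sketch only -- not from this patch. */
#include <linux/gfp.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

static void rss_read_vs_sum_demo(void)
{
    struct percpu_counter c;

    if (percpu_counter_init(&c, 0, GFP_KERNEL))
        return;

    percpu_counter_add(&c, 1);    /* may sit in this CPU's local batch */

    /* The fast read may still report 0; the sum walks every CPU. */
    pr_info("approx=%lld exact=%lld\n",
        (long long)percpu_counter_read(&c),
        (long long)percpu_counter_sum(&c));

    percpu_counter_destroy(&c);
}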
@@ -548,10 +548,6 @@ struct sched_statistics {
     u64 nr_failed_migrations_running;
     u64 nr_failed_migrations_hot;
     u64 nr_forced_migrations;
-#ifdef CONFIG_NUMA_BALANCING
-    u64 numa_task_migrated;
-    u64 numa_task_swapped;
-#endif
 
     u64 nr_wakeups;
     u64 nr_wakeups_sync;
@@ -66,8 +66,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
     NUMA_HINT_FAULTS,
     NUMA_HINT_FAULTS_LOCAL,
     NUMA_PAGE_MIGRATE,
-    NUMA_TASK_MIGRATE,
-    NUMA_TASK_SWAP,
 #endif
 #ifdef CONFIG_MIGRATION
     PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
@@ -3362,10 +3362,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #ifdef CONFIG_NUMA_BALANCING
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-    __schedstat_inc(p->stats.numa_task_swapped);
-    count_vm_numa_event(NUMA_TASK_SWAP);
-    count_memcg_event_mm(p->mm, NUMA_TASK_SWAP);
-
     if (task_on_rq_queued(p)) {
         struct rq *src_rq, *dst_rq;
         struct rq_flags srf, drf;
@@ -7939,9 +7935,8 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
     if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
         return -EINVAL;
 
-    __schedstat_inc(p->stats.numa_task_migrated);
-    count_vm_numa_event(NUMA_TASK_MIGRATE);
-    count_memcg_event_mm(p->mm, NUMA_TASK_MIGRATE);
+    /* TODO: This is not properly updating schedstats */
+
     trace_sched_move_numa(p, curr_cpu, target_cpu);
     return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
 }
@@ -1210,10 +1210,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
     P_SCHEDSTAT(nr_failed_migrations_running);
     P_SCHEDSTAT(nr_failed_migrations_hot);
     P_SCHEDSTAT(nr_forced_migrations);
-#ifdef CONFIG_NUMA_BALANCING
-    P_SCHEDSTAT(numa_task_migrated);
-    P_SCHEDSTAT(numa_task_swapped);
-#endif
     P_SCHEDSTAT(nr_wakeups);
     P_SCHEDSTAT(nr_wakeups_sync);
     P_SCHEDSTAT(nr_wakeups_migrate);
@@ -135,6 +135,9 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
     struct codetag_bytes n;
     unsigned int i, nr = 0;
 
+    if (IS_ERR_OR_NULL(alloc_tag_cttype))
+        return 0;
+
     if (can_sleep)
         codetag_lock_module_list(alloc_tag_cttype, true);
     else if (!codetag_trylock_module_list(alloc_tag_cttype))
@@ -5319,6 +5319,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
     struct maple_enode *start;
 
     if (mte_is_leaf(enode)) {
         mte_set_node_dead(enode);
+        node->type = mte_node_type(enode);
         goto free_leaf;
     }
@@ -1449,6 +1449,7 @@ static unsigned long damon_get_intervals_score(struct damon_ctx *c)
         }
     }
     target_access_events = max_access_events * goal_bp / 10000;
+    target_access_events = target_access_events ? : 1;
     return access_events * 10000 / target_access_events;
 }
 
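A note on the one-line fix above (illustrative, not from the patch): integer division can round target_access_events down to zero, and the GNU "?:" operator then clamps it to 1 so the following division cannot fault. A standalone sketch with made-up values:

/* Illustrative only: made-up numbers showing how the clamp avoids /0. */
#include <stdio.h>

int main(void)
{
    unsigned long max_access_events = 100, goal_bp = 50, access_events = 42;
    unsigned long target = max_access_events * goal_bp / 10000;    /* == 0 */

    target = target ? : 1;    /* GNU extension: a ?: b  ==  a ? a : b */
    printf("score = %lu\n", access_events * 10000 / target);
    return 0;
}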
@@ -2355,9 +2356,8 @@ static void kdamond_usleep(unsigned long usecs)
  *
  * If there is a &struct damon_call_control request that registered via
  * &damon_call() on @ctx, do or cancel the invocation of the function depending
- * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS
- * watermarks, or the kdamond is already out of the main loop and therefore
- * will be terminated.
+ * on @cancel. @cancel is set when the kdamond is already out of the main loop
+ * and therefore will be terminated.
  */
 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
 {
@@ -2405,7 +2405,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
         if (ctx->callback.after_wmarks_check &&
                 ctx->callback.after_wmarks_check(ctx))
             break;
-        kdamond_call(ctx, true);
+        kdamond_call(ctx, false);
         damos_walk_cancel(ctx);
     }
     return -EBUSY;
@@ -2340,12 +2340,15 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
     struct folio *folio;
 
     spin_lock_irq(&hugetlb_lock);
+    if (!h->resv_huge_pages) {
+        spin_unlock_irq(&hugetlb_lock);
+        return NULL;
+    }
+
     folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
                            nmask);
-    if (folio) {
-        VM_BUG_ON(!h->resv_huge_pages);
+    if (folio)
         h->resv_huge_pages--;
-    }
 
     spin_unlock_irq(&hugetlb_lock);
     return folio;
@@ -370,36 +370,6 @@ static inline bool init_task_stack_addr(const void *addr)
             sizeof(init_thread_union.stack));
 }
 
-/*
- * This function is invoked with report_lock (a raw_spinlock) held. A
- * PREEMPT_RT kernel cannot call find_vm_area() as it will acquire a sleeping
- * rt_spinlock.
- *
- * For !RT kernel, the PROVE_RAW_LOCK_NESTING config option will print a
- * lockdep warning for this raw_spinlock -> spinlock dependency. This config
- * option is enabled by default to ensure better test coverage to expose this
- * kind of RT kernel problem. This lockdep splat, however, can be suppressed
- * by using DEFINE_WAIT_OVERRIDE_MAP() if it serves a useful purpose and the
- * invalid PREEMPT_RT case has been taken care of.
- */
-static inline struct vm_struct *kasan_find_vm_area(void *addr)
-{
-    static DEFINE_WAIT_OVERRIDE_MAP(vmalloc_map, LD_WAIT_SLEEP);
-    struct vm_struct *va;
-
-    if (IS_ENABLED(CONFIG_PREEMPT_RT))
-        return NULL;
-
-    /*
-     * Suppress lockdep warning and fetch vmalloc area of the
-     * offending address.
-     */
-    lock_map_acquire_try(&vmalloc_map);
-    va = find_vm_area(addr);
-    lock_map_release(&vmalloc_map);
-    return va;
-}
-
 static void print_address_description(void *addr, u8 tag,
                       struct kasan_report_info *info)
 {
@@ -429,19 +399,8 @@ static void print_address_description(void *addr, u8 tag,
     }
 
     if (is_vmalloc_addr(addr)) {
-        struct vm_struct *va = kasan_find_vm_area(addr);
-
-        if (va) {
-            pr_err("The buggy address belongs to the virtual mapping at\n"
-                   " [%px, %px) created by:\n"
-                   " %pS\n",
-                   va->addr, va->addr + va->size, va->caller);
-            pr_err("\n");
-
-            page = vmalloc_to_page(addr);
-        } else {
-            pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
-        }
+        pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
+        page = vmalloc_to_page(addr);
     }
 
     if (page) {
@@ -474,8 +474,6 @@ static const unsigned int memcg_vm_event_stat[] = {
     NUMA_PAGE_MIGRATE,
     NUMA_PTE_UPDATES,
     NUMA_HINT_FAULTS,
-    NUMA_TASK_MIGRATE,
-    NUMA_TASK_SWAP,
 #endif
 };
 
mm/migrate.c
@@ -2399,6 +2399,7 @@ set_status:
 
 static int get_compat_pages_array(const void __user *chunk_pages[],
                   const void __user * __user *pages,
+                  unsigned long chunk_offset,
                   unsigned long chunk_nr)
 {
     compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
@@ -2406,7 +2407,7 @@ static int get_compat_pages_array(const void __user *chunk_pages[],
     int i;
 
     for (i = 0; i < chunk_nr; i++) {
-        if (get_user(p, pages32 + i))
+        if (get_user(p, pages32 + chunk_offset + i))
             return -EFAULT;
         chunk_pages[i] = compat_ptr(p);
     }
@@ -2425,27 +2426,28 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 #define DO_PAGES_STAT_CHUNK_NR 16UL
     const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
     int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+    unsigned long chunk_offset = 0;
 
     while (nr_pages) {
         unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
 
         if (in_compat_syscall()) {
             if (get_compat_pages_array(chunk_pages, pages,
-                           chunk_nr))
+                           chunk_offset, chunk_nr))
                 break;
         } else {
-            if (copy_from_user(chunk_pages, pages,
+            if (copy_from_user(chunk_pages, pages + chunk_offset,
                        chunk_nr * sizeof(*chunk_pages)))
                 break;
         }
 
         do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
 
-        if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+        if (copy_to_user(status + chunk_offset, chunk_status,
+                 chunk_nr * sizeof(*status)))
             break;
 
-        pages += chunk_nr;
-        status += chunk_nr;
+        chunk_offset += chunk_nr;
         nr_pages -= chunk_nr;
     }
     return nr_pages ? -EFAULT : 0;
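Why the element offset matters here (illustrative, not from the patch): advancing the native pages cursor assumes 8-byte array entries, but a compat caller's array holds 4-byte compat pointers, so every chunk after the first would be read from the wrong user address; keeping the base pointer fixed and carrying an element offset works for both layouts. A tiny standalone sketch of the stride mismatch:

/* Illustrative only: bytes covered by "pages += chunk_nr" per ABI. */
#include <stdio.h>

int main(void)
{
    size_t chunk_nr = 16;

    printf("native stride: %zu bytes\n", chunk_nr * sizeof(void *));        /* 128 on 64-bit */
    printf("compat stride: %zu bytes\n", chunk_nr * sizeof(unsigned int));  /* 64 */
    return 0;
}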
mm/rmap.c
@@ -1845,23 +1845,32 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
 #endif
 }
 
-/* We support batch unmapping of PTEs for lazyfree large folios */
-static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
-            struct folio *folio, pte_t *ptep)
+static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
+            struct page_vma_mapped_walk *pvmw,
+            enum ttu_flags flags, pte_t pte)
 {
     const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
-    int max_nr = folio_nr_pages(folio);
-    pte_t pte = ptep_get(ptep);
+    unsigned long end_addr, addr = pvmw->address;
+    struct vm_area_struct *vma = pvmw->vma;
+    unsigned int max_nr;
 
+    if (flags & TTU_HWPOISON)
+        return 1;
+    if (!folio_test_large(folio))
+        return 1;
+
+    /* We may only batch within a single VMA and a single page table. */
+    end_addr = pmd_addr_end(addr, vma->vm_end);
+    max_nr = (end_addr - addr) >> PAGE_SHIFT;
+
+    /* We only support lazyfree batching for now ... */
     if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
-        return false;
+        return 1;
     if (pte_unused(pte))
-        return false;
-    if (pte_pfn(pte) != folio_pfn(folio))
-        return false;
+        return 1;
 
-    return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
-                   NULL, NULL) == max_nr;
+    return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags,
+                   NULL, NULL, NULL);
 }
 
 /*
@@ -2024,9 +2033,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
             if (pte_dirty(pteval))
                 folio_mark_dirty(folio);
         } else if (likely(pte_present(pteval))) {
-            if (folio_test_large(folio) && !(flags & TTU_HWPOISON) &&
-                can_batch_unmap_folio_ptes(address, folio, pvmw.pte))
-                nr_pages = folio_nr_pages(folio);
+            nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
             end_addr = address + nr_pages * PAGE_SIZE;
             flush_cache_range(vma, address, end_addr);
 
@@ -2206,13 +2213,16 @@ discard:
             hugetlb_remove_rmap(folio);
         } else {
             folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
-            folio_ref_sub(folio, nr_pages - 1);
         }
         if (vma->vm_flags & VM_LOCKED)
             mlock_drain_local();
-        folio_put(folio);
-        /* We have already batched the entire folio */
-        if (nr_pages > 1)
+        folio_put_refs(folio, nr_pages);
+
+        /*
+         * If we are sure that we batched the entire folio and cleared
+         * all PTEs, we can just optimize and stop right here.
+         */
+        if (nr_pages == folio_nr_pages(folio))
             goto walk_done;
         continue;
 walk_abort:
mm/vmalloc.c
@@ -514,6 +514,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
         unsigned long end, pgprot_t prot, struct page **pages, int *nr,
         pgtbl_mod_mask *mask)
 {
+    int err = 0;
     pte_t *pte;
 
     /*
@@ -530,12 +531,18 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
     do {
         struct page *page = pages[*nr];
 
-        if (WARN_ON(!pte_none(ptep_get(pte))))
-            return -EBUSY;
-        if (WARN_ON(!page))
-            return -ENOMEM;
-        if (WARN_ON(!pfn_valid(page_to_pfn(page))))
-            return -EINVAL;
+        if (WARN_ON(!pte_none(ptep_get(pte)))) {
+            err = -EBUSY;
+            break;
+        }
+        if (WARN_ON(!page)) {
+            err = -ENOMEM;
+            break;
+        }
+        if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
+            err = -EINVAL;
+            break;
+        }
 
         set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
         (*nr)++;
@@ -543,7 +550,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 
     arch_leave_lazy_mmu_mode();
     *mask |= PGTBL_PTE_MODIFIED;
-    return 0;
+
+    return err;
 }
 
 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
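The shape of this fix, in general terms: once arch_enter_lazy_mmu_mode() has been entered, every exit path must reach the matching arch_leave_lazy_mmu_mode(), so error paths break to a common exit instead of returning early. A standalone sketch of that pairing pattern with stand-in function names (not the kernel API):

#include <stdio.h>

/* Stand-ins for arch_enter/arch_leave_lazy_mmu_mode(); illustrative only. */
static void enter_batched_mode(void) { puts("enter batched mode"); }
static void leave_batched_mode(void) { puts("leave batched mode"); }

/* Pretend the fourth entry fails to map. */
static int map_one_entry(int i) { return i == 3 ? -1 : 0; }

static int map_range(int nents)
{
    int err = 0;

    enter_batched_mode();
    for (int i = 0; i < nents; i++) {
        if (map_one_entry(i)) {
            err = -1;
            break;    /* not "return": leave_batched_mode() must still run */
        }
    }
    leave_batched_mode();
    return err;
}

int main(void)
{
    return map_range(8) ? 1 : 0;
}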
@@ -1346,8 +1346,6 @@ const char * const vmstat_text[] = {
     "numa_hint_faults",
     "numa_hint_faults_local",
     "numa_pages_migrated",
-    "numa_task_migrated",
-    "numa_task_swapped",
 #endif
 #ifdef CONFIG_MIGRATION
     "pgmigrate_success",
@@ -164,8 +164,12 @@ static int damon_sample_mtier_enable_store(
     if (enable == enabled)
         return 0;
 
-    if (enable)
-        return damon_sample_mtier_start();
+    if (enable) {
+        err = damon_sample_mtier_start();
+        if (err)
+            enable = false;
+        return err;
+    }
     damon_sample_mtier_stop();
     return 0;
 }
@@ -122,8 +122,12 @@ static int damon_sample_prcl_enable_store(
     if (enable == enabled)
         return 0;
 
-    if (enable)
-        return damon_sample_prcl_start();
+    if (enable) {
+        err = damon_sample_prcl_start();
+        if (err)
+            enable = false;
+        return err;
+    }
     damon_sample_prcl_stop();
     return 0;
 }
@@ -102,8 +102,12 @@ static int damon_sample_wsse_enable_store(
     if (enable == enabled)
         return 0;
 
-    if (enable)
-        return damon_sample_wsse_start();
+    if (enable) {
+        err = damon_sample_wsse_start();
+        if (err)
+            enable = false;
+        return err;
+    }
     damon_sample_wsse_stop();
     return 0;
 }
@@ -20,6 +20,7 @@
 #include <linux/of_fdt.h>
 #include <linux/page_ext.h>
 #include <linux/radix-tree.h>
+#include <linux/maple_tree.h>
 #include <linux/slab.h>
 #include <linux/threads.h>
 #include <linux/vmalloc.h>
@@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE)
 LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
 LX_GDBPARSED(RADIX_TREE_MAP_MASK)
 
+/* linux/maple_tree.h */
+LX_VALUE(MAPLE_NODE_SLOTS)
+LX_VALUE(MAPLE_RANGE64_SLOTS)
+LX_VALUE(MAPLE_ARANGE64_SLOTS)
+LX_GDBPARSED(MAPLE_NODE_MASK)
+
 /* linux/vmalloc.h */
 LX_VALUE(VM_IOREMAP)
 LX_VALUE(VM_ALLOC)
@@ -7,7 +7,7 @@ import gdb
 from linux import constants
 from linux import cpus
 from linux import utils
-from linux import radixtree
+from linux import mapletree
 
 irq_desc_type = utils.CachedType("struct irq_desc")
 
@@ -23,12 +23,12 @@ def irqd_is_level(desc):
 def show_irq_desc(prec, irq):
     text = ""
 
-    desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq)
+    desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq)
     if desc is None:
         return text
 
-    desc = desc.cast(irq_desc_type.get_type())
-    if desc is None:
+    desc = desc.cast(irq_desc_type.get_type().pointer())
+    if desc == 0:
         return text
 
     if irq_settings_is_hidden(desc):
@@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc):
     pvar = gdb.parse_and_eval(var)
     text = "%*s: " % (prec, pfx)
     for cpu in cpus.each_online_cpu():
-        text += "%10u " % (cpus.per_cpu(pvar, cpu))
+        text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference())
     text += " %s\n" % (desc)
     return text
 
@@ -142,7 +142,7 @@ def x86_show_interupts(prec):
 
     if constants.LX_CONFIG_X86_MCE:
         text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions")
-        text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
+        text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
 
     text += show_irq_err_count(prec)
 
@@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command):
             gdb.write("CPU%-8d" % cpu)
         gdb.write("\n")
 
-        if utils.gdb_eval_or_none("&irq_desc_tree") is None:
-            return
+        if utils.gdb_eval_or_none("&sparse_irqs") is None:
+            raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?")
 
         for irq in range(nr_irqs):
             gdb.write(show_irq_desc(prec, irq))
scripts/gdb/linux/mapletree.py (new file, 252 lines)

@@ -0,0 +1,252 @@
# SPDX-License-Identifier: GPL-2.0
#
# Maple tree helpers
#
# Copyright (c) 2025 Broadcom
#
# Authors:
#  Florian Fainelli <florian.fainelli@broadcom.com>

import gdb

from linux import utils
from linux import constants
from linux import xarray

maple_tree_root_type = utils.CachedType("struct maple_tree")
maple_node_type = utils.CachedType("struct maple_node")
maple_enode_type = utils.CachedType("void")

maple_dense = 0
maple_leaf_64 = 1
maple_range_64 = 2
maple_arange_64 = 3

class Mas(object):
    ma_active = 0
    ma_start = 1
    ma_root = 2
    ma_none = 3
    ma_pause = 4
    ma_overflow = 5
    ma_underflow = 6
    ma_error = 7

    def __init__(self, mt, first, end):
        if mt.type == maple_tree_root_type.get_type().pointer():
            self.tree = mt.dereference()
        elif mt.type != maple_tree_root_type.get_type():
            raise gdb.GdbError("must be {} not {}"
                               .format(maple_tree_root_type.get_type().pointer(), mt.type))
        self.tree = mt
        self.index = first
        self.last = end
        self.node = None
        self.status = self.ma_start
        self.min = 0
        self.max = -1

    def is_start(self):
        # mas_is_start()
        return self.status == self.ma_start

    def is_ptr(self):
        # mas_is_ptr()
        return self.status == self.ma_root

    def is_none(self):
        # mas_is_none()
        return self.status == self.ma_none

    def root(self):
        # mas_root()
        return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer())

    def start(self):
        # mas_start()
        if self.is_start() is False:
            return None

        self.min = 0
        self.max = ~0

        while True:
            self.depth = 0
            root = self.root()
            if xarray.xa_is_node(root):
                self.depth = 0
                self.status = self.ma_active
                self.node = mte_safe_root(root)
                self.offset = 0
                if mte_dead_node(self.node) is True:
                    continue

                return None

            self.node = None
            # Empty tree
            if root is None:
                self.status = self.ma_none
                self.offset = constants.LX_MAPLE_NODE_SLOTS
                return None

            # Single entry tree
            self.status = self.ma_root
            self.offset = constants.LX_MAPLE_NODE_SLOTS

            if self.index != 0:
                return None

            return root

        return None

    def reset(self):
        # mas_reset()
        self.status = self.ma_start
        self.node = None

def mte_safe_root(node):
    if node.type != maple_enode_type.get_type().pointer():
        raise gdb.GdbError("{} must be {} not {}"
                           .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type))
    ulong_type = utils.get_ulong_type()
    indirect_ptr = node.cast(ulong_type) & ~0x2
    val = indirect_ptr.cast(maple_enode_type.get_type().pointer())
    return val

def mte_node_type(entry):
    ulong_type = utils.get_ulong_type()
    val = None
    if entry.type == maple_enode_type.get_type().pointer():
        val = entry.cast(ulong_type)
    elif entry.type == ulong_type:
        val = entry
    else:
        raise gdb.GdbError("{} must be {} not {}"
                           .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type))
    return (val >> 0x3) & 0xf

def ma_dead_node(node):
    if node.type != maple_node_type.get_type().pointer():
        raise gdb.GdbError("{} must be {} not {}"
                           .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type))
    ulong_type = utils.get_ulong_type()
    parent = node['parent']
    indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK
    return indirect_ptr == node

def mte_to_node(enode):
    ulong_type = utils.get_ulong_type()
    if enode.type == maple_enode_type.get_type().pointer():
        indirect_ptr = enode.cast(ulong_type)
    elif enode.type == ulong_type:
        indirect_ptr = enode
    else:
        raise gdb.GdbError("{} must be {} not {}"
                           .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
    indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK
    return indirect_ptr.cast(maple_node_type.get_type().pointer())

def mte_dead_node(enode):
    if enode.type != maple_enode_type.get_type().pointer():
        raise gdb.GdbError("{} must be {} not {}"
                           .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
    node = mte_to_node(enode)
    return ma_dead_node(node)

def ma_is_leaf(tp):
    result = tp < maple_range_64
    return tp < maple_range_64

def mt_pivots(t):
    if t == maple_dense:
        return 0
    elif t == maple_leaf_64 or t == maple_range_64:
        return constants.LX_MAPLE_RANGE64_SLOTS - 1
    elif t == maple_arange_64:
        return constants.LX_MAPLE_ARANGE64_SLOTS - 1

def ma_pivots(node, t):
    if node.type != maple_node_type.get_type().pointer():
        raise gdb.GdbError("{}: must be {} not {}"
                           .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type))
    if t == maple_arange_64:
        return node['ma64']['pivot']
    elif t == maple_leaf_64 or t == maple_range_64:
        return node['mr64']['pivot']
    else:
        return None

def ma_slots(node, tp):
    if node.type != maple_node_type.get_type().pointer():
        raise gdb.GdbError("{}: must be {} not {}"
                           .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type))
    if tp == maple_arange_64:
        return node['ma64']['slot']
    elif tp == maple_range_64 or tp == maple_leaf_64:
        return node['mr64']['slot']
    elif tp == maple_dense:
        return node['slot']
    else:
        return None

def mt_slot(mt, slots, offset):
    ulong_type = utils.get_ulong_type()
    return slots[offset].cast(ulong_type)

def mtree_lookup_walk(mas):
    ulong_type = utils.get_ulong_type()
    n = mas.node

    while True:
        node = mte_to_node(n)
        tp = mte_node_type(n)
        pivots = ma_pivots(node, tp)
        end = mt_pivots(tp)
        offset = 0
        while True:
            if pivots[offset] >= mas.index:
                break
            if offset >= end:
                break
            offset += 1

        slots = ma_slots(node, tp)
        n = mt_slot(mas.tree, slots, offset)
        if ma_dead_node(node) is True:
            mas.reset()
            return None
            break

        if ma_is_leaf(tp) is True:
            break

    return n

def mtree_load(mt, index):
    ulong_type = utils.get_ulong_type()
    # MT_STATE(...)
    mas = Mas(mt, index, index)
    entry = None

    while True:
        entry = mas.start()
        if mas.is_none():
            return None

        if mas.is_ptr():
            if index != 0:
                entry = None
            return entry

        entry = mtree_lookup_walk(mas)
        if entry is None and mas.is_start():
            continue
        else:
            break

    if xarray.xa_is_zero(entry):
        return None

    return entry
@@ -22,7 +22,7 @@ def dentry_name(d):
     if parent == d or parent == 0:
         return ""
     p = dentry_name(d['d_parent']) + "/"
-    return p + d['d_shortname']['string'].string()
+    return p + d['d_name']['name'].string()
 
 class DentryName(gdb.Function):
     """Return string of the full path of a dentry.
scripts/gdb/linux/xarray.py (new file, 28 lines)

@@ -0,0 +1,28 @@
# SPDX-License-Identifier: GPL-2.0
#
# Xarray helpers
#
# Copyright (c) 2025 Broadcom
#
# Authors:
#  Florian Fainelli <florian.fainelli@broadcom.com>

import gdb

from linux import utils
from linux import constants

def xa_is_internal(entry):
    ulong_type = utils.get_ulong_type()
    return ((entry.cast(ulong_type) & 3) == 2)

def xa_mk_internal(v):
    return ((v << 2) | 2)

def xa_is_zero(entry):
    ulong_type = utils.get_ulong_type()
    return entry.cast(ulong_type) == xa_mk_internal(257)

def xa_is_node(entry):
    ulong_type = utils.get_ulong_type()
    return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
     return NULL;
 }
 
+#ifdef HAVE_BACKTRACE_SUPPORT
 #include <execinfo.h>
 #include <stdlib.h>
 static inline void print_ip_sym(const char *loglvl, unsigned long ip)
@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
 
     free(name);
 }
+#else
+static inline void print_ip_sym(const char *loglvl, unsigned long ip) {}
+#endif
 
 #endif