mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
synced 2025-09-24 17:37:29 +10:00
workqueue: Merge branch 'for-6.5-fixes' into for-6.6
The unbound workqueue execution locality improvement patchset is about to be applied, which will cause merge conflicts with the changes in for-6.5-fixes. Let's avoid future merge conflicts by pulling in for-6.5-fixes now.

Signed-off-by: Tejun Heo <tj@kernel.org>
commit 87437656c2
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -52,6 +52,7 @@
 #include <linux/sched/debug.h>
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
+#include <linux/delay.h>
 
 #include "workqueue_internal.h"
 
@@ -338,8 +339,10 @@ static cpumask_var_t *wq_numa_possible_cpumask;
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
  * management to prevent them from noticeably delaying other per-cpu work items.
+ * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
+ * The actual value is initialized in wq_cpu_intensive_thresh_init().
  */
-static unsigned long wq_cpu_intensive_thresh_us = 10000;
+static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
 
 static bool wq_disable_numa;
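The ULONG_MAX default here acts as an "unset" sentinel: module_param_named() exposes the variable as workqueue.cpu_intensive_thresh_us (boot parameter, and with mode 0644 also writable at runtime under /sys/module/workqueue/parameters/), so a value still equal to ULONG_MAX at init time means the user never overrode it. A minimal standalone C sketch of that pattern, with the module-parameter plumbing simulated by a plain variable (not kernel code):

	#include <limits.h>
	#include <stdio.h>

	/* defaults to the "unset" sentinel, as the patch does with ULONG_MAX */
	static unsigned long thresh_us = ULONG_MAX;

	static void thresh_init(void)
	{
		/* any other value means the user set it; keep it */
		if (thresh_us != ULONG_MAX)
			return;
		thresh_us = 10000;	/* fall back to a computed default */
	}

	int main(void)
	{
		thresh_init();
		printf("thresh_us=%lu\n", thresh_us);
		return 0;
	}

The point of the sentinel is that a legitimate user override of 0 (or any other value) remains distinguishable from "never set", which a 0-initialized default could not express.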
@@ -6516,6 +6519,42 @@ void __init workqueue_init_early(void)
 	       !system_freezable_power_efficient_wq);
 }
 
+static void __init wq_cpu_intensive_thresh_init(void)
+{
+	unsigned long thresh;
+	unsigned long bogo;
+
+	/* if the user set it to a specific value, keep it */
+	if (wq_cpu_intensive_thresh_us != ULONG_MAX)
+		return;
+
+	/*
+	 * The default of 10ms is derived from the fact that most modern (as of
+	 * 2023) processors can do a lot in 10ms and that it's just below what
+	 * most consider human-perceivable. However, the kernel also runs on a
+	 * lot slower CPUs including microcontrollers where the threshold is way
+	 * too low.
+	 *
+	 * Let's scale up the threshold upto 1 second if BogoMips is below 4000.
+	 * This is by no means accurate but it doesn't have to be. The mechanism
+	 * is still useful even when the threshold is fully scaled up. Also, as
+	 * the reports would usually be applicable to everyone, some machines
+	 * operating on longer thresholds won't significantly diminish their
+	 * usefulness.
+	 */
+	thresh = 10 * USEC_PER_MSEC;
+
+	/* see init/calibrate.c for lpj -> BogoMIPS calculation */
+	bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
+	if (bogo < 4000)
+		thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
+
+	pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
+		 loops_per_jiffy, bogo, thresh);
+
+	wq_cpu_intensive_thresh_us = thresh;
+}
+
 /**
  * workqueue_init - bring workqueue subsystem fully online
  *
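To make the scaling concrete, here is a minimal userspace C sketch of the same arithmetic, with max_t()/min_t() replaced by plain comparisons. The sample loops_per_jiffy values and HZ=250 are illustrative assumptions, not values taken from the patch:

	#include <stdio.h>

	#define HZ            250UL	/* assumed tick rate for this demo */
	#define USEC_PER_MSEC 1000UL
	#define USEC_PER_SEC  1000000UL

	static unsigned long scale_thresh(unsigned long loops_per_jiffy)
	{
		unsigned long thresh = 10 * USEC_PER_MSEC;	/* 10ms default */
		unsigned long bogo = loops_per_jiffy / 500000 * HZ;

		if (bogo < 1)
			bogo = 1;	/* mirrors max_t(..., 1) */
		if (bogo < 4000) {
			thresh = thresh * 4000 / bogo;
			if (thresh > USEC_PER_SEC)
				thresh = USEC_PER_SEC;	/* mirrors min_t(..., USEC_PER_SEC) */
		}
		return thresh;
	}

	int main(void)
	{
		/* illustrative lpj samples: fast desktop, slow SoC, microcontroller */
		unsigned long samples[] = { 24000000, 2000000, 40000 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("lpj=%lu -> thresh_us=%lu\n",
			       samples[i], scale_thresh(samples[i]));
		return 0;
	}

Working through those samples: lpj=24000000 gives bogo=12000, above 4000, so the 10000us default stands; lpj=2000000 gives bogo=1000, so the threshold scales to 10000 * 4000 / 1000 = 40000us; lpj=40000 gives bogo clamped to 1, and the result hits the 1000000us (one second) cap.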
@@ -6531,6 +6570,8 @@ void __init workqueue_init(void)
 	struct worker_pool *pool;
 	int cpu, bkt;
 
+	wq_cpu_intensive_thresh_init();
+
 	/*
 	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
 	 * CPU to node mapping may not be available that early on some
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1200,7 +1200,7 @@ config WQ_CPU_INTENSIVE_REPORT
 	help
 	  Say Y here to enable reporting of concurrency-managed per-cpu work
 	  items that hog CPUs for longer than
-	  workqueue.cpu_intensive_threshold_us. Workqueue automatically
+	  workqueue.cpu_intensive_thresh_us. Workqueue automatically
 	  detects and excludes them from concurrency management to prevent
 	  them from stalling other per-cpu work items. Occassional
 	  triggering may not necessarily indicate a problem. Repeated