mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git
synced 2025-11-04 07:44:51 +10:00
	u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT during updates.
On PREEMPT_RT the seqcount_t for synchronisation is required on 32bit
architectures even on UP because the softirq (and the threaded IRQ handler)
can be preempted.

With the seqcount_t for synchronisation, a reader with higher priority can
preempt the writer and then spin endlessly in read_seqcount_begin() while
the writer can't make progress.

To avoid such a lock up on PREEMPT_RT the writer must disable preemption
during the update. There is no need to disable interrupts because no writer
is using this API in hard-IRQ context on PREEMPT_RT.

Disable preemption on 32bit-RT within the u64_stats write section.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
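For orientation, a minimal usage sketch of the API this patch changes (not
part of the commit message): a writer bumps a 64bit counter inside the
u64_stats write section and a reader retries until it observes a consistent
value. The struct and helper names (pcpu_stats, stats_rx_inc, stats_rx_read)
are hypothetical; the u64_stats_* calls are the ones touched by the diff
below.

	#include <linux/u64_stats_sync.h>

	struct pcpu_stats {
		u64_stats_t		rx_packets;	/* 64bit counter */
		struct u64_stats_sync	syncp;		/* seqcount on 32bit SMP/RT */
	};

	/* Writer: after this patch, u64_stats_update_begin() also
	 * disables preemption on 32bit PREEMPT_RT, so a higher-priority
	 * reader cannot preempt the writer and then spin forever in
	 * read_seqcount_begin().
	 */
	static void stats_rx_inc(struct pcpu_stats *s)
	{
		u64_stats_update_begin(&s->syncp);
		u64_stats_inc(&s->rx_packets);
		u64_stats_update_end(&s->syncp);
	}

	/* Reader: retry while the writer was mid-update. The syncp must
	 * have been set up with u64_stats_init() beforehand.
	 */
	static u64 stats_rx_read(struct pcpu_stats *s)
	{
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			packets = u64_stats_read(&s->rx_packets);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		return packets;
	}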
This commit is contained in:
parent d147dd7090
commit 3c118547f8
include/linux/u64_stats_sync.h:

@@ -66,7 +66,7 @@
 #include <linux/seqlock.h>
 
 struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	seqcount_t	seq;
 #endif
 };
@@ -125,7 +125,7 @@ static inline void u64_stats_inc(u64_stats_t *p)
 }
 #endif
 
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 #define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
 #else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
@@ -135,15 +135,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp)
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
 static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
 #endif
 }
 
@@ -152,8 +156,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
 {
 	unsigned long flags = 0;
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	local_irq_save(flags);
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
+	else
+		local_irq_save(flags);
 	write_seqcount_begin(&syncp->seq);
 #endif
 	return flags;
@@ -163,15 +170,18 @@ static inline void
 u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
 				unsigned long flags)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	write_seqcount_end(&syncp->seq);
-	local_irq_restore(flags);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
 #endif
 }
 
 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_begin(&syncp->seq);
 #else
 	return 0;
@@ -180,7 +190,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
 	preempt_disable();
 #endif
 	return __u64_stats_fetch_begin(syncp);
@@ -189,7 +199,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 	return false;
@@ -199,7 +209,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
 	preempt_enable();
 #endif
 	return __u64_stats_fetch_retry(syncp, start);
@@ -213,7 +223,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+	preempt_disable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
 	local_irq_disable();
 #endif
 	return __u64_stats_fetch_begin(syncp);
@@ -222,7 +234,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+	preempt_enable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
 	local_irq_enable();
 #endif
 	return __u64_stats_fetch_retry(syncp, start);
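As a usage note (not part of the commit): for writers that can also run in
contexts where interrupts must be considered, the irqsave variant now picks
the matching primitive per configuration. A short sketch, reusing the
hypothetical pcpu_stats struct from above:

	/* 32bit PREEMPT_RT: disables preemption; 32bit SMP without RT:
	 * masks interrupts; elsewhere the guarded section compiles away
	 * and flags stays 0.
	 */
	static void stats_rx_inc_any_context(struct pcpu_stats *s)
	{
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&s->syncp);
		u64_stats_inc(&s->rx_packets);
		u64_stats_update_end_irqrestore(&s->syncp, flags);
	}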