Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git
(synced 2025-10-30 22:47:06 +10:00)

Despite the current efforts to read CR2 before tracing happens, there still
exist a number of possible holes:

  idtentry page_fault             do_page_fault           has_error_code=1
    call error_entry
      TRACE_IRQS_OFF
        call trace_hardirqs_off*
          #PF // modifies CR2

      CALL_enter_from_user_mode
        __context_tracking_exit()
          trace_user_exit(0)
            #PF // modifies CR2

    call do_page_fault
      address = read_cr2(); /* whoopsie */

And similar for i386.

Fix it by pulling the CR2 read into the entry code, before any of that
stuff gets a chance to run and ruin things.
Reported-by: He Zhe <zhe.he@windriver.com>
Reported-by: Eiichi Tsukata <devel@etsukata.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: bp@alien8.de
Cc: rostedt@goodmis.org
Cc: torvalds@linux-foundation.org
Cc: hpa@zytor.com
Cc: dave.hansen@linux.intel.com
Cc: jgross@suse.com
Cc: joel@joelfernandes.org
Link: https://lkml.kernel.org/r/20190711114336.116812491@infradead.org
Debugged-by: Steven Rostedt <rostedt@goodmis.org>
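
To illustrate the shape of the fix in C rather than the actual idtentry asm, here is a
minimal sketch: CR2 is read before any tracing or context-tracking hook can fault and
overwrite it, and the saved address is passed to do_page_fault() as an argument. The
names entry_page_fault_sketch(), trace_hardirqs_off_hook() and enter_from_user_mode_hook()
are hypothetical stand-ins for the real entry-code pieces; only read_cr2() and the
three-argument do_page_fault() correspond to the patched kernel.

/* Sketch only -- the real change is in the entry asm, not a C function. */
static void entry_page_fault_sketch(struct pt_regs *regs, unsigned long error_code)
{
	/* Capture CR2 first, before anything that might itself take a #PF. */
	unsigned long address = read_cr2();

	/*
	 * These hooks may fault (e.g. on vmalloc'd tracing buffers) and
	 * clobber CR2, but that no longer matters: the address we care
	 * about is already saved.
	 */
	trace_hardirqs_off_hook();	/* hypothetical stand-in */
	enter_from_user_mode_hook();	/* hypothetical stand-in */

	/* Hand the saved address down instead of re-reading CR2 here. */
	do_page_fault(regs, error_code, address);
}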
arch/x86/include/asm/kvm_para.h (136 lines, 3.2 KiB, C)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <uapi/asm/kvm_para.h>

extern void kvmclock_init(void);

#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

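/*
 * The .byte sequences in KVM_HYPERCALL below are the raw opcodes for
 * vmcall (0f 01 c1, Intel VMX) and vmmcall (0f 01 d9, AMD SVM);
 * ALTERNATIVE() patches in the vmmcall form at boot when
 * X86_FEATURE_VMMCALL is set.
 */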
#define KVM_HYPERCALL \
        ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction.  The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
void do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait(T, I) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif

#endif /* _ASM_X86_KVM_PARA_H */
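
As a usage sketch (not part of this header): a guest caller picks the wrapper above that
matches its argument count, passes the hypercall number plus arguments, and gets the
hypervisor's return value back in rax. The example below is hypothetical; KVM_HC_KICK_CPU
does come from the uapi kvm_para.h and the in-kernel PV-unhalt code issues a similar
kvm_hypercall2(), but kvm_kick_vcpu_example() and its error handling are illustrative only.

#include <linux/kvm_para.h>	/* kvm_hypercall*() and KVM_HC_KICK_CPU */
#include <linux/printk.h>

/* Hypothetical example: ask the hypervisor to wake a halted vCPU by APIC ID. */
static void kvm_kick_vcpu_example(int apicid)
{
	/* nr goes in rax, flags in rbx, apicid in rcx per the ABI comment above. */
	long ret = kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);

	if (ret < 0)
		pr_warn("KVM_HC_KICK_CPU failed: %ld\n", ret);
}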