mirror of
https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git
synced 2025-09-14 11:19:08 +10:00
Commit f9af88a3d3
upstream.
It will be used by other x86 mitigations.
No functional changes.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
51 lines
1.1 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Common place for both 32- and 64-bit entry routines.
 */

#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/cpufeatures.h>
#include <asm/nospec-branch.h>

/* noinstr: this code runs on entry paths before instrumentation is safe. */
.pushsection .noinstr.text, "ax"

/*
 * entry_ibpb - emit an Indirect Branch Prediction Barrier (IBPB)
 *
 * Writes the command value kept in the x86_pred_cmd variable (presumably
 * PRED_CMD_IBPB — confirm against arch/x86 bug setup code) to the
 * IA32_PRED_CMD MSR via WRMSR.
 *
 * Clobbers: %rax, %rcx, %rdx, flags.
 */
SYM_FUNC_START(entry_ibpb)
	movl	$MSR_IA32_PRED_CMD, %ecx	/* %ecx = MSR number for WRMSR */
	movl	_ASM_RIP(x86_pred_cmd), %eax	/* %eax = low 32 bits of command value */
	xorl	%edx, %edx			/* %edx = high 32 bits = 0 */
	wrmsr

	/* Make sure IBPB clears return stack predictions too. */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET

	RET
SYM_FUNC_END(entry_ibpb)
/* For KVM */
EXPORT_SYMBOL_GPL(entry_ibpb);

.popsection
/*
 * Define the VERW operand that is disguised as entry code so that
 * it can be referenced with KPTI enabled. This ensures VERW can be
 * used late in exit-to-user path after page tables are switched.
 */
.pushsection .entry.text, "ax"

/* Pad with 0xcc (int3) up to an L1 cache-line boundary. */
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(x86_verw_sel)
	UNWIND_HINT_EMPTY	/* data, not real code: no unwind state here */
	ANNOTATE_NOENDBR	/* never an indirect-branch target; no ENDBR needed */
	.word __KERNEL_DS	/* the segment selector VERW reads as its operand */
/* Trailing 0xcc pad — keeps the word isolated on its own cache line. */
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(x86_verw_sel);
/* For KVM */
EXPORT_SYMBOL_GPL(x86_verw_sel);

.popsection