x86/vmscape: Enable the mitigation

Commit 556c1ad666 upstream.

Enable the previously added mitigation for VMscape. Add the cmdline
vmscape={off|ibpb|force} and sysfs reporting.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit: 893387c186 (parent: 15006289e5), 2025-08-14
6 changed files with 106 additions and 0 deletions

diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu

@@ -528,6 +528,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/srbds
 		/sys/devices/system/cpu/vulnerabilities/tsa
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+		/sys/devices/system/cpu/vulnerabilities/vmscape
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
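Once this file exists, the reported state can be checked from userspace. Below is a minimal sketch (not part of this commit) that simply prints the contents of the new sysfs entry:

/* Minimal userspace sketch: print the VMscape mitigation state. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/vmscape";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* kernels without this commit lack the file */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: IBPB before exit to userspace" */
	fclose(f);
	return 0;
}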

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt

@@ -3297,6 +3297,7 @@
 					srbds=off [X86,INTEL]
 					ssbd=force-off [ARM64]
 					tsx_async_abort=off [X86]
+					vmscape=off [X86]

 			       Exceptions:
 					      This does not have any effect on
@@ -6813,6 +6814,16 @@
 	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
 			Format: <command>

+	vmscape=	[X86] Controls mitigation for VMscape attacks.
+			VMscape attacks can leak information from a userspace
+			hypervisor to a guest via speculative side-channels.
+
+			off	- disable the mitigation
+			ibpb	- use Indirect Branch Prediction Barrier
+				  (IBPB) mitigation (default)
+			force	- force vulnerability detection even on
+				  unaffected processors
+
 	vsyscall=	[X86-64]
 			Controls the behavior of vsyscalls (i.e. calls to
 			fixed addresses of 0xffffffffff600x00 from legacy

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig

@@ -2595,6 +2595,15 @@ config MITIGATION_TSA
 	  security vulnerability on AMD CPUs which can lead to forwarding of
 	  invalid info to subsequent instructions and thus can affect their
 	  timing and thereby cause a leakage.
+
+config MITIGATION_VMSCAPE
+	bool "Mitigate VMSCAPE"
+	depends on KVM
+	default y
+	help
+	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
+	  vulnerability on Intel and AMD CPUs that may allow a guest to do
+	  Spectre v2 style attacks on userspace hypervisor.
+
 endif

 config ARCH_HAS_ADD_PAGES

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c

@@ -50,6 +50,7 @@ static void __init gds_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init its_select_mitigation(void);
 static void __init tsa_select_mitigation(void);
+static void __init vmscape_select_mitigation(void);

 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -193,6 +194,7 @@ void __init cpu_select_mitigations(void)
 	gds_select_mitigation();
 	its_select_mitigation();
 	tsa_select_mitigation();
+	vmscape_select_mitigation();
 }

 /*
@@ -2898,6 +2900,68 @@ pred_cmd:
 		x86_pred_cmd = PRED_CMD_SBPB;
 }

+#undef pr_fmt
+#define pr_fmt(fmt)	"VMSCAPE: " fmt
+
+enum vmscape_mitigations {
+	VMSCAPE_MITIGATION_NONE,
+	VMSCAPE_MITIGATION_AUTO,
+	VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
+	VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+static const char * const vmscape_strings[] = {
+	[VMSCAPE_MITIGATION_NONE]		= "Vulnerable",
+	/* [VMSCAPE_MITIGATION_AUTO] */
+	[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]	= "Mitigation: IBPB before exit to userspace",
+	[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT",
+};
+
+static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
+
+static int __init vmscape_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off")) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+	} else if (!strcmp(str, "ibpb")) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+	} else if (!strcmp(str, "force")) {
+		setup_force_cpu_bug(X86_BUG_VMSCAPE);
+		vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
+	} else {
+		pr_err("Ignoring unknown vmscape=%s option.\n", str);
+	}
+
+	return 0;
+}
+early_param("vmscape", vmscape_parse_cmdline);
+
+static void __init vmscape_select_mitigation(void)
+{
+	if (cpu_mitigations_off() ||
+	    !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
+	    !boot_cpu_has(X86_FEATURE_IBPB)) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+		return;
+	}
+
+	if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+
+	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
+	    srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
+
+	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
+		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+
+	pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)	fmt
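This hunk only selects the mitigation mode and force-sets X86_FEATURE_IBPB_EXIT_TO_USER; the barrier itself is issued on the return-to-userspace path added by the earlier VMscape patch ("Mitigation: IBPB before exit to userspace"). The sketch below, not part of this diff, shows roughly what such a consumer looks like; the per-CPU flag name vmscape_ibpb_pending is illustrative rather than the exact upstream identifier:

/*
 * Illustrative sketch only (not from this commit): issue an IBPB before
 * returning to the userspace hypervisor when the VMSCAPE mitigation is
 * active and a VM-exit has occurred since the last barrier.
 */
static __always_inline void vmscape_ibpb_before_exit_to_user(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		return;

	/* vmscape_ibpb_pending is a hypothetical per-CPU bool set on VM-exit. */
	if (this_cpu_read(vmscape_ibpb_pending)) {
		indirect_branch_prediction_barrier();
		this_cpu_write(vmscape_ibpb_pending, false);
	}
}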
@@ -3146,6 +3210,11 @@ static ssize_t tsa_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
 }

+static ssize_t vmscape_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -3210,6 +3279,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 	case X86_BUG_TSA:
 		return tsa_show_state(buf);

+	case X86_BUG_VMSCAPE:
+		return vmscape_show_state(buf);
+
 	default:
 		break;
 	}
@@ -3299,4 +3371,9 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
 }
+
+ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
+}
 #endif

diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c

@@ -606,6 +606,11 @@ ssize_t __weak cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
 	return sysfs_emit(buf, "Not affected\n");
 }

+ssize_t __weak cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -622,6 +627,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);

 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -640,6 +646,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_reg_file_data_sampling.attr,
 	&dev_attr_indirect_target_selection.attr,
 	&dev_attr_tsa.attr,
+	&dev_attr_vmscape.attr,
 	NULL
 };

diff --git a/include/linux/cpu.h b/include/linux/cpu.h

@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
 						  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);

 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,