author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2018-07-14 02:37:21 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-07-25 10:18:28 +0200
commit	6e2119e4b8767a6c3a415875ad09596ada00755c (patch)
tree	1176b22e1bf5107c3901445ae6bd6eec62cb9d02 /arch/x86/kernel/process.c
parent	afc6bf9131efc36d4ae8a003e8597119a2190661 (diff)
x86/bugs: Rename _RDS to _SSBD
commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream

Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2] as SSBD (Speculative Store Bypass Disable). Hence changing it.

It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name is going to be. Following the rename it would be SSBD_NO, but that rolls out to Speculative Store Bypass Disable No.

Also fixed the missing space in X86_FEATURE_AMD_SSBD.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[ Srivatsa: Backported to 4.4.y, skipping the KVM changes in this patch. ]
Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@gmail.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>
Reviewed-by: Bo Gan <ganb@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
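For context on the helpers being renamed: the two converters turn a task's TIF_SSBD thread flag into the value that __speculative_store_bypass_update() writes to the MSR. Below is a minimal stand-alone C sketch of the Intel-side mapping into IA32_SPEC_CTRL bit 2. The constant values and helper body are assumptions for illustration, mirroring the shape of the upstream code rather than quoting this patch.

/*
 * User-space sketch of the TIF_SSBD -> SPEC_CTRL_SSBD mapping.
 * Constant values are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_SSBD_SHIFT	2			/* IA32_SPEC_CTRL[2] = SSBD */
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)
#define TIF_SSBD		5			/* assumed thread-flag bit number */
#define _TIF_SSBD		(1UL << TIF_SSBD)

/* Shift the per-task TIF_SSBD flag down to the SPEC_CTRL bit position. */
static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	uint64_t tifn = _TIF_SSBD;	/* task has SSBD requested */

	printf("IA32_SPEC_CTRL bits to set: 0x%llx\n",
	       (unsigned long long)ssbd_tif_to_spec_ctrl(tifn));
	return 0;
}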
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9689e92e72dd..57d4ba250c6a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -203,11 +203,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
 {
 	u64 msr;
 
-	if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
-		msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {
-		msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 	}
 }
@@ -246,7 +246,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOTSC)
 		cr4_toggle_bits(X86_CR4_TSD);
 
-	if ((tifp ^ tifn) & _TIF_RDS)
+	if ((tifp ^ tifn) & _TIF_SSBD)
 		__speculative_store_bypass_update(tifn);
 }
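For completeness, the AMD branch in the first hunk ORs a CPU-specific LS_CFG mask into the base value instead of a fixed architectural bit. A rough stand-alone sketch of that selection follows; the mask value and names here are hypothetical stand-ins, not taken from this patch.

/* Illustrative sketch of the AMD-side TIF_SSBD -> LS_CFG mask selection. */
#include <stdint.h>
#include <stdio.h>

#define TIF_SSBD	5			/* assumed thread-flag bit number */
#define _TIF_SSBD	(1UL << TIF_SSBD)

/* Assumed CPU-family-specific LS_CFG disable mask (illustrative value). */
static uint64_t x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;

/* Return the LS_CFG mask when the task requests SSBD, 0 otherwise. */
static uint64_t ssbd_tif_to_amd_ls_cfg(uint64_t tifn)
{
	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

int main(void)
{
	uint64_t base = 0;		/* stand-in for x86_amd_ls_cfg_base */
	uint64_t msr = base | ssbd_tif_to_amd_ls_cfg(_TIF_SSBD);

	printf("MSR_AMD64_LS_CFG value to write: 0x%llx\n",
	       (unsigned long long)msr);
	return 0;
}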