path: root/arch/x86/kernel/cpu/common.c
author	Greg Kroah-Hartman <gregkh@google.com>	2018-07-31 20:18:19 +0200
committer	Greg Kroah-Hartman <gregkh@google.com>	2018-07-31 20:18:19 +0200
commit	4b2d6badbc4057bd43e442eb20141fb40c549f6d (patch)
tree	f8892478ef4bfd14895e6704f4b3a398682892be /arch/x86/kernel/cpu/common.c
parent	7bbfac190345ca532ea790c9d9ccb15682d7b99b (diff)
parent	762b585c492fedda1b0bc4c6d0a867307bf7cd0f (diff)
Merge 4.4.144 into android-4.4
Changes in 4.4.144

	KVM/Eventfd: Avoid crash when assign and deassign specific eventfd in parallel.
	x86/MCE: Remove min interval polling limitation
	fat: fix memory allocation failure handling of match_strdup()
	ALSA: rawmidi: Change resized buffers atomically
	ARC: Fix CONFIG_SWAP
	ARC: mm: allow mprotect to make stack mappings executable
	mm: memcg: fix use after free in mem_cgroup_iter()
	ipv4: Return EINVAL when ping_group_range sysctl doesn't map to user ns
	ipv6: fix useless rol32 call on hash
	lib/rhashtable: consider param->min_size when setting initial table size
	net/ipv4: Set oif in fib_compute_spec_dst
	net: phy: fix flag masking in __set_phy_supported
	ptp: fix missing break in switch
	tg3: Add higher cpu clock for 5762.
	net: Don't copy pfmemalloc flag in __copy_skb_header()
	skbuff: Unconditionally copy pfmemalloc in __skb_clone()
	xhci: Fix perceived dead host due to runtime suspend race with event handler
	x86/paravirt: Make native_save_fl() extern inline
	x86/cpufeatures: Add CPUID_7_EDX CPUID leaf
	x86/cpufeatures: Add Intel feature bits for Speculation Control
	x86/cpufeatures: Add AMD feature bits for Speculation Control
	x86/msr: Add definitions for new speculation control MSRs
	x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown
	x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes
	x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
	x86/cpufeatures: Clean up Spectre v2 related CPUID flags
	x86/cpuid: Fix up "virtual" IBRS/IBPB/STIBP feature bits on Intel
	x86/pti: Mark constant arrays as __initconst
	x86/asm/entry/32: Simplify pushes of zeroed pt_regs->REGs
	x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface
	x86/speculation: Update Speculation Control microcode blacklist
	x86/speculation: Correct Speculation Control microcode blacklist again
	x86/speculation: Clean up various Spectre related details
	x86/speculation: Fix up array_index_nospec_mask() asm constraint
	x86/speculation: Add <asm/msr-index.h> dependency
	x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend
	x86/mm: Factor out LDT init from context init
	x86/mm: Give each mm TLB flush generation a unique ID
	x86/speculation: Use Indirect Branch Prediction Barrier in context switch
	x86/spectre_v2: Don't check microcode versions when running under hypervisors
	x86/speculation: Use IBRS if available before calling into firmware
	x86/speculation: Move firmware_restrict_branch_speculation_*() from C to CPP
	x86/speculation: Remove Skylake C2 from Speculation Control microcode blacklist
	selftest/seccomp: Fix the flag name SECCOMP_FILTER_FLAG_TSYNC
	selftest/seccomp: Fix the seccomp(2) signature
	xen: set cpu capabilities from xen_start_kernel()
	x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen
	x86/nospec: Simplify alternative_msr_write()
	x86/bugs: Concentrate bug detection into a separate function
	x86/bugs: Concentrate bug reporting into a separate function
	x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
	x86/bugs, KVM: Support the combination of guest and host IBRS
	x86/cpu: Rename Merrifield2 to Moorefield
	x86/cpu/intel: Add Knights Mill to Intel family
	x86/bugs: Expose /sys/../spec_store_bypass
	x86/cpufeatures: Add X86_FEATURE_RDS
	x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation
	x86/bugs/intel: Set proper CPU features and setup RDS
	x86/bugs: Whitelist allowed SPEC_CTRL MSR values
	x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
	x86/speculation: Create spec-ctrl.h to avoid include hell
	prctl: Add speculation control prctls
	x86/process: Optimize TIF checks in __switch_to_xtra()
	x86/process: Correct and optimize TIF_BLOCKSTEP switch
	x86/process: Optimize TIF_NOTSC switch
	x86/process: Allow runtime control of Speculative Store Bypass
	x86/speculation: Add prctl for Speculative Store Bypass mitigation
	nospec: Allow getting/setting on non-current task
	proc: Provide details on speculation flaw mitigations
	seccomp: Enable speculation flaw mitigations
	prctl: Add force disable speculation
	seccomp: Use PR_SPEC_FORCE_DISABLE
	seccomp: Add filter flag to opt-out of SSB mitigation
	seccomp: Move speculation migitation control to arch code
	x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
	x86/bugs: Rename _RDS to _SSBD
	proc: Use underscores for SSBD in 'status'
	Documentation/spec_ctrl: Do some minor cleanups
	x86/bugs: Fix __ssb_select_mitigation() return type
	x86/bugs: Make cpu_show_common() static
	x86/bugs: Fix the parameters alignment and missing void
	x86/cpu: Make alternative_msr_write work for 32-bit code
	x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
	x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
	x86/cpufeatures: Disentangle SSBD enumeration
	x86/cpu/AMD: Fix erratum 1076 (CPB bit)
	x86/cpufeatures: Add FEATURE_ZEN
	x86/speculation: Handle HT correctly on AMD
	x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
	x86/speculation: Add virtualized speculative store bypass disable support
	x86/speculation: Rework speculative_store_bypass_update()
	x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}
	x86/bugs: Expose x86_spec_ctrl_base directly
	x86/bugs: Remove x86_spec_ctrl_set()
	x86/bugs: Rework spec_ctrl base and mask logic
	x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG
	x86/bugs: Rename SSBD_NO to SSB_NO
	x86/xen: Add call of speculative_store_bypass_ht_init() to PV paths
	x86/cpu: Re-apply forced caps every time CPU caps are re-read
	block: do not use interruptible wait anywhere
	clk: tegra: Fix PLL_U post divider and initial rate on Tegra30
	ubi: Introduce vol_ignored()
	ubi: Rework Fastmap attach base code
	ubi: Be more paranoid while seaching for the most recent Fastmap
	ubi: Fix races around ubi_refill_pools()
	ubi: Fix Fastmap's update_vol()
	ubi: fastmap: Erase outdated anchor PEBs during attach

Linux 4.4.144

Change-Id: Ia3e9b2b7bc653cba68b76878d34f8fcbbc007a13
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
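Several of the entries above ("prctl: Add speculation control prctls", "seccomp: Enable speculation flaw mitigations" and friends) add a per-task userspace interface for Speculative Store Bypass control. As a rough illustration only, not part of this merge, a program might exercise it like this; the PR_* fallback defines cover toolchains whose headers predate the interface:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_STORE_BYPASS
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_ENABLE		(1UL << 1)
#define PR_SPEC_DISABLE		(1UL << 2)
#define PR_SPEC_FORCE_DISABLE	(1UL << 3)
#endif

int main(void)
{
	/* Query the current Speculative Store Bypass state for this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");	/* kernel lacks the prctl */
		return 1;
	}
	printf("ssb state: %#x (prctl-controllable: %s)\n",
	       state, (state & PR_SPEC_PRCTL) ? "yes" : "no");

	/* Opt this task into the SSB mitigation (turn the optimization off). */
	if (state & PR_SPEC_PRCTL)
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_DISABLE, 0, 0);
	return 0;
}

PR_SPEC_FORCE_DISABLE behaves like PR_SPEC_DISABLE but refuses any later re-enable, which is what the seccomp integration in this series relies on.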
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	121
1 file changed, 116 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 736e2843139b..3d21b28f9826 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -43,6 +43,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -674,6 +676,40 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
	}
}
+static void init_speculation_control(struct cpuinfo_x86 *c)
+{
+	/*
+	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+	 * and they also have a different bit for STIBP support. Also,
+	 * a hypervisor might have set the individual AMD bits even on
+	 * Intel CPUs, for finer-grained selection of what's available.
+	 */
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
+
+	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
+}
+
void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
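For reference, the raw CPUID bits that init_speculation_control() above folds into the kernel's synthetic IBRS/IBPB/STIBP/SSBD flags can be probed from userspace. A standalone sketch, assuming a compiler whose <cpuid.h> provides __get_cpuid_count (GCC 7+ or clang) and the bit positions documented by Intel (leaf 7, subleaf 0, EDX) and AMD (leaf 0x80000008, EBX):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Intel: leaf 7 subleaf 0 EDX, stored as CPUID_7_EDX in the diff above. */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		printf("SPEC_CTRL (IBRS/IBPB): %d\n", !!(edx & (1u << 26)));
		printf("INTEL_STIBP:           %d\n", !!(edx & (1u << 27)));
		printf("SPEC_CTRL_SSBD:        %d\n", !!(edx & (1u << 31)));
	}

	/* AMD: leaf 0x80000008 EBX carries the individual bits. */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		printf("AMD_IBPB:  %d\n", !!(ebx & (1u << 12)));
		printf("AMD_IBRS:  %d\n", !!(ebx & (1u << 14)));
		printf("AMD_STIBP: %d\n", !!(ebx & (1u << 15)));
	}
	return 0;
}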
@@ -695,6 +731,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
+		c->x86_capability[CPUID_7_EDX] = edx;
	}

	/* Extended state features: level 0x0000000d */
@@ -765,6 +802,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
+	init_speculation_control(c);
+
+	/*
+	 * Clear/Set all flags overridden by options, after probe.
+	 * This needs to happen each time we re-probe, which may happen
+	 * several times during CPU initialization.
+	 */
+	apply_forced_caps(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
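The comment above is the crux of "x86/cpu: Re-apply forced caps every time CPU caps are re-read" from this merge: get_cpu_cap() can run several times, so boot-time overrides must be replayed after each probe or a later CPUID re-read would silently drop them. A minimal standalone sketch of the pattern; the array names are hypothetical stand-ins for the kernel's cpu_caps_set/cpu_caps_cleared:

#include <stdio.h>

#define NCAPINTS 19				/* illustrative array size */

static unsigned int forced_caps[NCAPINTS];	/* bits forced on at boot  */
static unsigned int cleared_caps[NCAPINTS];	/* bits forced off at boot */

/* Replay boot-time overrides on top of a freshly probed capability set. */
static void reapply_overrides(unsigned int *caps)
{
	int i;

	for (i = 0; i < NCAPINTS; i++) {
		caps[i] &= ~cleared_caps[i];
		caps[i] |= forced_caps[i];
	}
}

int main(void)
{
	unsigned int caps[NCAPINTS] = { 0x5 };	/* pretend CPUID probe result */

	cleared_caps[0] = 0x1;			/* e.g. a "clearcpuid=" style override */
	forced_caps[0] = 0x8;			/* e.g. a synthetic feature bit */

	reapply_overrides(caps);		/* must re-run after every re-probe */
	printf("caps[0] = %#x\n", caps[0]);	/* 0x5 -> 0xc */
	return 0;
}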
@@ -793,6 +838,75 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
+static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_CENTAUR, 5 },
+	{ X86_VENDOR_INTEL, 5 },
+	{ X86_VENDOR_NSC, 5 },
+	{ X86_VENDOR_ANY, 4 },
+	{}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+	{ X86_VENDOR_AMD },
+	{}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+	{ X86_VENDOR_CENTAUR, 5, },
+	{ X86_VENDOR_INTEL, 5, },
+	{ X86_VENDOR_NSC, 5, },
+	{ X86_VENDOR_AMD, 0x12, },
+	{ X86_VENDOR_AMD, 0x11, },
+	{ X86_VENDOR_AMD, 0x10, },
+	{ X86_VENDOR_AMD, 0xf, },
+	{ X86_VENDOR_ANY, 4, },
+	{}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = 0;
+
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+	    !(ia32_cap & ARCH_CAP_SSB_NO))
+		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+	if (x86_match_cpu(cpu_no_speculation))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return;
+
+	/* Rogue Data Cache Load? No! */
+	if (ia32_cap & ARCH_CAP_RDCL_NO)
+		return;
+
+	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+}
+
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
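cpu_set_bug_bits() consults the three tables above via x86_match_cpu(), which walks a table until its all-zero terminator and treats "any" values as wildcards. A simplified standalone sketch of that walk; the struct layout, the WILDCARD constant, and the vendor code used below are illustrative, not the kernel's:

#include <stdio.h>

struct cpu_id {
	unsigned short vendor;	/* all-zero entry terminates the table */
	unsigned short family;
	unsigned short model;
};

#define WILDCARD 0xffff		/* illustrative "matches anything" value */

static const struct cpu_id *match_cpu(const struct cpu_id *table,
				      unsigned short vendor,
				      unsigned short family,
				      unsigned short model)
{
	const struct cpu_id *m;

	for (m = table; m->vendor | m->family | m->model; m++) {
		if (m->vendor != WILDCARD && m->vendor != vendor)
			continue;
		if (m->family != WILDCARD && m->family != family)
			continue;
		if (m->model != WILDCARD && m->model != model)
			continue;
		return m;	/* first matching entry wins */
	}
	return NULL;		/* hit the terminator: no match */
}

int main(void)
{
	static const struct cpu_id no_meltdown[] = {
		{ 2 /* pretend AMD */, WILDCARD, WILDCARD },
		{ 0 }
	};

	printf("%s\n", match_cpu(no_meltdown, 2, 0x17, 1)
		       ? "skips the Meltdown bug bit"
		       : "assumed vulnerable");
	return 0;
}

In the kernel tables above, entries such as { X86_VENDOR_AMD } instead rely on zero-valued family/model fields acting as the wildcard (X86_FAMILY_ANY and X86_MODEL_ANY are 0), while X86_VENDOR_ANY is a distinct non-zero value, so { X86_VENDOR_ANY, 4 } does not terminate the table.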
@@ -839,11 +953,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

-	if (c->x86_vendor != X86_VENDOR_AMD)
-		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+	cpu_set_bug_bits(c);

	fpu__init_system(c);
@@ -1132,6 +1242,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
	enable_sep_cpu();
#endif
	mtrr_ap_init();
+	x86_spec_ctrl_setup_ap();
}
struct msr_range {