author    Greg Kroah-Hartman <gregkh@google.com>  2018-08-15 18:20:41 +0200
committer Greg Kroah-Hartman <gregkh@google.com>  2018-08-15 18:20:41 +0200
commit    f057ff937754efc42d56bee825187b2ce6c36958
tree      a3e7e80d30998b70b74a82bbc11118bb07acf4fd  /arch/x86/kernel/cpu/bugs.c
parent    a5fc66599b6143999290603a51da11868f7d36a9
parent    30a97c1e2dc39f45d9deeeccc2733278fc285d5e
Merge 4.4.148 into android-4.4
Changes in 4.4.148
    ext4: fix check to prevent initializing reserved inodes
    tpm: fix race condition in tpm_common_write()
    ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
    fork: unconditionally clear stack on fork
    parisc: Enable CONFIG_MLONGCALLS by default
    parisc: Define mb() and add memory barriers to assembler unlock sequences
    xen/netfront: don't cache skb_shinfo()
    ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
    scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
    root dentries need RCU-delayed freeing
    fix mntput/mntput race
    fix __legitimize_mnt()/mntput() race
    IB/core: Make testing MR flags for writability a static inline function
    IB/mlx4: Mark user MR as writable if actual virtual memory is writable
    IB/ocrdma: fix out of bounds access to local buffer
    ARM: dts: imx6sx: fix irq for pcie bridge
    x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
    x86/speculation: Protect against userspace-userspace spectreRSB
    kprobes/x86: Fix %p uses in error messages
    x86/irqflags: Provide a declaration for native_save_fl
    x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
    x86/mm: Move swap offset/type up in PTE to work around erratum
    x86/mm: Fix swap entry comment and macro
    mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
    x86/speculation/l1tf: Change order of offset/type in swap entry
    x86/speculation/l1tf: Protect swap entries against L1TF
    x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
    x86/speculation/l1tf: Make sure the first page is always reserved
    x86/speculation/l1tf: Add sysfs reporting for l1tf
    mm: Add vm_insert_pfn_prot()
    mm: fix cache mode tracking in vm_insert_mixed()
    x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
    x86/speculation/l1tf: Limit swap file size to MAX_PA/2
    x86/bugs: Move the l1tf function and define pr_fmt properly
    x86/speculation/l1tf: Extend 64bit swap file size limit
    x86/cpufeatures: Add detection of L1D cache flush support.
    x86/speculation/l1tf: Protect PAE swap entries against L1TF
    x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
    x86/speculation/l1tf: Invert all not present mappings
    x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
    x86/mm/pat: Make set_memory_np() L1TF safe
    x86/mm/kmmio: Make the tracer robust against L1TF
    x86/speculation/l1tf: Fix up CPU feature flags
    x86/init: fix build with CONFIG_SWAP=n
    x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
    Linux 4.4.148

Change-Id: I83c857d9d9d74ee47e61d15eb411f276f057ba3d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  81
1 file changed, 50 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 12a8867071f3..34e4aaaf03d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -26,9 +26,11 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
+#include <asm/e820.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -80,6 +82,8 @@ void __init check_bugs(void)
*/
ssb_select_mitigation();
+ l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -386,22 +373,15 @@ retpoline_auto:
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP nor PTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+#undef pr_fmt
+#define pr_fmt(fmt) "L1TF: " fmt
+static void __init l1tf_select_mitigation(void)
+{
+ u64 half_pa;
+
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
+#if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+#endif
+
+ /*
+ * This is extremely unlikely to happen because almost all
+ * systems have a MAX_PA/2 far larger than the RAM that can
+ * fit into their DIMM slots.
+ */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+ return sprintf(buf, "Mitigation: Page Table Inversion\n");
+ break;
+
default:
break;
}
@@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
#endif
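
For readers following the new l1tf_select_mitigation() logic above, here is a minimal standalone sketch (plain userspace C, not kernel code) of the two checks it performs: deriving the MAX_PA/2 cutoff the way l1tf_pfn_limit() << PAGE_SHIFT does, and a simplified model of the e820_any_mapped() overlap test. The 46-bit physical address width, the helper name any_ram_mapped(), and the two-entry memory map are assumptions for illustration only.

/*
 * Illustrative userspace sketch, NOT kernel code: models the MAX_PA/2
 * computation and the e820 RAM-overlap test from l1tf_select_mitigation().
 * The phys_bits value and the memory map below are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct region { uint64_t addr, size; };

/* Simplified stand-in for e820_any_mapped(start, end, E820_RAM):
 * does any RAM region overlap the half-open range [start, end)? */
static bool any_ram_mapped(const struct region *map, int n,
			   uint64_t start, uint64_t end)
{
	for (int i = 0; i < n; i++) {
		uint64_t r_start = map[i].addr;
		uint64_t r_end   = map[i].addr + map[i].size;

		if (r_start < end && r_end > start)
			return true;
	}
	return false;
}

int main(void)
{
	int phys_bits = 46;	/* assumed CPUID-reported physical bits */

	/* Mirrors the shape of l1tf_pfn_limit() << PAGE_SHIFT: the last
	 * safe pfn lies below 2^(phys_bits - 1), i.e. below MAX_PA/2. */
	uint64_t pfn_limit = 1ULL << (phys_bits - 1 - PAGE_SHIFT);
	uint64_t half_pa   = pfn_limit << PAGE_SHIFT;	/* 2^45 = 32 TiB */

	/* Made-up two-entry map: ~3 GiB of low RAM, 16 GiB above 4 GiB. */
	const struct region ram[] = {
		{ 0x0000000000100000ULL, 0x00000000bff00000ULL },
		{ 0x0000000100000000ULL, 0x0000000400000000ULL },
	};

	printf("MAX_PA/2 = %llu GiB\n",
	       (unsigned long long)(half_pa >> 30));
	printf("RAM at/above MAX_PA/2: %s\n",
	       any_ram_mapped(ram, 2, half_pa, UINT64_MAX - half_pa)
		       ? "yes (PTE inversion not fully effective)"
		       : "no (mitigation effective)");
	return 0;
}

The real kernel check differs only in where the inputs come from: l1tf_pfn_limit() reads boot_cpu_data.x86_phys_bits, and the region list is the firmware-provided e820 map rather than a hardcoded table.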
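
On the sysfs side, once cpu_show_l1tf() is wired up the status appears under the standard vulnerabilities directory as /sys/devices/system/cpu/vulnerabilities/l1tf. The small reader below is an illustration, not part of the patch; on an affected CPU with this series applied it would print "Mitigation: Page Table Inversion", and "Not affected" on CPUs without the bug.

/* Illustration only: read the l1tf entry that cpu_show_l1tf() backs.
 * Kernels without this patch do not create the file at all. */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf sysfs attribute");	/* pre-4.4.148 kernel? */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: Page Table Inversion" */
	fclose(f);
	return 0;
}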