author     Srinivasarao P <spathi@codeaurora.org>  2018-08-16 10:31:30 +0530
committer  Srinivasarao P <spathi@codeaurora.org>  2018-08-24 00:07:01 +0530
commit     79de04d8065db03fb4a0cf9d2bf1916b092cabcc (patch)
tree       54733763f20f975ad1b37d26d56690012c0b6786 /arch/x86/kernel/cpu/bugs.c
parent     4bef50d041e80243d279ed1bccda5297b81ba306 (diff)
parent     f057ff937754efc42d56bee825187b2ce6c36958 (diff)
Merge android-4.4.148 (f057ff9) into msm-4.4
* refs/heads/tmp-f057ff9
  Linux 4.4.148
  x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
  x86/init: fix build with CONFIG_SWAP=n
  x86/speculation/l1tf: Fix up CPU feature flags
  x86/mm/kmmio: Make the tracer robust against L1TF
  x86/mm/pat: Make set_memory_np() L1TF safe
  x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
  x86/speculation/l1tf: Invert all not present mappings
  x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
  x86/speculation/l1tf: Protect PAE swap entries against L1TF
  x86/cpufeatures: Add detection of L1D cache flush support.
  x86/speculation/l1tf: Extend 64bit swap file size limit
  x86/bugs: Move the l1tf function and define pr_fmt properly
  x86/speculation/l1tf: Limit swap file size to MAX_PA/2
  x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
  mm: fix cache mode tracking in vm_insert_mixed()
  mm: Add vm_insert_pfn_prot()
  x86/speculation/l1tf: Add sysfs reporting for l1tf
  x86/speculation/l1tf: Make sure the first page is always reserved
  x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
  x86/speculation/l1tf: Protect swap entries against L1TF
  x86/speculation/l1tf: Change order of offset/type in swap entry
  mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
  x86/mm: Fix swap entry comment and macro
  x86/mm: Move swap offset/type up in PTE to work around erratum
  x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
  x86/irqflags: Provide a declaration for native_save_fl
  kprobes/x86: Fix %p uses in error messages
  x86/speculation: Protect against userspace-userspace spectreRSB
  x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
  ARM: dts: imx6sx: fix irq for pcie bridge
  IB/ocrdma: fix out of bounds access to local buffer
  IB/mlx4: Mark user MR as writable if actual virtual memory is writable
  IB/core: Make testing MR flags for writability a static inline function
  fix __legitimize_mnt()/mntput() race
  fix mntput/mntput race
  root dentries need RCU-delayed freeing
  scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
  ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
  xen/netfront: don't cache skb_shinfo()
  parisc: Define mb() and add memory barriers to assembler unlock sequences
  parisc: Enable CONFIG_MLONGCALLS by default
  fork: unconditionally clear stack on fork
  ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
  tpm: fix race condition in tpm_common_write()
  ext4: fix check to prevent initializing reserved inodes
  Linux 4.4.147
  jfs: Fix inconsistency between memory allocation and ea_buf->max_size
  i2c: imx: Fix reinit_completion() use
  ring_buffer: tracing: Inherit the tracing setting to next ring buffer
  ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
  ext4: fix false negatives *and* false positives in ext4_check_descriptors()
  netlink: Don't shift on 64 for ngroups
  netlink: Don't shift with UB on nlk->ngroups
  netlink: Do not subscribe to non-existent groups
  nohz: Fix local_timer_softirq_pending()
  genirq: Make force irq threading setup more robust
  scsi: qla2xxx: Return error when TMF returns
  scsi: qla2xxx: Fix ISP recovery on unload

Conflicts:
	include/linux/swapfile.h

Removed CONFIG_CRYPTO_ECHAINIV from defconfig files since this upmerge is adding this config to Kconfig file.

Change-Id: Ide96c29f919d76590c2bdccf356d1d464a892fd7
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
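Most of this upmerge is the L1TF (Foreshadow) series, whose core trick is to store the page-frame bits of not-present PTEs inverted, so that a speculative L1D lookup resolves to a physical address far above any populated RAM. Below is a minimal user-space sketch of that idea, assuming an illustrative 64-bit entry layout; the macro names and the bit 12..51 PFN mask are inventions for the example, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   0x1ULL                    /* illustrative present bit     */
#define PTE_PFN_MASK  0x000ffffffffff000ULL     /* illustrative PFN bits 12..51 */

/* Clear the present bit and flip the PFN bits, as the L1TF patches do for
 * PROT_NONE and swap entries: the flipped frame number points well outside
 * installed DRAM, so a speculative L1D hit cannot leak cached data. */
static uint64_t mk_not_present(uint64_t pte)
{
        return (pte & ~PTE_PRESENT) ^ PTE_PFN_MASK;
}

/* Undo the inversion when the mapping becomes present again. */
static uint64_t mk_present(uint64_t pte)
{
        return (pte ^ PTE_PFN_MASK) | PTE_PRESENT;
}

int main(void)
{
        uint64_t pte = (0x1234561ULL << 12) | PTE_PRESENT;
        uint64_t np  = mk_not_present(pte);

        printf("present:     %#018llx\n", (unsigned long long)pte);
        printf("not present: %#018llx\n", (unsigned long long)np);
        printf("restored:    %#018llx\n", (unsigned long long)mk_present(np));
        return 0;
}

The only requirement is that the round trip through mk_not_present()/mk_present() is exact; which unpopulated physical address the inverted value happens to name does not matter.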
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  81
1 file changed, 50 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 12a8867071f3..34e4aaaf03d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -26,9 +26,11 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
+#include <asm/e820.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -80,6 +82,8 @@ void __init check_bugs(void)
*/
ssb_select_mitigation();
+ l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -386,22 +373,15 @@ retpoline_auto:
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP nor PTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+#undef pr_fmt
+#define pr_fmt(fmt) "L1TF: " fmt
+static void __init l1tf_select_mitigation(void)
+{
+ u64 half_pa;
+
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
+#if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+#endif
+
+ /*
+ * This is extremely unlikely to happen because almost all
+ * systems have far more MAX_PA/2 than RAM can be fit into
+ * DIMM slots.
+ */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+ return sprintf(buf, "Mitigation: Page Table Inversion\n");
+ break;
+
default:
break;
}
@@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
#endif
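For reference, the MAX_PA/2 cutoff that l1tf_select_mitigation() checks against the e820 map comes from the CPU's physical address width; l1tf_pfn_limit() in this kernel is roughly BIT(x86_phys_bits - 1 - PAGE_SHIFT). Here is a stand-alone sketch of that arithmetic with an assumed 46-bit address width, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Roughly what l1tf_pfn_limit() computes: the first PFN in the upper half
 * of the physical address space. phys_bits is an assumed input here. */
static uint64_t pfn_limit_sketch(unsigned int phys_bits)
{
        return 1ULL << (phys_bits - 1 - PAGE_SHIFT);
}

int main(void)
{
        unsigned int phys_bits = 46;   /* assumed CPUID-reported address width */
        uint64_t half_pa = pfn_limit_sketch(phys_bits) << PAGE_SHIFT;

        /* Any RAM mapped at or above half_pa defeats the PTE-inversion
         * mitigation, which is what the e820_any_mapped() check reports. */
        printf("MAX_PA/2 = %llu GiB\n", (unsigned long long)(half_pa >> 30));
        return 0;
}

After boot, the outcome of the check is visible through the sysfs hook added above, in the cpu vulnerabilities directory as the l1tf entry.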