| author | Srinivasarao P <spathi@codeaurora.org> | 2019-05-23 14:05:20 +0530 |
|---|---|---|
| committer | Srinivasarao P <spathi@codeaurora.org> | 2019-05-23 14:22:20 +0530 |
| commit | 780ec0d562724e68ed866498dedc7c2ef229609f (patch) | |
| tree | 2809bda61108d547b8794ba2495978311d4b7e72 /arch/x86/kernel/cpu/bugs.c | |
| parent | d1a5c038050401f5dbd7de222c43a1262083e027 (diff) | |
| parent | 71cb827c0249e87c62139bd4d5897cf59f51cb55 (diff) | |
Merge android-4.4.180 (71cb827) into msm-4.4
* refs/heads/tmp-71cb827
Linux 4.4.180
powerpc/lib: fix book3s/32 boot failure due to code patching
powerpc/booke64: set RI in default MSR
drivers/virt/fsl_hypervisor.c: prevent integer overflow in ioctl
drivers/virt/fsl_hypervisor.c: dereferencing error pointers in ioctl
bonding: fix arp_validate toggling in active-backup mode
ipv4: Fix raw socket lookup for local traffic
vrf: sit mtu should not be updated when vrf netdev is the link
vlan: disable SIOCSHWTSTAMP in container
packet: Fix error path in packet_init
net: ucc_geth - fix Oops when changing number of buffers in the ring
bridge: Fix error path for kobject_init_and_add()
powerpc/64s: Include cpu header
USB: serial: fix unthrottle races
USB: serial: use variable for status
x86/bugs: Change L1TF mitigation string to match upstream
x86/speculation/mds: Fix documentation typo
Documentation: Correct the possible MDS sysfs values
x86/mds: Add MDSUM variant to the MDS documentation
x86/speculation/mds: Add 'mitigations=' support for MDS
x86/speculation: Support 'mitigations=' cmdline option
cpu/speculation: Add 'mitigations=' cmdline option
x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off
x86/speculation/mds: Fix comment
x86/speculation/mds: Add SMT warning message
x86/speculation: Move arch_smt_update() call to after mitigation decisions
x86/cpu/bugs: Use __initconst for 'const' init data
Documentation: Add MDS vulnerability documentation
Documentation: Move L1TF to separate directory
x86/speculation/mds: Add mitigation mode VMWERV
x86/speculation/mds: Add sysfs reporting for MDS
x86/speculation/l1tf: Document l1tf in sysfs
x86/speculation/mds: Add mitigation control for MDS
x86/speculation/mds: Conditionally clear CPU buffers on idle entry
x86/speculation/mds: Clear CPU buffers on exit to user
x86/speculation/mds: Add mds_clear_cpu_buffers()
x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests
x86/speculation/mds: Add BUG_MSBDS_ONLY
x86/speculation/mds: Add basic bug infrastructure for MDS
x86/speculation: Consolidate CPU whitelists
x86/msr-index: Cleanup bit defines
kvm: x86: Report STIBP on GET_SUPPORTED_CPUID
x86/speculation: Provide IBPB always command line options
x86/speculation: Add seccomp Spectre v2 user space protection mode
x86/speculation: Enable prctl mode for spectre_v2_user
x86/speculation: Add prctl() control for indirect branch speculation
x86/speculation: Prevent stale SPEC_CTRL msr content
x86/speculation: Prepare arch_smt_update() for PRCTL mode
x86/speculation: Split out TIF update
x86/speculation: Prepare for conditional IBPB in switch_mm()
x86/speculation: Avoid __switch_to_xtra() calls
x86/process: Consolidate and simplify switch_to_xtra() code
x86/speculation: Prepare for per task indirect branch speculation control
x86/speculation: Add command line control for indirect branch speculation
x86/speculation: Unify conditional spectre v2 print functions
x86/speculataion: Mark command line parser data __initdata
x86/speculation: Mark string arrays const correctly
x86/speculation: Reorder the spec_v2 code
x86/speculation: Rework SMT state change
sched: Add sched_smt_active()
x86/Kconfig: Select SCHED_SMT if SMP enabled
x86/speculation: Reorganize speculation control MSRs update
x86/speculation: Rename SSBD update functions
x86/speculation: Disable STIBP when enhanced IBRS is in use
x86/speculation: Move STIPB/IBPB string conditionals out of cpu_show_common()
x86/speculation: Remove unnecessary ret variable in cpu_show_common()
x86/speculation: Clean up spectre_v2_parse_cmdline()
x86/speculation: Update the TIF_SSBD comment
x86/speculation: Propagate information about RSB filling mitigation to sysfs
x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
x86/speculation: Apply IBPB more strictly to avoid cross-process data leak
x86/mm: Use WRITE_ONCE() when setting PTEs
KVM: x86: SVM: Call x86_spec_ctrl_set_guest/host() with interrupts disabled
x86/cpu: Sanitize FAM6_ATOM naming
x86/microcode: Update the new microcode revision unconditionally
x86/microcode: Make sure boot_cpu_data.microcode is up-to-date
x86/speculation: Remove SPECTRE_V2_IBRS in enum spectre_v2_mitigation
x86/bugs: Fix the AMD SSBD usage of the SPEC_CTRL MSR
locking/atomics, asm-generic: Move some macros from <linux/bitops.h> to a new <linux/bits.h> file
x86/bugs: Switch the selection of mitigation from CPU vendor to CPU features
x86/bugs: Add AMD's SPEC_CTRL MSR usage
x86/bugs: Add AMD's variant of SSB_NO
x86/speculation: Simplify the CPU bug detection logic
x86/speculation: Support Enhanced IBRS on future CPUs
x86/cpufeatures: Hide AMD-specific speculation flags
x86/MCE: Save microcode revision in machine check records
x86/microcode/intel: Check microcode revision before updating sibling threads
bitops: avoid integer overflow in GENMASK(_ULL)
x86: stop exporting msr-index.h to userland
x86/microcode/intel: Add a helper which gives the microcode revision
locking/static_keys: Provide DECLARE and well as DEFINE macros
Don't jump to compute_result state from check_result state
x86/vdso: Pass --eh-frame-hdr to the linker
cw1200: fix missing unlock on error in cw1200_hw_scan()
gpu: ipu-v3: dp: fix CSC handling
selftests/net: correct the return value for run_netsocktests
s390: ctcm: fix ctcm_new_device error return code
ipvs: do not schedule icmp errors from tunnels
init: initialize jump labels before command line option parsing
tools lib traceevent: Fix missing equality check for strcmp
KVM: x86: avoid misreporting level-triggered irqs as edge-triggered in tracing
s390/3270: fix lockdep false positive on view->lock
s390/dasd: Fix capacity calculation for large volumes
libnvdimm/btt: Fix a kmemdup failure check
HID: input: add mapping for keyboard Brightness Up/Down/Toggle keys
HID: input: add mapping for Expose/Overview key
iio: adc: xilinx: fix potential use-after-free on remove
platform/x86: sony-laptop: Fix unintentional fall-through
netfilter: compat: initialize all fields in xt_init
timer/debug: Change /proc/timer_stats from 0644 to 0600
ASoC: Intel: avoid Oops if DMA setup fails
ipv6: fix a potential deadlock in do_ipv6_setsockopt()
UAS: fix alignment of scatter/gather segments
Bluetooth: Align minimum encryption key size for LE and BR/EDR connections
Bluetooth: hidp: fix buffer overflow
scsi: qla2xxx: Fix incorrect region-size setting in optrom SYSFS routines
usb: dwc3: Fix default lpm_nyet_threshold value
genirq: Prevent use-after-free and work list corruption
iommu/amd: Set exclusion range correctly
scsi: csiostor: fix missing data copy in csio_scsi_err_handler()
perf/x86/intel: Fix handling of wakeup_events for multi-entry PEBS
ASoC: tlv320aic32x4: Fix Common Pins
ASoC: cs4270: Set auto-increment bit for register writes
ASoC:soc-pcm:fix a codec fixup issue in TDM case
scsi: libsas: fix a race condition when smp task timeout
media: v4l2: i2c: ov7670: Fix PLL bypass register values
x86/mce: Improve error message when kernel cannot recover, p2
selinux: never allow relabeling on context mounts
Input: snvs_pwrkey - initialize necessary driver data before enabling IRQ
staging: iio: adt7316: fix the dac write calculation
staging: iio: adt7316: fix the dac read calculation
staging: iio: adt7316: allow adt751x to use internal vref for all dacs
usb: usbip: fix isoc packet num validation in get_pipe
ARM: iop: don't use using 64-bit DMA masks
ARM: orion: don't use using 64-bit DMA masks
xsysace: Fix error handling in ace_setup
hugetlbfs: fix memory leak for resv_map
net: hns: Fix WARNING when remove HNS driver with SMMU enabled
net: hns: Use NAPI_POLL_WEIGHT for hns driver
scsi: storvsc: Fix calculation of sub-channel count
vfio/pci: use correct format characters
rtc: da9063: set uie_unsupported when relevant
debugfs: fix use-after-free on symlink traversal
jffs2: fix use-after-free on symlink traversal
bonding: show full hw address in sysfs for slave entries
igb: Fix WARN_ONCE on runtime suspend
rtc: sh: Fix invalid alarm warning for non-enabled alarm
HID: debug: fix race condition with between rdesc_show() and device removal
USB: core: Fix bug caused by duplicate interface PM usage counter
USB: core: Fix unterminated string returned by usb_string()
USB: w1 ds2490: Fix bug caused by improper use of altsetting array
USB: yurex: Fix protection fault after device removal
packet: validate msg_namelen in send directly
bnxt_en: Improve multicast address setup logic.
ipv6: invert flowlabel sharing check in process and user mode
ipv6/flowlabel: wait rcu grace period before put_pid()
ipv4: ip_do_fragment: Preserve skb_iif during fragmentation
ALSA: line6: use dynamic buffers
vfio/type1: Limit DMA mappings per container
kconfig/[mn]conf: handle backspace (^H) key
libata: fix using DMA buffers on stack
scsi: zfcp: reduce flood of fcrscn1 trace records on multi-element RSCN
ceph: fix use-after-free on symlink traversal
usb: u132-hcd: fix resource leak
scsi: qla4xxx: fix a potential NULL pointer dereference
net: ethernet: ti: fix possible object reference leak
net: ibm: fix possible object reference leak
net: xilinx: fix possible object reference leak
net: ks8851: Set initial carrier state to down
net: ks8851: Delay requesting IRQ until opened
net: ks8851: Reassert reset pin if chip ID check fails
net: ks8851: Dequeue RX packets explicitly
ARM: dts: pfla02: increase phy reset duration
usb: gadget: net2272: Fix net2272_dequeue()
usb: gadget: net2280: Fix net2280_dequeue()
usb: gadget: net2280: Fix overrun of OUT messages
sc16is7xx: missing unregister/delete driver on error in sc16is7xx_init()
netfilter: bridge: set skb transport_header before entering NF_INET_PRE_ROUTING
qlcnic: Avoid potential NULL pointer dereference
usbnet: ipheth: fix potential null pointer dereference in ipheth_carrier_set
usbnet: ipheth: prevent TX queue timeouts when device not ready
Documentation: Add nospectre_v1 parameter
powerpc/fsl: Add FSL_PPC_BOOK3E as supported arch for nospectre_v2 boot arg
powerpc/fsl: Fixed warning: orphan section `__btb_flush_fixup'
powerpc/fsl: Sanitize the syscall table for NXP PowerPC 32 bit platforms
powerpc/fsl: Flush the branch predictor at each kernel entry (32 bit)
powerpc/fsl: Emulate SPRN_BUCSR register
powerpc/fsl: Flush branch predictor when entering KVM
powerpc/fsl: Enable runtime patching if nospectre_v2 boot arg is used
ipv4: set the tcp_min_rtt_wlen range from 0 to one day
net: stmmac: move stmmac_check_ether_addr() to driver probe
team: fix possible recursive locking when add slaves
ipv4: add sanity checks in ipv4_link_failure()
Revert "block/loop: Use global lock for ioctl() operation."
bpf: reject wrong sized filters earlier
tipc: check link name with right length in tipc_nl_compat_link_set
tipc: check bearer name with right length in tipc_nl_compat_bearer_enable
netfilter: ebtables: CONFIG_COMPAT: drop a bogus WARN_ON
NFS: Forbid setting AF_INET6 to "struct sockaddr_in"->sin_family.
fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
intel_th: gth: Fix an off-by-one in output unassigning
slip: make slhc_free() silently accept an error pointer
tipc: handle the err returned from cmd header function
powerpc/fsl: Fix the flush of branch predictor.
powerpc/security: Fix spectre_v2 reporting
powerpc/fsl: Update Spectre v2 reporting
powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)
powerpc/fsl: Add nospectre_v2 command line argument
powerpc/fsl: Fix spectre_v2 mitigations reporting
powerpc/fsl: Add macro to flush the branch predictor
powerpc/fsl: Add infrastructure to fixup branch predictor flush
powerpc: Avoid code patching freed init sections
powerpc/powernv: Query firmware for count cache flush settings
powerpc/pseries: Query hypervisor for count cache flush settings
powerpc/64s: Add support for software count cache flush
powerpc/64s: Add new security feature flags for count cache flush
powerpc/asm: Add a patch_site macro & helpers for patching instructions
powerpc/fsl: Add barrier_nospec implementation for NXP PowerPC Book3E
powerpc/64: Make meltdown reporting Book3S 64 specific
powerpc/64: Call setup_barrier_nospec() from setup_arch()
powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC
powerpc/64: Make stf barrier PPC_BOOK3S_64 specific.
powerpc/64: Disable the speculation barrier from the command line
powerpc64s: Show ori31 availability in spectre_v1 sysfs file not v2
powerpc/64s: Enhance the information in cpu_show_spectre_v1()
powerpc: Use barrier_nospec in copy_from_user()
powerpc/64: Use barrier_nospec in syscall entry
powerpc/64s: Enable barrier_nospec based on firmware settings
powerpc/64s: Patch barrier_nospec in modules
powerpc/64s: Add support for ori barrier_nospec patching
powerpc/64s: Add barrier_nospec
powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit
powerpc/64s: Fix section mismatch warnings from setup_rfi_flush()
powerpc/pseries: Restore default security feature flags on setup
powerpc: Move default security feature flags
powerpc/pseries: Fix clearing of security feature flags
powerpc/64s: Wire up cpu_show_spectre_v2()
powerpc/64s: Wire up cpu_show_spectre_v1()
powerpc/pseries: Use the security flags in pseries_setup_rfi_flush()
powerpc/powernv: Use the security flags in pnv_setup_rfi_flush()
powerpc/64s: Enhance the information in cpu_show_meltdown()
powerpc/64s: Move cpu_show_meltdown()
powerpc/powernv: Set or clear security feature flags
powerpc/pseries: Set or clear security feature flags
powerpc: Add security feature flags for Spectre/Meltdown
powerpc/rfi-flush: Call setup_rfi_flush() after LPM migration
powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags
powerpc/rfi-flush: Differentiate enabled and patched flush types
powerpc/rfi-flush: Always enable fallback flush on pseries
powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again
powerpc/rfi-flush: Move the logic to avoid a redo into the debugfs code
powerpc/powernv: Support firmware disable of RFI flush
powerpc/pseries: Support firmware disable of RFI flush
powerpc/64s: Improve RFI L1-D cache flush fallback
powerpc/xmon: Add RFI flush related fields to paca dump
USB: Consolidate LPM checks to avoid enabling LPM twice
USB: Add new USB LPM helpers
sunrpc: don't mark uninitialised items as VALID.
nfsd: Don't release the callback slot unless it was actually held
ceph: fix ci->i_head_snapc leak
ceph: ensure d_name stability in ceph_dentry_hash()
sched/numa: Fix a possible divide-by-zero
trace: Fix preempt_enable_no_resched() abuse
MIPS: scall64-o32: Fix indirect syscall number load
cifs: do not attempt cifs operation on smb2+ rename error
KVM: fail KVM_SET_VCPU_EVENTS with invalid exception number
kbuild: simplify ld-option implementation
ANDROID: cuttlefish_defconfig: Disable DEVTMPFS
ANDROID: Move from clang r349610 to r353983c.
f2fs: fix to avoid accessing xattr across the boundary
f2fs: fix to avoid potential race on sbi->unusable_block_count access/update
f2fs: add tracepoint for f2fs_filemap_fault()
f2fs: introduce DATA_GENERIC_ENHANCE
f2fs: fix to handle error in f2fs_disable_checkpoint()
f2fs: remove redundant check in f2fs_file_write_iter()
f2fs: fix to be aware of readonly device in write_checkpoint()
f2fs: fix to skip recovery on readonly device
f2fs: fix to consider multiple device for readonly check
f2fs: relocate chksum_offset for large_nat_bitmap feature
f2fs: allow unfixed f2fs_checkpoint.checksum_offset
f2fs: Replace spaces with tab
f2fs: insert space before the open parenthesis '('
f2fs: allow address pointer number of dnode aligning to specified size
f2fs: introduce f2fs_read_single_page() for cleanup
f2fs: mark is_extension_exist() inline
f2fs: fix to set FI_UPDATE_WRITE correctly
f2fs: fix to avoid panic in f2fs_inplace_write_data()
f2fs: fix to do sanity check on valid block count of segment
f2fs: fix to do sanity check on valid node/block count
f2fs: fix to avoid panic in do_recover_data()
f2fs: fix to do sanity check on free nid
f2fs: fix to do checksum even if inode page is uptodate
f2fs: fix to avoid panic in f2fs_remove_inode_page()
f2fs: fix to clear dirty inode in error path of f2fs_iget()
f2fs: remove new blank line of f2fs kernel message
f2fs: fix wrong __is_meta_io() macro
f2fs: fix to avoid panic in dec_valid_node_count()
f2fs: fix to avoid panic in dec_valid_block_count()
f2fs: fix to use inline space only if inline_xattr is enable
f2fs: fix to retrieve inline xattr space
f2fs: fix error path of recovery
f2fs: fix to avoid deadloop in foreground GC
f2fs: data: fix warning Using plain integer as NULL pointer
f2fs: add tracepoint for f2fs_file_write_iter()
f2fs: add comment for conditional compilation statement
f2fs: fix potential recursive call when enabling data_flush
f2fs: improve discard handling with multi-device volumes
f2fs: Reduce zoned block device memory usage
f2fs: Fix use of number of devices
The sleepable function handle_lmk_event() is called in atomic context,
so the commit "ANDROID: Communicates LMK events to userland where they
can be logged" was ignored in this merge.
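For context on the note above: a function that may sleep (for example one that takes a mutex or allocates memory with GFP_KERNEL) must not be called from atomic context such as under a spinlock, since that triggers the "BUG: sleeping function called from invalid context" splat. The following is only a minimal illustrative kernel-style sketch of that pattern; the lmk_kill_path()/report_lmk_event() names are hypothetical and are not taken from the dropped commit.

```c
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical illustration of the problem class, not the actual LMK code. */
static DEFINE_SPINLOCK(lmk_lock);
static DEFINE_MUTEX(report_mutex);

static void report_lmk_event(void)
{
	void *buf;

	/*
	 * Both of these may sleep: mutex_lock() can block, and a
	 * GFP_KERNEL allocation is allowed to wait for memory reclaim.
	 */
	mutex_lock(&report_mutex);
	buf = kmalloc(64, GFP_KERNEL);
	kfree(buf);
	mutex_unlock(&report_mutex);
}

static void lmk_kill_path(void)
{
	spin_lock(&lmk_lock);	/* atomic context: sleeping is not allowed */
	report_lmk_event();	/* BUG: sleeping function called under a spinlock */
	spin_unlock(&lmk_lock);
}
```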
Conflicts:
arch/powerpc/include/asm/uaccess.h
kernel/cpu.c
kernel/irq/manage.c
kernel/time/timer_stats.c
net/ipv4/sysctl_net_ipv4.c
Change-Id: I3e5bd447057b44a28fc5000403198ae0fd644480
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 643 |
1 file changed, 540 insertions, 103 deletions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 621bc6561189..2017fa20611c 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/nospec.h> #include <linux/prctl.h> +#include <linux/sched/smt.h> #include <asm/spec-ctrl.h> #include <asm/cmdline.h> @@ -23,6 +24,7 @@ #include <asm/msr.h> #include <asm/paravirt.h> #include <asm/alternative.h> +#include <asm/hypervisor.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <asm/intel-family.h> @@ -31,13 +33,12 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); +static void __init mds_select_mitigation(void); -/* - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any - * writes to SPEC_CTRL contain whatever reserved bits have been set. - */ +/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); +static DEFINE_MUTEX(spec_ctrl_mutex); /* * The vendor and possibly platform specific bits which can be modified in @@ -52,6 +53,19 @@ static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS; u64 x86_amd_ls_cfg_base; u64 x86_amd_ls_cfg_ssbd_mask; +/* Control conditional STIPB in switch_to() */ +DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); +/* Control conditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +/* Control unconditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +/* Control MDS CPU buffer clear before returning to user space */ +DEFINE_STATIC_KEY_FALSE(mds_user_clear); +/* Control MDS CPU buffer clear before idling (halt, mwait) */ +DEFINE_STATIC_KEY_FALSE(mds_idle_clear); +EXPORT_SYMBOL_GPL(mds_idle_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -84,6 +98,10 @@ void __init check_bugs(void) l1tf_select_mitigation(); + mds_select_mitigation(); + + arch_smt_update(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. @@ -116,29 +134,6 @@ void __init check_bugs(void) #endif } -/* The kernel command line selection */ -enum spectre_v2_mitigation_cmd { - SPECTRE_V2_CMD_NONE, - SPECTRE_V2_CMD_AUTO, - SPECTRE_V2_CMD_FORCE, - SPECTRE_V2_CMD_RETPOLINE, - SPECTRE_V2_CMD_RETPOLINE_GENERIC, - SPECTRE_V2_CMD_RETPOLINE_AMD, -}; - -static const char *spectre_v2_strings[] = { - [SPECTRE_V2_NONE] = "Vulnerable", - [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", - [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", - [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", - [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", -}; - -#undef pr_fmt -#define pr_fmt(fmt) "Spectre V2 : " fmt - -static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; - void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) { @@ -156,9 +151,14 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; /* SSBD controlled in MSR_SPEC_CTRL */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) hostval |= ssbd_tif_to_spec_ctrl(ti->flags); + /* Conditional STIBP enabled? 
*/ + if (static_branch_unlikely(&switch_to_cond_stibp)) + hostval |= stibp_tif_to_spec_ctrl(ti->flags); + if (hostval != guestval) { msrval = setguest ? guestval : hostval; wrmsrl(MSR_IA32_SPEC_CTRL, msrval); @@ -192,7 +192,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : ssbd_spec_ctrl_to_tif(hostval); - speculative_store_bypass_update(tif); + speculation_ctrl_update(tif); } } EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); @@ -207,6 +207,57 @@ static void x86_amd_ssb_disable(void) wrmsrl(MSR_AMD64_LS_CFG, msrval); } +#undef pr_fmt +#define pr_fmt(fmt) "MDS: " fmt + +/* Default mitigation for MDS-affected CPUs */ +static enum mds_mitigations mds_mitigation = MDS_MITIGATION_FULL; + +static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", + [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", + [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", +}; + +static void __init mds_select_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { + mds_mitigation = MDS_MITIGATION_OFF; + return; + } + + if (mds_mitigation == MDS_MITIGATION_FULL) { + if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) + mds_mitigation = MDS_MITIGATION_VMWERV; + static_branch_enable(&mds_user_clear); + } + pr_info("%s\n", mds_strings[mds_mitigation]); +} + +static int __init mds_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + mds_mitigation = MDS_MITIGATION_OFF; + else if (!strcmp(str, "full")) + mds_mitigation = MDS_MITIGATION_FULL; + + return 0; +} +early_param("mds", mds_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; + +static enum spectre_v2_user_mitigation spectre_v2_user = SPECTRE_V2_USER_NONE; + #ifdef RETPOLINE static bool spectre_v2_bad_module; @@ -228,67 +279,224 @@ static inline const char *spectre_v2_module_string(void) static inline const char *spectre_v2_module_string(void) { return ""; } #endif -static void __init spec2_print_if_insecure(const char *reason) +static inline bool match_option(const char *arg, int arglen, const char *opt) { - if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); } -static void __init spec2_print_if_secure(const char *reason) +/* The kernel command line selection for spectre v2 */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +enum spectre_v2_user_cmd { + SPECTRE_V2_USER_CMD_NONE, + SPECTRE_V2_USER_CMD_AUTO, + SPECTRE_V2_USER_CMD_FORCE, + SPECTRE_V2_USER_CMD_PRCTL, + SPECTRE_V2_USER_CMD_PRCTL_IBPB, + SPECTRE_V2_USER_CMD_SECCOMP, + SPECTRE_V2_USER_CMD_SECCOMP_IBPB, +}; + +static const char * const spectre_v2_user_strings[] = { + [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", + [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", + [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", + [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", +}; + +static const struct { + const char *option; + enum spectre_v2_user_cmd cmd; + bool secure; +} v2_user_options[] __initconst = { + { "auto", 
SPECTRE_V2_USER_CMD_AUTO, false }, + { "off", SPECTRE_V2_USER_CMD_NONE, false }, + { "on", SPECTRE_V2_USER_CMD_FORCE, true }, + { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, + { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, + { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, + { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, +}; + +static void __init spec_v2_user_print_cond(const char *reason, bool secure) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("spectre_v2_user=%s forced on command line.\n", reason); } -static inline bool retp_compiler(void) +static enum spectre_v2_user_cmd __init +spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) { - return __is_defined(RETPOLINE); + char arg[20]; + int ret, i; + + switch (v2_cmd) { + case SPECTRE_V2_CMD_NONE: + return SPECTRE_V2_USER_CMD_NONE; + case SPECTRE_V2_CMD_FORCE: + return SPECTRE_V2_USER_CMD_FORCE; + default: + break; + } + + ret = cmdline_find_option(boot_command_line, "spectre_v2_user", + arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_USER_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { + if (match_option(arg, ret, v2_user_options[i].option)) { + spec_v2_user_print_cond(v2_user_options[i].option, + v2_user_options[i].secure); + return v2_user_options[i].cmd; + } + } + + pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_USER_CMD_AUTO; } -static inline bool match_option(const char *arg, int arglen, const char *opt) +static void __init +spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) { - int len = strlen(opt); + enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; + bool smt_possible = IS_ENABLED(CONFIG_SMP); + enum spectre_v2_user_cmd cmd; - return len == arglen && !strncmp(arg, opt, len); + if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) + return; + + if (!IS_ENABLED(CONFIG_SMP)) + smt_possible = false; + + cmd = spectre_v2_parse_user_cmdline(v2_cmd); + switch (cmd) { + case SPECTRE_V2_USER_CMD_NONE: + goto set_mode; + case SPECTRE_V2_USER_CMD_FORCE: + mode = SPECTRE_V2_USER_STRICT; + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + mode = SPECTRE_V2_USER_PRCTL; + break; + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + if (IS_ENABLED(CONFIG_SECCOMP)) + mode = SPECTRE_V2_USER_SECCOMP; + else + mode = SPECTRE_V2_USER_PRCTL; + break; + } + + /* Initialize Indirect Branch Prediction Barrier */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + + switch (cmd) { + case SPECTRE_V2_USER_CMD_FORCE: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + static_branch_enable(&switch_mm_always_ibpb); + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + static_branch_enable(&switch_mm_cond_ibpb); + break; + default: + break; + } + + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? + "always-on" : "conditional"); + } + + /* If enhanced IBRS is enabled no STIPB required */ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* + * If SMT is not possible or STIBP is not available clear the STIPB + * mode. 
+ */ + if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; +set_mode: + spectre_v2_user = mode; + /* Only print the STIBP mode when SMT possible */ + if (smt_possible) + pr_info("%s\n", spectre_v2_user_strings[mode]); } +static const char * const spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", + [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", + [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", +}; + static const struct { const char *option; enum spectre_v2_mitigation_cmd cmd; bool secure; -} mitigation_options[] = { - { "off", SPECTRE_V2_CMD_NONE, false }, - { "on", SPECTRE_V2_CMD_FORCE, true }, - { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, - { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, - { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, - { "auto", SPECTRE_V2_CMD_AUTO, false }, +} mitigation_options[] __initconst = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, }; +static void __init spec_v2_print_cond(const char *reason, bool secure) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("%s selected on command line.\n", reason); +} + +static inline bool retp_compiler(void) +{ + return __is_defined(RETPOLINE); +} + static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) { + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; char arg[20]; int ret, i; - enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; - if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || + cpu_mitigations_off()) return SPECTRE_V2_CMD_NONE; - else { - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); - if (ret < 0) - return SPECTRE_V2_CMD_AUTO; - for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { - if (!match_option(arg, ret, mitigation_options[i].option)) - continue; - cmd = mitigation_options[i].cmd; - break; - } + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; - if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_CMD_AUTO; - } + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). 
Switching to AUTO select\n", arg); + return SPECTRE_V2_CMD_AUTO; } if ((cmd == SPECTRE_V2_CMD_RETPOLINE || @@ -305,11 +513,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return SPECTRE_V2_CMD_AUTO; } - if (mitigation_options[i].secure) - spec2_print_if_secure(mitigation_options[i].option); - else - spec2_print_if_insecure(mitigation_options[i].option); - + spec_v2_print_cond(mitigation_options[i].option, + mitigation_options[i].secure); return cmd; } @@ -332,6 +537,13 @@ static void __init spectre_v2_select_mitigation(void) case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: + if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { + mode = SPECTRE_V2_IBRS_ENHANCED; + /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + goto specv2_set_mode; + } if (IS_ENABLED(CONFIG_RETPOLINE)) goto retpoline_auto; break; @@ -369,6 +581,7 @@ retpoline_auto: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); } +specv2_set_mode: spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); @@ -383,20 +596,114 @@ retpoline_auto: setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); - /* Initialize Indirect Branch Prediction Barrier if supported */ - if (boot_cpu_has(X86_FEATURE_IBPB)) { - setup_force_cpu_cap(X86_FEATURE_USE_IBPB); - pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); - } - /* * Retpoline means the kernel is safe because it has no indirect - * branches. But firmware isn't, so use IBRS to protect that. + * branches. Enhanced IBRS protects firmware too, so, enable restricted + * speculation around firmware calls only when Enhanced IBRS isn't + * supported. + * + * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because + * the user might select retpoline on the kernel command line and if + * the CPU supports Enhanced IBRS, kernel might un-intentionally not + * enable IBRS around firmware calls. */ - if (boot_cpu_has(X86_FEATURE_IBRS)) { + if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ + spectre_v2_user_select_mitigation(cmd); +} + +static void update_stibp_msr(void * __unused) +{ + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} + +/* Update x86_spec_ctrl_base in case SMT state changed. */ +static void update_stibp_strict(void) +{ + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; + + if (sched_smt_active()) + mask |= SPEC_CTRL_STIBP; + + if (mask == x86_spec_ctrl_base) + return; + + pr_info("Update user space SMT mitigation: STIBP %s\n", + mask & SPEC_CTRL_STIBP ? "always-on" : "off"); + x86_spec_ctrl_base = mask; + on_each_cpu(update_stibp_msr, NULL, 1); +} + +/* Update the static key controlling the evaluation of TIF_SPEC_IB */ +static void update_indir_branch_cond(void) +{ + if (sched_smt_active()) + static_branch_enable(&switch_to_cond_stibp); + else + static_branch_disable(&switch_to_cond_stibp); +} + +#undef pr_fmt +#define pr_fmt(fmt) fmt + +/* Update the static key controlling the MDS CPU buffer clear in idle */ +static void update_mds_branch_idle(void) +{ + /* + * Enable the idle clearing if SMT is active on CPUs which are + * affected only by MSBDS and not any other MDS variant. 
+ * + * The other variants cannot be mitigated when SMT is enabled, so + * clearing the buffers on idle just to prevent the Store Buffer + * repartitioning leak would be a window dressing exercise. + */ + if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) + return; + + if (sched_smt_active()) + static_branch_enable(&mds_idle_clear); + else + static_branch_disable(&mds_idle_clear); +} + +#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" + +void arch_smt_update(void) +{ + /* Enhanced IBRS implies STIBP. No update required. */ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + mutex_lock(&spec_ctrl_mutex); + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + update_indir_branch_cond(); + break; + } + + switch (mds_mitigation) { + case MDS_MITIGATION_FULL: + case MDS_MITIGATION_VMWERV: + if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) + pr_warn_once(MDS_MSG_SMT); + update_mds_branch_idle(); + break; + case MDS_MITIGATION_OFF: + break; + } + + mutex_unlock(&spec_ctrl_mutex); } #undef pr_fmt @@ -413,7 +720,7 @@ enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_SECCOMP, }; -static const char *ssb_strings[] = { +static const char * const ssb_strings[] = { [SPEC_STORE_BYPASS_NONE] = "Vulnerable", [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", @@ -423,7 +730,7 @@ static const char *ssb_strings[] = { static const struct { const char *option; enum ssb_mitigation_cmd cmd; -} ssb_mitigation_options[] = { +} ssb_mitigation_options[] __initconst = { { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ @@ -437,7 +744,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) char arg[20]; int ret, i; - if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || + cpu_mitigations_off()) { return SPEC_STORE_BYPASS_CMD_NONE; } else { ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", @@ -507,18 +815,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) if (mode == SPEC_STORE_BYPASS_DISABLE) { setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); /* - * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses - * a completely different MSR and bit dependent on family. + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may + * use a completely different MSR and bit dependent on family. 
*/ - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: + if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && + !static_cpu_has(X86_FEATURE_AMD_SSBD)) { + x86_amd_ssb_disable(); + } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - break; - case X86_VENDOR_AMD: - x86_amd_ssb_disable(); - break; } } @@ -536,10 +842,25 @@ static void ssb_select_mitigation(void) #undef pr_fmt #define pr_fmt(fmt) "Speculation prctl: " fmt -static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +static void task_update_spec_tif(struct task_struct *tsk) { - bool update; + /* Force the update of the real TIF bits */ + set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); + + /* + * Immediately update the speculation control MSRs for the current + * task, but for a non-current task delay setting the CPU + * mitigation until it is scheduled next. + * + * This can only happen for SECCOMP mitigation. For PRCTL it's + * always the current task. + */ + if (tsk == current) + speculation_ctrl_update_current(); +} +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ssb_mode != SPEC_STORE_BYPASS_SECCOMP) return -ENXIO; @@ -550,28 +871,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); - update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; default: return -ERANGE; } + return 0; +} - /* - * If being set on non-current task, delay setting the CPU - * mitigation until it is next scheduled. - */ - if (task == current && update) - speculative_store_bypass_update_current(); - +static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + switch (ctrl) { + case PR_SPEC_ENABLE: + if (spectre_v2_user == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict + * mode. + */ + if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); + break; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + /* + * Indirect branch speculation is always allowed when + * mitigation is force disabled. 
+ */ + if (spectre_v2_user == SPECTRE_V2_USER_NONE) + return -EPERM; + if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) + task_set_spec_ib_force_disable(task); + task_update_spec_tif(task); + break; + default: + return -ERANGE; + } return 0; } @@ -581,6 +930,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_set(task, ctrl); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_set(task, ctrl); default: return -ENODEV; } @@ -591,6 +942,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); + if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -613,11 +966,35 @@ static int ssb_prctl_get(struct task_struct *task) } } +static int ib_prctl_get(struct task_struct *task) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + return PR_SPEC_ENABLE; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + case SPECTRE_V2_USER_STRICT: + return PR_SPEC_DISABLE; + default: + return PR_SPEC_NOT_AFFECTED; + } +} + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_get(task); default: return -ENODEV; } @@ -694,16 +1071,66 @@ static void __init l1tf_select_mitigation(void) pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); pr_info("However, doing so will make a part of your RAM unusable.\n"); - pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); + pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); return; } setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); } #undef pr_fmt +#define pr_fmt(fmt) fmt #ifdef CONFIG_SYSFS +static ssize_t mds_show_state(char *buf) +{ +#ifdef CONFIG_HYPERVISOR_GUEST + if (x86_hyper) { + return sprintf(buf, "%s; SMT Host state unknown\n", + mds_strings[mds_mitigation]); + } +#endif + + if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : + sched_smt_active() ? "mitigated" : "disabled")); + } + + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); +} + +static char *stibp_state(void) +{ + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + + switch (spectre_v2_user) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) + return ", STIBP: conditional"; + } + return ""; +} + +static char *ibpb_state(void) +{ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + if (static_key_enabled(&switch_mm_always_ibpb)) + return ", IBPB: always-on"; + if (static_key_enabled(&switch_mm_cond_ibpb)) + return ", IBPB: conditional"; + return ", IBPB: disabled"; + } + return ""; +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -721,9 +1148,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return sprintf(buf, "Mitigation: __user pointer sanitization\n"); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", + return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", spectre_v2_module_string()); case X86_BUG_SPEC_STORE_BYPASS: @@ -731,9 +1160,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_L1TF: if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) - return sprintf(buf, "Mitigation: Page Table Inversion\n"); + return sprintf(buf, "Mitigation: PTE Inversion\n"); break; + case X86_BUG_MDS: + return mds_show_state(buf); + default: break; } @@ -765,4 +1197,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b { return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); } + +ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MDS); +} #endif |