| field | value | date |
|---|---|---|
| author | Greg Kroah-Hartman <gregkh@google.com> | 2018-12-19 19:22:06 +0100 |
| committer | Greg Kroah-Hartman <gregkh@google.com> | 2018-12-19 19:22:06 +0100 |
| commit | 4074ea50e0e77edb648493dc57f0735c6e4ac0cb (patch) | |
| tree | d0d64cc7e14c947244a0f0a4ae16dbd42e005461 /arch/x86/kvm/svm.c | |
| parent | e623751cb9e3cdd98661e2412b8448a519b45689 (diff) | |
| parent | d3c67a52a66ba2d44bcf1b8262609148c7c73113 (diff) | |
Merge 4.4.168 into android-4.4-p
Changes in 4.4.168
ipv6: Check available headroom in ip6_xmit() even without options
net: 8139cp: fix a BUG triggered by changing mtu with network traffic
net: phy: don't allow __set_phy_supported to add unsupported modes
net: Prevent invalid access to skb->prev in __qdisc_drop_all
rtnetlink: ndo_dflt_fdb_dump() only work for ARPHRD_ETHER devices
tcp: fix NULL ref in tail loss probe
tun: forbid iface creation with rtnl ops
neighbour: Avoid writing before skb->head in neigh_hh_output()
ARM: OMAP2+: prm44xx: Fix section annotation on omap44xx_prm_enable_io_wakeup
ARM: OMAP1: ams-delta: Fix possible use of uninitialized field
sysv: return 'err' instead of 0 in __sysv_write_inode
s390/cpum_cf: Reject request for sampling in event initialization
hwmon: (ina2xx) Fix current value calculation
ASoC: dapm: Recalculate audio map forcely when card instantiated
hwmon: (w83795) temp4_type has writable permission
Btrfs: send, fix infinite loop due to directory rename dependencies
ASoC: omap-mcpdm: Add pm_qos handling to avoid under/overruns with CPU_IDLE
ASoC: omap-dmic: Add pm_qos handling to avoid overruns with CPU_IDLE
exportfs: do not read dentry after free
bpf: fix check of allowed specifiers in bpf_trace_printk
USB: omap_udc: use devm_request_irq()
USB: omap_udc: fix crashes on probe error and module removal
USB: omap_udc: fix omap_udc_start() on 15xx machines
USB: omap_udc: fix USB gadget functionality on Palm Tungsten E
KVM: x86: fix empty-body warnings
net: thunderx: fix NULL pointer dereference in nic_remove
ixgbe: recognize 1000BaseLX SFP modules as 1Gbps
net: hisilicon: remove unexpected free_netdev
drm/ast: fixed reading monitor EDID not stable issue
xen: xlate_mmu: add missing header to fix 'W=1' warning
fscache: fix race between enablement and dropping of object
fscache, cachefiles: remove redundant variable 'cache'
ocfs2: fix deadlock caused by ocfs2_defrag_extent()
hfs: do not free node before using
hfsplus: do not free node before using
debugobjects: avoid recursive calls with kmemleak
ocfs2: fix potential use after free
pstore: Convert console write to use ->write_buf
ALSA: pcm: remove SNDRV_PCM_IOCTL1_INFO internal command
KVM: nVMX: fix msr bitmaps to prevent L2 from accessing L0 x2APIC
KVM: nVMX: mark vmcs12 pages dirty on L2 exit
KVM: nVMX: Eliminate vmcs02 pool
KVM: VMX: introduce alloc_loaded_vmcs
KVM: VMX: make MSR bitmaps per-VCPU
KVM/x86: Add IBPB support
KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES
KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL
KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL
KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
x86: reorganize SMAP handling in user space accesses
x86: fix SMAP in 32-bit environments
x86: Introduce __uaccess_begin_nospec() and uaccess_try_nospec
x86/usercopy: Replace open coded stac/clac with __uaccess_{begin, end}
x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec
x86/bugs, KVM: Support the combination of guest and host IBRS
x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest
KVM: SVM: Move spec control call after restore of GS
x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
bpf: support 8-byte metafield access
bpf/verifier: Add spi variable to check_stack_write()
bpf/verifier: Pass instruction index to check_mem_access() and check_xadd()
bpf: Prevent memory disambiguation attack
wil6210: missing length check in wmi_set_ie
posix-timers: Sanitize overrun handling
mm/hugetlb.c: don't call region_abort if region_chg fails
hugetlbfs: fix offset overflow in hugetlbfs mmap
hugetlbfs: check for pgoff value overflow
hugetlbfs: fix bug in pgoff overflow checking
swiotlb: clean up reporting
sr: pass down correctly sized SCSI sense buffer
mm: remove write/force parameters from __get_user_pages_locked()
mm: remove write/force parameters from __get_user_pages_unlocked()
mm/nommu.c: Switch __get_user_pages_unlocked() to use __get_user_pages()
mm: replace get_user_pages_unlocked() write/force parameters with gup_flags
mm: replace get_user_pages_locked() write/force parameters with gup_flags
mm: replace get_vaddr_frames() write/force parameters with gup_flags
mm: replace get_user_pages() write/force parameters with gup_flags
mm: replace __access_remote_vm() write parameter with gup_flags
mm: replace access_remote_vm() write parameter with gup_flags
proc: don't use FOLL_FORCE for reading cmdline and environment
proc: do not access cmdline nor environ from file-backed areas
media: dvb-frontends: fix i2c access helpers for KASAN
matroxfb: fix size of memcpy
staging: speakup: Replace strncpy with memcpy
rocker: fix rocker_tlv_put_* functions for KASAN
selftests: Move networking/timestamping from Documentation
Linux 4.4.168
Change-Id: Icd04a723739ae5e38258a2f6b0aee875f306a0bc
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
| -rw-r--r-- | arch/x86/kvm/svm.c | 143 |
1 file changed, 141 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index df7827a981dd..ecdf724da371 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -37,6 +37,7 @@
 #include <asm/desc.h>
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
+#include <asm/microcode.h>
 #include <asm/spec-ctrl.h>
 
 #include <asm/virtext.h>
@@ -147,6 +148,14 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;
 
+	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate L2_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;
+
 	u32 *msrpm;
 
 	ulong nmi_iret_rip;
@@ -182,6 +191,8 @@ static const struct svm_direct_access_msrs {
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
+	{ .index = MSR_IA32_PRED_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
@@ -411,6 +422,7 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;
 
 	struct page *save_area;
+	struct vmcb *current_vmcb;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -762,6 +774,25 @@ static bool valid_msr_intercept(u32 index)
 	return false;
 }
 
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
+{
+	u8 bit_write;
+	unsigned long tmp;
+	u32 offset;
+	u32 *msrpm;
+
+	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+				      to_svm(vcpu)->msrpm;
+
+	offset    = svm_msrpm_offset(msr);
+	bit_write = 2 * (msr & 0x0f) + 1;
+	tmp       = msrpm[offset];
+
+	BUG_ON(offset == MSR_INVALID);
+
+	return !!test_bit(bit_write, &tmp);
+}
+
 static void set_msr_interception(u32 *msrpm, unsigned msr,
 				 int read, int write)
 {
@@ -1120,6 +1151,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 dummy;
 	u32 eax = 1;
 
+	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;
+
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
@@ -1210,11 +1244,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
+	/*
+	 * The vmcb page can be recycled, causing a false negative in
+	 * svm_vcpu_load(). So do a full IBPB now.
+	 */
+	indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int i;
 
 	if (unlikely(cpu != vcpu->cpu)) {
@@ -1239,6 +1279,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
 		}
 	}
+	if (sd->current_vmcb != svm->vmcb) {
+		sd->current_vmcb = svm->vmcb;
+		indirect_branch_prediction_barrier();
+	}
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -3051,6 +3095,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_spec_ctrl(vcpu))
+			return 1;
+
+		msr_info->data = svm->spec_ctrl;
+		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3125,6 +3183,59 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_spec_ctrl(vcpu))
+			return 1;
+
+		/* The STIBP bit doesn't fault even if it's not advertised */
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+			return 1;
+
+		svm->spec_ctrl = data;
+
+		if (!data)
+			break;
+
+		/*
+		 * For non-nested:
+		 * When it's written (to non-zero) for the first time, pass
+		 * it through.
+		 *
+		 * For nested:
+		 * The handling of the MSR bitmap for L2 guests is done in
+		 * nested_svm_vmrun_msrpm.
+		 * We update the L1 MSR bit as well since it will end up
+		 * touching the MSR anyway now.
+		 */
+		set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+		break;
+	case MSR_IA32_PRED_CMD:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_ibpb(vcpu))
+			return 1;
+
+		if (data & ~PRED_CMD_IBPB)
+			return 1;
+
+		if (!data)
+			break;
+
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+		if (is_guest_mode(vcpu))
+			break;
+		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
@@ -3811,6 +3922,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	local_irq_enable();
 
+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -3915,6 +4034,26 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 #endif
 
+	/*
+	 * We do not use IBRS in the kernel. If this vCPU has used the
+	 * SPEC_CTRL MSR it may have left it on; save the value and
+	 * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list. Especially as the former
+	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+	 *
+	 * For non-nested case:
+	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
+	 * save it.
+	 *
+	 * For nested case:
+	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
+	 * save it.
+	 */
+	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
 	reload_tss(vcpu);
 
 	local_irq_disable();
@@ -4015,7 +4154,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }
@@ -4299,7 +4438,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
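The vmexit comment above hinges on the layout of the SVM MSR permission bitmap: each MSR gets two bits (even bit = read intercept, odd bit = write intercept), so sixteen MSRs fit in one 32-bit word, which is where the `2 * (msr & 0x0f) + 1` in msr_write_intercepted() comes from. The sketch below is a minimal, user-space model of that lookup only, not the kernel code: msrpm_word_offset(), set_write_intercept() and the MSRPM_* macros are simplified stand-ins (svm_msrpm_offset() in the kernel handles several MSR ranges; this model covers just the low 0x0000-0x1fff range that MSR_IA32_SPEC_CTRL, 0x48, falls into).

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model: one MSRPM range covering MSRs 0x0000..0x1fff, 2 bits per MSR. */
#define MSRPM_RANGE_BASE   0x00000000u
#define MSRPM_RANGE_MSRS   0x2000u
#define MSRPM_WORDS        (MSRPM_RANGE_MSRS * 2 / 32)   /* 32-bit words */

#define MSR_IA32_SPEC_CTRL 0x00000048u

/* Stand-in for the kernel's svm_msrpm_offset(): word index of an MSR's 2-bit field. */
static uint32_t msrpm_word_offset(uint32_t msr)
{
	return ((msr - MSRPM_RANGE_BASE) * 2) / 32;   /* 16 MSRs per 32-bit word */
}

/* Same bit math as msr_write_intercepted() in the diff above. */
static bool msr_write_intercepted(const uint32_t *msrpm, uint32_t msr)
{
	uint32_t offset    = msrpm_word_offset(msr);
	uint32_t bit_write = 2 * (msr & 0x0f) + 1;    /* even bit = read, odd bit = write */

	return (msrpm[offset] >> bit_write) & 1;
}

/* Write-intercept half of what set_msr_interception() does. */
static void set_write_intercept(uint32_t *msrpm, uint32_t msr, bool intercept)
{
	uint32_t offset    = msrpm_word_offset(msr);
	uint32_t bit_write = 2 * (msr & 0x0f) + 1;

	if (intercept)
		msrpm[offset] |= 1u << bit_write;
	else
		msrpm[offset] &= ~(1u << bit_write);
}

int main(void)
{
	uint32_t msrpm[MSRPM_WORDS] = { 0 };

	/* Boot state in the patch (.always = false): SPEC_CTRL writes are intercepted. */
	set_write_intercept(msrpm, MSR_IA32_SPEC_CTRL, true);
	printf("write intercepted: %d\n", msr_write_intercepted(msrpm, MSR_IA32_SPEC_CTRL));

	/*
	 * After the guest's first non-zero SPEC_CTRL write, the patch drops the
	 * intercept, so the vmexit path must read the MSR back -- that is the
	 * !msr_write_intercepted() check guarding native_read_msr() above.
	 */
	set_write_intercept(msrpm, MSR_IA32_SPEC_CTRL, false);
	printf("write intercepted: %d\n", msr_write_intercepted(msrpm, MSR_IA32_SPEC_CTRL));

	return 0;
}
```

Built with something like `cc -o msrpm_demo msrpm_demo.c` (file name is arbitrary), the first line prints 1 (writes still trapped, so the guest value is already in svm->spec_ctrl) and the second prints 0 (intercept dropped after the first non-zero write, so vmexit has to rdmsr the live value before handing control back to the host).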
