-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 64
-rw-r--r--  arch/arc/include/asm/delay.h | 3
-rw-r--r--  arch/arc/mm/cache.c | 7
-rw-r--r--  arch/arm/boot/dts/tegra30-cardhu.dtsi | 1
-rw-r--r--  arch/arm/kvm/mmu.c | 42
-rw-r--r--  arch/arm64/kernel/vdso.c | 1
-rw-r--r--  arch/arm64/mm/init.c | 8
-rw-r--r--  arch/mips/bcm47xx/setup.c | 6
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 3
-rw-r--r--  arch/mips/include/asm/processor.h | 2
-rw-r--r--  arch/mips/kernel/ptrace.c | 2
-rw-r--r--  arch/mips/kernel/ptrace32.c | 2
-rw-r--r--  arch/mips/lib/multi3.c | 6
-rw-r--r--  arch/powerpc/include/asm/fadump.h | 3
-rw-r--r--  arch/powerpc/kernel/fadump.c | 92
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 2
-rw-r--r--  arch/s390/include/asm/qdio.h | 1
-rw-r--r--  arch/s390/mm/fault.c | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/s390/pci/pci.c | 2
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c | 22
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 20
-rw-r--r--  arch/x86/include/asm/io.h | 6
-rw-r--r--  arch/x86/include/asm/irqflags.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 4
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 4
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 3
-rw-r--r--  arch/x86/kernel/process_64.c | 1
-rw-r--r--  arch/x86/mm/init.c | 4
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/mm/pat.c | 14
-rw-r--r--  drivers/block/zram/zram_drv.c | 10
-rw-r--r--  drivers/block/zram/zram_drv.h | 16
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 35
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 8
-rw-r--r--  drivers/iio/frequency/ad9523.c | 4
-rw-r--r--  drivers/iommu/dmar.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 18
-rw-r--r--  drivers/md/bcache/writeback.c | 4
-rw-r--r--  drivers/md/dm-cache-metadata.c | 3
-rw-r--r--  drivers/misc/vmw_balloon.c | 67
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 5
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 78
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 1
-rw-r--r--  drivers/net/usb/lan78xx.c | 4
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx1-core.c | 2
-rw-r--r--  drivers/pwm/pwm-tiehrpwm.c | 2
-rw-r--r--  drivers/s390/cio/qdio_main.c | 5
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r--  drivers/scsi/libiscsi.c | 12
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 20
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 11
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/staging/media/omap4iss/iss_video.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 35
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 20
-rw-r--r--  drivers/usb/gadget/udc/r8a66597-udc.c | 6
-rw-r--r--  drivers/usb/phy/phy-fsl-usb.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 38
-rw-r--r--  fs/9p/xattr.c | 6
-rw-r--r--  fs/btrfs/extent-tree.c | 2
-rw-r--r--  fs/cachefiles/namei.c | 1
-rw-r--r--  fs/cachefiles/rdwr.c | 17
-rw-r--r--  fs/cifs/cifs_debug.c | 30
-rw-r--r--  fs/cifs/inode.c | 2
-rw-r--r--  fs/cifs/link.c | 4
-rw-r--r--  fs/cifs/sess.c | 6
-rw-r--r--  fs/cifs/smb2inode.c | 2
-rw-r--r--  fs/ext4/namei.c | 1
-rw-r--r--  fs/ext4/sysfs.c | 13
-rw-r--r--  fs/ext4/xattr.c | 2
-rw-r--r--  fs/fscache/operation.c | 6
-rw-r--r--  fs/fuse/dev.c | 39
-rw-r--r--  fs/fuse/file.c | 1
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/fuse/inode.c | 23
-rw-r--r--  fs/nfs/blocklayout/dev.c | 2
-rw-r--r--  fs/quota/quota.c | 2
-rw-r--r--  fs/sysfs/file.c | 44
-rw-r--r--  fs/ubifs/journal.c | 18
-rw-r--r--  fs/ubifs/lprops.c | 8
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  include/linux/intel-iommu.h | 8
-rw-r--r--  include/linux/io.h | 22
-rw-r--r--  include/linux/sysfs.h | 14
-rw-r--r--  include/linux/zsmalloc.h | 2
-rw-r--r--  include/video/udlfb.h | 2
-rw-r--r--  kernel/kprobes.c | 4
-rw-r--r--  kernel/kthread.c | 8
-rw-r--r--  kernel/power/Kconfig | 1
-rw-r--r--  kernel/sys.c | 95
-rw-r--r--  kernel/trace/blktrace.c | 4
-rw-r--r--  kernel/trace/trace.c | 4
-rw-r--r--  kernel/trace/trace_uprobe.c | 2
-rw-r--r--  kernel/user_namespace.c | 39
-rw-r--r--  kernel/utsname_sysctl.c | 41
-rw-r--r--  mm/memory.c | 12
-rw-r--r--  mm/zsmalloc.c | 43
-rw-r--r--  mm/zswap.c | 9
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/9p/trans_fd.c | 7
-rw-r--r--  net/9p/trans_rdma.c | 3
-rw-r--r--  net/9p/trans_virtio.c | 13
-rw-r--r--  net/caif/caif_dev.c | 4
-rw-r--r--  net/ieee802154/6lowpan/tx.c | 21
-rw-r--r--  net/ipv4/cipso_ipv4.c | 12
-rw-r--r--  net/ipv6/ip6_vti.c | 11
-rw-r--r--  net/mac80211/util.c | 3
-rw-r--r--  net/mac802154/tx.c | 15
-rw-r--r--  net/wireless/nl80211.c | 1
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_user.c | 10
-rw-r--r--  sound/soc/sirf/sirf-usp.c | 7
-rw-r--r--  sound/soc/soc-pcm.c | 8
-rw-r--r--  tools/perf/util/auxtrace.c | 3
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 8
-rw-r--r--  tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc | 28
-rw-r--r--  tools/usb/ffs-test.c | 19
138 files changed, 1082 insertions(+), 489 deletions(-)
diff --git a/Makefile b/Makefile
index 0c5b530ecf02..b00095469841 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 153
+SUBLEVEL = 155
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 63f06a2b1f7f..bbc7cb9faa01 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
+ char tmp[5 * 32];
down_read(&uts_sem);
- error = -EFAULT;
- if (copy_to_user(name + 0, utsname()->sysname, 32))
- goto out;
- if (copy_to_user(name + 32, utsname()->nodename, 32))
- goto out;
- if (copy_to_user(name + 64, utsname()->release, 32))
- goto out;
- if (copy_to_user(name + 96, utsname()->version, 32))
- goto out;
- if (copy_to_user(name + 128, utsname()->machine, 32))
- goto out;
+ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+ memcpy(tmp + 2 * 32, utsname()->release, 32);
+ memcpy(tmp + 3 * 32, utsname()->version, 32);
+ memcpy(tmp + 4 * 32, utsname()->machine, 32);
+ up_read(&uts_sem);
- error = 0;
- out:
- up_read(&uts_sem);
- return error;
+ if (copy_to_user(name, tmp, sizeof(tmp)))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE0(getpagesize)
@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
*/
SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
- unsigned len;
- int i;
+ int len, err = 0;
+ char *kname;
+ char tmp[32];
- if (!access_ok(VERIFY_WRITE, name, namelen))
- return -EFAULT;
-
- len = namelen;
- if (len > 32)
- len = 32;
+ if (namelen < 0 || namelen > 32)
+ namelen = 32;
down_read(&uts_sem);
- for (i = 0; i < len; ++i) {
- __put_user(utsname()->domainname[i], name + i);
- if (utsname()->domainname[i] == '\0')
- break;
- }
+ kname = utsname()->domainname;
+ len = strnlen(kname, namelen);
+ len = min(len + 1, namelen);
+ memcpy(tmp, kname, len);
up_read(&uts_sem);
+ if (copy_to_user(name, tmp, len))
+ return -EFAULT;
return 0;
}
@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
};
unsigned long offset;
const char *res;
- long len, err = -EINVAL;
+ long len;
+ char tmp[__NEW_UTS_LEN + 1];
offset = command-1;
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
- goto out;
+ return -EINVAL;
}
down_read(&uts_sem);
@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
len = strlen(res)+1;
if ((unsigned long)len > (unsigned long)count)
len = count;
- if (copy_to_user(buf, res, len))
- err = -EFAULT;
- else
- err = 0;
+ memcpy(tmp, res, len);
up_read(&uts_sem);
- out:
- return err;
+ if (copy_to_user(buf, tmp, len))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da2115d78a..03d6bb0f4e13 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
+#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
+extern unsigned long loops_per_jiffy;
+
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9a84cbdd44b0..017fb440bba4 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -821,7 +821,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
- unsigned int paddr = pfn << PAGE_SHIFT;
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
@@ -841,8 +841,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page(page_address(page), u_vaddr);
- __flush_dcache_page(page_address(page), page_address(page));
+ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+ __flush_dcache_page((phys_addr_t)page_address(page),
+ (phys_addr_t)page_address(page));
}
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index bb1ca158273c..1922e7a93e40 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -201,6 +201,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 4c055a63c9c6..f2c4207ac70a 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -899,19 +899,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
pmd = stage2_get_pmd(kvm, cache, addr);
VM_BUG_ON(!pmd);
- /*
- * Mapping in huge pages should only happen through a fault. If a
- * page is merged into a transparent huge page, the individual
- * subpages of that huge page should be unmapped through MMU
- * notifiers before we get here.
- *
- * Merging of CompoundPages is not supported; they should become
- * splitting first, unmapped, merged, and mapped back in on-demand.
- */
- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
old_pmd = *pmd;
if (pmd_present(old_pmd)) {
+ /*
+ * Multiple vcpus faulting on the same PMD entry, can
+ * lead to them sequentially updating the PMD with the
+ * same value. Following the break-before-make
+ * (pmd_clear() followed by tlb_flush()) process can
+ * hinder forward progress due to refaults generated
+ * on missing translations.
+ *
+ * Skip updating the page table if the entry is
+ * unchanged.
+ */
+ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+ return 0;
+
+ /*
+ * Mapping in huge pages should only happen through a
+ * fault. If a page is merged into a transparent huge
+ * page, the individual subpages of that huge page
+ * should be unmapped through MMU notifiers before we
+ * get here.
+ *
+ * Merging of CompoundPages is not supported; they
+ * should become splitting first, unmapped, merged,
+ * and mapped back in on-demand.
+ */
+ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
+
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
@@ -968,6 +984,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
/* Create 2nd stage page table mapping - Level 3 */
old_pte = *pte;
if (pte_present(old_pte)) {
+ /* Skip page table update if there is no change */
+ if (pte_val(old_pte) == pte_val(*new_pte))
+ return 0;
+
kvm_set_pte(pte, __pte(0));
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 7e9dd94452bb..46fa4de29fb1 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -222,7 +222,6 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- /* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9930190a4929..26b1221faaf4 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -128,11 +128,13 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
-#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
-
int pfn_valid(unsigned long pfn)
{
- return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
+ phys_addr_t addr = pfn << PAGE_SHIFT;
+
+ if ((addr >> PAGE_SHIFT) != pfn)
+ return 0;
+ return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 4ca33175ec05..6d38948f0f1e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -249,12 +249,6 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
-
- /*
- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
- * Enable ExternalSync for sync instruction to take effect
- */
- set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 202159557e4b..c785da1d8660 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -606,8 +606,6 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2015,7 +2013,6 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 25eef1d60f27..0f07ac3bc744 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -137,7 +137,7 @@ struct mips_fpu_struct {
#define NUM_DSP_REGS 6
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0b70d62e2231..9d04392f7ef0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -878,7 +878,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index d95117e71f69..286ec2d24d47 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
index 111ad475aa0c..4c2483f410c2 100644
--- a/arch/mips/lib/multi3.c
+++ b/arch/mips/lib/multi3.c
@@ -4,12 +4,12 @@
#include "libgcc.h"
/*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
*/
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
/* multiply 64-bit values, low 64-bits returned */
static inline long long notrace dmulu(long long a, long long b)
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 493e72f64b35..5768ec3c1781 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -194,9 +194,6 @@ struct fadump_crash_info_header {
struct cpumask cpu_online_mask;
};
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
-
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 791d4c3329c3..c3c835290131 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -35,6 +35,7 @@
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <linux/slab.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
int crash_mem_ranges;
+int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -726,38 +729,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
-static inline void fadump_add_crash_memory(unsigned long long base,
- unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+ kfree(crash_memory_ranges);
+ crash_memory_ranges = NULL;
+ crash_memory_ranges_size = 0;
+ max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+ struct fad_crash_memory_ranges *new_array;
+ u64 new_size;
+
+ new_size = crash_memory_ranges_size + PAGE_SIZE;
+ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+ new_size);
+
+ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+ if (new_array == NULL) {
+ pr_err("Insufficient memory for setting up crash memory ranges\n");
+ free_crash_memory_ranges();
+ return -ENOMEM;
+ }
+
+ crash_memory_ranges = new_array;
+ crash_memory_ranges_size = new_size;
+ max_crash_mem_ranges = (new_size /
+ sizeof(struct fad_crash_memory_ranges));
+ return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+ unsigned long long end)
{
if (base == end)
- return;
+ return 0;
+
+ if (crash_mem_ranges == max_crash_mem_ranges) {
+ int ret;
+
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
crash_mem_ranges, base, end - 1, (end - base));
crash_memory_ranges[crash_mem_ranges].base = base;
crash_memory_ranges[crash_mem_ranges].size = end - base;
crash_mem_ranges++;
+ return 0;
}
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
+ int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
- fadump_add_crash_memory(start, ra_start);
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(start, ra_start);
+ if (ret)
+ return ret;
+
+ ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
- fadump_add_crash_memory(start, ra_start);
+ ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(ra_end, end);
}
} else
- fadump_add_crash_memory(start, end);
+ ret = fadump_add_crash_memory(start, end);
+
+ return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@@ -793,10 +846,11 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
* ranges will be used create PT_LOAD program headers in elfcore header.
*/
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
@@ -807,7 +861,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ if (ret)
+ return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@@ -816,8 +872,12 @@ static void fadump_setup_crash_memory_ranges(void)
start = fw_dump.boot_memory_size;
/* add this range excluding the reserved dump area. */
- fadump_exclude_reserved_area(start, end);
+ ret = fadump_exclude_reserved_area(start, end);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/*
@@ -941,6 +1001,7 @@ static void register_fadump(void)
{
unsigned long addr;
void *vaddr;
+ int ret;
/*
* If no memory is reserved then we can not register for firmware-
@@ -949,7 +1010,9 @@ static void register_fadump(void)
if (!fw_dump.reserve_dump_area_size)
return;
- fadump_setup_crash_memory_ranges();
+ ret = fadump_setup_crash_memory_ranges();
+ if (ret)
+ return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@@ -1028,6 +1091,7 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
+ free_crash_memory_ranges();
}
}
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 3b6647e574b6..f5313a78e5d6 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -300,7 +300,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
- regs->gpr[3] = savep[0]; /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 998b61cd0e56..4b39ba700d32 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -261,7 +261,6 @@ struct qdio_outbuf_state {
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ec1a30d0d11a..7218689bd6ee 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -459,6 +459,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
+ if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a26528afceb2..727693e283da 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -522,8 +522,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* br %r1 */
_EMIT2(0x07f1);
} else {
- /* larl %r1,.+14 */
- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_tampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct _lowcore, br_r1_trampoline));
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index ef0499b76c50..9a5754d4ee87 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -412,6 +412,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
hwirq = 0;
for_each_pci_msi_entry(msi, pdev) {
rc = -EIO;
+ if (hwirq >= msi_vecs)
+ break;
irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
if (irq < 0)
goto out_msi;
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 646988d4c1a3..740f43b9b541 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
asmlinkage long sys_getdomainname(char __user *name, int len)
{
- int nlen, err;
-
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
+
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ up_read(&uts_sem);
-out:
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
+
+out_unlock:
up_read(&uts_sem);
return err;
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 98a5cf313d39..7301fa2091bc 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -524,23 +524,27 @@ extern void check_pending(int signum);
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
- int nlen, err;
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
+
+ up_read(&uts_sem);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
-out:
+out_unlock:
up_read(&uts_sem);
return err;
}
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 9016b4b70375..6c5020163db0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index cb7f04981c6b..8afbdcd3032b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -28,7 +28,8 @@ extern inline unsigned long native_save_fl(void)
return flags;
}
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
{
asm volatile("push %0 ; popf"
: /* no output */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a3a53955f01c..337c52192278 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -172,9 +172,9 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
-static inline unsigned long l1tf_pfn_limit(void)
+static inline unsigned long long l1tf_pfn_limit(void)
{
- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+ return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
}
extern void early_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 34e4aaaf03d2..b9e6b60df148 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -654,6 +654,10 @@ static void __init l1tf_select_mitigation(void)
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+ half_pa);
+ pr_info("However, doing so will make a part of your RAM unusable.\n");
+ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
return;
}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 4dce22d3cb06..b18fe3d245fe 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -74,6 +74,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
+ if (c->x86 != 6)
+ return false;
+
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_mask == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4cbb60fbff3e..c7cc81e9bb84 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -250,6 +250,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
start_thread_common(regs, new_ip, new_sp,
__USER_CS, __USER_DS, 0);
}
+EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4954a6cef50a..f00eb52c16a6 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -779,7 +779,7 @@ unsigned long max_swapfile_size(void)
if (boot_cpu_has_bug(X86_BUG_L1TF)) {
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
- unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+ unsigned long long l1tf_limit = l1tf_pfn_limit();
/*
* We encode swap offsets also with 3 bits below those for pfn
* which makes the usable limit higher.
@@ -787,7 +787,7 @@ unsigned long max_swapfile_size(void)
#if CONFIG_PGTABLE_LEVELS > 2
l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
- pages = min_t(unsigned long, l1tf_limit, pages);
+ pages = min_t(unsigned long long, l1tf_limit, pages);
}
return pages;
}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 5aad869fa205..74609a957c49 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -138,7 +138,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
/* If it's real memory always allow */
if (pfn_valid(pfn))
return true;
- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
return false;
return true;
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7bd28d45e327..17ea653bf281 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1079,7 +1079,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (end - start >= PUD_SIZE) {
- set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn >> PAGE_SHIFT,
canon_pgprot(pud_pgprot))));
start += PUD_SIZE;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3146b1da6d72..5ff0cb74de55 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -726,6 +726,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+ return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 74d18b3d729b..30b0fbd42e4b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,6 +44,11 @@ static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
+/*
+ * Pages that compress to sizes equals or greater than this are stored
+ * uncompressed in memory.
+ */
+static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
@@ -931,6 +936,8 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
return false;
}
+ if (!huge_class_size)
+ huge_class_size = zs_huge_class_size(zram->mem_pool);
return true;
}
@@ -1117,8 +1124,7 @@ compress_again:
return ret;
}
- if (unlikely(comp_len > max_zpage_size)) {
- comp_len = PAGE_SIZE;
+ if (unlikely(comp_len >= huge_class_size)) {
if (zram_wb_enabled(zram) && allow_wb) {
zcomp_stream_put(zram->comp);
ret = write_to_bdev(zram, bvec, index, bio, &element);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index bbda650f0dc1..3a1cac486e96 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -21,22 +21,6 @@
#include "zcomp.h"
-/*-- Configurable parameters */
-
-/*
- * Pages that compress to size greater than this are stored
- * uncompressed in memory.
- */
-static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
-
-/*
- * NOTE: max_zpage_size must be less than or equal to:
- * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
- * always return failure.
- */
-
-/*-- End of configurable params */
-
#define SECTOR_SHIFT 9
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 0151039bff05..1012b2cb6a16 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2526,7 +2526,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
- if (((int)arg >= cdi->capacity))
+ if (arg >= cdi->capacity)
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 73628c7599e7..3aca9a9011fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -492,6 +492,10 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -507,6 +511,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 08f82eae6939..ac12f74e6b32 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -275,6 +275,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -283,11 +285,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index dfffd528517a..393967025043 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,6 +275,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -284,6 +287,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -293,6 +298,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index dba5c0ea0827..c7c243e9b808 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -450,6 +450,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
+ /*
+ * The bridge resets its registers on unplug. So when we get a plug
+ * event and we're already supposed to be powered, cycle the bridge to
+ * restore its state.
+ */
+ if (status == connector_status_connected &&
+ adv7511->connector.status == connector_status_disconnected &&
+ adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ }
+
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
drm_kms_helper_hotplug_event(adv7511->connector.dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 19fb0bddc1cd..359fe2b8bb8a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -842,6 +842,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ if (!args->user_size)
+ return -EINVAL;
+
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index abacc8f67469..31ca56e593f5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -526,6 +526,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -566,14 +569,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05108b505fbf..d9df8d32fc35 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -274,6 +274,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -282,10 +285,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d2e7d209f651..9835327a3214 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -397,6 +397,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -429,6 +432,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -438,4 +443,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 83aee9e814ba..18ec38d0d3f5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -447,6 +447,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -464,6 +468,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 29bd801f5dad..0c648efd9a58 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -341,7 +341,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 33dbfb2c4748..30bfeb1b2512 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -141,18 +141,13 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
-
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
+ down(&udl->urbs.limit_sem);
spin_lock_irqsave(&udl->urbs.lock, flags);
@@ -176,17 +171,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -202,11 +202,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -217,16 +222,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
struct urb *udl_get_urb(struct drm_device *dev)
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a8bdcb5292f5..57f6eb1427b4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+ *
+ * Note:
+ * CLKH is not allowed to be 0, in this case I2C clock is not generated
+ * at all
*/
- if (clk >= clkl + d) {
+ if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
- clkh = 0;
+ clkh = 1;
clkl = clk - (d << 1);
}
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 44a30f286de1..57b1812a5a18 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -507,7 +507,7 @@ static ssize_t ad9523_store(struct device *dev,
return ret;
if (!state)
- return 0;
+ return len;
mutex_lock(&indio_dev->mlock);
switch ((u32)this_attr->address) {
@@ -641,7 +641,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
- *val2 = (code % 1000000) * 10;
+ *val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index e913a930ac80..5a63e32a4a6b 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1315,8 +1315,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
qi_submit_sync(&desc, iommu);
}
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask)
{
struct qi_desc desc;
@@ -1331,7 +1331,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
qdep = 0;
desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE;
+ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
qi_submit_sync(&desc, iommu);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4efec2db4ee2..49b266433f4c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -419,6 +419,7 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
u8 pasid_supported:3;
u8 pasid_enabled:1;
u8 pri_supported:1;
@@ -1479,6 +1480,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
+ /* For IOMMU that supports device IOTLB throttling (DIT), we assign
+ * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
+ * queue depth at PF level. If DIT is not set, PFSID will be treated as
+ * reserved, which should be set to 0.
+ */
+ if (!ecap_dit(info->iommu->ecap))
+ info->pfsid = 0;
+ else {
+ struct pci_dev *pf_pdev;
+
+ /* pdev will be returned if device is not a vf */
+ pf_pdev = pci_physfn(pdev);
+ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ }
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1537,7 +1552,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f2c0000de613..95a6ae053714 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -462,8 +462,10 @@ static int bch_writeback_thread(void *arg)
* data on cache. BCACHE_DEV_DETACHING flag is set in
* bch_cached_dev_detach().
*/
- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+ up_write(&dc->writeback_lock);
break;
+ }
}
up_write(&dc->writeback_lock);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index d3c55d7754af..905badc6cb17 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -337,7 +337,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
- disk_super->policy_hint_size = 0;
+ disk_super->policy_hint_size = cpu_to_le32(0);
__copy_sm_root(cmd, disk_super);
@@ -652,6 +652,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 5e047bfc0cc4..518e2dec2aa2 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
success = false;
}
- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+ /*
+ * 2MB pages are only supported with batching. If batching is for some
+ * reason disabled, do not use 2MB pages, since otherwise the legacy
+ * mechanism is used with 2MB pages, causing a failure.
+ */
+ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->supported_page_sizes = 2;
else
b->supported_page_sizes = 1;
@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return -1;
+ return -EINVAL;
STATS_INC(b->stats.lock[false]);
@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail[false]);
- return 1;
+ return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
target);
- if (locked > 0) {
+ if (locked) {
STATS_INC(b->stats.refused_alloc[false]);
- if (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ if (locked == -EIO &&
+ (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
vmballoon_free_page(page, false);
return -EIO;
}
@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
} else {
vmballoon_free_page(page, false);
}
- return -EIO;
+ return locked;
}
/* track allocated page */
@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- int error = 0;
+ unsigned long error, dummy;
- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
- error = vmci_doorbell_create(&b->vmci_doorbell,
- VMCI_FLAG_DELAYED_CB,
- VMCI_PRIVILEGE_FLAG_RESTRICTED,
- vmballoon_doorbell, b);
-
- if (error == VMCI_SUCCESS) {
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
- b->vmci_doorbell.context,
- b->vmci_doorbell.resource, error);
- STATS_INC(b->stats.doorbell_set);
- }
- }
+ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+ return 0;
- if (error != 0) {
- vmballoon_vmci_cleanup(b);
+ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+ VMCI_PRIVILEGE_FLAG_RESTRICTED,
+ vmballoon_doorbell, b);
- return -EIO;
- }
+ if (error != VMCI_SUCCESS)
+ goto fail;
+
+ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, dummy);
+
+ STATS_INC(b->stats.doorbell_set);
+
+ if (error != VMW_BALLOON_SUCCESS)
+ goto fail;
return 0;
+fail:
+ vmballoon_vmci_cleanup(b);
+ return -EIO;
}
/*
@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
return 0;
}
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
+ if (!cdm) {
+ of_node_put(np_cdm);
+ dev_err(&ofdev->dev, "can't map clock node!\n");
+ return 0;
+ }
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 0038709fd317..ec59425fdbff 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be90b5c561d6..3e77c3e843bb 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1683,6 +1683,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index d84efcd34fac..c56b61dce2d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3360,14 +3360,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
}
return 0;
@@ -3481,7 +3485,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, false);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
+ depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8390597aecb8..b20bce2c7da1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1842,10 +1842,32 @@ static int enic_stop(struct net_device *netdev)
return 0;
}
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool running = netif_running(netdev);
+ int err = 0;
+
+ ASSERT_RTNL();
+ if (running) {
+ err = enic_stop(netdev);
+ if (err)
+ return err;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if (running) {
+ err = enic_open(netdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct enic *enic = netdev_priv(netdev);
- int running = netif_running(netdev);
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
@@ -1853,20 +1875,12 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (running)
- enic_stop(netdev);
-
- netdev->mtu = new_mtu;
-
if (netdev->mtu > enic->port_mtu)
netdev_warn(netdev,
- "interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
- if (running)
- enic_open(netdev);
-
- return 0;
+ return _enic_change_mtu(netdev, new_mtu);
}
static void enic_change_mtu_work(struct work_struct *work)
@@ -1874,47 +1888,9 @@ static void enic_change_mtu_work(struct work_struct *work)
struct enic *enic = container_of(work, struct enic, change_mtu_work);
struct net_device *netdev = enic->netdev;
int new_mtu = vnic_dev_mtu(enic->vdev);
- int err;
- unsigned int i;
-
- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
rtnl_lock();
-
- /* Stop RQ */
- del_timer_sync(&enic->notify_timer);
-
- for (i = 0; i < enic->rq_count; i++)
- napi_disable(&enic->napi[i]);
-
- vnic_intr_mask(&enic->intr[0]);
- enic_synchronize_irqs(enic);
- err = vnic_rq_disable(&enic->rq[0]);
- if (err) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to disable RQ.\n");
- return;
- }
- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
- vnic_cq_clean(&enic->cq[0]);
- vnic_intr_clean(&enic->intr[0]);
-
- /* Fill RQ with new_mtu-sized buffers */
- netdev->mtu = new_mtu;
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- /* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to alloc receive buffers.\n");
- return;
- }
-
- /* Start RQ */
- vnic_rq_enable(&enic->rq[0]);
- napi_enable(&enic->napi[0]);
- vnic_intr_unmask(&enic->intr[0]);
- enic_notify_timer_start(enic);
-
+ (void)_enic_change_mtu(netdev, new_mtu);
rtnl_unlock();
netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 20d048cdcb88..c898006abb32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -420,6 +420,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
+ p_link->link_up = 0;
}
/* Correct speed according to bandwidth allocation */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 507bbb0355c2..f6108413adba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
+ lp->mii_bus = NULL;
return ret;
}
return 0;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index acec4b565511..1aede726052c 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -902,6 +902,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
netif_carrier_on(dev->net);
+
+ tasklet_schedule(&dev->bh);
}
return ret;
@@ -1361,8 +1363,6 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
netif_dbg(dev, ifup, dev->net,
"MAC address set to random addr");
}
-
- tasklet_schedule(&dev->bh);
}
ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 317bc79cc8b9..c178e1218347 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1385,7 +1385,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
case 0x001:
printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
break;
- case 0x010:
+ case 0x002:
printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
break;
default:
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index acaf84cadca3..6c9420ee9e03 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -434,7 +434,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group >= info->ngroups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 6a41e66015b6..062dff1c902d 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -384,6 +384,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
aqcsfrc_mask = AQCSFRC_CSFA_MASK;
}
+ /* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
* Action Qualifier control on PWM output from next TBCLK
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 742ca57ece8c..d64b401f3d05 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -640,21 +640,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
unsigned long phys_aob = 0;
if (!q->use_cq)
- goto out;
+ return 0;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
-out:
+ q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 34a1b1f333b4..d5184aa1ace4 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -752,9 +752,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
case ELS_LOGO:
if (fip->mode == FIP_MODE_VN2VN) {
if (fip->state != FIP_ST_VNMP_UP)
- return -EINVAL;
+ goto drop;
if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
- return -EINVAL;
+ goto drop;
} else {
if (fip->state != FIP_ST_ENABLED)
return 0;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9f0b00c38658..a74f8fbefd33 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -283,11 +283,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] "
"rejected.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
/*
@@ -296,10 +296,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (conn->session->fast_abort) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] fast abort.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
break;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9f2c9a2fdd74..55fb1088d0b8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -678,8 +678,24 @@ static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (device_remove_file_self(dev, attr))
- scsi_remove_device(to_scsi_device(dev));
+ struct kernfs_node *kn;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+ /*
+ * Concurrent writes into the "delete" sysfs attribute may trigger
+ * concurrent calls to device_remove_file() and scsi_remove_device().
+ * device_remove_file() handles concurrent removal calls by
+ * serializing these and by ignoring the second and later removal
+ * attempts. Concurrent calls of scsi_remove_device() are
+ * serialized. The second and later calls of scsi_remove_device() are
+ * ignored because the first call of that function changes the device
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+ scsi_remove_device(to_scsi_device(dev));
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
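The block comment above spells out why sdev_store_delete() no longer relies on device_remove_file_self(): a store callback that deletes its own attribute must first drop kernfs "active" protection, or the removal waits on the very callback that is still running. Below is a minimal sketch of that self-deletion pattern, built on the sysfs_break_active_protection()/sysfs_unbreak_active_protection() helpers introduced in fs/sysfs/file.c further down; foo_remove_device() is a hypothetical stand-in for a driver's own removal path, not part of this patch.

/* Illustrative sketch only, not part of the patch: a self-deleting sysfs
 * attribute. foo_remove_device() is a hypothetical driver teardown helper.
 */
#include <linux/device.h>
#include <linux/kernfs.h>
#include <linux/sysfs.h>

static void foo_remove_device(struct device *dev);

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Drop "active" protection so removing this attribute (and the
	 * device that owns it) from its own ->store() cannot deadlock. */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	WARN_ON_ONCE(!kn);

	device_remove_file(dev, attr);	/* tolerates concurrent writers */
	foo_remove_device(dev);		/* driver-specific removal */

	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}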
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0f133c1817de..0de2f9069e23 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -545,9 +545,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
- cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
+ if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+ cmd->result = (DID_RESET << 16);
+ } else {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION &&
+ cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ }
} else
switch (btstat) {
case BTSTAT_SUCCESS:
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index c872a2e54c4b..2603bee2ce07 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -220,7 +220,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
- if (spicfg->wdelay)
+ if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 2a0158bb4974..5a78ef057635 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -22,6 +21,8 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <asm/cacheflush.h>
+
#include "iss_video.h"
#include "iss.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bc2cbffec27e..63e54beed196 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -323,8 +323,7 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
+ goto free_sess;
}
sess->creation_time = get_jiffies_64();
@@ -340,20 +339,28 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- kfree(sess);
- return -ENOMEM;
+ goto remove_idr;
}
sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess->sess_ops);
- kfree(sess);
- return -ENOMEM;
+ goto free_ops;
}
return 0;
+
+free_ops:
+ kfree(sess->sess_ops);
+remove_idr:
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+free_sess:
+ kfree(sess);
+ conn->sess = NULL;
+ return -ENOMEM;
}
static int iscsi_login_zero_tsih_s2(
@@ -1142,13 +1149,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
ISCSI_LOGIN_STATUS_INIT_ERR);
if (!zero_tsih || !conn->sess)
goto old_sess_out;
- if (conn->sess->se_sess)
- transport_free_session(conn->sess->se_sess);
- if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
- idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
- }
+
+ transport_free_session(conn->sess->se_sess);
+
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+
kfree(conn->sess->sess_ops);
kfree(conn->sess);
conn->sess = NULL;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index b5dab103be38..e931c3cb0840 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -941,14 +941,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
};
struct cntrl_cur_lay3 {
- __u32 dCUR;
+ __le32 dCUR;
};
struct cntrl_range_lay3 {
- __u16 wNumSubRanges;
- __u32 dMIN;
- __u32 dMAX;
- __u32 dRES;
+ __le16 wNumSubRanges;
+ __le32 dMIN;
+ __le32 dMAX;
+ __le32 dRES;
} __packed;
static inline void
@@ -1296,9 +1296,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
- c.dCUR = p_srate;
+ c.dCUR = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- c.dCUR = c_srate;
+ c.dCUR = cpu_to_le32(c_srate);
value = min_t(unsigned, w_length, sizeof c);
memcpy(req->buf, &c, value);
@@ -1336,15 +1336,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
if (entity_id == USB_IN_CLK_ID)
- r.dMIN = p_srate;
+ r.dMIN = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- r.dMIN = c_srate;
+ r.dMIN = cpu_to_le32(c_srate);
else
return -EOPNOTSUPP;
r.dMAX = r.dMIN;
r.dRES = 0;
- r.wNumSubRanges = 1;
+ r.wNumSubRanges = cpu_to_le16(1);
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index baa0609a429d..e34094647603 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597)
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
- msleep(3);
+ mdelay(3);
r8a66597_bset(r8a66597, PLLC, SYSCFG0);
- msleep(1);
+ mdelay(1);
r8a66597_bset(r8a66597, SCKE, SYSCFG0);
@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock)
r8a66597->ep0_req->length = 2;
/* AV: what happens if we get called again before that gets through? */
spin_unlock(&r8a66597->lock);
- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
spin_lock(&r8a66597->lock);
}
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 94eb2923afed..85d031ce85c1 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -879,6 +879,7 @@ int usb_otg_start(struct platform_device *pdev)
if (pdata->init && pdata->init(pdev) != 0)
return -EINVAL;
+#ifdef CONFIG_PPC32
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
@@ -886,6 +887,7 @@ int usb_otg_start(struct platform_device *pdev)
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
+#endif
/* request irq */
p_otg->irq = platform_get_irq(pdev, 0);
@@ -976,7 +978,7 @@ int usb_otg_start(struct platform_device *pdev)
/*
* state file in sysfs
*/
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 2a708dd2ecee..ecd9daf8702c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1697,12 +1697,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
return 0;
}
-static int do_unregister_framebuffer(struct fb_info *fb_info)
+static int unbind_console(struct fb_info *fb_info)
{
struct fb_event event;
- int i, ret = 0;
+ int ret;
+ int i = fb_info->node;
- i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
@@ -1717,17 +1717,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
unlock_fb_info(fb_info);
console_unlock();
+ return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+ struct fb_event event;
+ int ret;
+
+ ret = unbind_console(fb_info);
+
if (ret)
return -EINVAL;
pm_vt_switch_unregister(fb_info->dev);
- unlink_framebuffer(fb_info);
+ __unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
- registered_fb[i] = NULL;
+ registered_fb[fb_info->node] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
@@ -1740,7 +1752,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
return 0;
}
-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1752,6 +1764,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
fb_info->dev = NULL;
}
+
+ return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+ int ret;
+
+ ret = __unlink_framebuffer(fb_info);
+ if (ret)
+ return ret;
+
+ unbind_console(fb_info);
+
return 0;
}
EXPORT_SYMBOL(unlink_framebuffer);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index e3d026ac382e..f35168ce426b 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
{
struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
struct iov_iter from;
- int retval;
+ int retval, err;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
@@ -128,7 +128,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
retval);
else
p9_client_write(fid, 0, &from, &retval);
- p9_client_clunk(fid);
+ err = p9_client_clunk(fid);
+ if (!retval && err)
+ retval = err;
return retval;
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 982a9d509817..493c7354ec0b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4128,7 +4128,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
- return ret;
+ return 0;
}
/*
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index c4b893453e0e..c43b4b08546b 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -194,7 +194,6 @@ wait_for_old_object:
pr_err("\n");
pr_err("Error: Unexpected object collision\n");
cachefiles_printk_object(object, xobject);
- BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0..5b68cf526887 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
struct cachefiles_one_read *monitor =
container_of(wait, struct cachefiles_one_read, monitor);
struct cachefiles_object *object;
+ struct fscache_retrieval *op = monitor->op;
struct wait_bit_key *key = _key;
struct page *page = wait->private;
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
list_del(&wait->task_list);
/* move onto the action list and queue for FS-Cache thread pool */
- ASSERT(monitor->op);
+ ASSERT(op);
- object = container_of(monitor->op->op.object,
- struct cachefiles_object, fscache);
+ /* We need to temporarily bump the usage count as we don't own a ref
+ * here; otherwise cachefiles_read_copier() may free the op between the
+ * monitor being enqueued on the op->to_do list and the op getting
+ * enqueued on the work queue.
+ */
+ fscache_get_retrieval(op);
+ object = container_of(op->op.object, struct cachefiles_object, fscache);
spin_lock(&object->work_lock);
- list_add_tail(&monitor->op_link, &monitor->op->to_do);
+ list_add_tail(&monitor->op_link, &op->to_do);
spin_unlock(&object->work_lock);
- fscache_enqueue_retrieval(monitor->op);
+ fscache_enqueue_retrieval(op);
+ fscache_put_retrieval(op);
return 0;
}
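The new comment carries the reasoning: the waiter never owned a reference on the retrieval op, so once the monitor is visible on op->to_do the copier may drop the last reference and free the op before fscache_enqueue_retrieval() runs. The fix is the usual hold-a-temporary-reference-across-publication idiom; a hedged, generic sketch of that idiom follows, with every foo_* name hypothetical.

/* Illustrative sketch only: pin a refcounted object while handing work
 * that refers to it over to another context. All names are hypothetical.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_op {
	struct kref		ref;
	spinlock_t		lock;
	struct list_head	to_do;
};

static void foo_op_release(struct kref *ref);	 /* frees the op */
static void foo_enqueue_work(struct foo_op *op); /* kicks the consumer */

static void foo_queue_item(struct foo_op *op, struct list_head *item)
{
	/* Take a temporary reference: the moment the item is on ->to_do,
	 * the consumer may run and drop the reference we do not own. */
	kref_get(&op->ref);

	spin_lock(&op->lock);
	list_add_tail(item, &op->to_do);
	spin_unlock(&op->lock);

	foo_enqueue_work(op);
	kref_put(&op->ref, foo_op_release);
}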
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 0a3544fb50f9..bcbe42fb7e92 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
seq_printf(m, "Features:");
#ifdef CONFIG_CIFS_DFS_UPCALL
- seq_printf(m, " dfs");
+ seq_printf(m, " DFS");
#endif
#ifdef CONFIG_CIFS_FSCACHE
- seq_printf(m, " fscache");
+ seq_printf(m, ",FSCACHE");
+#endif
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ seq_printf(m, ",SMB_DIRECT");
+#endif
+#ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, ",STATS2");
+#elif defined(CONFIG_CIFS_STATS)
+ seq_printf(m, ",STATS");
+#endif
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, ",DEBUG2");
+#elif defined(CONFIG_CIFS_DEBUG)
+ seq_printf(m, ",DEBUG");
+#endif
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
#endif
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- seq_printf(m, " lanman");
+ seq_printf(m, ",WEAK_PW_HASH");
#endif
#ifdef CONFIG_CIFS_POSIX
- seq_printf(m, " posix");
+ seq_printf(m, ",CIFS_POSIX");
#endif
#ifdef CONFIG_CIFS_UPCALL
- seq_printf(m, " spnego");
+ seq_printf(m, ",UPCALL(SPNEGO)");
#endif
#ifdef CONFIG_CIFS_XATTR
- seq_printf(m, " xattr");
+ seq_printf(m, ",XATTR");
#endif
#ifdef CONFIG_CIFS_ACL
- seq_printf(m, " acl");
+ seq_printf(m, ",ACL");
#endif
seq_putc(m, '\n');
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cdeb0293267..36c8594bb147 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1063,6 +1063,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (!server->ops->set_file_info)
return -ENOSYS;
+ info_buf.Pad = 0;
+
if (attrs->ia_valid & ATTR_ATIME) {
set_time = true;
info_buf.LastAccessTime =
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e3548f73bdea..728289c32b32 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -419,7 +419,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct smb2_file_all_info *pfile_info = NULL;
oparms.tcon = tcon;
@@ -481,7 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
if (backup_cred(cifs_sb))
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index a035d1a95882..9bc7a29f88d6 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
goto setup_ntlmv2_ret;
}
*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1238cd3552f9..0267d8cbc996 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
(buf->Attributes == 0))
return 0; /* would be a no op, no sense sending this */
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b9324d0ff218..f9852b0f7c04 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1401,6 +1401,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto cleanup_and_exit;
dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
"falling back\n"));
+ ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
if (!nblocks) {
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index c2ee23acf359..ae9929d678d6 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -277,8 +277,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_pointer_ui:
if (!ptr)
return 0;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned int *) ptr));
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ le32_to_cpup(ptr));
+ else
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ *((unsigned int *) ptr));
case attr_pointer_atomic:
if (!ptr)
return 0;
@@ -311,7 +315,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
- *((unsigned int *) ptr) = t;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ *((__le32 *) ptr) = cpu_to_le32(t);
+ else
+ *((unsigned int *) ptr) = t;
return len;
case attr_inode_readahead:
return inode_readahead_blks_store(a, sbi, buf, len);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b16bfb52edb2..a9b96d73d7d2 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -197,6 +197,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
if ((void *)next >= end)
return -EFSCORRUPTED;
+ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
+ return -EFSCORRUPTED;
e = next;
}
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index de67745e1cd7..77946d6f617d 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
ASSERT(op->processor != NULL);
ASSERT(fscache_object_is_available(op->object));
ASSERTCMP(atomic_read(&op->usage), >, 0);
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+ op->state, ==, FSCACHE_OP_ST_CANCELLED);
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op)
struct fscache_cache *cache;
_enter("{OBJ%x OP%x,%d}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+ op->object ? op->object->debug_id : 0,
+ op->debug_id, atomic_read(&op->usage));
ASSERTCMP(atomic_read(&op->usage), >, 0);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 028f38f0906c..fa33dbad8871 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -146,6 +146,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
return !fc->initialized || (for_background && fc->blocked);
}
+static void fuse_drop_waiting(struct fuse_conn *fc)
+{
+ if (fc->connected) {
+ atomic_dec(&fc->num_waiting);
+ } else if (atomic_dec_and_test(&fc->num_waiting)) {
+ /* wake up aborters */
+ wake_up_all(&fc->blocked_waitq);
+ }
+}
+
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
bool for_background)
{
@@ -192,7 +202,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return req;
out:
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
return ERR_PTR(err);
}
@@ -299,7 +309,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
if (test_bit(FR_WAITING, &req->flags)) {
__clear_bit(FR_WAITING, &req->flags);
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
}
if (req->stolen_file)
@@ -385,7 +395,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
if (test_and_set_bit(FR_FINISHED, &req->flags))
- return;
+ goto put_request;
spin_lock(&fiq->waitq.lock);
list_del_init(&req->intr_entry);
@@ -415,6 +425,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
if (req->end)
req->end(fc, req);
+put_request:
fuse_put_request(fc, req);
}
@@ -2008,11 +2019,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
if (!fud)
return -EPERM;
+ pipe_lock(pipe);
+
bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
- if (!bufs)
+ if (!bufs) {
+ pipe_unlock(pipe);
return -ENOMEM;
+ }
- pipe_lock(pipe);
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
@@ -2168,6 +2182,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags)) {
set_bit(FR_PRIVATE, &req->flags);
+ __fuse_get_request(req);
list_move(&req->list, &to_end1);
}
spin_unlock(&req->waitq.lock);
@@ -2194,7 +2209,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
while (!list_empty(&to_end1)) {
req = list_first_entry(&to_end1, struct fuse_req, list);
- __fuse_get_request(req);
list_del_init(&req->list);
request_end(fc, req);
}
@@ -2205,6 +2219,11 @@ void fuse_abort_conn(struct fuse_conn *fc)
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
+void fuse_wait_aborted(struct fuse_conn *fc)
+{
+ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+}
+
int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_dev *fud = fuse_get_dev(file);
@@ -2212,9 +2231,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
if (fud) {
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
+ LIST_HEAD(to_end);
+ spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
- end_requests(fc, &fpq->processing);
+ list_splice_init(&fpq->processing, &to_end);
+ spin_unlock(&fpq->lock);
+
+ end_requests(fc, &to_end);
+
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f0de8fe294f4..5fc6f20f1dfc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -897,6 +897,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
}
if (WARN_ON(req->num_pages >= req->max_pages)) {
+ unlock_page(page);
fuse_put_request(fc, req);
return -EIO;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1cc0dce47a2f..994ae8f87cba 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -856,6 +856,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
+void fuse_wait_aborted(struct fuse_conn *fc);
/**
* Invalidate inode attributes
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e04db24ed164..ca9c492a1885 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -379,9 +379,6 @@ static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- fuse_send_destroy(fc);
-
- fuse_abort_conn(fc);
mutex_lock(&fuse_mutex);
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
@@ -1180,16 +1177,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
}
-static void fuse_kill_sb_anon(struct super_block *sb)
+static void fuse_sb_destroy(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc) {
+ fuse_send_destroy(fc);
+
+ fuse_abort_conn(fc);
+ fuse_wait_aborted(fc);
+
down_write(&fc->killsb);
fc->sb = NULL;
up_write(&fc->killsb);
}
+}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ fuse_sb_destroy(sb);
kill_anon_super(sb);
}
@@ -1212,14 +1218,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
static void fuse_kill_sb_blk(struct super_block *sb)
{
- struct fuse_conn *fc = get_fuse_conn_super(sb);
-
- if (fc) {
- down_write(&fc->killsb);
- fc->sb = NULL;
- up_write(&fc->killsb);
- }
-
+ fuse_sb_destroy(sb);
kill_block_super(sb);
}
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index a861bbdfe577..fa8b484d035d 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -162,7 +162,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
chunk = div_u64(offset, dev->chunk_size);
div_u64_rem(chunk, dev->nr_children, &chunk_idx);
- if (chunk_idx > dev->nr_children) {
+ if (chunk_idx >= dev->nr_children) {
dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
__func__, chunk_idx, offset, dev->chunk_size);
/* error, should not happen */
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 3746367098fd..bb0d643481c8 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -17,6 +17,7 @@
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
+#include <linux/nospec.h>
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
@@ -644,6 +645,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
return -EINVAL;
+ type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 39c75a86c67f..666986b95c5d 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -408,6 +408,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished unless this function
+ * is called. Hence this function is useful in methods that implement self
+ * deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct kernfs_node *kn;
+
+ kobject_get(kobj);
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
+ return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). Since this function
+ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
+ * argument passed to sysfs_break_active_protection() that attribute may have
+ * been removed between the sysfs_break_active_protection() and
+ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
+ * this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+ struct kobject *kobj = kn->parent->priv;
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
+/**
* sysfs_remove_file_ns - remove an object attribute with a custom ns tag
* @kobj: object we're acting for
* @attr: attribute descriptor
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 22dba8837a86..539fa934ed93 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
spin_lock(&ui->ui_lock);
ui->synced_i_size = ui->ui_size;
spin_unlock(&ui->ui_lock);
+ if (xent) {
+ spin_lock(&host_ui->ui_lock);
+ host_ui->synced_i_size = host_ui->ui_size;
+ spin_unlock(&host_ui->ui_lock);
+ }
mark_inode_clean(c, ui);
mark_inode_clean(c, host_ui);
return 0;
@@ -1107,7 +1112,7 @@ static int recomp_data_node(const struct ubifs_info *c,
int err, len, compr_type, out_len;
out_len = le32_to_cpu(dn->size);
- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
if (!buf)
return -ENOMEM;
@@ -1186,7 +1191,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
else if (err)
goto out_free;
else {
- if (le32_to_cpu(dn->size) <= dlen)
+ int dn_len = le32_to_cpu(dn->size);
+
+ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+ ubifs_err(c, "bad data node (block %u, inode %lu)",
+ blk, inode->i_ino);
+ ubifs_dump_node(c, dn);
+ goto out_free;
+ }
+
+ if (dn_len <= dlen)
dlen = 0; /* Nothing to do */
else {
int compr_type = le16_to_cpu(dn->compr_type);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index a0011aa3a779..f43f162e36f4 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
- if (!buf)
- return -ENOMEM;
-
/*
* After an unclean unmount, empty and freeable LEBs
* may contain garbage - do not scan them.
@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
return LPT_SCAN_CONTINUE;
}
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
sleb = ubifs_scan(c, lnum, 0, buf, 0);
if (IS_ERR(sleb)) {
ret = PTR_ERR(sleb);
diff --git a/fs/xattr.c b/fs/xattr.c
index a40f49cc04c3..7444fb1b3484 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -453,7 +453,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
if (error > 0) {
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_to_user(kvalue, size);
+ posix_acl_fix_xattr_to_user(kvalue, error);
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 23e129ef6726..e353f6600b0b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
+#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
@@ -294,6 +295,7 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -318,6 +320,7 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
diff --git a/include/linux/io.h b/include/linux/io.h
index de64c1e53612..8ab45611fc35 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -154,4 +154,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_IO_H */
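The comment above states the contract for the two new hooks: ioremap*() keeps the PAT memtype tracking up to date, but a driver that later maps the same device range into userspace on its own must reserve the memtype explicitly if it wants write-combining. A rough sketch of the intended usage, assuming a PCI device whose BAR 2 holds VRAM; struct foo_device and the foo_* helpers are hypothetical.

/* Illustrative sketch only: reserve a WC memtype for a BAR that will also
 * be mapped into userspace, and release it on teardown. foo_* names are
 * hypothetical.
 */
#include <linux/io.h>
#include <linux/pci.h>

struct foo_device {
	void __iomem	*vram;
	resource_size_t	 vram_base;
	resource_size_t	 vram_size;
};

static int foo_map_vram(struct pci_dev *pdev, struct foo_device *foo)
{
	resource_size_t base = pci_resource_start(pdev, 2);
	resource_size_t size = pci_resource_len(pdev, 2);
	int ret;

	/* Record the range as write-combined in the memtype tracking so
	 * that later userspace mappings of it end up WC rather than UC. */
	ret = arch_io_reserve_memtype_wc(base, size);
	if (ret)
		return ret;

	foo->vram = ioremap_wc(base, size);
	if (!foo->vram) {
		arch_io_free_memtype_wc(base, size);
		return -ENOMEM;
	}
	foo->vram_base = base;
	foo->vram_size = size;
	return 0;
}

static void foo_unmap_vram(struct foo_device *foo)
{
	iounmap(foo->vram);
	arch_io_free_memtype_wc(foo->vram_base, foo->vram_size);
}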
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9c452f6db438..2839d624d5ee 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98f2708..2219cce81ca4 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
+size_t zs_huge_class_size(struct zs_pool *pool);
+
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4..2ad9a6d37ff4 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -87,7 +87,7 @@ struct dlfb_data {
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
#define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
/* remove these once align.h patch is taken into kernel */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index bbe9dd0886bd..388bcace62f8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2441,7 +2441,7 @@ static int __init debugfs_kprobe_init(void)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL,
+ file = debugfs_create_file("list", 0400, dir, NULL,
&debugfs_kprobes_operations);
if (!file)
goto error;
@@ -2451,7 +2451,7 @@ static int __init debugfs_kprobe_init(void)
if (!file)
goto error;
- file = debugfs_create_file("blacklist", 0444, dir, NULL,
+ file = debugfs_create_file("blacklist", 0400, dir, NULL,
&debugfs_kprobe_blacklist_ops);
if (!file)
goto error;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index d9b0be5c6a5f..f529aafc1180 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -313,10 +313,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
+ char name[TASK_COMM_LEN];
va_list args;
va_start(args, namefmt);
- vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+ /*
+ * task is already visible to other tasks, so updating
+ * COMM must be protected.
+ */
+ vsnprintf(name, sizeof(name), namefmt, args);
+ set_task_comm(task, name);
va_end(args);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6d6f63be1f9b..4335e7d1c391 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -115,6 +115,7 @@ config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
select PM
+ select SRCU
config PM_SLEEP_SMP
def_bool y
diff --git a/kernel/sys.c b/kernel/sys.c
index 0df4753d4969..ede0c1f4b860 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1144,18 +1144,19 @@ static int override_release(char __user *release, size_t len)
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
- int errno = 0;
+ struct new_utsname tmp;
down_read(&uts_sem);
- if (copy_to_user(name, utsname(), sizeof *name))
- errno = -EFAULT;
+ memcpy(&tmp, utsname(), sizeof(tmp));
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!errno && override_release(name->release, sizeof(name->release)))
- errno = -EFAULT;
- if (!errno && override_architecture(name))
- errno = -EFAULT;
- return errno;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ if (override_architecture(name))
+ return -EFAULT;
+ return 0;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
@@ -1164,55 +1165,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
*/
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
- int error = 0;
+ struct old_utsname tmp;
if (!name)
return -EFAULT;
down_read(&uts_sem);
- if (copy_to_user(name, utsname(), sizeof(*name)))
- error = -EFAULT;
+ memcpy(&tmp, utsname(), sizeof(tmp));
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!error && override_release(name->release, sizeof(name->release)))
- error = -EFAULT;
- if (!error && override_architecture(name))
- error = -EFAULT;
- return error;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ if (override_architecture(name))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
- int error;
+ struct oldold_utsname tmp = {};
if (!name)
return -EFAULT;
- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
- return -EFAULT;
down_read(&uts_sem);
- error = __copy_to_user(&name->sysname, &utsname()->sysname,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->release, &utsname()->release,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->release + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->version, &utsname()->version,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->version + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->machine, &utsname()->machine,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
+ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
+ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!error && override_architecture(name))
- error = -EFAULT;
- if (!error && override_release(name->release, sizeof(name->release)))
- error = -EFAULT;
- return error ? -EFAULT : 0;
+ if (override_architecture(name))
+ return -EFAULT;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ return 0;
}
#endif
@@ -1226,17 +1218,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
- down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- struct new_utsname *u = utsname();
+ struct new_utsname *u;
+ down_write(&uts_sem);
+ u = utsname();
memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
uts_proc_notify(UTS_PROC_HOSTNAME);
+ up_write(&uts_sem);
}
- up_write(&uts_sem);
return errno;
}
@@ -1244,8 +1237,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
- int i, errno;
+ int i;
struct new_utsname *u;
+ char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
@@ -1254,11 +1248,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
i = 1 + strlen(u->nodename);
if (i > len)
i = len;
- errno = 0;
- if (copy_to_user(name, u->nodename, i))
- errno = -EFAULT;
+ memcpy(tmp, u->nodename, i);
up_read(&uts_sem);
- return errno;
+ if (copy_to_user(name, tmp, i))
+ return -EFAULT;
+ return 0;
}
#endif
@@ -1277,17 +1271,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
- down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- struct new_utsname *u = utsname();
+ struct new_utsname *u;
+ down_write(&uts_sem);
+ u = utsname();
memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
uts_proc_notify(UTS_PROC_DOMAINNAME);
+ up_write(&uts_sem);
}
- up_write(&uts_sem);
return errno;
}
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b674a7a8d655..c39fc68c4778 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1768,6 +1768,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&bdev->bd_mutex);
if (attr == &dev_attr_enable) {
+ if (!!value == !!q->blk_trace) {
+ ret = 0;
+ goto out_unlock_bdev;
+ }
if (value)
ret = blk_trace_setup_queue(q, bdev);
else
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b30adaae739a..1600d1d6e93f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6657,7 +6657,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
if (buffer) {
mutex_lock(&trace_types_lock);
- if (val) {
+ if (!!val == tracer_tracing_is_on(tr)) {
+ val = 0; /* do nothing */
+ } else if (val) {
tracer_tracing_on(tr);
if (tr->current_trace->start)
tr->current_trace->start(tr);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 64304388993e..31a436f9f13b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -969,7 +969,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
list_del_rcu(&link->list);
/* synchronize with u{,ret}probe_trace_func */
- synchronize_sched();
+ synchronize_rcu();
kfree(link);
if (!list_empty(&tu->tp.files))
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 88fefa68c516..a965df4b54f5 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -602,9 +602,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
struct uid_gid_map new_map;
unsigned idx;
struct uid_gid_extent *extent = NULL;
- unsigned long page = 0;
+ unsigned long page;
char *kbuf, *pos, *next_line;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
+
+ /* Only allow < page size writes at the beginning of the file */
+ if ((*ppos != 0) || (count >= PAGE_SIZE))
+ return -EINVAL;
+
+ /* Get a buffer */
+ page = __get_free_page(GFP_TEMPORARY);
+ kbuf = (char *) page;
+ if (!page)
+ return -ENOMEM;
+
+ /* Slurp in the user data */
+ if (copy_from_user(kbuf, buf, count)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ kbuf[count] = '\0';
/*
* The userns_state_mutex serializes all writes to any given map.
@@ -638,24 +655,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
goto out;
- /* Get a buffer */
- ret = -ENOMEM;
- page = __get_free_page(GFP_TEMPORARY);
- kbuf = (char *) page;
- if (!page)
- goto out;
-
- /* Only allow < page size writes at the beginning of the file */
- ret = -EINVAL;
- if ((*ppos != 0) || (count >= PAGE_SIZE))
- goto out;
-
- /* Slurp in the user data */
- ret = -EFAULT;
- if (copy_from_user(kbuf, buf, count))
- goto out;
- kbuf[count] = '\0';
-
/* Parse the user data */
ret = -EINVAL;
pos = kbuf;
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c8eac43267e9..d2b3b2973456 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -17,7 +17,7 @@
#ifdef CONFIG_PROC_SYSCTL
-static void *get_uts(struct ctl_table *table, int write)
+static void *get_uts(struct ctl_table *table)
{
char *which = table->data;
struct uts_namespace *uts_ns;
@@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
uts_ns = current->nsproxy->uts_ns;
which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
- if (!write)
- down_read(&uts_sem);
- else
- down_write(&uts_sem);
return which;
}
-static void put_uts(struct ctl_table *table, int write, void *which)
-{
- if (!write)
- up_read(&uts_sem);
- else
- up_write(&uts_sem);
-}
-
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
@@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
{
struct ctl_table uts_table;
int r;
+ char tmp_data[__NEW_UTS_LEN + 1];
+
memcpy(&uts_table, table, sizeof(uts_table));
- uts_table.data = get_uts(table, write);
+ uts_table.data = tmp_data;
+
+ /*
+ * Buffer the value in tmp_data so that proc_dostring() can be called
+ * without holding any locks.
+ * We also need to read the original value in the write==1 case to
+ * support partial writes.
+ */
+ down_read(&uts_sem);
+ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
+ up_read(&uts_sem);
r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
- put_uts(table, write, uts_table.data);
- if (write)
+ if (write) {
+ /*
+ * Write back the new value.
+ * Note that, since we dropped uts_sem, the result can
+ * theoretically be incorrect if there are two parallel writes
+ * at non-zero offsets to the same sysctl.
+ */
+ down_write(&uts_sem);
+ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
+ up_write(&uts_sem);
proc_sys_poll_notify(table->poll);
+ }
return r;
}
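Both this file and the kernel/sys.c hunks above follow the rule the new comment states: uts_sem must not be held across anything that touches userspace or can sleep on a page fault, so the data is snapshotted into an on-stack buffer under the lock and copied out afterwards. A minimal sketch of that snapshot-then-copy pattern; the foo_* names and FOO_NAME_LEN are hypothetical.

/* Illustrative sketch only: copy shared state to the stack under the lock,
 * then touch userspace with no locks held. Names are hypothetical.
 */
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define FOO_NAME_LEN	65

static DECLARE_RWSEM(foo_sem);
static char foo_name[FOO_NAME_LEN];	/* protected by foo_sem */

static long foo_get_name(char __user *ubuf, size_t len)
{
	char tmp[FOO_NAME_LEN];

	down_read(&foo_sem);
	memcpy(tmp, foo_name, sizeof(tmp));
	up_read(&foo_sem);

	/* copy_to_user() may fault and block; the lock is already dropped,
	 * so a stalled copy cannot hold up writers of foo_name. */
	if (len > sizeof(tmp))
		len = sizeof(tmp);
	if (copy_to_user(ubuf, tmp, len))
		return -EFAULT;
	return len;
}

Writes take the same shape in the hunks above: the new value is validated and copied from userspace first, and the semaphore is taken only around the memcpy into the shared buffer.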
diff --git a/mm/memory.c b/mm/memory.c
index ab4214e214fe..fb55c14556e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -361,15 +361,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
- /*
- * When there's less then two users of this mm there cannot be a
- * concurrent page-table walk.
- */
- if (atomic_read(&tlb->mm->mm_users) < 2) {
- __tlb_remove_table(table);
- return;
- }
-
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
@@ -3702,6 +3693,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ if (!maddr)
+ return -ENOMEM;
+
if (write)
memcpy_toio(maddr + offset, buf, len);
else
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1eb00e343523..21de9ad7f151 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -188,6 +188,7 @@ static int zs_size_classes;
* (see: fix_fullness_group())
*/
static const int fullness_threshold_frac = 4;
+static size_t huge_class_size;
struct size_class {
spinlock_t lock;
@@ -1489,6 +1490,25 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
+/**
+ * zs_huge_class_size() - Returns the size (in bytes) of the first huge
+ * zsmalloc &size_class.
+ * @pool: zsmalloc pool to use
+ *
+ * The function returns the size of the first huge class - any object of equal
+ * or bigger size will be stored in zspage consisting of a single physical
+ * page.
+ *
+ * Context: Any context.
+ *
+ * Return: the size (in bytes) of the first huge zsmalloc &size_class.
+ */
+size_t zs_huge_class_size(struct zs_pool *pool)
+{
+ return huge_class_size;
+}
+EXPORT_SYMBOL_GPL(zs_huge_class_size);
+
static unsigned long obj_malloc(struct size_class *class,
struct zspage *zspage, unsigned long handle)
{
@@ -2432,6 +2452,7 @@ struct zs_pool *zs_create_pool(const char *name)
for (i = zs_size_classes - 1; i >= 0; i--) {
int size;
int pages_per_zspage;
+ int objs_per_zspage;
struct size_class *class;
int fullness = 0;
@@ -2439,6 +2460,28 @@ struct zs_pool *zs_create_pool(const char *name)
if (size > ZS_MAX_ALLOC_SIZE)
size = ZS_MAX_ALLOC_SIZE;
pages_per_zspage = get_pages_per_zspage(size);
+ objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
+
+ /*
+ * We iterate from biggest down to smallest classes,
+ * so huge_class_size holds the size of the first huge
+ * class. Any object bigger than or equal to that will
+ * end up in the huge class.
+ */
+ if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
+ !huge_class_size) {
+ huge_class_size = size;
+ /*
+ * The object uses ZS_HANDLE_SIZE bytes to store the
+ * handle. We need to subtract it, because zs_malloc()
+ * unconditionally adds handle size before it performs
+ * size class search - so an object may be smaller than
+ * huge class size, yet it still can end up in the huge
+ * class because it grows by ZS_HANDLE_SIZE extra bytes
+ * right before class lookup.
+ */
+ huge_class_size -= (ZS_HANDLE_SIZE - 1);
+ }
/*
* size_class is used for normal zsmalloc operation such
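
The huge_class_size bookkeeping above is easiest to follow with numbers. Assume the loop's condition first triggers at a class of 3264 bytes and ZS_HANDLE_SIZE is 8; both values are illustrative, not taken from the patch. The exported threshold becomes 3264 - 7 = 3257, and an object of 3257 bytes grows to 3265 bytes inside zs_malloc(), overflowing the 3264-byte class and landing in a huge class:

/* Worked example of the ZS_HANDLE_SIZE adjustment above (assumed values). */
#include <stdio.h>

int main(void)
{
        int class_size = 3264;  /* hypothetical size where the condition first hits */
        int handle = 8;         /* hypothetical ZS_HANDLE_SIZE */
        int threshold = class_size - (handle - 1);      /* 3257 */

        printf("reported huge_class_size: %d\n", threshold);
        printf("a %d-byte object needs %d bytes with its handle (> %d)\n",
               threshold, threshold + handle, class_size);
        return 0;
}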
diff --git a/mm/zswap.c b/mm/zswap.c
index 568015e2fe7a..87a8491909ee 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1018,6 +1018,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
ret = -ENOMEM;
goto reject;
}
+
+ /* A second zswap_is_full() check after
+ * zswap_shrink() to make sure it's now
+ * under the max_pool_percent
+ */
+ if (zswap_is_full()) {
+ ret = -ENOMEM;
+ goto reject;
+ }
}
/* allocate entry */
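
The added re-check is a plain test / reclaim / test-again flow: the first zswap_is_full() hit triggers zswap_shrink(), and the store proceeds only if the pool is back under max_pool_percent afterwards. A minimal userspace sketch of that control flow, with pool_is_full() and pool_shrink() as hypothetical stand-ins for the zswap helpers:

/* Check, shrink, then check again before accepting the store. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long pool_pages = 100;
static const unsigned long pool_limit = 90;

static bool pool_is_full(void) { return pool_pages > pool_limit; }
static void pool_shrink(void)  { if (pool_pages >= 5) pool_pages -= 5; }

static int store_page(void)
{
        if (pool_is_full()) {
                pool_shrink();
                /* Shrinking may not free enough; reject rather than
                 * overshoot the configured limit. */
                if (pool_is_full())
                        return -1;      /* mirrors the -ENOMEM path above */
        }
        pool_pages++;                   /* accept the store */
        return 0;
}

int main(void)
{
        printf("store: %d (pool_pages=%lu)\n", store_page(), pool_pages);
        return 0;
}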
diff --git a/net/9p/client.c b/net/9p/client.c
index 3ff26eb1ea20..ed8738c4dc09 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c)
{
int err = 0;
struct p9_req_t *req;
- char *version;
+ char *version = NULL;
int msize;
p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index bced8c074c12..2f68ffda3715 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
spin_lock_irqsave(&p9_poll_lock, flags);
list_del_init(&m->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+ flush_work(&p9_poll_work);
}
/**
@@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
if (err < 0)
return err;
- if (valid_ipaddr4(addr) < 0)
+ if (addr == NULL || valid_ipaddr4(addr) < 0)
return -EINVAL;
csocket = NULL;
@@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
csocket = NULL;
+ if (addr == NULL)
+ return -EINVAL;
+
if (strlen(addr) >= UNIX_PATH_MAX) {
pr_err("%s (%d): address too long: %s\n",
__func__, task_pid_nr(current), addr);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 52b4a2f993f2..f42550dd3560 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct ib_qp_init_attr qp_attr;
struct ib_cq_init_attr cq_attr = {};
+ if (addr == NULL)
+ return -EINVAL;
+
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
if (err < 0)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2ddeecca5b12..6018a1c0dc28 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
s = rest_of_page(data);
if (s > count)
s = count;
- BUG_ON(index > limit);
+ BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_buf(&sg[index++], data, s);
@@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
s = PAGE_SIZE - data_off;
if (s > count)
s = count;
+ BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_page(&sg[index++], pdata[i++], s, data_off);
@@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
+ __le32 sz;
int n = p9_get_mapped_pages(chan, &out_pages, uodata,
outlen, &offs, &need_drop);
if (n < 0)
@@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
outlen = n;
}
+ /* The size field of the message must include the length of the
+ * header and the length of the data. We didn't actually know
+ * the length of the data until this point so add it in now.
+ */
+ sz = cpu_to_le32(req->tc->size + outlen);
+ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
} else if (uidata) {
int n = p9_get_mapped_pages(chan, &in_pages, uidata,
inlen, &offs, &need_drop);
@@ -646,6 +654,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
int ret = -ENOENT;
int found = 0;
+ if (devname == NULL)
+ return -EINVAL;
+
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
if (!strncmp(devname, chan->tag, chan->tag_len) &&
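
The zero-copy write fix above relies on the fact that a 9P message's leading size[4] field counts the header plus the payload, while the payload length is only known after the user pages have been mapped, so the already-built header must be patched in place. A standalone sketch of rewriting a little-endian 32-bit length prefix once the payload size is known; the buffer layout and lengths are illustrative, not the real 9P wire format:

/* Patch a little-endian 32-bit size prefix after the payload length is known. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t to_le32(uint32_t x)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return x;
#else
        return __builtin_bswap32(x);
#endif
}

int main(void)
{
        unsigned char msg[64] = { 0 };
        uint32_t header_len = 11;       /* hypothetical header size */
        uint32_t payload_len = 32;      /* learned only after mapping pages */
        uint32_t sz = to_le32(header_len + payload_len);

        /* The header was built earlier with a placeholder size ... */
        memset(msg, 0xab, header_len);

        /* ... so overwrite the first four bytes with the real total. */
        memcpy(&msg[0], &sz, sizeof(sz));

        printf("size on the wire: %u\n", (unsigned int)(header_len + payload_len));
        return 0;
}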
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index d730a0f68f46..a0443d40d677 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
caifd = caif_get(skb->dev);
WARN_ON(caifd == NULL);
- if (caifd == NULL)
+ if (!caifd) {
+ rcu_read_unlock();
return;
+ }
caifd_hold(caifd);
rcu_read_unlock();
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index d4353faced35..a10db45b2e1e 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb)
- return NET_XMIT_DROP;
+ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
+ skb_tailroom(skb) < ldev->needed_tailroom)) {
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(skb, ldev->needed_headroom,
+ ldev->needed_tailroom, GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+ }
ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
if (ret < 0) {
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 5f3b81941a6f..5169b9b36b6a 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1593,9 +1593,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
int taglen;
for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
- if (optptr[0] == IPOPT_CIPSO)
+ switch (optptr[0]) {
+ case IPOPT_CIPSO:
return optptr;
- taglen = optptr[1];
+ case IPOPT_END:
+ return NULL;
+ case IPOPT_NOOP:
+ taglen = 1;
+ break;
+ default:
+ taglen = optptr[1];
+ }
optlen -= taglen;
optptr += taglen;
}
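
The hardened option walk matters because IPOPT_END (0) terminates the option list and IPOPT_NOOP (1) is a single octet with no length field; treating either as an ordinary option would read a bogus length from the following byte. A standalone sketch of the same walk over a raw options buffer, with an extra length sanity check that the patch itself does not add; the option type values follow the usual IPv4 definitions (CIPSO is 134):

/* Walk IPv4 options: END stops, NOOP is one byte, others carry a length. */
#include <stddef.h>
#include <stdio.h>

#define IPOPT_END   0
#define IPOPT_NOOP  1
#define IPOPT_CIPSO 134

static const unsigned char *find_cipso(const unsigned char *opt, int optlen)
{
        while (optlen > 0) {
                int taglen;

                switch (opt[0]) {
                case IPOPT_CIPSO:
                        return opt;
                case IPOPT_END:
                        return NULL;            /* end of option list */
                case IPOPT_NOOP:
                        taglen = 1;             /* no length octet */
                        break;
                default:
                        taglen = opt[1];
                }
                if (taglen <= 0 || taglen > optlen)
                        return NULL;            /* malformed length */
                optlen -= taglen;
                opt += taglen;
        }
        return NULL;
}

int main(void)
{
        /* Two NOOPs, then a 6-byte CIPSO option with made-up payload. */
        unsigned char opts[] = { 1, 1, 134, 6, 0, 0, 0, 5 };

        printf("CIPSO found at offset %td\n", find_cipso(opts, sizeof(opts)) - opts);
        return 0;
}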
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b8bf123f7f79..060862a6f2f2 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -469,10 +469,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
goto tx_err_dst_release;
}
- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
- skb_dst_set(skb, dst);
- skb->dev = skb_dst(skb)->dev;
-
mtu = dst_mtu(dst);
if (!skb->ignore_df && skb->len > mtu) {
skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
@@ -487,9 +483,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
htonl(mtu));
}
- return -EMSGSIZE;
+ err = -EMSGSIZE;
+ goto tx_err_dst_release;
}
+ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+ skb_dst_set(skb, dst);
+ skb->dev = skb_dst(skb)->dev;
+
err = dst_output(t->net, skb->sk, skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index f4b9f97af092..c8cc9bd7cac1 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2006,7 +2006,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (!sta->uploaded)
continue;
- if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+ if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
continue;
for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 3827f359b336..9e1ff9d4cf2d 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
int ret;
if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
- u16 crc = crc_ccitt(0, skb->data, skb->len);
+ struct sk_buff *nskb;
+ u16 crc;
+
+ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ goto err_tx;
+ }
+ }
+ crc = crc_ccitt(0, skb->data, skb->len);
put_unaligned_le16(crc, skb_put(skb, 2));
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c0bdd8fb4c8d..0a13d55cb4d3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3998,6 +3998,7 @@ static int parse_station_flags(struct genl_info *info,
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
BIT(NL80211_STA_FLAG_MFP) |
BIT(NL80211_STA_FLAG_AUTHORIZED);
+ break;
default:
return -EINVAL;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 6173a55af214..4615138b104f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2327,6 +2327,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
return make_blackhole(net, dst_orig->ops->family, dst_orig);
+ if (IS_ERR(dst))
+ dst_release(dst_orig);
+
return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8efc7c39275d..ee6037dd0a1c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -988,10 +988,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
{
struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
- if (nlsk)
- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
- else
- return -1;
+ if (!nlsk) {
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+
+ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
static inline size_t xfrm_spdinfo_msgsize(void)
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 45fc06c0e0e5..6b504f407079 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -367,10 +367,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, usp);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap(&pdev->dev, mem_res->start,
- resource_size(mem_res));
- if (base == NULL)
- return -ENOMEM;
+ base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&sirf_usp_regmap_config);
if (IS_ERR(usp->regmap))
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 609b9b294429..c3f173fbb135 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1684,6 +1684,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
int i;
for (i = 0; i < be->num_codecs; i++) {
+ /*
+ * Skip CODECs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
+ stream))
+ continue;
+
codec_dai_drv = be->codec_dais[i]->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 67551225764e..5053fac29f05 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -187,6 +187,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
for (i = 0; i < queues->nr_queues; i++) {
list_splice_tail(&queues->queue_array[i].head,
&queue_array[i].head);
+ queue_array[i].tid = queues->queue_array[i].tid;
+ queue_array[i].cpu = queues->queue_array[i].cpu;
+ queue_array[i].set = queues->queue_array[i].set;
queue_array[i].priv = queues->queue_array[i].priv;
}
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dac7e05a6ac..33c79e415075 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -663,9 +663,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
if (!printed || !summary_only)
print_header();
- if (topo.num_cpus > 1)
- format_counters(&average.threads, &average.cores,
- &average.packages);
+ format_counters(&average.threads, &average.cores, &average.packages);
printed = 1;
@@ -2693,7 +2691,9 @@ void process_cpuid()
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
- if (family == 6 || family == 0xf)
+ if (family == 0xf)
+ family += (fms >> 20) & 0xff;
+ if (family >= 6)
model += ((fms >> 16) & 0xf) << 4;
if (debug)
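
The CPUID decoding change folds the extended family in only when the base family is 0xf, and the extended model in for any family of 6 or above. Applied to a hypothetical CPUID.1:EAX value of 0x00810f12 (base family 0xf, extended family 0x8, extended model 0x1), the arithmetic works out as follows:

/* Decode family/model/stepping from an example CPUID.1:EAX value. */
#include <stdio.h>

int main(void)
{
        unsigned int fms = 0x00810f12;          /* example value, not measured */
        unsigned int family   = (fms >> 8) & 0xf;
        unsigned int model    = (fms >> 4) & 0xf;
        unsigned int stepping = fms & 0xf;

        if (family == 0xf)
                family += (fms >> 20) & 0xff;   /* add extended family */
        if (family >= 6)
                model += ((fms >> 16) & 0xf) << 4;      /* add extended model */

        /* Prints: family 0x17 model 0x11 stepping 0x2 */
        printf("family 0x%x model 0x%x stepping 0x%x\n", family, model, stepping);
        return 0;
}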
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 000000000000..3b1f45e13a2e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 88d5e71be044..47dfa0b0fcd7 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
/******************** Little Endian Handling ********************************/
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed, so they are defined below as constant
+ * expressions that remain valid in an initializer.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x) (x)
+#define cpu_to_le32(x) (x)
+#else
+#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x) \
+ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
+ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
+#endif
+
#define le32_to_cpu(x) le32toh(x)
#define le16_to_cpu(x) le16toh(x)
-
/******************** Messages and Errors ***********************************/
static const char argv0[] = "ffs-test";
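
The point of the rewritten macros is that both branches are plain integer constant expressions, so they stay usable inside a static initializer, which is the reason given above for moving away from htole16()/htole32(). A small standalone check of that property, reusing the same definitions with an illustrative struct:

/* The cpu_to_le16/32 forms above are constant expressions, so they may
 * appear in a static initializer; the struct and values are made up. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define cpu_to_le16(x) (x)
#define cpu_to_le32(x) (x)
#else
#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
#define cpu_to_le32(x) \
        ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
         (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
#endif

static const struct {
        uint16_t id;
        uint32_t length;
} header = {
        .id     = cpu_to_le16(0x1234),
        .length = cpu_to_le32(0x00010000),
};

int main(void)
{
        printf("id=0x%04x length=0x%08x\n", header.id, (unsigned int)header.length);
        return 0;
}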