-rw-r--r--  Documentation/sysctl/fs.txt | 7
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/kvm/mmu.c | 12
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 6
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/x86/include/asm/pmem.h | 45
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  block/genhd.c | 1
-rw-r--r--  drivers/acpi/power.c | 1
-rw-r--r--  drivers/hv/channel.c | 16
-rw-r--r--  drivers/hv/connection.c | 8
-rw-r--r--  drivers/hv/hv.c | 5
-rw-r--r--  drivers/hv/hv_balloon.c | 136
-rw-r--r--  drivers/input/mouse/elantech.c | 8
-rw-r--r--  drivers/md/Makefile | 4
-rw-r--r--  drivers/misc/uid_sys_stats.c | 9
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 1
-rw-r--r--  drivers/mtd/ubi/upd.c | 8
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 15
-rw-r--r--  drivers/tty/nozomi.c | 2
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 33
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/smb1ops.c | 10
-rw-r--r--  fs/cifs/smb2pdu.c | 5
-rw-r--r--  fs/f2fs/super.c | 8
-rw-r--r--  fs/gfs2/dir.c | 4
-rw-r--r--  fs/mount.h | 2
-rw-r--r--  fs/namespace.c | 50
-rw-r--r--  fs/pnode.c | 2
-rw-r--r--  fs/pnode.h | 1
-rw-r--r--  fs/sdcardfs/dentry.c | 8
-rw-r--r--  fs/sdcardfs/derived_perm.c | 8
-rw-r--r--  fs/sdcardfs/file.c | 12
-rw-r--r--  fs/sdcardfs/lookup.c | 19
-rw-r--r--  fs/sdcardfs/multiuser.h | 7
-rw-r--r--  include/linux/mount.h | 2
-rw-r--r--  kernel/cgroup.c | 2
-rw-r--r--  kernel/events/core.c | 57
-rw-r--r--  kernel/sysctl.c | 9
-rw-r--r--  kernel/trace/ring_buffer.c | 16
-rw-r--r--  kernel/trace/trace.c | 8
-rw-r--r--  net/ipv4/ping.c | 5
-rw-r--r--  net/mac80211/rx.c | 21
-rw-r--r--  net/tipc/bearer.c | 13
-rw-r--r--  net/tipc/bearer.h | 13
-rw-r--r--  net/tipc/core.c | 1
-rw-r--r--  net/tipc/core.h | 3
-rw-r--r--  net/tipc/name_distr.c | 24
-rw-r--r--  net/tipc/node.c | 28
-rw-r--r--  net/tipc/socket.c | 56
-rw-r--r--  net/tipc/udp_media.c | 7
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 4
-rw-r--r--  security/keys/gc.c | 2
-rw-r--r--  security/keys/keyctl.c | 20
-rw-r--r--  security/keys/process_keys.c | 44
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 2
57 files changed, 590 insertions, 209 deletions
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 302b5ed616a6..35e17f748ca7 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -265,6 +265,13 @@ aio-nr can grow to.
==============================================================
+mount-max:
+
+This denotes the maximum number of mounts that may exist
+in a mount namespace.
+
+==============================================================
+
2. /proc/sys/fs/binfmt_misc
----------------------------------------------------------
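
As a quick illustration of the new knob (not part of the patch; the /proc/sys/fs/mount-max path comes from the kernel/sysctl.c hunk further down, everything else here is an assumed, minimal sketch), the limit can be read from userspace like this:

	#include <stdio.h>

	int main(void)
	{
		unsigned int max;
		FILE *f = fopen("/proc/sys/fs/mount-max", "r");

		if (!f)
			return 1;	/* kernel without this patch */
		if (fscanf(f, "%u", &max) == 1)
			printf("mount-max = %u\n", max);
		fclose(f);
		return 0;
	}

Writing a new value back through the same file (as root) takes effect for subsequent mounts; the count itself is enforced per mount namespace, as the fs/namespace.c hunks below show.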
diff --git a/Makefile b/Makefile
index 1de081602f3e..933d159a74d7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 63
+SUBLEVEL = 65
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index dd7f20a490c8..33ee522bb76f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,6 +301,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
next = kvm_pgd_addr_end(addr, end);
if (!pgd_none(*pgd))
unmap_puds(kvm, pgd, addr, next);
+ /*
+ * If we are dealing with a large range in
+ * stage2 table, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector
+ * warnings.
+ */
+ if (kvm && (next != end))
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
@@ -745,6 +753,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
*/
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
+ assert_spin_locked(&kvm->mmu_lock);
unmap_range(kvm, kvm->arch.pgd, start, size);
}
@@ -831,7 +840,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
if (kvm->arch.pgd == NULL)
return;
+ spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ spin_unlock(&kvm->mmu_lock);
+
kvm_free_hwpgd(kvm_get_hwpgd(kvm));
if (KVM_PREALLOC_LEVEL > 0)
kfree(kvm->arch.pgd);
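
The cond_resched_lock() added above periodically drops and re-takes kvm->mmu_lock during a long stage2 unmap so other waiters are not starved. A userspace analogue of that pattern, as a hedged sketch (a pthread mutex standing in for the kernel spinlock; the chunk size of 1024 is an arbitrary choice for illustration):

	#include <pthread.h>
	#include <sched.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void process_range(long *items, long n)
	{
		pthread_mutex_lock(&lock);
		for (long i = 0; i < n; i++) {
			items[i] = 0;		/* work done under the lock */
			if ((i & 1023) == 1023) {
				/* briefly let other waiters in */
				pthread_mutex_unlock(&lock);
				sched_yield();
				pthread_mutex_lock(&lock);
			}
		}
		pthread_mutex_unlock(&lock);
	}

cond_resched_lock() itself is cheaper than this sketch: it only breaks the lock when a reschedule is pending or another CPU is spinning on the lock.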
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index edba294620db..f6fd0332c3a2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -716,7 +716,7 @@ resume_kernel:
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
- lwz r3,GPR1(r1)
+ ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -730,8 +730,8 @@ resume_kernel:
addi r6,r6,8
bdnz 2b
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
+ /* Do real store operation to complete stdu */
+ ld r5,GPR1(r1)
std r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 024f85f947ae..e2c0e4eab037 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -829,6 +829,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
+ if (pte_present(entry))
+ pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d8ce3ec816ab..bd8ce6bcdfc9 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -72,8 +72,8 @@ static inline void arch_wmb_pmem(void)
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
@@ -87,15 +87,6 @@ static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
@@ -114,8 +105,36 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(vaddr, bytes, i);
- if (__iter_needs_pmem_wb(i))
- __arch_wb_cache_pmem(vaddr, bytes);
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ __arch_wb_cache_pmem(addr, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ __arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ __arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
+ __arch_wb_cache_pmem(addr, bytes);
return len;
}
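
The comment in the hunk above encodes when a nocache copy still leaves dirty cache lines behind. Condensed into a standalone predicate (a sketch of the stated alignment rules only, not the kernel code; IS_ALIGNED is re-defined locally to keep it self-contained):

	#include <stdint.h>
	#include <stdio.h>

	#define IS_ALIGNED(x, a)	(((x) & ((uintptr_t)(a) - 1)) == 0)

	/* Does a nocache copy of @bytes to @dest still need a manual
	 * cache write-back? For short copies only a 4-byte-aligned,
	 * 4-byte transfer avoids it; for longer ones both the start
	 * address and the length must be 8-byte aligned. */
	static int needs_manual_flush(uintptr_t dest, size_t bytes)
	{
		if (bytes < 8)
			return !IS_ALIGNED(dest, 4) || bytes != 4;
		return !IS_ALIGNED(dest, 8) || !IS_ALIGNED(bytes, 8);
	}

	int main(void)
	{
		printf("%d\n", needs_manual_flush(0x1000, 64));	/* 0 */
		printf("%d\n", needs_manual_flush(0x1001, 64));	/* 1 */
		return 0;
	}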
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index e99b15077e94..62aca448726a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -53,7 +53,7 @@ static const char * const th_names[] = {
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
diff --git a/block/genhd.c b/block/genhd.c
index 7f1e8f81ceb4..de2a6162e3f3 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -673,7 +673,6 @@ void del_gendisk(struct gendisk *disk)
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
- disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6f78d5..1c2b846c5776 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1ef37c727572..d037454fe7b8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,7 +73,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
void *in, *out;
unsigned long flags;
int ret, err = 0;
- unsigned long t;
struct page *page;
spin_lock_irqsave(&newchannel->lock, flags);
@@ -183,11 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
goto error1;
}
- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto error1;
- }
+ wait_for_completion(&open_info->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
@@ -375,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
u32 msgcount;
struct list_head *curr;
u32 next_gpadl_handle;
@@ -437,6 +432,13 @@ cleanup:
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (msgcount > 1) {
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+ }
+
kfree(msginfo);
return ret;
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 4fc2e8836e60..2bbc53025549 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -429,7 +429,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
- u32 msec = 1;
+ u32 usec = 1;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -462,9 +462,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
}
retries++;
- msleep(msec);
- if (msec < 2048)
- msec *= 2;
+ udelay(usec);
+ if (usec < 2048)
+ usec *= 2;
}
return ret;
}
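
The retry loop above keeps its doubling, capped back-off but switches from msleep() to udelay(), presumably so vmbus_post_msg() stays safe in contexts that cannot sleep. The shape of the loop as a standalone sketch (try_post and max_retries are assumed, illustrative names; usleep stands in for udelay):

	#include <unistd.h>

	static int post_with_backoff(int (*try_post)(void), int max_retries)
	{
		unsigned int usec = 1;
		int ret = -1;

		for (int i = 0; i < max_retries; i++) {
			ret = try_post();
			if (ret == 0)
				break;
			usleep(usec);
			if (usec < 2048)
				usec *= 2;	/* cap the per-retry delay */
		}
		return ret;
	}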
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index ddbf7e7e0d98..8ce1f2e22912 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -305,9 +305,10 @@ void hv_cleanup(bool crash)
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
- if (!crash)
+ if (!crash) {
vfree(hv_context.tsc_page);
- hv_context.tsc_page = NULL;
+ hv_context.tsc_page = NULL;
+ }
}
#endif
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 43af91362be5..354da7f207b7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -430,16 +430,27 @@ struct dm_info_msg {
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
- * covered_end_pfn defines the pages that can
+ * covered_start_pfn:covered_end_pfn defines the pages that can
* be brought online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
+ unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
+ /*
+ * A list of gaps.
+ */
+ struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long end_pfn;
};
struct balloon_state {
@@ -595,18 +606,46 @@ static struct notifier_block hv_memory_nb = {
.priority = 0
};
+/* Check if the particular page is backed and can be onlined and online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+ unsigned long cur_start_pgp;
+ unsigned long cur_end_pgp;
+ struct hv_hotadd_gap *gap;
+
+ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+ /* The page is not backed. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ return;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(gap->start_pfn);
+ cur_end_pgp = (unsigned long)
+ pfn_to_page(gap->end_pfn);
+ if (((unsigned long)pg >= cur_start_pgp) &&
+ ((unsigned long)pg < cur_end_pgp)) {
+ return;
+ }
+ }
-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+ /* This frame is currently backed; online the page. */
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+}
+
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+ unsigned long start_pfn, unsigned long size)
{
int i;
- for (i = 0; i < size; i++) {
- struct page *pg;
- pg = pfn_to_page(start_pfn + i);
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ for (i = 0; i < size; i++)
+ hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
@@ -682,26 +721,25 @@ static void hv_online_page(struct page *pg)
list_for_each(cur, &dm_device.ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(has->start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
- if (((unsigned long)pg >= cur_start_pgp) &&
- ((unsigned long)pg < cur_end_pgp)) {
- /*
- * This frame is currently backed; online the
- * page.
- */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ /* The page belongs to a different HAS. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ continue;
+
+ hv_page_online_one(has, pg);
+ break;
}
}
-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct list_head *cur;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
if (list_empty(&dm_device.ha_region_list))
@@ -716,6 +754,24 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
*/
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, create a gap and update covered_end_pfn.
+ */
+ if (has->covered_end_pfn != start_pfn) {
+ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+ if (!gap)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&gap->list);
+ gap->start_pfn = has->covered_end_pfn;
+ gap->end_pfn = start_pfn;
+ list_add_tail(&gap->list, &has->gap_list);
+
+ has->covered_end_pfn = start_pfn;
+ }
+
/*
* If the current hot add-request extends beyond
* our current limit; extend it.
@@ -732,19 +788,10 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
has->end_pfn += new_inc;
}
- /*
- * If the current start pfn is not where the covered_end
- * is, update it.
- */
-
- if (has->covered_end_pfn != start_pfn)
- has->covered_end_pfn = start_pfn;
-
- return true;
-
+ return 1;
}
- return false;
+ return 0;
}
static unsigned long handle_pg_range(unsigned long pg_start,
@@ -783,6 +830,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
+ has->covered_end_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online by checking its last previously backed page.
@@ -791,10 +840,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
*/
if (start_pfn > has->start_pfn &&
!PageReserved(pfn_to_page(start_pfn - 1)))
- hv_bring_pgs_online(start_pfn, pgs_ol);
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
- has->covered_end_pfn += pgs_ol;
- pfn_cnt -= pgs_ol;
}
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
@@ -832,13 +879,19 @@ static unsigned long process_hot_add(unsigned long pg_start,
unsigned long rg_size)
{
struct hv_hotadd_state *ha_region = NULL;
+ int covered;
if (pfn_cnt == 0)
return 0;
- if (!dm_device.host_specified_ha_region)
- if (pfn_covered(pg_start, pfn_cnt))
+ if (!dm_device.host_specified_ha_region) {
+ covered = pfn_covered(pg_start, pfn_cnt);
+ if (covered < 0)
+ return 0;
+
+ if (covered)
goto do_pg_range;
+ }
/*
* If the host has specified a hot-add range; deal with it first.
@@ -850,10 +903,12 @@ static unsigned long process_hot_add(unsigned long pg_start,
return 0;
INIT_LIST_HEAD(&ha_region->list);
+ INIT_LIST_HEAD(&ha_region->gap_list);
list_add_tail(&ha_region->list, &dm_device.ha_region_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
}
@@ -1581,6 +1636,7 @@ static int balloon_remove(struct hv_device *dev)
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct list_head *cur, *tmp;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap, *tmp_gap;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -1597,6 +1653,10 @@ static int balloon_remove(struct hv_device *dev)
#endif
list_for_each_safe(cur, tmp, &dm->ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
+ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+ list_del(&gap->list);
+ kfree(gap);
+ }
list_del(&has->list);
kfree(has);
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 43482ae1e049..1a2b2620421e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1122,6 +1122,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1528,6 +1529,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index eef9097c60b3..41ba86576d04 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -71,10 +71,6 @@ ifeq ($(CONFIG_DM_VERITY_FEC),y)
dm-verity-objs += dm-verity-fec.o
endif
-ifeq ($(CONFIG_DM_ANDROID_VERITY),y)
-dm-verity-objs += dm-android-verity.o
-endif
-
ifeq ($(CONFIG_DM_VERITY_AVB),y)
dm-verity-objs += dm-verity-avb.o
endif
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 618617f3e867..ad21276c8d9e 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -95,9 +95,11 @@ static int uid_cputime_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
+ struct user_namespace *user_ns = current_user_ns();
cputime_t utime;
cputime_t stime;
unsigned long bkt;
+ uid_t uid;
rt_mutex_lock(&uid_lock);
@@ -108,14 +110,13 @@ static int uid_cputime_show(struct seq_file *m, void *v)
read_lock(&tasklist_lock);
do_each_thread(temp, task) {
- uid_entry = find_or_register_uid(from_kuid_munged(
- current_user_ns(), task_uid(task)));
+ uid = from_kuid_munged(user_ns, task_uid(task));
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
read_unlock(&tasklist_lock);
rt_mutex_unlock(&uid_lock);
pr_err("%s: failed to find the uid_entry for uid %d\n",
- __func__, from_kuid_munged(current_user_ns(),
- task_uid(task)));
+ __func__, uid);
return -ENOMEM;
}
task_cputime_adjusted(task, &utime, &stime);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f1582f6cccb..8d838779fd1b 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -804,6 +804,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba32a057..39712560b4c1 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6df3ee561d52..515aa3f993f3 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
+
+ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
if (le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
+ goto unlock;
}
rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (!res && exact_len && rlen != len) {
+ if (exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
}
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, buf, len);
+ res = hfa384x_from_bap(dev, BAP0, buf, len);
+unlock:
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 80f9de907563..5cc80b80c82b 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- read_mem32((u32 *) &size, addr, 4);
+ size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 9982cb176ce8..830e2fd47642 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -562,8 +562,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -571,23 +572,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 20e9a86d2dcf..5c8f767b6368 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -255,7 +255,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b76883606e4b..94906aaa9b7c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -906,7 +906,6 @@ struct cifs_tcon {
bool use_persistent:1; /* use persistent instead of durable handles */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index fc537c29044e..87b87e091e8e 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
}
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+ if (server->tcpStatus == CifsGood)
+ return true;
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
.get_dfs_refer = CIFSGetDFSRefer,
.qfs_tcon = cifs_qfs_tcon,
.is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
.query_path_info = cifs_query_path_info,
.query_file_info = cifs_query_file_info,
.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6cb5c4b30e78..6cb2603f8a5c 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -932,9 +932,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
if ((tcon && tcon->seal) &&
((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
cifs_dbg(VFS, "encryption requested but no server support");
@@ -1036,8 +1033,6 @@ tcon_exit:
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 106dda1e743d..92ac61c3ccf7 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -947,6 +947,14 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
+ /* check log blocks per segment */
+ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid log blocks per segment (%u)\n",
+ le32_to_cpu(raw_super->log_blocks_per_seg));
+ return 1;
+ }
+
/* Currently, support 512/1024/2048/4096 bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index ad8a5b757cc7..a443c6e54412 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -760,7 +760,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
int error;
error = get_leaf_nr(dip, index, &leaf_no);
- if (!error)
+ if (!IS_ERR_VALUE(error))
error = get_leaf(dip, leaf_no, bh_out);
return error;
@@ -976,7 +976,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
- if (error)
+ if (IS_ERR_VALUE(error))
return error;
/* Get the old leaf block */
diff --git a/fs/mount.h b/fs/mount.h
index 3dc7dea5a357..13a4ebbbaa74 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -13,6 +13,8 @@ struct mnt_namespace {
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
+ unsigned int mounts; /* # of mounts in the namespace */
+ unsigned int pending_mounts;
};
struct mnt_pcp {
diff --git a/fs/namespace.c b/fs/namespace.c
index c1477882a853..a22959c97384 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -27,6 +27,9 @@
#include "pnode.h"
#include "internal.h"
+/* Maximum number of mounts in a mount namespace */
+unsigned int sysctl_mount_max __read_mostly = 100000;
+
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
@@ -926,6 +929,9 @@ static void commit_tree(struct mount *mnt)
list_splice(&head, n->list.prev);
+ n->mounts += n->pending_mounts;
+ n->pending_mounts = 0;
+
__attach_mnt(mnt, parent);
touch_mnt_namespace(n);
}
@@ -1465,11 +1471,16 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
propagate_umount(&tmp_list);
while (!list_empty(&tmp_list)) {
+ struct mnt_namespace *ns;
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
- __touch_mnt_namespace(p->mnt_ns);
+ ns = p->mnt_ns;
+ if (ns) {
+ ns->mounts--;
+ __touch_mnt_namespace(ns);
+ }
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
@@ -1870,6 +1881,28 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
return 0;
}
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
+{
+ unsigned int max = READ_ONCE(sysctl_mount_max);
+ unsigned int mounts = 0, old, pending, sum;
+ struct mount *p;
+
+ for (p = mnt; p; p = next_mnt(p, mnt))
+ mounts++;
+
+ old = ns->mounts;
+ pending = ns->pending_mounts;
+ sum = old + pending;
+ if ((old > sum) ||
+ (pending > sum) ||
+ (max < sum) ||
+ (mounts > (max - sum)))
+ return -ENOSPC;
+
+ ns->pending_mounts = pending + mounts;
+ return 0;
+}
+
/*
* @source_mnt : mount tree to be attached
* @nd : place the mount tree @source_mnt is attached
@@ -1939,6 +1972,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
struct path *parent_path)
{
HLIST_HEAD(tree_list);
+ struct mnt_namespace *ns = dest_mnt->mnt_ns;
struct mountpoint *smp;
struct mount *child, *p;
struct hlist_node *n;
@@ -1951,6 +1985,13 @@ static int attach_recursive_mnt(struct mount *source_mnt,
if (IS_ERR(smp))
return PTR_ERR(smp);
+ /* Is there space to add these mounts to the mount namespace? */
+ if (!parent_path) {
+ err = count_mounts(ns, source_mnt);
+ if (err)
+ goto out;
+ }
+
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
@@ -1990,11 +2031,14 @@ static int attach_recursive_mnt(struct mount *source_mnt,
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+ child->mnt_parent->mnt_ns->pending_mounts = 0;
umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
out:
+ ns->pending_mounts = 0;
+
read_seqlock_excl(&mount_lock);
put_mountpoint(smp);
read_sequnlock_excl(&mount_lock);
@@ -2830,6 +2874,8 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
+ new_ns->mounts = 0;
+ new_ns->pending_mounts = 0;
return new_ns;
}
@@ -2879,6 +2925,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
q = new;
while (p) {
q->mnt_ns = new_ns;
+ new_ns->mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
@@ -2917,6 +2964,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
new_ns->root = mnt;
+ new_ns->mounts++;
list_add(&mnt->mnt_list, &new_ns->list);
} else {
mntput(m);
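
count_mounts() above is careful to detect unsigned wrap-around before comparing against the limit. The same test extracted into standalone form (function and parameter names are mine; the logic is exactly the hunk's):

	/* Would old + pending + mounts exceed max, treating any
	 * wrap-around in the intermediate sum as "yes"? */
	static int mounts_exceed_max(unsigned int old, unsigned int pending,
				     unsigned int mounts, unsigned int max)
	{
		unsigned int sum = old + pending;

		/* old > sum or pending > sum means old + pending wrapped */
		return (old > sum) || (pending > sum) ||
		       (max < sum) || (mounts > (max - sum));
	}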
diff --git a/fs/pnode.c b/fs/pnode.c
index 1d16bb3bdf5e..b5f97c605d98 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m)
read_sequnlock_excl(&mount_lock);
}
hlist_add_head(&child->mnt_hash, list);
- return 0;
+ return count_mounts(m->mnt_ns, child);
}
/*
diff --git a/fs/pnode.h b/fs/pnode.h
index f41fc9a6c48e..a9a6576540ad 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -55,4 +55,5 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
const struct path *root);
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
#endif /* _LINUX_PNODE_H */
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index 8e31d1a80f0c..7a19e77fce99 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -60,6 +60,14 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
lower_dentry = lower_path.dentry;
lower_cur_parent_dentry = dget_parent(lower_dentry);
+ if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+ err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+ if (err == 0) {
+ d_drop(dentry);
+ goto out;
+ }
+ }
+
spin_lock(&lower_dentry->d_lock);
if (d_unhashed(lower_dentry)) {
spin_unlock(&lower_dentry->d_lock);
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index cddfc79ea365..b4595aab5713 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -215,16 +215,16 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
gid = AID_MEDIA_OBB;
break;
case PERM_ANDROID_PACKAGE:
- if (info->d_uid != 0)
+ if (uid_is_app(info->d_uid))
gid = multiuser_get_ext_gid(info->d_uid);
else
- gid = multiuser_get_uid(info->userid, uid);
+ gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
break;
case PERM_ANDROID_PACKAGE_CACHE:
- if (info->d_uid != 0)
+ if (uid_is_app(info->d_uid))
gid = multiuser_get_ext_cache_gid(info->d_uid);
else
- gid = multiuser_get_uid(info->userid, uid);
+ gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
break;
case PERM_PRE_ROOT:
default:
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 1f6921e2ffbf..6076c342dae6 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -358,9 +358,12 @@ ssize_t sdcardfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
get_file(lower_file); /* prevent lower_file from being released */
iocb->ki_filp = lower_file;
err = lower_file->f_op->read_iter(iocb, iter);
- /* ? wait IO finish to update atime as ecryptfs ? */
iocb->ki_filp = file;
fput(lower_file);
+ /* update upper inode atime as needed */
+ if (err >= 0 || err == -EIOCBQUEUED)
+ fsstack_copy_attr_atime(file->f_path.dentry->d_inode,
+ file_inode(lower_file));
out:
return err;
}
@@ -384,6 +387,13 @@ ssize_t sdcardfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
err = lower_file->f_op->write_iter(iocb, iter);
iocb->ki_filp = file;
fput(lower_file);
+ /* update upper inode times/sizes as needed */
+ if (err >= 0 || err == -EIOCBQUEUED) {
+ fsstack_copy_inode_size(file->f_path.dentry->d_inode,
+ file_inode(lower_file));
+ fsstack_copy_attr_times(file->f_path.dentry->d_inode,
+ file_inode(lower_file));
+ }
out:
return err;
}
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index a0f221501b4c..509d5fbcb472 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -366,19 +366,22 @@ put_name:
/* instantiate a new negative dentry */
dname.name = name->name;
dname.len = name->len;
- dname.hash = full_name_hash(dname.name, dname.len);
- lower_dentry = d_lookup(lower_dir_dentry, &dname);
- if (lower_dentry)
- goto setup_lower;
- lower_dentry = d_alloc(lower_dir_dentry, &dname);
+ /* See if the low-level filesystem might want
+ * to use its own hash
+ */
+ lower_dentry = d_hash_and_lookup(lower_dir_dentry, &dname);
+ if (IS_ERR(lower_dentry))
+ return lower_dentry;
if (!lower_dentry) {
- err = -ENOMEM;
+ /* We called vfs_path_lookup earlier, and did not get a negative
+ * dentry then. Don't confuse the lower filesystem by forcing
+ * one on it now...
+ */
+ err = -ENOENT;
goto out;
}
- d_add(lower_dentry, NULL); /* instantiate and hash */
-setup_lower:
lower_path.dentry = lower_dentry;
lower_path.mnt = mntget(lower_dir_mnt);
sdcardfs_set_lower_path(dentry, &lower_path);
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index d0c925cda299..85341e753f8c 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -35,6 +35,13 @@ static inline uid_t multiuser_get_uid(userid_t user_id, appid_t app_id)
return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
}
+static inline bool uid_is_app(uid_t uid)
+{
+ appid_t appid = uid % AID_USER_OFFSET;
+
+ return appid >= AID_APP_START && appid <= AID_APP_END;
+}
+
static inline gid_t multiuser_get_ext_cache_gid(uid_t uid)
{
return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 0e9b0977237a..b606d8f57adf 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -96,4 +96,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
+extern unsigned int sysctl_mount_max;
+
#endif /* _LINUX_MOUNT_H */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b52f88e44182..25cf44889559 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2693,7 +2693,7 @@ static int cgroup_procs_write_permission(struct task_struct *task,
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid) &&
- !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
+ !ns_capable(tcred->user_ns, CAP_SYS_NICE))
ret = -EACCES;
if (!ret && cgroup_on_dfl(dst_cgrp)) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 06db8b64129d..95c447e658f7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8454,6 +8454,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
return 0;
}
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = READ_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -8707,8 +8738,26 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
- gctx = group_leader->ctx;
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx, if so
+ * it's the regular !move_group case, otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
} else {
mutex_lock(&ctx->mutex);
}
@@ -8803,7 +8852,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_unpin_context(ctx);
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
if (group_leader)
mutex_unlock(&group_leader->group_leader_mutex);
@@ -8833,7 +8882,7 @@ SYSCALL_DEFINE5(perf_event_open,
err_locked:
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
fput(event_file);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8cc5167e4b04..816999804a16 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,6 +65,7 @@
#include <linux/sched/sysctl.h>
#include <linux/kexec.h>
#include <linux/bpf.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -2077,6 +2078,14 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+ {
+ .procname = "mount-max",
+ .data = &sysctl_mount_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ },
{ }
};
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7d7f99b0db47..1275175b0946 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3440,11 +3440,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;
cpu_buffer = iter->cpu_buffer;
- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c0c10a335b3b..60d246c4eefa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6170,11 +6170,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
return ret;
out_reg:
- ret = register_ftrace_function_probe(glob, ops, count);
+ ret = alloc_snapshot(&global_trace);
+ if (ret < 0)
+ goto out;
- if (ret >= 0)
- alloc_snapshot(&global_trace);
+ ret = register_ftrace_function_probe(glob, ops, count);
+ out:
return ret < 0 ? ret : 0;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 3a2b21e22629..72e1e831589a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -154,17 +154,18 @@ void ping_hash(struct sock *sk)
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
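
The fix above moves write_lock_bh() before the sk_hashed() test, so the check and the unhash happen as one atomic step instead of check-then-lock. The general shape of that pattern, as a pthread sketch (illustration only; the real patch also drops a socket reference and clears the port fields under the lock):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool hashed;

	static void unhash(void)
	{
		pthread_mutex_lock(&table_lock);
		if (hashed)		/* predicate tested under the lock */
			hashed = false;	/* ...and updated under it too */
		pthread_mutex_unlock(&table_lock);
	}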
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4f4c45ba7b70..25b5bacaa434 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3397,6 +3397,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
+
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+ * the BSSID - we've checked that already but may have accepted
+ * the wildcard (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as follows:
+ * a) If the STA is contained within an AP or is associated
+ * with an AP, the BSSID is the address currently in use
+ * by the STA contained in the AP.
+ *
+ * So we should not accept data frames with an address that's
+ * multicast.
+ *
+ * Accepting it also opens a security problem because stations
+ * could encrypt it with the GTK and inject traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return false;
+
return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 648f2a67f314..cb1381513c82 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -381,6 +381,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
dev = dev_get_by_name(net, driver_name);
if (!dev)
return -ENODEV;
+ if (tipc_mtu_bad(dev, 0)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
/* Associate TIPC bearer with L2 bearer */
rcu_assign_pointer(b->media_ptr, dev);
@@ -570,14 +574,19 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (!b_ptr)
return NOTIFY_DONE;
- b_ptr->mtu = dev->mtu;
-
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
break;
case NETDEV_GOING_DOWN:
+ tipc_reset_bearer(net, b_ptr);
+ break;
case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev, 0)) {
+ bearer_disable(net, b_ptr);
+ break;
+ }
+ b_ptr->mtu = dev->mtu;
tipc_reset_bearer(net, b_ptr);
break;
case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 552185bc4773..5f11e18b1fa1 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
#include "netlink.h"
#include "core.h"
+#include "msg.h"
#include <net/genetlink.h>
#define MAX_MEDIA 3
@@ -61,6 +62,9 @@
#define TIPC_MEDIA_TYPE_IB 2
#define TIPC_MEDIA_TYPE_UDP 3
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
/**
* struct tipc_node_map - set of node identifiers
* @count: # of nodes in set
@@ -226,4 +230,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
if (err)
goto out_nametbl;
+ INIT_LIST_HEAD(&tn->dist_queue);
err = tipc_topsrv_start(net);
if (err)
goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 18e95a8020cd..fe3b89e9cde4 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
spinlock_t nametbl_lock;
struct name_table *nametbl;
+ /* Name dist queue */
+ struct list_head dist_queue;
+
/* Topology subscription server */
struct tipc_server *topsrv;
atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index f51c8bdbea1c..c4c151bc000c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
int sysctl_tipc_named_timeout __read_mostly = 2000;
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
struct distr_queue_item {
struct distr_item i;
u32 dtype;
@@ -67,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
@@ -171,9 +168,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
- ITEM_SIZE;
- uint msg_rem = msg_dsz;
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */
@@ -340,9 +337,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
* tipc_named_add_backlog - add a failed name table update to the backlog
*
*/
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+ u32 type, u32 node)
{
struct distr_queue_item *e;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
unsigned long now = get_jiffies_64();
e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -352,7 +351,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
e->node = node;
e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
memcpy(e, i, sizeof(*i));
- list_add_tail(&e->next, &tipc_dist_queue);
+ list_add_tail(&e->next, &tn->dist_queue);
}
/**
@@ -362,10 +361,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
void tipc_named_process_backlog(struct net *net)
{
struct distr_queue_item *e, *tmp;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr[16];
unsigned long now = get_jiffies_64();
- list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+ list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
if (time_after(e->expires, now)) {
if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
continue;
@@ -405,7 +405,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
node = msg_orignode(msg);
while (count--) {
if (!tipc_update_nametbl(net, item, node, mtype))
- tipc_named_add_backlog(item, mtype, node);
+ tipc_named_add_backlog(net, item, mtype, node);
item++;
}
kfree_skb(skb);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3926b561f873..2df0b98d4a32 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -102,9 +102,10 @@ static unsigned int tipc_hashfn(u32 addr)
static void tipc_node_kref_release(struct kref *kref)
{
- struct tipc_node *node = container_of(kref, struct tipc_node, kref);
+ struct tipc_node *n = container_of(kref, struct tipc_node, kref);
- tipc_node_delete(node);
+ kfree(n->bc_entry.link);
+ kfree_rcu(n, rcu);
}
void tipc_node_put(struct tipc_node *node)
@@ -216,21 +217,20 @@ static void tipc_node_delete(struct tipc_node *node)
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
- kfree(node->bc_entry.link);
- kfree_rcu(node, rcu);
+ tipc_node_put(node);
+
+ del_timer_sync(&node->timer);
+ tipc_node_put(node);
}
void tipc_node_stop(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_node *node, *t_node;
spin_lock_bh(&tn->node_list_lock);
- list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
- if (del_timer(&node->timer))
- tipc_node_put(node);
- tipc_node_put(node);
- }
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ tipc_node_delete(node);
spin_unlock_bh(&tn->node_list_lock);
}
@@ -313,9 +313,7 @@ static void tipc_node_timeout(unsigned long data)
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
- if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
- tipc_node_get(n);
- tipc_node_put(n);
+ mod_timer(&n->timer, jiffies + n->keepalive_intv);
}
/**
@@ -730,7 +728,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case SELF_LOST_CONTACT_EVT:
- state = SELF_DOWN_PEER_LEAVING;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_ESTABL_CONTACT_EVT:
case PEER_LOST_CONTACT_EVT:
@@ -749,7 +747,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case PEER_LOST_CONTACT_EVT:
- state = SELF_LEAVING_PEER_DOWN;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_LOST_CONTACT_EVT:
case PEER_ESTABL_CONTACT_EVT:
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b26b7a127773..65171f8e8c45 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -777,9 +777,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
+ u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
int conn_cong;
@@ -792,7 +794,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
- tipc_sk_respond(sk, skb, TIPC_OK);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
@@ -1647,7 +1650,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
*
* Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1657,7 +1661,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
int usr = msg_user(hdr);
if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb);
+ tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}
@@ -1700,7 +1704,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
return true;
reject:
- tipc_sk_respond(sk, skb, err);
+ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+ __skb_queue_tail(xmitq, skb);
return false;
}
@@ -1716,9 +1721,24 @@ reject:
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
+ struct sk_buff_head xmitq;
+ u32 dnode, selector;
- if (likely(filter_rcv(sk, skb)))
+ __skb_queue_head_init(&xmitq);
+
+ if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+ return 0;
+ }
+
+ if (skb_queue_empty(&xmitq))
+ return 0;
+
+ /* Send response/rejected message */
+ skb = __skb_dequeue(&xmitq);
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}
@@ -1732,12 +1752,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport)
+ u32 dport, struct sk_buff_head *xmitq)
{
+ unsigned long time_limit = jiffies + 2;
+ struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
- struct sk_buff *skb;
- unsigned long time_limit = jiffies + 2;
+ u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1749,20 +1770,22 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb);
+ filter_rcv(sk, skb, xmitq);
continue;
}
/* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
- if (sk->sk_backlog.len)
+ if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim)))
continue;
/* Overload => reject message back to sender */
- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ onode = tipc_own_addr(sock_net(sk));
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ __skb_queue_tail(xmitq, skb);
break;
}
}
@@ -1775,12 +1798,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
+ __skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
@@ -1788,9 +1813,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- tipc_sk_enqueue(inputq, sk, dport);
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
+ /* Send pending response/rejected messages, if any */
+ while ((skb = __skb_dequeue(&xmitq))) {
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
sock_put(sk);
continue;
}
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 6af78c6276b4..78d6b78de29d 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,7 +52,7 @@
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
-#define UDP_MIN_HEADROOM 28
+#define UDP_MIN_HEADROOM 48
static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
@@ -376,6 +376,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
+ if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+ sizeof(struct udphdr))) {
+ err = -EINVAL;
+ goto err;
+ }
b->mtu = dev->mtu - sizeof(struct iphdr)
- sizeof(struct udphdr);
#if IS_ENABLED(CONFIG_IPV6)
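
Note on the udp_media.c hunks: raising UDP_MIN_HEADROOM to 48 leaves room for IPv6 encapsulation, and the new tipc_mtu_bad() check (added to net/tipc/bearer.h in this same series, per the diffstat) refuses to enable a UDP bearer whose device MTU cannot cover the IP/UDP overhead plus a minimal TIPC MTU, since b->mtu is computed by subtraction just below the check. A standalone sketch of the shape of that predicate, with an assumed floor value rather than the kernel's exact constant:

    #include <stdbool.h>
    #include <stdio.h>

    #define TIPC_MIN_BEARER_MTU 100 /* assumed floor; the real value lives in bearer.h */

    /* The device MTU must leave room for the encapsulation overhead
     * plus a minimal TIPC payload, else the bearer is refused. */
    static bool mtu_bad(unsigned int dev_mtu, unsigned int reserve)
    {
            return dev_mtu < TIPC_MIN_BEARER_MTU + reserve;
    }

    int main(void)
    {
            unsigned int overhead = 20 + 8; /* sizeof iphdr + sizeof udphdr */

            printf("mtu 1500: %s\n", mtu_bad(1500, overhead) ? "bad" : "ok");
            printf("mtu  120: %s\n", mtu_bad(120, overhead) ? "bad" : "ok");
            return 0;
    }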
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb440e7..662bdd20a748 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
* qp_handle.
*/
if (vmci_handle_is_invalid(e_payload->handle) ||
- vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
return;
/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.3.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
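
Note on the vmci_transport.c fix: the peer-detach callback previously returned early when the event's queue-pair handle matched the transport's own qp_handle, i.e. it ignored exactly the detach events it was meant to process. With the predicate inverted, it skips invalid handles and handles belonging to other queue pairs, and acts only on a valid, matching one. A tiny self-contained model of the corrected filter, with plain integers standing in for struct vmci_handle:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for vmci_handle_is_invalid()/_is_equal(). */
    static bool is_invalid(int h)      { return h < 0; }
    static bool is_equal(int a, int b) { return a == b; }

    /* Corrected filter: only react to a valid handle that matches ours. */
    static bool should_ignore(int ours, int event)
    {
            return is_invalid(event) || !is_equal(ours, event);
    }

    int main(void)
    {
            printf("%d\n", should_ignore(7, 7));  /* 0: our QP detached, handle it */
            printf("%d\n", should_ignore(7, 9));  /* 1: someone else's QP, skip */
            printf("%d\n", should_ignore(7, -1)); /* 1: invalid handle, skip */
            return 0;
    }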
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060399e0..9cb4fe4478a1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};
/*
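
Note on the gc.c rename: moving the internal dead key type from "dead" to ".dead" places it in the dot-prefixed namespace that userspace may not name in request_key() and related calls, so a key undergoing garbage collection can no longer be reached by its type name. A short userspace demonstration; the description string is arbitrary and the exact errno on unpatched kernels may differ:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* Dot-prefixed type names are reserved to the kernel, so the
             * renamed ".dead" type is unreachable from here. */
            long id = syscall(__NR_request_key, ".dead", "anything", NULL, 0);

            if (id < 0)
                    printf("request_key(\".dead\"): %s\n", strerror(errno));
            return 0;
    }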
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 1c3872aeed14..442e350c209d 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -271,7 +271,8 @@ error:
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
- * be skipped over.
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name)
ret = PTR_ERR(name);
goto error;
}
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
+error_name:
kfree(name);
-
error:
return ret;
}
@@ -1223,8 +1228,8 @@ error:
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist. The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1249,11 +1254,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto error;
- ret = 0;
- }
+ if (ret < 0)
+ goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
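
Note on the keyctl.c hunks: they make two related changes. join_session_keyring() now returns -EPERM for any userspace-supplied name beginning with a dot, and keyctl_set_reqkey_keyring() drops its -EEXIST special case because install_process_keyring_to_cred() (see process_keys.c below) now treats an existing keyring as success. A userspace sketch of the new dot-name restriction via the raw keyctl syscall; the keyring names are arbitrary examples:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define KEYCTL_JOIN_SESSION_KEYRING 1 /* from <linux/keyctl.h> */

    int main(void)
    {
            long id = syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, ".example");

            if (id < 0) /* EPERM once this patch is applied */
                    printf("join \".example\": %s\n", strerror(errno));

            id = syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, "example");
            if (id >= 0)
                    printf("joined session keyring %ld\n", id);
            return 0;
    }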
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index e6d50172872f..4ed909142956 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -125,13 +125,18 @@ error:
}
/*
- * Install a fresh thread keyring directly to new credentials. This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
+ if (new->thread_keyring)
+ return 0;
+
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
@@ -143,7 +148,9 @@ int install_thread_keyring_to_cred(struct cred *new)
}
/*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
@@ -154,8 +161,6 @@ static int install_thread_keyring(void)
if (!new)
return -ENOMEM;
- BUG_ON(new->thread_keyring);
-
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
@@ -166,17 +171,17 @@ static int install_thread_keyring(void)
}
/*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
*
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
- return -EEXIST;
+ return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
@@ -189,11 +194,9 @@ int install_process_keyring_to_cred(struct cred *new)
}
/*
- * Make sure a process keyring is installed for the current process. The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
*
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
@@ -207,14 +210,18 @@ static int install_process_keyring(void)
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ? ret : 0;
+ return ret;
}
return commit_creds(new);
}
/*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@@ -249,8 +256,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
}
/*
- * Install a session keyring, discarding the old one. If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
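
Note on the process_keys.c rewrite: the install_*_keyring_to_cred() helpers become idempotent. An already-present thread or process keyring now means "return 0" rather than -EEXIST (or, for the thread keyring, tripping a BUG_ON in the caller), which is what lets keyctl.c above drop its error-code juggling. A compact userspace model of the new contract; struct cred and the allocator are stand-ins for the kernel's types:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cred { void *thread_keyring; };

    /* Mirrors the new contract: 0 means "a keyring is now present",
     * whether we just created it or one already existed. */
    static int install_thread_keyring_to_cred(struct cred *new)
    {
            if (new->thread_keyring)
                    return 0; /* already present: success, not -EEXIST */

            new->thread_keyring = malloc(16); /* stand-in for keyring_alloc() */
            return new->thread_keyring ? 0 : -ENOMEM;
    }

    int main(void)
    {
            struct cred c = { NULL };

            printf("first install:  %d\n", install_thread_keyring_to_cred(&c));
            printf("second install: %d\n", install_thread_keyring_to_cred(&c));
            free(c.thread_keyring);
            return 0;
    }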
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 0d9f48ec42bb..bc7adb84e679 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1433,7 +1433,7 @@ int main(int argc, char *argv[])
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
- kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
+ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
if (kvp_fd < 0) {
syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",