path: root/kernel/padata.c
author     Greg Kroah-Hartman <gregkh@google.com>    2020-05-27 16:54:34 +0200
committer  Greg Kroah-Hartman <gregkh@google.com>    2020-05-27 16:54:34 +0200
commit     aa9dc801acdcb14a7793e759596c97c61b3d9dee (patch)
tree       6fd6c85949905042795eeea01d9e502fcd15ec55 /kernel/padata.c
parent     53454c3459bc25f3678f0ec9632609082685c766 (diff)
parent     646cdb183bb780e11e0ad4534d9054f77c31c8dc (diff)
Merge 4.4.225 into android-4.4-p
Changes in 4.4.225:
	igb: use igb_adapter->io_addr instead of e1000_hw->hw_addr
	padata: Remove unused but set variables
	padata: get_next is never NULL
	padata: ensure the reorder timer callback runs on the correct CPU
	padata: ensure padata_do_serial() runs on the correct CPU
	evm: Check also if *tfm is an error pointer in init_desc()
	fix multiplication overflow in copy_fdtable()
	HID: multitouch: add eGalaxTouch P80H84 support
	ceph: fix double unlock in handle_cap_export()
	USB: core: Fix misleading driver bug report
	platform/x86: asus-nb-wmi: Do not load on Asus T100TA and T200TA
	ARM: futex: Address build warning
	media: Fix media_open() to clear filp->private_data in error leg
	drivers/media/media-devnode: clear private_data before put_device()
	media-devnode: add missing mutex lock in error handler
	media-devnode: fix namespace mess
	media-device: dynamically allocate struct media_devnode
	media: fix use-after-free in cdev_put() when app exits after driver unbind
	media: fix media devnode ioctl/syscall and unregister race
	i2c: dev: switch from register_chrdev to cdev API
	i2c: dev: don't start function name with 'return'
	i2c: dev: use after free in detach
	i2c-dev: don't get i2c adapter via i2c_dev
	i2c: dev: Fix the race between the release of i2c_dev and cdev
	padata: set cpu_index of unused CPUs to -1
	sched/fair, cpumask: Export for_each_cpu_wrap()
	padata: Replace delayed timer with immediate workqueue in padata_reorder
	padata: initialize pd->cpu with effective cpumask
	padata: purge get_cpu and reorder_via_wq from padata_do_serial
	ALSA: pcm: fix incorrect hw_base increase
	ext4: lock the xattr block before checksuming it
	platform/x86: alienware-wmi: fix kfree on potentially uninitialized pointer
	libnvdimm/btt: Remove unnecessary code in btt_freelist_init
	l2tp: lock socket before checking flags in connect()
	l2tp: fix racy socket lookup in l2tp_ip and l2tp_ip6 bind()
	l2tp: hold session while sending creation notifications
	l2tp: take a reference on sessions used in genetlink handlers
	l2tp: don't use l2tp_tunnel_find() in l2tp_ip and l2tp_ip6
	net: l2tp: export debug flags to UAPI
	net: l2tp: deprecate PPPOL2TP_MSG_* in favour of L2TP_MSG_*
	net: l2tp: ppp: change PPPOL2TP_MSG_* => L2TP_MSG_*
	New kernel function to get IP overhead on a socket.
	L2TP:Adjust intf MTU, add underlay L3, L2 hdrs.
	l2tp: remove useless duplicate session detection in l2tp_netlink
	l2tp: remove l2tp_session_find()
	l2tp: define parameters of l2tp_session_get*() as "const"
	l2tp: define parameters of l2tp_tunnel_find*() as "const"
	l2tp: initialise session's refcount before making it reachable
	l2tp: hold tunnel while looking up sessions in l2tp_netlink
	l2tp: hold tunnel while processing genl delete command
	l2tp: hold tunnel while handling genl tunnel updates
	l2tp: hold tunnel while handling genl TUNNEL_GET commands
	l2tp: hold tunnel used while creating sessions with netlink
	l2tp: prevent creation of sessions on terminated tunnels
	l2tp: pass tunnel pointer to ->session_create()
	l2tp: fix l2tp_eth module loading
	l2tp: don't register sessions in l2tp_session_create()
	l2tp: initialise l2tp_eth sessions before registering them
	l2tp: protect sock pointer of struct pppol2tp_session with RCU
	l2tp: initialise PPP sessions before registering them
	Revert "gfs2: Don't demote a glock until its revokes are written"
	staging: iio: ad2s1210: Fix SPI reading
	mei: release me_cl object reference
	iio: sca3000: Remove an erroneous 'get_device()'
	l2tp: device MTU setup, tunnel socket needs a lock
	cpumask: Make for_each_cpu_wrap() available on UP as well
	Linux 4.4.225

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I87dc4ca47f34d594fff7c1da28c7a4596659c029
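Editor's note: the padata entries in this series replace the one-second reorder retry timer with a work item that is queued immediately when more objects are pending. The fragment below is a minimal, hedged sketch of that timer-to-workqueue conversion pattern in general kernel style; struct demo_ctx, demo_reorder_fn() and the other demo_* names are invented for illustration and are not part of the padata code.

	#include <linux/kernel.h>
	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	struct demo_ctx {
		struct workqueue_struct *wq;	/* where reorder work runs */
		struct work_struct reorder_work;
	};

	static void demo_reorder_fn(struct work_struct *work)
	{
		struct demo_ctx *ctx = container_of(work, struct demo_ctx,
						    reorder_work);

		/* Run with BHs off, mirroring invoke_padata_reorder() below. */
		local_bh_disable();
		/* ... drain and serialize whatever is pending for ctx ... */
		local_bh_enable();
	}

	static void demo_init(struct demo_ctx *ctx)
	{
		INIT_WORK(&ctx->reorder_work, demo_reorder_fn);
	}

	static void demo_kick(struct demo_ctx *ctx)
	{
		/* Immediate scheduling replaces mod_timer(..., jiffies + HZ). */
		queue_work(ctx->wq, &ctx->reorder_work);
	}

The actual padata change follows this shape: setup_timer()/mod_timer()/del_timer() calls disappear and INIT_WORK()/queue_work() take their place, as the diff below shows.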
Diffstat (limited to 'kernel/padata.c')
-rw-r--r--	kernel/padata.c	88
1 file changed, 34 insertions(+), 54 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index ae036af3f012..c50975f43b34 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -65,15 +65,11 @@ static int padata_cpu_hash(struct parallel_data *pd)
static void padata_parallel_worker(struct work_struct *parallel_work)
{
struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
- struct padata_instance *pinst;
LIST_HEAD(local_list);
local_bh_disable();
pqueue = container_of(parallel_work,
struct padata_parallel_queue, work);
- pd = pqueue->pd;
- pinst = pd->pinst;
spin_lock(&pqueue->parallel.lock);
list_replace_init(&pqueue->parallel.list, &local_list);
@@ -136,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst,
padata->cb_cpu = cb_cpu;
target_cpu = padata_cpu_hash(pd);
+ padata->cpu = target_cpu;
queue = per_cpu_ptr(pd->pqueue, target_cpu);
spin_lock(&queue->parallel.lock);
@@ -159,8 +156,6 @@ EXPORT_SYMBOL(padata_do_parallel);
* A pointer to the control struct of the next object that needs
* serialization, if present in one of the percpu reorder queues.
*
- * NULL, if all percpu reorder queues are empty.
- *
* -EINPROGRESS, if the next object that needs serialization will
* be parallel processed by another cpu and is not yet present in
* the cpu's reorder queue.
@@ -170,25 +165,12 @@ EXPORT_SYMBOL(padata_do_parallel);
*/
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
- int cpu, num_cpus;
- unsigned int next_nr, next_index;
struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
+ int cpu = pd->cpu;
- num_cpus = cpumask_weight(pd->cpumask.pcpu);
-
- /*
- * Calculate the percpu reorder queue and the sequence
- * number of the next object.
- */
- next_nr = pd->processed;
- next_index = next_nr % num_cpus;
- cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
-
- padata = NULL;
-
reorder = &next_queue->reorder;
spin_lock(&reorder->lock);
@@ -199,7 +181,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- pd->processed++;
+ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
+ false);
spin_unlock(&reorder->lock);
goto out;
@@ -222,6 +205,7 @@ static void padata_reorder(struct parallel_data *pd)
struct padata_priv *padata;
struct padata_serial_queue *squeue;
struct padata_instance *pinst = pd->pinst;
+ struct padata_parallel_queue *next_queue;
/*
* We need to ensure that only one cpu can work on dequeueing of
@@ -240,12 +224,11 @@ static void padata_reorder(struct parallel_data *pd)
padata = padata_get_next(pd);
/*
- * All reorder queues are empty, or the next object that needs
- * serialization is parallel processed by another cpu and is
- * still on it's way to the cpu's reorder queue, nothing to
- * do for now.
+ * If the next object that needs serialization is parallel
+ * processed by another cpu and is still on it's way to the
+ * cpu's reorder queue, nothing to do for now.
*/
- if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+ if (PTR_ERR(padata) == -EINPROGRESS)
break;
/*
@@ -254,7 +237,6 @@ static void padata_reorder(struct parallel_data *pd)
* so exit immediately.
*/
if (PTR_ERR(padata) == -ENODATA) {
- del_timer(&pd->timer);
spin_unlock_bh(&pd->lock);
return;
}
@@ -273,28 +255,27 @@ static void padata_reorder(struct parallel_data *pd)
/*
* The next object that needs serialization might have arrived to
- * the reorder queues in the meantime, we will be called again
- * from the timer function if no one else cares for it.
+ * the reorder queues in the meantime.
*
- * Ensure reorder_objects is read after pd->lock is dropped so we see
- * an increment from another task in padata_do_serial. Pairs with
+ * Ensure reorder queue is read after pd->lock is dropped so we see
+ * new objects from another task in padata_do_serial. Pairs with
* smp_mb__after_atomic in padata_do_serial.
*/
smp_mb();
- if (atomic_read(&pd->reorder_objects)
- && !(pinst->flags & PADATA_RESET))
- mod_timer(&pd->timer, jiffies + HZ);
- else
- del_timer(&pd->timer);
- return;
+ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
+ if (!list_empty(&next_queue->reorder.list))
+ queue_work(pinst->wq, &pd->reorder_work);
}
-static void padata_reorder_timer(unsigned long arg)
+static void invoke_padata_reorder(struct work_struct *work)
{
- struct parallel_data *pd = (struct parallel_data *)arg;
+ struct parallel_data *pd;
+ local_bh_disable();
+ pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
+ local_bh_enable();
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -341,29 +322,22 @@ static void padata_serial_worker(struct work_struct *serial_work)
*/
void padata_do_serial(struct padata_priv *padata)
{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
-
- pd = padata->pd;
-
- cpu = get_cpu();
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
+ struct parallel_data *pd = padata->pd;
+ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
+ padata->cpu);
spin_lock(&pqueue->reorder.lock);
- atomic_inc(&pd->reorder_objects);
list_add_tail(&padata->list, &pqueue->reorder.list);
+ atomic_inc(&pd->reorder_objects);
spin_unlock(&pqueue->reorder.lock);
/*
- * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * Ensure the addition to the reorder list is ordered correctly
* with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
* in padata_reorder.
*/
smp_mb__after_atomic();
- put_cpu();
-
padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -412,9 +386,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
struct padata_parallel_queue *pqueue;
cpu_index = 0;
- for_each_cpu(cpu, pd->cpumask.pcpu) {
+ for_each_possible_cpu(cpu) {
pqueue = per_cpu_ptr(pd->pqueue, cpu);
- pqueue->pd = pd;
+
+ if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
+ pqueue->cpu_index = -1;
+ continue;
+ }
+
pqueue->cpu_index = cpu_index;
cpu_index++;
@@ -448,12 +427,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 1);
pd->pinst = pinst;
spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+ INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
return pd;
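
Editor's note: after this change, padata_get_next() no longer derives the dequeue CPU from pd->processed; instead pd->cpu walks pd->cpumask.pcpu round-robin via cpumask_next_wrap(). The following standalone userspace sketch illustrates that wrap-around walk with a plain bitmask; next_cpu_wrap() and pcpu_mask are illustrative stand-ins, not kernel APIs.

	#include <stdio.h>

	/* Return the next set bit after cur, wrapping to the lowest set bit. */
	static int next_cpu_wrap(unsigned int mask, int cur, int ncpus)
	{
		int i;

		for (i = 1; i <= ncpus; i++) {
			int cpu = (cur + i) % ncpus;

			if (mask & (1u << cpu))
				return cpu;
		}
		return -1;	/* empty mask */
	}

	int main(void)
	{
		unsigned int pcpu_mask = 0x0b;	/* CPUs 0, 1 and 3 do parallel work */
		int cpu = 0, step;

		/* Objects are dequeued for serialization in this fixed order,
		 * analogous to how pd->cpu advances in padata_get_next(). */
		for (step = 0; step < 6; step++) {
			printf("dequeue from cpu %d\n", cpu);
			cpu = next_cpu_wrap(pcpu_mask, cpu, 4);
		}
		return 0;
	}

Running it prints 0, 1, 3, 0, 1, 3: each pass visits the usable CPUs in a stable order, which is why the real code can simply check the reorder list of pd->cpu to decide whether more work is pending.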