diff options
| author | Guchun Chen <guchunc@codeaurora.org> | 2017-10-20 16:51:20 +0800 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-10-24 01:09:54 -0700 |
| commit | d0db21101bf6cc0130a9f33382368ca59a488eb1 (patch) | |
| tree | 05397a8e574da40d7d3436632c0c18cb75207abf /drivers/gpu/drm/msm/msm_drv.c | |
| parent | 9551e129dd2625ea2ab4fa5620820849b069ce2f (diff) | |
drm: msm: fix list corruption problem
When multiple worker threads compete to update event_list, with the current
vblank_ctrl_worker mechanism, there is a risk that a list node
is deleted twice. This is because the spin_lock is dropped while one
thread is traversing the list, so another queueing thread may continue
to add new events to the list concurrently.
This brings a conflict risk. This patch corrects this.
Change-Id: Ice31462d196c57ce18d7b998c1a1f0b7feeb08fc
Signed-off-by: Xiaowen Wu <wxiaowen@codeaurora.org>
Signed-off-by: Guchun Chen <guchunc@codeaurora.org>
Diffstat (limited to 'drivers/gpu/drm/msm/msm_drv.c')
| -rw-r--r-- | drivers/gpu/drm/msm/msm_drv.c | 10 |
1 files changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b245a4c7c826..6f968e93d959 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -185,12 +185,16 @@ static void vblank_ctrl_worker(struct kthread_work *work) struct msm_kms *kms = priv->kms; struct vblank_event *vbl_ev, *tmp; unsigned long flags; + LIST_HEAD(tmp_head); spin_lock_irqsave(&vbl_ctrl->lock, flags); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { list_del(&vbl_ev->node); - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + list_add_tail(&vbl_ev->node, &tmp_head); + } + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) { if (vbl_ev->enable) kms->funcs->enable_vblank(kms, priv->crtcs[vbl_ev->crtc_id]); @@ -199,11 +203,7 @@ static void vblank_ctrl_worker(struct kthread_work *work) priv->crtcs[vbl_ev->crtc_id]); kfree(vbl_ev); - - spin_lock_irqsave(&vbl_ctrl->lock, flags); } - - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); } static int vblank_ctrl_queue_work(struct msm_drm_private *priv, |
