diff options
| author | Alan Kwong <akwong@codeaurora.org> | 2016-10-31 13:50:13 -0400 |
|---|---|---|
| committer | Alan Kwong <akwong@codeaurora.org> | 2016-12-14 17:20:26 -0500 |
| commit | 30c9551d5cf6a3f61235f94d7d1162d3ffe2f402 (patch) | |
| tree | 33be3d0302404dd582ef5b3ca244e0eef2510bcb | |
| parent | f94d0f347a9df0ad387b3793d2a69e7dfe3bbcc7 (diff) | |
drm/msm/sde: add frame done callback event to crtc
Frame done is signaled when the current commit request
completes, i.e. after the wb/pp/cmd-done or vsync interrupt is
received. The frame done callback is used by the crtc to
dynamically control clock and bandwidth settings.
Change-Id: I48d633bfc1174dec5084487da17faa91ec5fa52d
Signed-off-by: Alan Kwong <akwong@codeaurora.org>
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_crtc.c | 175 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_crtc.h | 29 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder.c | 134 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder.h | 31 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 9 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 36 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 10 |
8 files changed, 409 insertions, 19 deletions
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index be83ca095de4..4a6aedada52b 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -387,6 +387,111 @@ static void sde_crtc_vblank_cb(void *data) SDE_EVT32_IRQ(DRMID(crtc)); } +static void sde_crtc_frame_event_work(struct kthread_work *work) +{ + struct sde_crtc_frame_event *fevent; + struct drm_crtc *crtc; + struct sde_crtc *sde_crtc; + struct sde_kms *sde_kms; + unsigned long flags; + + if (!work) { + SDE_ERROR("invalid work handle\n"); + return; + } + + fevent = container_of(work, struct sde_crtc_frame_event, work); + if (!fevent->crtc) { + SDE_ERROR("invalid crtc\n"); + return; + } + + crtc = fevent->crtc; + sde_crtc = to_sde_crtc(crtc); + + sde_kms = _sde_crtc_get_kms(crtc); + if (!sde_kms) { + SDE_ERROR("invalid kms handle\n"); + return; + } + + SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, + ktime_to_ns(fevent->ts)); + + if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE || + fevent->event == SDE_ENCODER_FRAME_EVENT_ERROR) { + + if (atomic_read(&sde_crtc->frame_pending) < 1) { + /* this should not happen */ + SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n", + crtc->base.id, + ktime_to_ns(fevent->ts), + atomic_read(&sde_crtc->frame_pending)); + SDE_EVT32(DRMID(crtc), fevent->event, 0); + } else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) { + /* release bandwidth and other resources */ + SDE_DEBUG("crtc%d ts:%lld last pending\n", + crtc->base.id, + ktime_to_ns(fevent->ts)); + SDE_EVT32(DRMID(crtc), fevent->event, 1); + } else { + SDE_EVT32(DRMID(crtc), fevent->event, 2); + } + } else { + SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id, + ktime_to_ns(fevent->ts), + fevent->event); + SDE_EVT32(DRMID(crtc), fevent->event, 3); + } + + spin_lock_irqsave(&sde_crtc->spin_lock, flags); + list_add_tail(&fevent->list, &sde_crtc->frame_event_list); + spin_unlock_irqrestore(&sde_crtc->spin_lock, 
flags); +} + +static void sde_crtc_frame_event_cb(void *data, u32 event) +{ + struct drm_crtc *crtc = (struct drm_crtc *)data; + struct sde_crtc *sde_crtc; + struct msm_drm_private *priv; + struct list_head *list, *next; + struct sde_crtc_frame_event *fevent; + unsigned long flags; + int pipe_id; + + if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + SDE_ERROR("invalid parameters\n"); + return; + } + sde_crtc = to_sde_crtc(crtc); + priv = crtc->dev->dev_private; + pipe_id = drm_crtc_index(crtc); + + SDE_DEBUG("crtc%d\n", crtc->base.id); + + SDE_EVT32(DRMID(crtc), event); + + spin_lock_irqsave(&sde_crtc->spin_lock, flags); + list_for_each_safe(list, next, &sde_crtc->frame_event_list) { + list_del_init(list); + break; + } + spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); + + if (!list) { + SDE_ERROR("crtc%d event %d overflow\n", + crtc->base.id, event); + SDE_EVT32(DRMID(crtc), event); + return; + } + + fevent = container_of(list, struct sde_crtc_frame_event, list); + fevent->event = event; + fevent->crtc = crtc; + fevent->ts = ktime_get(); + queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work); +} + void sde_crtc_complete_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -692,12 +797,14 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; struct drm_device *dev; + struct sde_crtc *sde_crtc; if (!crtc) { SDE_ERROR("invalid argument\n"); return; } dev = crtc->dev; + sde_crtc = to_sde_crtc(crtc); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) @@ -707,7 +814,29 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) * Encoder will flush/start now, unless it has a tx pending. * If so, it may delay and flush at an irq event (e.g. 
ppdone) */ - sde_encoder_schedule_kickoff(encoder); + sde_encoder_prepare_for_kickoff(encoder); + } + + if (atomic_read(&sde_crtc->frame_pending) > 2) { + /* framework allows only 1 outstanding + current */ + SDE_ERROR("crtc%d invalid frame pending\n", + crtc->base.id); + SDE_EVT32(DRMID(crtc), 0); + return; + } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) { + /* acquire bandwidth and other resources */ + SDE_DEBUG("crtc%d first commit\n", crtc->base.id); + SDE_EVT32(DRMID(crtc), 1); + } else { + SDE_DEBUG("crtc%d commit\n", crtc->base.id); + SDE_EVT32(DRMID(crtc), 2); + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc != crtc) + continue; + + sde_encoder_kickoff(encoder); } } @@ -798,10 +927,12 @@ static void sde_crtc_disable(struct drm_crtc *crtc) SDE_DEBUG("crtc%d\n", crtc->base.id); mutex_lock(&sde_crtc->crtc_lock); + SDE_EVT32(DRMID(crtc)); + if (atomic_read(&sde_crtc->vblank_refcount)) { - SDE_ERROR("crtc%d invalid vblank refcount %d\n", - crtc->base.id, - atomic_read(&sde_crtc->vblank_refcount)); + SDE_ERROR("crtc%d invalid vblank refcount\n", + crtc->base.id); + SDE_EVT32(DRMID(crtc)); drm_for_each_encoder(encoder, crtc->dev) { if (encoder->crtc != crtc) continue; @@ -811,6 +942,20 @@ static void sde_crtc_disable(struct drm_crtc *crtc) atomic_set(&sde_crtc->vblank_refcount, 0); } + if (atomic_read(&sde_crtc->frame_pending)) { + /* release bandwidth and other resources */ + SDE_ERROR("crtc%d invalid frame pending\n", + crtc->base.id); + SDE_EVT32(DRMID(crtc)); + atomic_set(&sde_crtc->frame_pending, 0); + } + + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + sde_encoder_register_frame_event_callback(encoder, NULL, NULL); + } + memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers)); sde_crtc->num_mixers = 0; mutex_unlock(&sde_crtc->crtc_lock); @@ -823,6 +968,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc) struct sde_hw_mixer *lm; struct drm_display_mode 
*mode; struct sde_hw_mixer_cfg cfg; + struct drm_encoder *encoder; int i; if (!crtc) { @@ -831,6 +977,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc) } SDE_DEBUG("crtc%d\n", crtc->base.id); + SDE_EVT32(DRMID(crtc)); sde_crtc = to_sde_crtc(crtc); mixer = sde_crtc->mixers; @@ -842,6 +989,13 @@ static void sde_crtc_enable(struct drm_crtc *crtc) drm_mode_debug_printmodeline(mode); + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + sde_encoder_register_frame_event_callback(encoder, + sde_crtc_frame_event_cb, (void *)crtc); + } + for (i = 0; i < sde_crtc->num_mixers; i++) { lm = mixer[i].hw_lm; cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode); @@ -1372,6 +1526,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) struct sde_crtc *sde_crtc = NULL; struct msm_drm_private *priv = NULL; struct sde_kms *kms = NULL; + int i; priv = dev->dev_private; kms = to_sde_kms(priv->kms); @@ -1384,6 +1539,18 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) crtc->dev = dev; atomic_set(&sde_crtc->vblank_refcount, 0); + spin_lock_init(&sde_crtc->spin_lock); + atomic_set(&sde_crtc->frame_pending, 0); + + INIT_LIST_HEAD(&sde_crtc->frame_event_list); + for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) { + INIT_LIST_HEAD(&sde_crtc->frame_events[i].list); + list_add(&sde_crtc->frame_events[i].list, + &sde_crtc->frame_event_list); + init_kthread_work(&sde_crtc->frame_events[i].work, + sde_crtc_frame_event_work); + } + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs); drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 995c26bb89ce..9902c9364939 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -20,6 +20,9 @@ #define SDE_CRTC_NAME_SIZE 12 +/* define the maximum number of in-flight frame events */ +#define SDE_CRTC_FRAME_EVENT_SIZE 
2 + /** * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC * @hw_lm: LM HW Driver context @@ -39,6 +42,22 @@ struct sde_crtc_mixer { }; /** + * struct sde_crtc_frame_event: stores crtc frame event for crtc processing + * @work: base work structure + * @crtc: Pointer to crtc handling this event + * @list: event list + * @ts: timestamp at queue entry + * @event: event identifier + */ +struct sde_crtc_frame_event { + struct kthread_work work; + struct drm_crtc *crtc; + struct list_head list; + ktime_t ts; + u32 event; +}; + +/** * struct sde_crtc - virtualized CRTC data structure * @base : Base drm crtc structure * @name : ASCII description of this crtc @@ -47,7 +66,6 @@ struct sde_crtc_mixer { * @mixer : List of active mixers * @event : Pointer to last received drm vblank event. If there is a * pending vblank event, this will be non-null. - * @pending : Whether or not an update is pending * @vsync_count : Running count of received vsync events * @drm_requested_vblank : Whether vblanks have been enabled in the encoder * @property_info : Opaque structure for generic property support @@ -61,6 +79,10 @@ struct sde_crtc_mixer { * @active_list : list of color processing features are active * @dirty_list : list of color processing features are dirty * @crtc_lock : crtc lock around create, destroy and access. + * @frame_pending : Whether or not an update is pending + * @frame_events : static allocation of in-flight frame events + * @frame_event_list : available frame event list + * @spin_lock : spin lock for frame event, transaction status, etc... 
*/ struct sde_crtc { struct drm_crtc base; @@ -93,6 +115,11 @@ struct sde_crtc { struct list_head dirty_list; struct mutex crtc_lock; + + atomic_t frame_pending; + struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE]; + struct list_head frame_event_list; + spinlock_t spin_lock; }; #define to_sde_crtc(x) container_of(x, struct sde_crtc, base) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 545dfa7c4f16..4fc2d7e5b35d 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -33,6 +33,9 @@ #define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) +/* timeout in frames waiting for frame done */ +#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60 + /* * Two to anticipate panels that can do cmd/vid dynamic switching * plan is to create all possible physical encoder types, and switch between @@ -69,6 +72,14 @@ * @debugfs_root: Debug file system root file node * @enc_lock: Lock around physical encoder create/destroy and access. + * @frame_busy_mask: Bitmask tracking which phys_enc we are still + * busy processing current command. + * Bit0 = phys_encs[0] etc. 
+ * @crtc_frame_event_cb: callback handler for frame event + * @crtc_frame_event_cb_data: callback handler private data + * @crtc_frame_event: callback event + * @frame_done_timeout: frame done timeout in Hz + * @frame_done_timer: watchdog timer for frame done event */ struct sde_encoder_virt { struct drm_encoder base; @@ -87,6 +98,13 @@ struct sde_encoder_virt { struct dentry *debugfs_root; struct mutex enc_lock; + DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); + void (*crtc_frame_event_cb)(void *, u32 event); + void *crtc_frame_event_cb_data; + u32 crtc_frame_event; + + atomic_t frame_done_timeout; + struct timer_list frame_done_timer; }; #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) @@ -505,6 +523,11 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) SDE_EVT32(DRMID(drm_enc)); + if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) { + SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id); + del_timer_sync(&sde_enc->frame_done_timer); + } + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -622,6 +645,56 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, } } +void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, + void (*frame_event_cb)(void *, u32 event), + void *frame_event_cb_data) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + unsigned long lock_flags; + bool enable; + + enable = frame_event_cb ? 
true : false; + + if (!drm_enc) { + SDE_ERROR("invalid encoder\n"); + return; + } + SDE_DEBUG_ENC(sde_enc, "\n"); + SDE_EVT32(DRMID(drm_enc), enable, 0); + + spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags); + sde_enc->crtc_frame_event_cb = frame_event_cb; + sde_enc->crtc_frame_event_cb_data = frame_event_cb_data; + spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); +} + +static void sde_encoder_frame_done_callback( + struct drm_encoder *drm_enc, + struct sde_encoder_phys *ready_phys, u32 event) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + unsigned int i; + + /* One of the physical encoders has become idle */ + for (i = 0; i < sde_enc->num_phys_encs; i++) + if (sde_enc->phys_encs[i] == ready_phys) { + clear_bit(i, sde_enc->frame_busy_mask); + sde_enc->crtc_frame_event |= event; + SDE_EVT32(DRMID(drm_enc), i, + sde_enc->frame_busy_mask[0]); + } + + if (!sde_enc->frame_busy_mask[0]) { + atomic_set(&sde_enc->frame_done_timeout, 0); + del_timer(&sde_enc->frame_done_timer); + + if (sde_enc->crtc_frame_event_cb) + sde_enc->crtc_frame_event_cb( + sde_enc->crtc_frame_event_cb_data, + sde_enc->crtc_frame_event); + } +} + /** * _sde_encoder_trigger_flush - trigger flush for a physical encoder * drm_enc: Pointer to drm encoder structure @@ -737,6 +810,7 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) } pending_flush = 0x0; + sde_enc->crtc_frame_event = 0; /* update pending counts and trigger kickoff ctl flush atomically */ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags); @@ -752,6 +826,8 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) if (!ctl) continue; + set_bit(i, sde_enc->frame_busy_mask); + if (!phys->ops.needs_single_flush || !phys->ops.needs_single_flush(phys)) _sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0); @@ -772,7 +848,7 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); } 
-void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc) +void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc; struct sde_encoder_phys *phys; @@ -793,8 +869,29 @@ void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc) if (phys && phys->ops.prepare_for_kickoff) phys->ops.prepare_for_kickoff(phys); } +} + +void sde_encoder_kickoff(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc; + struct sde_encoder_phys *phys; + unsigned int i; + + if (!drm_enc) { + SDE_ERROR("invalid encoder\n"); + return; + } + sde_enc = to_sde_encoder_virt(drm_enc); + + SDE_DEBUG_ENC(sde_enc, "\n"); + + atomic_set(&sde_enc->frame_done_timeout, + SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 / + drm_enc->crtc->state->adjusted_mode.vrefresh); + mod_timer(&sde_enc->frame_done_timer, jiffies + + ((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000)); - /* all phys encs are ready to go, trigger the kickoff */ + /* All phys encs are ready to go, trigger the kickoff */ _sde_encoder_kickoff_phys(sde_enc); /* allow phys encs to handle any post-kickoff business */ @@ -1089,6 +1186,7 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, struct sde_encoder_virt_ops parent_ops = { sde_encoder_vblank_callback, sde_encoder_underrun_callback, + sde_encoder_frame_done_callback, }; struct sde_enc_phys_init_params phys_params; @@ -1191,6 +1289,34 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, return ret; } +static void sde_encoder_frame_done_timeout(unsigned long data) +{ + struct drm_encoder *drm_enc = (struct drm_encoder *) data; + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + struct msm_drm_private *priv; + + if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { + SDE_ERROR("invalid parameters\n"); + return; + } + priv = drm_enc->dev->dev_private; + + if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) { + SDE_DEBUG("enc%d invalid timeout\n", 
drm_enc->base.id); + SDE_EVT32(DRMID(drm_enc), + sde_enc->frame_busy_mask[0], 0); + return; + } else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) { + SDE_ERROR("enc%d invalid timeout\n", drm_enc->base.id); + SDE_EVT32(DRMID(drm_enc), 0, 1); + return; + } + + SDE_EVT32(DRMID(drm_enc), 0, 2); + sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, + SDE_ENCODER_FRAME_EVENT_ERROR); +} + struct drm_encoder *sde_encoder_init( struct drm_device *dev, struct msm_display_info *disp_info) @@ -1221,6 +1347,10 @@ struct drm_encoder *sde_encoder_init( drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); bs_init(sde_enc); + atomic_set(&sde_enc->frame_done_timeout, 0); + setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout, + (unsigned long) sde_enc); + _sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms); SDE_DEBUG_ENC(sde_enc, "created\n"); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index fccc2649dc59..81f63e00d8b5 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -18,6 +18,9 @@ #include "msm_prop.h" #include "sde_hw_mdss.h" +#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0) +#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1) + /** * Encoder functions and data types * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused @@ -53,16 +56,30 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *encoder, void (*cb)(void *), void *data); /** - * sde_encoder_schedule_kickoff - Register a callback with the encoder to - * trigger a double buffer flip of the ctl path (i.e. ctl flush and start) - * at the appropriate time. + * sde_encoder_register_frame_event_callback - provide callback to encoder that + * will be called after the request is complete, or other events. 
+ * @encoder: encoder pointer + * @cb: callback pointer, provide NULL to deregister + * @data: user data provided to callback + */ +void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder, + void (*cb)(void *, u32), void *data); + +/** + * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl + * path (i.e. ctl flush and start) at next appropriate time. * Immediately: if no previous commit is outstanding. - * Delayed: Save the callback, and return. Does not block. Callback will - * be triggered later. E.g. cmd encoder will trigger at pp_done irq - * irq if it outstanding. + * Delayed: Block until next trigger can be issued. + * @encoder: encoder pointer + */ +void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder); + +/** + * sde_encoder_kickoff - trigger a double buffer flip of the ctl path + * (i.e. ctl flush and start) immediately. * @encoder: encoder pointer */ -void sde_encoder_schedule_kickoff(struct drm_encoder *encoder); +void sde_encoder_kickoff(struct drm_encoder *drm_enc); /** * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 355bd7e7fc11..ed4b7be34281 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -56,12 +56,16 @@ struct sde_encoder_phys; * Note: This is called from IRQ handler context. * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception * Note: This is called from IRQ handler context. + * @handle_frame_done: Notify virtual encoder that this phys encoder + * completes last request frame. 
*/ struct sde_encoder_virt_ops { void (*handle_vblank_virt)(struct drm_encoder *, struct sde_encoder_phys *phys); void (*handle_underrun_virt)(struct drm_encoder *, struct sde_encoder_phys *phys); + void (*handle_frame_done)(struct drm_encoder *, + struct sde_encoder_phys *phys, u32 event); }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 17ed73dc5155..76d6fe0e3023 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -104,6 +104,11 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx) phys_enc = &cmd_enc->base; + /* notify all synchronous clients first, then asynchronous clients */ + if (phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done(phys_enc->parent, + phys_enc, SDE_ENCODER_FRAME_EVENT_DONE); + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); @@ -191,6 +196,10 @@ static int _sde_encoder_phys_cmd_wait_for_idle( phys_enc->hw_pp->idx - PINGPONG_0); SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n", phys_enc->hw_pp->idx - PINGPONG_0); + if (phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_ERROR); ret = -ETIMEDOUT; } } else { diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index eddb28a86b52..3127e2003a16 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -571,16 +571,22 @@ static void sde_encoder_phys_vid_get_hw_resources( hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO; } -static int sde_encoder_phys_vid_wait_for_commit_done( - struct sde_encoder_phys *phys_enc) +static int sde_encoder_phys_vid_wait_for_vblank( + 
struct sde_encoder_phys *phys_enc, bool notify) { struct sde_encoder_phys_vid *vid_enc = to_sde_encoder_phys_vid(phys_enc); u32 irq_status; int ret; - if (!sde_encoder_phys_vid_is_master(phys_enc)) + if (!sde_encoder_phys_vid_is_master(phys_enc)) { + /* always signal done for slave video encoder */ + if (notify && phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_DONE); return 0; + } if (phys_enc->enable_state != SDE_ENC_ENABLED) { SDE_ERROR("encoder not enabled\n"); @@ -604,6 +610,10 @@ static int sde_encoder_phys_vid_wait_for_commit_done( SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0); SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n"); + if (notify && phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_DONE); sde_encoder_phys_vid_vblank_irq(vid_enc, INTR_IDX_VSYNC); ret = 0; @@ -611,15 +621,33 @@ static int sde_encoder_phys_vid_wait_for_commit_done( SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0); SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n"); + if (notify && phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_ERROR); ret = -ETIMEDOUT; } } else { + if (notify && phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_DONE); ret = 0; } return 0; } +static int sde_encoder_phys_vid_wait_for_commit_done( + struct sde_encoder_phys *phys_enc) +{ + int ret; + + ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, true); + + return ret; +} + static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) { struct sde_encoder_phys_vid *vid_enc; @@ -663,7 +691,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) * scanout buffer) don't latch properly.. 
*/ if (sde_encoder_phys_vid_is_master(phys_enc)) { - ret = sde_encoder_phys_vid_wait_for_commit_done(phys_enc); + ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, false); if (ret) { atomic_set(&phys_enc->pending_kickoff_cnt, 0); SDE_ERROR_VIDENC(vid_enc, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 9510e57cb6a8..9943e3906df0 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -537,10 +537,14 @@ static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx) SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, wb_enc->frame_count); - complete_all(&wb_enc->wbdone_complete); + if (phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done(phys_enc->parent, + phys_enc, SDE_ENCODER_FRAME_EVENT_DONE); phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent, phys_enc); + + complete_all(&wb_enc->wbdone_complete); } /** @@ -689,6 +693,10 @@ static int sde_encoder_phys_wb_wait_for_commit_done( } else { SDE_ERROR("wb:%d kickoff timed out\n", wb_enc->wb_dev->wb_idx - WB_0); + if (phys_enc->parent_ops.handle_frame_done) + phys_enc->parent_ops.handle_frame_done( + phys_enc->parent, phys_enc, + SDE_ENCODER_FRAME_EVENT_ERROR); rc = -ETIMEDOUT; } } |
