diff options
author | Tarun Karra <tkarra@codeaurora.org> | 2016-07-22 22:18:43 -0700 |
---|---|---|
committer | Tarun Karra <tkarra@codeaurora.org> | 2016-10-06 15:54:39 -0700 |
commit | 1f2662704f5bf1f4902b73d9fb48840a80f80ef5 (patch) | |
tree | 5a760e5eac53f8a73ad800eb4f5fd4c93e2e4acb /drivers/gpu/msm | |
parent | 2811b6d610686d5c91b48741d6f66eaf63f29e8c (diff) |
msm: kgsl: Modify dispatcher to accept generic objects
Currently the dispatcher accepts only kgsl_cmdbatch objects. This object
is a superset of all the types of objects the dispatcher accepts.
Split the kgsl_cmdbatch object into SYNC and IB/MARKER objects and
restructure the code to make it easier for new types of objects
to be added to the dispatcher queue.
CRs-Fixed: 1054354
Change-Id: I2d482d1081ce6fdb7925243c88ce00ea6b864efe
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
Diffstat (limited to 'drivers/gpu/msm')
-rw-r--r-- | drivers/gpu/msm/adreno.c | 3 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno.h | 4 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_debugfs.c | 58 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_dispatch.c | 899 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_dispatch.h | 10 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_drawctxt.c | 20 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_drawctxt.h | 8 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.c | 148 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.h | 2 | ||||
-rw-r--r-- | drivers/gpu/msm/adreno_trace.h | 24 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl.c | 234 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_cffdump.c | 4 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_cffdump.h | 4 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_device.h | 8 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_drawobj.c | 548 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_drawobj.h | 188 | ||||
-rw-r--r-- | drivers/gpu/msm/kgsl_trace.h | 42 |
17 files changed, 1243 insertions, 961 deletions
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 3900da6af7da..18fbbc88325b 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -40,6 +40,7 @@ /* Include the master list of GPU cores that are supported */ #include "adreno-gpulist.h" +#include "adreno_dispatch.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "adreno." @@ -2813,7 +2814,7 @@ static const struct kgsl_functable adreno_functable = { .getproperty_compat = adreno_getproperty_compat, .waittimestamp = adreno_waittimestamp, .readtimestamp = adreno_readtimestamp, - .issueibcmds = adreno_ringbuffer_issueibcmds, + .queue_cmds = adreno_dispatcher_queue_cmds, .ioctl = adreno_ioctl, .compat_ioctl = adreno_compat_ioctl, .power_stats = adreno_power_stats, diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index d037d8248ba5..693497c6793b 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -776,7 +776,7 @@ struct adreno_gpudev { * @KGSL_FT_REPLAY: Replay the faulting command * @KGSL_FT_SKIPIB: Skip the faulting indirect buffer * @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB - * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the drawobj + * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj * @KGSL_FT_TEMP_DISABLE: Disables FT for all commands * @KGSL_FT_THROTTLE: Disable the context if it faults too often * @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB @@ -793,7 +793,7 @@ enum kgsl_ft_policy_bits { /* KGSL_FT_MAX_BITS is used to calculate the mask */ KGSL_FT_MAX_BITS, /* Internal bits - set during GFT */ - /* Skip the PM dump on replayed drawobjs */ + /* Skip the PM dump on replayed command obj's */ KGSL_FT_SKIP_PMDUMP = 31, }; diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index edf5c634079e..fffe08038bcd 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -155,10 +155,10 @@ struct flag_entry { static 
const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS}; -static const struct flag_entry drawobj_priv[] = { - { DRAWOBJ_FLAG_SKIP, "skip"}, - { DRAWOBJ_FLAG_FORCE_PREAMBLE, "force_preamble"}, - { DRAWOBJ_FLAG_WFI, "wait_for_idle" }, +static const struct flag_entry cmdobj_priv[] = { + { CMDOBJ_SKIP, "skip"}, + { CMDOBJ_FORCE_PREAMBLE, "force_preamble"}, + { CMDOBJ_WFI, "wait_for_idle" }, }; static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS}; @@ -199,43 +199,55 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table, seq_puts(s, "None"); } -static void drawobj_print(struct seq_file *s, struct kgsl_drawobj *drawobj) +static void syncobj_print(struct seq_file *s, + struct kgsl_drawobj_sync *syncobj) { struct kgsl_drawobj_sync_event *event; unsigned int i; - /* print fences first, since they block this drawobj */ + seq_puts(s, " syncobj "); - for (i = 0; i < drawobj->numsyncs; i++) { - event = &drawobj->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_drawobj_event_pending(drawobj, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; - /* - * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways - * so that it is clear if the fence was a separate submit - * or part of an IB submit. 
- */ - seq_printf(s, "\t%d ", drawobj->timestamp); sync_event_print(s, event); seq_puts(s, "\n"); } +} - /* if this flag is set, there won't be an IB */ - if (drawobj->flags & KGSL_CONTEXT_SYNC) - return; +static void cmdobj_print(struct seq_file *s, + struct kgsl_drawobj_cmd *cmdobj) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + + if (drawobj->type == CMDOBJ_TYPE) + seq_puts(s, " cmdobj "); + else + seq_puts(s, " markerobj "); + + seq_printf(s, "\t %d ", drawobj->timestamp); - seq_printf(s, "\t%d: ", drawobj->timestamp); + seq_puts(s, " priv: "); + print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv), + cmdobj->priv); +} + +static void drawobj_print(struct seq_file *s, + struct kgsl_drawobj *drawobj) +{ + if (drawobj->type == SYNCOBJ_TYPE) + syncobj_print(s, SYNCOBJ(drawobj)); + else if ((drawobj->type == CMDOBJ_TYPE) || + (drawobj->type == MARKEROBJ_TYPE)) + cmdobj_print(s, CMDOBJ(drawobj)); seq_puts(s, " flags: "); print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags), drawobj->flags); - seq_puts(s, " priv: "); - print_flags(s, drawobj_priv, ARRAY_SIZE(drawobj_priv), - drawobj->priv); - seq_puts(s, "\n"); } diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index e42588004c50..cb4108b4e1f9 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -273,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev) } /** - * _retire_marker() - Retire a marker drawobj without sending it to the - * hardware - * @drawobj: Pointer to the drawobj to retire + * _retire_timestamp() - Retire object without sending it + * to the hardware + * @drawobj: Pointer to the object to retire * - * In some cases marker commands can be retired by the software without going to - * the GPU. In those cases, update the memstore from the CPU, kick off the - * event engine to handle expired events and destroy the drawobj. 
+ * In some cases ibs can be retired by the software + * without going to the GPU. In those cases, update the + * memstore from the CPU, kick off the event engine to handle + * expired events and destroy the ib. */ -static void _retire_marker(struct kgsl_drawobj *drawobj) +static void _retire_timestamp(struct kgsl_drawobj *drawobj) { struct kgsl_context *context = drawobj->context; - struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); + struct adreno_context *drawctxt = ADRENO_CONTEXT(context); struct kgsl_device *device = context->device; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); /* * Write the start and end timestamp to the memstore to keep the @@ -309,12 +309,12 @@ static void _retire_marker(struct kgsl_drawobj *drawobj) * rptr scratch out address. At this point GPU clocks turned off. * So avoid reading GPU register directly for A3xx. */ - if (adreno_is_a3xx(adreno_dev)) + if (adreno_is_a3xx(ADRENO_DEVICE(device))) trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, - 0); + 0, 0); else trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, - adreno_get_rptr(drawctxt->rb)); + adreno_get_rptr(drawctxt->rb), 0); kgsl_drawobj_destroy(drawobj); } @@ -343,11 +343,13 @@ static int _check_context_queue(struct adreno_context *drawctxt) * return true if this is a marker command and the dependent timestamp has * retired */ -static bool _marker_expired(struct kgsl_drawobj *drawobj) +static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj) { + struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj); + return (drawobj->flags & KGSL_DRAWOBJ_MARKER) && kgsl_check_timestamp(drawobj->device, drawobj->context, - drawobj->marker_timestamp); + markerobj->marker_timestamp); } static inline void _pop_drawobj(struct adreno_context *drawctxt) @@ -356,137 +358,110 @@ static inline void _pop_drawobj(struct adreno_context *drawctxt) ADRENO_CONTEXT_DRAWQUEUE_SIZE); drawctxt->queued--; } -/** - * Removes all expired marker and sync obj's 
from - * the context queue when marker command and dependent - * timestamp are retired. This function is recursive. - * returns drawobj if context has command, NULL otherwise. - */ -static struct kgsl_drawobj *_expire_markers(struct adreno_context *drawctxt) -{ - struct kgsl_drawobj *drawobj; - if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail) - return NULL; - - drawobj = drawctxt->drawqueue[drawctxt->drawqueue_head]; - - if (drawobj == NULL) - return NULL; - - /* Check to see if this is a marker we can skip over */ - if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) && - _marker_expired(drawobj)) { +static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj, + struct adreno_context *drawctxt) +{ + if (_marker_expired(cmdobj)) { _pop_drawobj(drawctxt); - _retire_marker(drawobj); - return _expire_markers(drawctxt); - } - - if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { - if (!kgsl_drawobj_events_pending(drawobj)) { - _pop_drawobj(drawctxt); - kgsl_drawobj_destroy(drawobj); - return _expire_markers(drawctxt); - } + _retire_timestamp(DRAWOBJ(cmdobj)); + return 0; } - return drawobj; -} - -static void expire_markers(struct adreno_context *drawctxt) -{ - spin_lock(&drawctxt->lock); - _expire_markers(drawctxt); - spin_unlock(&drawctxt->lock); -} - -static struct kgsl_drawobj *_get_drawobj(struct adreno_context *drawctxt) -{ - struct kgsl_drawobj *drawobj; - bool pending = false; - - drawobj = _expire_markers(drawctxt); - - if (drawobj == NULL) - return NULL; - /* - * If the marker isn't expired but the SKIP bit is set - * then there are real commands following this one in - * the queue. This means that we need to dispatch the - * command so that we can keep the timestamp accounting - * correct. If skip isn't set then we block this queue + * If the marker isn't expired but the SKIP bit + * is set then there are real commands following + * this one in the queue. This means that we + * need to dispatch the command so that we can + * keep the timestamp accounting correct. 
If + * skip isn't set then we block this queue * until the dependent timestamp expires */ - if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) && - (!test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv))) - pending = true; + return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN; +} - if (kgsl_drawobj_events_pending(drawobj)) - pending = true; +static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj, + struct adreno_context *drawctxt) +{ + if (!kgsl_drawobj_events_pending(syncobj)) { + _pop_drawobj(drawctxt); + kgsl_drawobj_destroy(DRAWOBJ(syncobj)); + return 0; + } /* - * If changes are pending and the canary timer hasn't been - * started yet, start it + * If we got here, there are pending events for sync object. + * Start the canary timer if it hasnt been started already. */ - if (pending) { - /* - * If syncpoints are pending start the canary timer if - * it hasn't already been started - */ - if (!drawobj->timeout_jiffies) { - drawobj->timeout_jiffies = - jiffies + msecs_to_jiffies(5000); - mod_timer(&drawobj->timer, drawobj->timeout_jiffies); - } - - return ERR_PTR(-EAGAIN); + if (!syncobj->timeout_jiffies) { + syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000); + mod_timer(&syncobj->timer, syncobj->timeout_jiffies); } - _pop_drawobj(drawctxt); - return drawobj; + return -EAGAIN; } -/** - * adreno_dispatcher_get_drawobj() - Get a new command from a context queue - * @drawctxt: Pointer to the adreno draw context - * - * Dequeue a new drawobj from the context list +/* + * Retires all expired marker and sync objs from the context + * queue and returns one of the below + * a) next drawobj that needs to be sent to ringbuffer + * b) -EAGAIN for syncobj with syncpoints pending. + * c) -EAGAIN for markerobj whose marker timestamp has not expired yet. + * c) NULL for no commands remaining in drawqueue. 
*/ -static struct kgsl_drawobj *adreno_dispatcher_get_drawobj( - struct adreno_context *drawctxt) +static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj( + struct adreno_context *drawctxt) { struct kgsl_drawobj *drawobj; + unsigned int i = drawctxt->drawqueue_head; + int ret = 0; - spin_lock(&drawctxt->lock); - drawobj = _get_drawobj(drawctxt); - spin_unlock(&drawctxt->lock); + if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail) + return NULL; - /* - * Delete the timer and wait for timer handler to finish executing - * on another core before queueing the buffer. We must do this - * without holding any spin lock that the timer handler might be using - */ - if (!IS_ERR_OR_NULL(drawobj)) - del_timer_sync(&drawobj->timer); + for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail; + i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) { + + drawobj = drawctxt->drawqueue[i]; + + if (drawobj == NULL) + return NULL; + + if (drawobj->type == CMDOBJ_TYPE) + return drawobj; + else if (drawobj->type == MARKEROBJ_TYPE) { + ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt); + /* Special case where marker needs to be sent to GPU */ + if (ret == 1) + return drawobj; + } else if (drawobj->type == SYNCOBJ_TYPE) + ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt); - return drawobj; + if (ret == -EAGAIN) + return ERR_PTR(-EAGAIN); + + continue; + } + + return NULL; } /** - * adreno_dispatcher_requeue_drawobj() - Put a command back on the context + * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context * queue * @drawctxt: Pointer to the adreno draw context - * @drawobj: Pointer to the KGSL drawobj to requeue + * @cmdobj: Pointer to the KGSL command object to requeue * * Failure to submit a command to the ringbuffer isn't the fault of the command * being submitted so if a failure happens, push it back on the head of the the * context queue to be reconsidered again unless the context got detached. 
*/ -static inline int adreno_dispatcher_requeue_drawobj( - struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj) +static inline int adreno_dispatcher_requeue_cmdobj( + struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) { unsigned int prev; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); spin_lock(&drawctxt->lock); if (kgsl_context_detached(&drawctxt->base) || @@ -554,9 +529,10 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev, * Send a KGSL drawobj to the GPU hardware */ static int sendcmd(struct adreno_device *adreno_dev, - struct kgsl_drawobj *drawobj) + struct kgsl_drawobj_cmd *cmdobj) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); @@ -591,14 +567,14 @@ static int sendcmd(struct adreno_device *adreno_dev, } if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) { - set_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv); - drawobj->profile_index = adreno_dev->profile_index; + set_bit(CMDOBJ_PROFILE, &cmdobj->priv); + cmdobj->profile_index = adreno_dev->profile_index; adreno_dev->profile_index = (adreno_dev->profile_index + 1) % ADRENO_DRAWOBJ_PROFILE_COUNT; } - ret = adreno_ringbuffer_submitcmd(adreno_dev, drawobj, &time); + ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time); /* * On the first command, if the submission was successful, then read the @@ -657,9 +633,9 @@ static int sendcmd(struct adreno_device *adreno_dev, mutex_unlock(&device->mutex); - drawobj->submit_ticks = time.ticks; + cmdobj->submit_ticks = time.ticks; - dispatch_q->cmd_q[dispatch_q->tail] = drawobj; + dispatch_q->cmd_q[dispatch_q->tail] = cmdobj; dispatch_q->tail = (dispatch_q->tail + 1) % ADRENO_DISPATCH_DRAWQUEUE_SIZE; @@ -714,7 +690,9 @@ static int 
dispatcher_context_sendcmds(struct adreno_device *adreno_dev, unsigned int timestamp; if (dispatch_q->inflight >= inflight) { - expire_markers(drawctxt); + spin_lock(&drawctxt->lock); + _process_drawqueue_get_next_drawobj(drawctxt); + spin_unlock(&drawctxt->lock); return -EBUSY; } @@ -724,11 +702,13 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, while ((count < _context_drawobj_burst) && (dispatch_q->inflight < inflight)) { struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_cmd *cmdobj; if (adreno_gpu_fault(adreno_dev) != 0) break; - drawobj = adreno_dispatcher_get_drawobj(drawctxt); + spin_lock(&drawctxt->lock); + drawobj = _process_drawqueue_get_next_drawobj(drawctxt); /* * adreno_context_get_drawobj returns -EAGAIN if the current @@ -740,32 +720,23 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, if (IS_ERR_OR_NULL(drawobj)) { if (IS_ERR(drawobj)) ret = PTR_ERR(drawobj); + spin_unlock(&drawctxt->lock); break; } - - /* - * If this is a synchronization submission then there are no - * commands to submit. Discard it and get the next item from - * the queue. 
Decrement count so this packet doesn't count - * against the burst for the context - */ - - if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { - kgsl_drawobj_destroy(drawobj); - continue; - } + _pop_drawobj(drawctxt); + spin_unlock(&drawctxt->lock); timestamp = drawobj->timestamp; - - ret = sendcmd(adreno_dev, drawobj); + cmdobj = CMDOBJ(drawobj); + ret = sendcmd(adreno_dev, cmdobj); /* - * On error from sendcmd() try to requeue the drawobj + * On error from sendcmd() try to requeue the cmdobj * unless we got back -ENOENT which means that the context has * been detached and there will be no more deliveries from here */ if (ret != 0) { - /* Destroy the drawobj on -ENOENT */ + /* Destroy the cmdobj on -ENOENT */ if (ret == -ENOENT) kgsl_drawobj_destroy(drawobj); else { @@ -773,8 +744,8 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, * If the requeue returns an error, return that * instead of whatever sendcmd() sent us */ - int r = adreno_dispatcher_requeue_drawobj( - drawctxt, drawobj); + int r = adreno_dispatcher_requeue_cmdobj( + drawctxt, cmdobj); if (r) ret = r; } @@ -938,97 +909,85 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) * @drawctxt - Pointer to an adreno draw context struct * @drawobj - Pointer to a drawobj * @timestamp - Pointer to a timestamp value possibly passed from the user + * @user_ts - user generated timestamp * * Assign a timestamp based on the settings of the draw context and the command * batch. 
*/ static int get_timestamp(struct adreno_context *drawctxt, - struct kgsl_drawobj *drawobj, unsigned int *timestamp) + struct kgsl_drawobj *drawobj, unsigned int *timestamp, + unsigned int user_ts) { - /* Synchronization commands don't get a timestamp */ - if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { - *timestamp = 0; - return 0; - } if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) { /* * User specified timestamps need to be greater than the last * issued timestamp in the context */ - if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) + if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0) return -ERANGE; - drawctxt->timestamp = *timestamp; + drawctxt->timestamp = user_ts; } else drawctxt->timestamp++; *timestamp = drawctxt->timestamp; + drawobj->timestamp = *timestamp; return 0; } -/** - * adreno_dispactcher_queue_cmd() - Queue a new command in the context - * @adreno_dev: Pointer to the adreno device struct - * @drawctxt: Pointer to the adreno draw context - * @drawobj: Pointer to the drawobj being submitted - * @timestamp: Pointer to the requested timestamp - * - * Queue a command in the context - if there isn't any room in the queue, then - * block until there is - */ -int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, - uint32_t *timestamp) +static void _set_ft_policy(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) { - struct adreno_dispatcher_drawqueue *dispatch_q = - ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj); - int ret; - - spin_lock(&drawctxt->lock); - - if (kgsl_context_detached(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -ENOENT; - } + /* + * Set the fault tolerance policy for the command batch - assuming the + * context hasn't disabled FT use the current device policy + */ + if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) + set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy); + 
else + cmdobj->fault_policy = adreno_dev->ft_policy; +} +static void _cmdobj_set_flags(struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) +{ /* * Force the preamble for this submission only - this is usually * requested by the dispatcher as part of fault recovery */ - if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv)) - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); /* - * Force the premable if set from userspace in the context or drawobj - * flags + * Force the premable if set from userspace in the context or + * command obj flags */ - if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) || - (drawobj->flags & KGSL_DRAWOBJ_CTX_SWITCH)) - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv); + (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH)) + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); - /* Skip this drawobj commands if IFH_NOP is enabled */ + /* Skip this ib if IFH_NOP is enabled */ if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP) - set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); + set_bit(CMDOBJ_SKIP, &cmdobj->priv); /* * If we are waiting for the end of frame and it hasn't appeared yet, - * then mark the drawobj as skipped. It will still progress + * then mark the command obj as skipped. 
It will still progress * through the pipeline but it won't actually send any commands */ if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) { - set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); + set_bit(CMDOBJ_SKIP, &cmdobj->priv); /* - * If this drawobj represents the EOF then clear the way + * If this command obj represents the EOF then clear the way * for the dispatcher to continue submitting */ - if (drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME) { + if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) { clear_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv); @@ -1040,9 +999,83 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, &drawctxt->base.priv); } } +} - /* Wait for room in the context queue */ +static inline int _check_context_state(struct kgsl_context *context) +{ + if (kgsl_context_invalid(context)) + return -EDEADLK; + + if (kgsl_context_detached(context)) + return -ENOENT; + + return 0; +} + +static inline bool _verify_ib(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_memobj_node *ib) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_process_private *private = dev_priv->process_priv; + + /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */ + if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) { + pr_context(device, context, "ctxt %d invalid ib size %lld\n", + context->id, ib->size); + return false; + } + + /* Make sure that the address is mapped */ + if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) { + pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n", + context->id, ib->gpuaddr); + return false; + } + + return true; +} + +static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_memobj_node *ib; + unsigned int i; + for (i = 0; i < count; i++) { + /* Verify the IBs before they 
get queued */ + if (drawobj[i]->type == CMDOBJ_TYPE) { + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]); + + list_for_each_entry(ib, &cmdobj->cmdlist, node) + if (_verify_ib(dev_priv, + &ADRENO_CONTEXT(context)->base, ib) + == false) + return -EINVAL; + /* + * Clear the wake on touch bit to indicate an IB has + * been submitted since the last time we set it. + * But only clear it when we have rendering commands. + */ + device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; + } + + /* A3XX does not have support for drawobj profiling */ + if (adreno_is_a3xx(ADRENO_DEVICE(device)) && + (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING)) + return -EOPNOTSUPP; + } + + return 0; +} + +static inline int _wait_for_room_in_context_queue( + struct adreno_context *drawctxt) +{ + int ret = 0; + + /* Wait for room in the context queue */ while (drawctxt->queued >= _context_drawqueue_size) { trace_adreno_drawctxt_sleep(drawctxt); spin_unlock(&drawctxt->lock); @@ -1054,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, spin_lock(&drawctxt->lock); trace_adreno_drawctxt_wake(drawctxt); - if (ret <= 0) { - spin_unlock(&drawctxt->lock); + if (ret <= 0) return (ret == 0) ? 
-ETIMEDOUT : (int) ret; - } } + + return 0; +} + +static unsigned int _check_context_state_to_queue_cmds( + struct adreno_context *drawctxt) +{ + int ret = _check_context_state(&drawctxt->base); + + if (ret) + return ret; + + ret = _wait_for_room_in_context_queue(drawctxt); + if (ret) + return ret; + /* * Account for the possiblity that the context got invalidated * while we were sleeping */ + return _check_context_state(&drawctxt->base); +} - if (kgsl_context_invalid(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -EDEADLK; - } - if (kgsl_context_detached(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -ENOENT; - } +static void _queue_drawobj(struct adreno_context *drawctxt, + struct kgsl_drawobj *drawobj) +{ + /* Put the command into the queue */ + drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj; + drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) % + ADRENO_CONTEXT_DRAWQUEUE_SIZE; + drawctxt->queued++; + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); +} - ret = get_timestamp(drawctxt, drawobj, timestamp); - if (ret) { - spin_unlock(&drawctxt->lock); +static int _queue_markerobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj, + uint32_t *timestamp, unsigned int user_ts) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj); + int ret; + + ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); + if (ret) return ret; + + /* + * See if we can fastpath this thing - if nothing is queued + * and nothing is inflight retire without bothering the GPU + */ + if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device, + drawobj->context, drawctxt->queued_timestamp)) { + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); + _retire_timestamp(drawobj); + return 1; } - drawobj->timestamp = *timestamp; + /* + * Remember the last queued timestamp - the marker will block + * until that timestamp is expired (unless another command + * comes along and forces 
the marker to execute) + */ - if (drawobj->flags & KGSL_DRAWOBJ_MARKER) { + markerobj->marker_timestamp = drawctxt->queued_timestamp; + drawctxt->queued_timestamp = *timestamp; + _set_ft_policy(adreno_dev, drawctxt, markerobj); + _cmdobj_set_flags(drawctxt, markerobj); - /* - * See if we can fastpath this thing - if nothing is queued - * and nothing is inflight retire without bothering the GPU - */ + _queue_drawobj(drawctxt, drawobj); - if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device, - drawobj->context, drawctxt->queued_timestamp)) { - trace_adreno_cmdbatch_queued(drawobj, - drawctxt->queued); + return 0; +} - _retire_marker(drawobj); - spin_unlock(&drawctxt->lock); - return 0; - } +static int _queue_cmdobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj, + uint32_t *timestamp, unsigned int user_ts) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + unsigned int j; + int ret; - /* - * Remember the last queued timestamp - the marker will block - * until that timestamp is expired (unless another command - * comes along and forces the marker to execute) - */ + ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); + if (ret) + return ret; + + /* + * If this is a real command then we need to force any markers + * queued before it to dispatch to keep time linear - set the + * skip bit so the commands get NOPed. 
+ */ + j = drawctxt->drawqueue_head; + + while (j != drawctxt->drawqueue_tail) { + if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) { + struct kgsl_drawobj_cmd *markerobj = + CMDOBJ(drawctxt->drawqueue[j]); + set_bit(CMDOBJ_SKIP, &markerobj->priv); + } - drawobj->marker_timestamp = drawctxt->queued_timestamp; + j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE); } - /* SYNC commands have timestamp 0 and will get optimized out anyway */ - if (!(drawobj->flags & KGSL_CONTEXT_SYNC)) - drawctxt->queued_timestamp = *timestamp; + drawctxt->queued_timestamp = *timestamp; + _set_ft_policy(adreno_dev, drawctxt, cmdobj); + _cmdobj_set_flags(drawctxt, cmdobj); - /* - * Set the fault tolerance policy for the drawobj - assuming the - * context hasn't disabled FT use the current device policy - */ + _queue_drawobj(drawctxt, drawobj); - if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) - set_bit(KGSL_FT_DISABLE, &drawobj->fault_policy); - else - drawobj->fault_policy = adreno_dev->ft_policy; + return 0; +} - /* Put the command into the queue */ - drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj; - drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) % - ADRENO_CONTEXT_DRAWQUEUE_SIZE; +static void _queue_syncobj(struct adreno_context *drawctxt, + struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); - /* - * If this is a real command then we need to force any markers queued - * before it to dispatch to keep time linear - set the skip bit so - * the commands get NOPed. 
- */ + *timestamp = 0; + drawobj->timestamp = 0; + + _queue_drawobj(drawctxt, drawobj); +} + +/** + * adreno_dispactcher_queue_drawobj() - Queue a new draw object in the context + * @dev_priv: Pointer to the device private struct + * @context: Pointer to the kgsl draw context + * @drawobj: Pointer to the array of drawobj's being submitted + * @count: Number of drawobj's being submitted + * @timestamp: Pointer to the requested timestamp + * + * Queue a command in the context - if there isn't any room in the queue, then + * block until there is + */ +int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp) + +{ + struct kgsl_device *device = dev_priv->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_context *drawctxt = ADRENO_CONTEXT(context); + struct adreno_dispatcher_drawqueue *dispatch_q; + int ret; + unsigned int i, user_ts; + + ret = _check_context_state(&drawctxt->base); + if (ret) + return ret; + + ret = _verify_cmdobj(dev_priv, context, drawobj, count); + if (ret) + return ret; + + /* wait for the suspend gate */ + wait_for_completion(&device->halt_gate); + + spin_lock(&drawctxt->lock); + + ret = _check_context_state_to_queue_cmds(drawctxt); + if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } - if (!(drawobj->flags & KGSL_DRAWOBJ_MARKER)) { - unsigned int i = drawctxt->drawqueue_head; + user_ts = *timestamp; - while (i != drawctxt->drawqueue_tail) { - if (drawctxt->drawqueue[i]->flags & KGSL_DRAWOBJ_MARKER) - set_bit(DRAWOBJ_FLAG_SKIP, - &drawctxt->drawqueue[i]->priv); + for (i = 0; i < count; i++) { - i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE); + switch (drawobj[i]->type) { + case MARKEROBJ_TYPE: + ret = _queue_markerobj(adreno_dev, drawctxt, + CMDOBJ(drawobj[i]), + timestamp, user_ts); + if (ret == 1) { + spin_unlock(&drawctxt->lock); + goto done; + } else if (ret) { + 
spin_unlock(&drawctxt->lock); + return ret; + } + break; + case CMDOBJ_TYPE: + ret = _queue_cmdobj(adreno_dev, drawctxt, + CMDOBJ(drawobj[i]), + timestamp, user_ts); + if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } + break; + case SYNCOBJ_TYPE: + _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]), + timestamp); + break; + default: + spin_unlock(&drawctxt->lock); + return -EINVAL; } + } - drawctxt->queued++; - trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); + dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]); _track_context(adreno_dev, dispatch_q, drawctxt); @@ -1167,6 +1312,9 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, if (dispatch_q->inflight < _context_drawobj_burst) adreno_dispatcher_issuecmds(adreno_dev); +done: + if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv)) + return -EPROTO; return 0; } @@ -1214,11 +1362,11 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id) * passed in then zero the size which effectively skips it when it is submitted * in the ringbuffer. 
*/ -static void drawobj_skip_ib(struct kgsl_drawobj *drawobj, uint64_t base) +static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base) { struct kgsl_memobj_node *ib; - list_for_each_entry(ib, &drawobj->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { if (ib->gpuaddr == base) { ib->priv |= MEMOBJ_SKIP; if (base) @@ -1227,9 +1375,10 @@ static void drawobj_skip_ib(struct kgsl_drawobj *drawobj, uint64_t base) } } -static void drawobj_skip_cmd(struct kgsl_drawobj *drawobj, - struct kgsl_drawobj **replay, int count) +static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj, + struct kgsl_drawobj_cmd **replay, int count) { + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int i; @@ -1245,9 +1394,9 @@ static void drawobj_skip_cmd(struct kgsl_drawobj *drawobj, * b) force preamble for next commandbatch */ for (i = 1; i < count; i++) { - if (replay[i]->context->id == drawobj->context->id) { + if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) { replay[i]->fault_policy = replay[0]->fault_policy; - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery); break; } @@ -1264,26 +1413,29 @@ static void drawobj_skip_cmd(struct kgsl_drawobj *drawobj, drawctxt->fault_policy = replay[0]->fault_policy; } - /* set the flags to skip this drawobj */ - set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); - drawobj->fault_recovery = 0; + /* set the flags to skip this cmdobj */ + set_bit(CMDOBJ_SKIP, &cmdobj->priv); + cmdobj->fault_recovery = 0; } -static void drawobj_skip_frame(struct kgsl_drawobj *drawobj, - struct kgsl_drawobj **replay, int count) +static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj, + struct kgsl_drawobj_cmd **replay, int count) { + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int skip = 1; int i; 
for (i = 0; i < count; i++) { + struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]); + /* - * Only operate on drawobjs that belong to the + * Only operate on drawobj's that belong to the * faulting context */ - if (replay[i]->context->id != drawobj->context->id) + if (replay_obj->context->id != drawobj->context->id) continue; /* @@ -1293,12 +1445,12 @@ static void drawobj_skip_frame(struct kgsl_drawobj *drawobj, */ if (skip) { - set_bit(DRAWOBJ_FLAG_SKIP, &replay[i]->priv); + set_bit(CMDOBJ_SKIP, &replay[i]->priv); - if (replay[i]->flags & KGSL_DRAWOBJ_END_OF_FRAME) + if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME) skip = 0; } else { - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); return; } } @@ -1320,26 +1472,28 @@ static void drawobj_skip_frame(struct kgsl_drawobj *drawobj, set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv); } -static void remove_invalidated_drawobjs(struct kgsl_device *device, - struct kgsl_drawobj **replay, int count) +static void remove_invalidated_cmdobjs(struct kgsl_device *device, + struct kgsl_drawobj_cmd **replay, int count) { int i; for (i = 0; i < count; i++) { - struct kgsl_drawobj *cmd = replay[i]; - if (cmd == NULL) + struct kgsl_drawobj_cmd *cmdobj = replay[i]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + + if (cmdobj == NULL) continue; - if (kgsl_context_detached(cmd->context) || - kgsl_context_invalid(cmd->context)) { + if (kgsl_context_detached(drawobj->context) || + kgsl_context_invalid(drawobj->context)) { replay[i] = NULL; mutex_lock(&device->mutex); kgsl_cancel_events_timestamp(device, - &cmd->context->events, cmd->timestamp); + &drawobj->context->events, drawobj->timestamp); mutex_unlock(&device->mutex); - kgsl_drawobj_destroy(cmd); + kgsl_drawobj_destroy(drawobj); } } } @@ -1363,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context) static void adreno_fault_header(struct kgsl_device *device, - struct 
adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj) + struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); unsigned int status, rptr, wptr, ib1sz, ib2sz; uint64_t ib1base, ib2base; @@ -1424,22 +1579,23 @@ void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev, } /** - * process_drawobj_fault() - Process a drawobj for fault policies - * @device: Device on which the drawobj caused a fault - * @replay: List of drawobj's that are to be replayed on the device. The - * faulting drawobj is the first command in the replay list and the remaining - * drawobj's in the list are commands that were submitted to the same queue + * process_cmdobj_fault() - Process a cmdobj for fault policies + * @device: Device on which the cmdobj caused a fault + * @replay: List of cmdobj's that are to be replayed on the device. The + * first command in the replay list is the faulting command and the remaining + * cmdobj's in the list are commands that were submitted to the same queue * as the faulting one. - * @count: Number of drawobj's in replay + * @count: Number of cmdobj's in replay * @base: The IB1 base at the time of fault * @fault: The fault type */ -static void process_drawobj_fault(struct kgsl_device *device, - struct kgsl_drawobj **replay, int count, +static void process_cmdobj_fault(struct kgsl_device *device, + struct kgsl_drawobj_cmd **replay, int count, unsigned int base, int fault) { - struct kgsl_drawobj *drawobj = replay[0]; + struct kgsl_drawobj_cmd *cmdobj = replay[0]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); int i; char *state = "failed"; @@ -1453,7 +1609,7 @@ static void process_drawobj_fault(struct kgsl_device *device, * where 1st and 4th gpu hang are more than 3 seconds apart we * won't disable GFT and invalidate the context. 
*/ - if (test_bit(KGSL_FT_THROTTLE, &drawobj->fault_policy)) { + if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) { if (time_after(jiffies, (drawobj->context->fault_time + msecs_to_jiffies(_fault_throttle_time)))) { drawobj->context->fault_time = jiffies; @@ -1463,7 +1619,7 @@ static void process_drawobj_fault(struct kgsl_device *device, if (drawobj->context->fault_count > _fault_throttle_burst) { set_bit(KGSL_FT_DISABLE, - &drawobj->fault_policy); + &cmdobj->fault_policy); pr_context(device, drawobj->context, "gpu fault threshold exceeded %d faults in %d msecs\n", _fault_throttle_burst, @@ -1473,45 +1629,45 @@ static void process_drawobj_fault(struct kgsl_device *device, } /* - * If FT is disabled for this drawobj invalidate immediately + * If FT is disabled for this cmdobj invalidate immediately */ - if (test_bit(KGSL_FT_DISABLE, &drawobj->fault_policy) || - test_bit(KGSL_FT_TEMP_DISABLE, &drawobj->fault_policy)) { + if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) || + test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) { state = "skipped"; - bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* If the context is detached do not run FT on context */ if (kgsl_context_detached(drawobj->context)) { state = "detached"; - bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* - * Set a flag so we don't print another PM dump if the drawobj fails + * Set a flag so we don't print another PM dump if the cmdobj fails * again on replay */ - set_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy); + set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy); /* * A hardware fault generally means something was deterministically - * wrong with the drawobj - no point in trying to replay it + * wrong with the cmdobj - no point in trying to replay it * Clear the replay bit and move on to the next policy level */ if (fault & ADRENO_HARD_FAULT) - 
clear_bit(KGSL_FT_REPLAY, &(drawobj->fault_policy)); + clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy)); /* * A timeout fault means the IB timed out - clear the policy and * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay - * because we won't see this drawobj again + * because we won't see this cmdobj again */ if (fault & ADRENO_TIMEOUT_FAULT) - bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); /* * If the context had a GPU page fault then it is likely it would fault @@ -1521,68 +1677,69 @@ static void process_drawobj_fault(struct kgsl_device *device, if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &drawobj->context->priv)) { /* we'll need to resume the mmu later... */ - clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy); + clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy); clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &drawobj->context->priv); } /* - * Execute the fault tolerance policy. Each drawobj stores the + * Execute the fault tolerance policy. Each cmdobj stores the * current fault policy that was set when it was queued. * As the options are tried in descending priority * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared - * from the drawobj policy so the next thing can be tried if the + * from the cmdobj policy so the next thing can be tried if the * change comes around again */ - /* Replay the hanging drawobj again */ - if (test_and_clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy)) { - trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_REPLAY)); - set_bit(KGSL_FT_REPLAY, &drawobj->fault_recovery); + /* Replay the hanging cmdobj again */ + if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY)); + set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery); return; } /* * Skip the last IB1 that was played but replay everything else. 
- * Note that the last IB1 might not be in the "hung" drawobj + * Note that the last IB1 might not be in the "hung" cmdobj * because the CP may have caused a page-fault while it was prefetching * the next IB1/IB2. walk all outstanding commands and zap the * supposedly bad IB1 where ever it lurks. */ - if (test_and_clear_bit(KGSL_FT_SKIPIB, &drawobj->fault_policy)) { - trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPIB)); - set_bit(KGSL_FT_SKIPIB, &drawobj->fault_recovery); + if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB)); + set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery); for (i = 0; i < count; i++) { if (replay[i] != NULL && - replay[i]->context->id == drawobj->context->id) - drawobj_skip_ib(replay[i], base); + DRAWOBJ(replay[i])->context->id == + drawobj->context->id) + _skip_ib(replay[i], base); } return; } - /* Skip the faulted drawobj submission */ - if (test_and_clear_bit(KGSL_FT_SKIPCMD, &drawobj->fault_policy)) { - trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPCMD)); + /* Skip the faulted cmdobj submission */ + if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD)); - /* Skip faulting drawobj */ - drawobj_skip_cmd(drawobj, replay, count); + /* Skip faulting cmdobj */ + _skip_cmd(cmdobj, replay, count); return; } - if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_policy)) { - trace_adreno_cmdbatch_recovery(drawobj, + if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPFRAME)); - set_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_recovery); + set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery); /* - * Skip all the pending drawobjs for this context until + * Skip all the pending cmdobj's for this context until * the EOF frame is seen */ - drawobj_skip_frame(drawobj, replay, count); + _skip_frame(cmdobj, replay, 
count); return; } @@ -1612,7 +1769,7 @@ static void recover_dispatch_q(struct kgsl_device *device, unsigned int base) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct kgsl_drawobj **replay = NULL; + struct kgsl_drawobj_cmd **replay; unsigned int ptr; int first = 0; int count = 0; @@ -1626,12 +1783,13 @@ static void recover_dispatch_q(struct kgsl_device *device, /* Recovery failed - mark everybody on this q guilty */ while (ptr != dispatch_q->tail) { - struct kgsl_context *context = - dispatch_q->cmd_q[ptr]->context; + struct kgsl_drawobj_cmd *cmdobj = + dispatch_q->cmd_q[ptr]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - mark_guilty_context(device, context->id); - adreno_drawctxt_invalidate(device, context); - kgsl_drawobj_destroy(dispatch_q->cmd_q[ptr]); + mark_guilty_context(device, drawobj->context->id); + adreno_drawctxt_invalidate(device, drawobj->context); + kgsl_drawobj_destroy(drawobj); ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE); @@ -1646,7 +1804,7 @@ static void recover_dispatch_q(struct kgsl_device *device, goto replay; } - /* Copy the inflight drawobjs into the temporary storage */ + /* Copy the inflight cmdobj's into the temporary storage */ ptr = dispatch_q->head; while (ptr != dispatch_q->tail) { @@ -1655,13 +1813,13 @@ static void recover_dispatch_q(struct kgsl_device *device, } if (fault && count) - process_drawobj_fault(device, replay, + process_cmdobj_fault(device, replay, count, base, fault); replay: dispatch_q->inflight = 0; dispatch_q->head = dispatch_q->tail = 0; - /* Remove any pending drawobjs that have been invalidated */ - remove_invalidated_drawobjs(device, replay, count); + /* Remove any pending cmdobj's that have been invalidated */ + remove_invalidated_cmdobjs(device, replay, count); /* Replay the pending command buffers */ for (i = 0; i < count; i++) { @@ -1677,16 +1835,16 @@ replay: */ if (first == 0) { - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, 
&replay[i]->priv); first = 1; } /* - * Force each drawobj to wait for idle - this avoids weird + * Force each cmdobj to wait for idle - this avoids weird * CP parse issues */ - set_bit(DRAWOBJ_FLAG_WFI, &replay[i]->priv); + set_bit(CMDOBJ_WFI, &replay[i]->priv); ret = sendcmd(adreno_dev, replay[i]); @@ -1696,15 +1854,18 @@ replay: */ if (ret) { - pr_context(device, replay[i]->context, + pr_context(device, replay[i]->base.context, "gpu reset failed ctx %d ts %d\n", - replay[i]->context->id, replay[i]->timestamp); + replay[i]->base.context->id, + replay[i]->base.timestamp); /* Mark this context as guilty (failed recovery) */ - mark_guilty_context(device, replay[i]->context->id); + mark_guilty_context(device, + replay[i]->base.context->id); - adreno_drawctxt_invalidate(device, replay[i]->context); - remove_invalidated_drawobjs(device, &replay[i], + adreno_drawctxt_invalidate(device, + replay[i]->base.context); + remove_invalidated_cmdobjs(device, &replay[i], count - i); } } @@ -1716,21 +1877,23 @@ replay: } static void do_header_and_snapshot(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj) + struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj) { + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + /* Always dump the snapshot on a non-drawobj failure */ - if (drawobj == NULL) { + if (cmdobj == NULL) { adreno_fault_header(device, rb, NULL); kgsl_device_snapshot(device, NULL); return; } /* Skip everything if the PMDUMP flag is set */ - if (test_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy)) + if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy)) return; /* Print the fault header */ - adreno_fault_header(device, rb, drawobj); + adreno_fault_header(device, rb, cmdobj); if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) kgsl_device_snapshot(device, drawobj->context); @@ -1745,7 +1908,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) struct adreno_ringbuffer *hung_rb = NULL; unsigned int 
reg; uint64_t base; - struct kgsl_drawobj *drawobj = NULL; + struct kgsl_drawobj_cmd *cmdobj = NULL; int ret, i; int fault; int halt; @@ -1795,7 +1958,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg); } /* - * retire drawobj's from all the dispatch_q's before starting recovery + * retire cmdobj's from all the dispatch_q's before starting recovery */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { adreno_dispatch_retire_drawqueue(adreno_dev, @@ -1818,14 +1981,14 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) } if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) { - drawobj = dispatch_q->cmd_q[dispatch_q->head]; - trace_adreno_cmdbatch_fault(drawobj, fault); + cmdobj = dispatch_q->cmd_q[dispatch_q->head]; + trace_adreno_cmdbatch_fault(cmdobj, fault); } adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, ADRENO_REG_CP_IB1_BASE_HI, &base); - do_header_and_snapshot(device, hung_rb, drawobj); + do_header_and_snapshot(device, hung_rb, cmdobj); /* Terminate the stalled transaction and resume the IOMMU */ if (fault & ADRENO_IOMMU_PAGE_FAULT) @@ -1887,15 +2050,16 @@ static inline int drawobj_consumed(struct kgsl_drawobj *drawobj, } static void _print_recovery(struct kgsl_device *device, - struct kgsl_drawobj *drawobj) + struct kgsl_drawobj_cmd *cmdobj) { static struct { unsigned int mask; const char *str; } flags[] = { ADRENO_FT_TYPES }; - int i, nr = find_first_bit(&drawobj->fault_recovery, BITS_PER_LONG); + int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG); char *result = "unknown"; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); for (i = 0; i < ARRAY_SIZE(flags); i++) { if (flags[i].mask == BIT(nr)) { @@ -1907,37 +2071,38 @@ static void _print_recovery(struct kgsl_device *device, pr_context(device, drawobj->context, "gpu %s ctx %d ts %d policy %lX\n", result, drawobj->context->id, drawobj->timestamp, - drawobj->fault_recovery); + cmdobj->fault_recovery); } 
-static void drawobj_profile_ticks(struct adreno_device *adreno_dev, - struct kgsl_drawobj *drawobj, uint64_t *start, uint64_t *retire) +static void cmdobj_profile_ticks(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire) { void *ptr = adreno_dev->profile_buffer.hostptr; struct adreno_drawobj_profile_entry *entry; entry = (struct adreno_drawobj_profile_entry *) - (ptr + (drawobj->profile_index * sizeof(*entry))); + (ptr + (cmdobj->profile_index * sizeof(*entry))); rmb(); *start = entry->started; *retire = entry->retired; } -static void retire_drawobj(struct adreno_device *adreno_dev, - struct kgsl_drawobj *drawobj) +static void retire_cmdobj(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj) { struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); uint64_t start = 0, end = 0; - if (drawobj->fault_recovery != 0) { + if (cmdobj->fault_recovery != 0) { set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv); - _print_recovery(KGSL_DEVICE(adreno_dev), drawobj); + _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj); } - if (test_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv)) - drawobj_profile_ticks(adreno_dev, drawobj, &start, &end); + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) + cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end); /* * For A3xx we still get the rptr from the CP_RB_RPTR instead of @@ -1946,16 +2111,16 @@ static void retire_drawobj(struct adreno_device *adreno_dev, */ if (adreno_is_a3xx(adreno_dev)) trace_adreno_cmdbatch_retired(drawobj, - (int) dispatcher->inflight, start, end, - ADRENO_DRAWOBJ_RB(drawobj), 0); + (int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery); else trace_adreno_cmdbatch_retired(drawobj, - (int) dispatcher->inflight, start, end, - ADRENO_DRAWOBJ_RB(drawobj), - adreno_get_rptr(drawctxt->rb)); + 
(int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), + adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery); drawctxt->submit_retire_ticks[drawctxt->ticks_index] = - end - drawobj->submit_ticks; + end - cmdobj->submit_ticks; drawctxt->ticks_index = (drawctxt->ticks_index + 1) % SUBMIT_RETIRE_TICKS_SIZE; @@ -1971,14 +2136,15 @@ static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, int count = 0; while (!adreno_drawqueue_is_empty(drawqueue)) { - struct kgsl_drawobj *drawobj = + struct kgsl_drawobj_cmd *cmdobj = drawqueue->cmd_q[drawqueue->head]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); if (!kgsl_check_timestamp(device, drawobj->context, drawobj->timestamp)) break; - retire_drawobj(adreno_dev, drawobj); + retire_cmdobj(adreno_dev, cmdobj); dispatcher->inflight--; drawqueue->inflight--; @@ -1998,7 +2164,8 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - struct kgsl_drawobj *drawobj = drawqueue->cmd_q[drawqueue->head]; + struct kgsl_drawobj *drawobj = + DRAWOBJ(drawqueue->cmd_q[drawqueue->head]); /* Don't timeout if the timer hasn't expired yet (duh) */ if (time_is_after_jiffies(drawqueue->expires)) @@ -2181,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device, } /* - * This is called on a regular basis while drawobjs are inflight. Fault + * This is called on a regular basis while cmdobj's are inflight. Fault * detection registers are read and compared to the existing values - if they * changed then the GPU is still running. 
If they are the same between * subsequent calls then the GPU may have faulted @@ -2274,7 +2441,7 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev) &(rb->dispatch_q); while (!adreno_drawqueue_is_empty(dispatch_q)) { kgsl_drawobj_destroy( - dispatch_q->cmd_q[dispatch_q->head]); + DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head])); dispatch_q->head = (dispatch_q->head + 1) % ADRENO_DISPATCH_DRAWQUEUE_SIZE; } diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index 8386129bc143..cb9106fedc82 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -50,7 +50,7 @@ enum adreno_dispatcher_starve_timer_states { /** * struct adreno_dispatcher_drawqueue - List of commands for a RB level - * @cmd_q: List of drawobjs submitted to dispatcher + * @cmd_q: List of command obj's submitted to dispatcher * @inflight: Number of commands inflight in this q * @head: Head pointer to the q * @tail: Queues tail pointer @@ -58,7 +58,7 @@ enum adreno_dispatcher_starve_timer_states { * @expires: The jiffies value at which this drawqueue has run too long */ struct adreno_dispatcher_drawqueue { - struct kgsl_drawobj *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE]; + struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE]; unsigned int inflight; unsigned int head; unsigned int tail; @@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev); void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev); void adreno_dispatcher_stop(struct adreno_device *adreno_dev); -int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, - uint32_t *timestamp); +int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void adreno_dispatcher_schedule(struct kgsl_device *device); void adreno_dispatcher_pause(struct 
adreno_device *adreno_dev); diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index c3709efb3c67..f36497aa595b 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire); /* - * We may have drawobj timer running, which also uses same + * We may have kgsl sync obj timer running, which also uses same * lock, take a lock with software interrupt disabled (bh) * to avoid spin lock recursion. * * Use Spin trylock because dispatcher can acquire drawctxt->lock * if context is pending and the fence it is waiting on just got * signalled. Dispatcher acquires drawctxt->lock and tries to - * delete the drawobj timer using del_timer_sync(). + * delete the sync obj timer using del_timer_sync(). * del_timer_sync() waits till timer and its pending handlers * are deleted. But if the timer expires at the same time, * timer handler could be waiting on drawctxt->lock leading to a @@ -87,19 +87,23 @@ void adreno_drawctxt_dump(struct kgsl_device *device, struct kgsl_drawobj *drawobj = drawctxt->drawqueue[drawctxt->drawqueue_head]; - if (test_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv)) { + if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) { dev_err(device->dev, " possible deadlock. 
Context %d might be blocked for itself\n", context->id); goto stats; } - if (kgsl_drawobj_events_pending(drawobj)) { - dev_err(device->dev, - " context[%d] (ts=%d) Active sync points:\n", - context->id, drawobj->timestamp); + if (drawobj->type == SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); + + if (kgsl_drawobj_events_pending(syncobj)) { + dev_err(device->dev, + " context[%d] (ts=%d) Active sync points:\n", + context->id, drawobj->timestamp); - kgsl_dump_syncpoints(device, drawobj); + kgsl_dump_syncpoints(device, syncobj); + } } } diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index 76b4e8ce63c3..0578f16ae9e1 100644 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -41,12 +41,12 @@ struct kgsl_context; * @wq: Workqueue structure for contexts to sleep pending room in the queue * @waiting: Workqueue structure for contexts waiting for a timestamp or event * @queued: Number of commands queued in the drawqueue - * @fault_policy: GFT fault policy set in drawobj_skip_cmd(); + * @fault_policy: GFT fault policy set in _skip_cmd(); * @debug_root: debugfs entry for this context. * @queued_timestamp: The last timestamp that was queued on this context * @rb: The ringbuffer in which this context submits commands. * @submitted_timestamp: The last timestamp that was submitted for this context - * @submit_retire_ticks: Array to hold drawobj execution times from submit + * @submit_retire_ticks: Array to hold command obj execution times from submit * to retire * @ticks_index: The index into submit_retire_ticks[] where the new delta will * be written. @@ -93,8 +93,9 @@ struct adreno_context { * @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame * marker. * @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission. 
- * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj is skipped during + * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj's skipped during fault tolerance. + * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context. */ enum adreno_context_priv { ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC, @@ -103,6 +104,7 @@ enum adreno_context_priv { ADRENO_CONTEXT_SKIP_EOF, ADRENO_CONTEXT_FORCE_PREAMBLE, ADRENO_CONTEXT_SKIP_CMD, + ADRENO_CONTEXT_FENCE_LOG, }; /* Flags for adreno_drawctxt_switch() */ diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index de84aad4668c..fc0602a60ac1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -671,85 +671,6 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb, sizedwords, 0, NULL); } -/** - * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit - * @device: The kgsl device pointer - * @ibdesc: Pointer to the IB descriptor - */ -static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_memobj_node *ib) -{ - struct kgsl_device *device = dev_priv->device; - struct kgsl_process_private *private = dev_priv->process_priv; - - /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */ - if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) { - pr_context(device, context, "ctxt %d invalid ib size %lld\n", - context->id, ib->size); - return false; - } - - /* Make sure that the address is mapped */ - if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) { - pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n", - context->id, ib->gpuaddr); - return false; - } - - return true; -} - -int -adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, - struct kgsl_drawobj *drawobj, - uint32_t *timestamp) -{ - struct kgsl_device *device = dev_priv->device; - struct adreno_device *adreno_dev = 
ADRENO_DEVICE(device); - struct adreno_context *drawctxt = ADRENO_CONTEXT(context); - struct kgsl_memobj_node *ib; - int ret; - - if (kgsl_context_invalid(context)) - return -EDEADLK; - - /* Verify the IBs before they get queued */ - list_for_each_entry(ib, &drawobj->cmdlist, node) - if (_ringbuffer_verify_ib(dev_priv, context, ib) == false) - return -EINVAL; - - /* wait for the suspend gate */ - wait_for_completion(&device->halt_gate); - - /* - * Clear the wake on touch bit to indicate an IB has been - * submitted since the last time we set it. But only clear - * it when we have rendering commands. - */ - if (!(drawobj->flags & KGSL_DRAWOBJ_MARKER) - && !(drawobj->flags & KGSL_DRAWOBJ_SYNC)) - device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; - - /* A3XX does not have support for drawobj profiling */ - if (adreno_is_a3xx(adreno_dev) && - (drawobj->flags & KGSL_DRAWOBJ_PROFILING)) - return -EOPNOTSUPP; - - /* Queue the command in the ringbuffer */ - ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, drawobj, - timestamp); - - /* - * Return -EPROTO if the device has faulted since the last time we - * checked - userspace uses this to perform post-fault activities - */ - if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv)) - ret = -EPROTO; - - return ret; -} - static void adreno_ringbuffer_set_constraint(struct kgsl_device *device, struct kgsl_drawobj *drawobj) { @@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev, /* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_drawobj *drawobj, struct adreno_submit_time *time) + struct kgsl_drawobj_cmd *cmdobj, + struct adreno_submit_time *time) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int 
*link; @@ -803,8 +726,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_context *context; struct adreno_context *drawctxt; bool use_preamble = true; - bool drawobj_user_profiling = false; - bool drawobj_kernel_profiling = false; + bool user_profiling = false; + bool kernel_profiling = false; int flags = KGSL_CMD_FLAGS_NONE; int ret; struct adreno_ringbuffer *rb; @@ -812,16 +735,16 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, unsigned int dwords = 0; struct adreno_submit_time local; - struct kgsl_mem_entry *entry = drawobj->profiling_buf_entry; + struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry; if (entry) profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc, - drawobj->profiling_buffer_gpuaddr); + cmdobj->profiling_buffer_gpuaddr); context = drawobj->context; drawctxt = ADRENO_CONTEXT(context); /* Get the total IBs in the list */ - list_for_each_entry(ib, &drawobj->cmdlist, node) + list_for_each_entry(ib, &cmdobj->cmdlist, node) numibs++; rb = drawctxt->rb; @@ -838,11 +761,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * c) force preamble for commandbatch */ if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) && - (!test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv))) { + (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) { - set_bit(KGSL_FT_SKIPCMD, &drawobj->fault_recovery); - drawobj->fault_policy = drawctxt->fault_policy; - set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv); + set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery); + cmdobj->fault_policy = drawctxt->fault_policy; + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); /* if context is detached print fault recovery */ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj); @@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if a context switch hasn't occured */ if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) && - !test_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv) && + 
!test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) && (rb->drawctxt_active == drawctxt)) use_preamble = false; @@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * the accounting sane. Set start_index and numibs to 0 to just * generate the start and end markers and skip everything else */ - if (test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv)) { + if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) { use_preamble = false; numibs = 0; } @@ -886,7 +809,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if (drawobj->flags & KGSL_DRAWOBJ_PROFILING && !adreno_is_a3xx(adreno_dev) && profile_buffer) { - drawobj_user_profiling = true; + user_profiling = true; dwords += 6; /* @@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, time = &local; } - if (test_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv)) { - drawobj_kernel_profiling = true; + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) { + kernel_profiling = true; dwords += 6; if (adreno_is_a5xx(adreno_dev)) dwords += 2; @@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, *cmds++ = cp_packet(adreno_dev, CP_NOP, 1); *cmds++ = KGSL_START_OF_IB_IDENTIFIER; - if (drawobj_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, adreno_dev->profile_buffer.gpuaddr + - ADRENO_DRAWOBJ_PROFILE_OFFSET(drawobj->profile_index, + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, started)); } /* - * Add cmds to read the GPU ticks at the start of the drawobj and - * write it into the appropriate drawobj profiling buffer offset + * Add cmds to read the GPU ticks at the start of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (drawobj_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - drawobj->profiling_buffer_gpuaddr + + cmdobj->profiling_buffer_gpuaddr + offsetof(struct kgsl_drawobj_profiling_buffer, 
gpu_ticks_submitted)); } if (numibs) { - list_for_each_entry(ib, &drawobj->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { /* * Skip 0 sized IBs - these are presumed to have been * removed from consideration by the FT policy @@ -972,20 +895,20 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, adreno_is_preemption_enabled(adreno_dev)) cmds += gpudev->preemption_yield_enable(cmds); - if (drawobj_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, adreno_dev->profile_buffer.gpuaddr + - ADRENO_DRAWOBJ_PROFILE_OFFSET(drawobj->profile_index, + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, retired)); } /* - * Add cmds to read the GPU ticks at the end of the drawobj and - * write it into the appropriate drawobj profiling buffer offset + * Add cmds to read the GPU ticks at the end of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (drawobj_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - drawobj->profiling_buffer_gpuaddr + + cmdobj->profiling_buffer_gpuaddr + offsetof(struct kgsl_drawobj_profiling_buffer, gpu_ticks_retired)); } @@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, goto done; } - if (test_bit(DRAWOBJ_FLAG_WFI, &drawobj->priv)) + if (test_bit(CMDOBJ_WFI, &cmdobj->priv)) flags = KGSL_CMD_FLAGS_WFI; /* @@ -1028,7 +951,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, adreno_ringbuffer_set_constraint(device, drawobj); /* CFF stuff executed only if CFF is enabled */ - kgsl_cffdump_capture_ib_desc(device, context, drawobj); + kgsl_cffdump_capture_ib_desc(device, context, cmdobj); ret = adreno_ringbuffer_addcmds(rb, flags, @@ -1036,10 +959,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, drawobj->timestamp, time); if (!ret) { - drawobj->global_ts = drawctxt->internal_timestamp; + cmdobj->global_ts = 
drawctxt->internal_timestamp; /* Put the timevalues in the profiling buffer */ - if (drawobj_user_profiling) { + if (user_profiling) { /* * Return kernel clock time to the the client * if requested @@ -1069,8 +992,7 @@ done: kgsl_memdesc_unmap(&entry->memdesc); - trace_kgsl_issueibcmds(device, context->id, drawobj, - numibs, drawobj->timestamp, + trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp, drawobj->flags, ret, drawctxt->type); kfree(link); diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index 3cf4c23bd4c8..63374af1e3f7 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -140,7 +140,7 @@ int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, uint32_t *timestamp); int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_drawobj *drawobj, + struct kgsl_drawobj_cmd *cmdobj, struct adreno_submit_time *time); int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt); diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index e021efcd2676..16ca0980cfbe 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -100,8 +100,9 @@ TRACE_EVENT(adreno_cmdbatch_submitted, TRACE_EVENT(adreno_cmdbatch_retired, TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t start, uint64_t retire, - struct adreno_ringbuffer *rb, unsigned int rptr), - TP_ARGS(drawobj, inflight, start, retire, rb, rptr), + struct adreno_ringbuffer *rb, unsigned int rptr, + unsigned long fault_recovery), + TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -115,12 +116,13 @@ TRACE_EVENT(adreno_cmdbatch_retired, __field(unsigned int, rptr) __field(unsigned int, wptr) __field(int, q_inflight) + __field(unsigned long, fault_recovery) ), TP_fast_assign( __entry->id = drawobj->context->id; 
__entry->timestamp = drawobj->timestamp; __entry->inflight = inflight; - __entry->recovery = drawobj->fault_recovery; + __entry->recovery = fault_recovery; __entry->flags = drawobj->flags; __entry->start = start; __entry->retire = retire; @@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired, ); TRACE_EVENT(adreno_cmdbatch_fault, - TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int fault), - TP_ARGS(drawobj, fault), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault), + TP_ARGS(cmdobj, fault), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, fault) ), TP_fast_assign( - __entry->id = drawobj->context->id; - __entry->timestamp = drawobj->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->fault = fault; ), TP_printk( @@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault, ); TRACE_EVENT(adreno_cmdbatch_recovery, - TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int action), - TP_ARGS(drawobj, action), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action), + TP_ARGS(cmdobj, action), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, action) ), TP_fast_assign( - __entry->id = drawobj->context->id; - __entry->timestamp = drawobj->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->action = action; ), TP_printk( diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 70eae9f27991..add4590bbb90 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1497,7 +1497,8 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_ringbuffer_issueibcmds *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_drawobj *drawobj = NULL; + struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_cmd *cmdobj; long result = -EINVAL; /* The legacy functions 
don't support synchronization commands */ @@ -1514,15 +1515,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, if (context == NULL) return -EINVAL; - /* Create a drawobj */ - drawobj = kgsl_drawobj_create(device, context, param->flags); - if (IS_ERR(drawobj)) { + cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags, + CMDOBJ_TYPE); + if (IS_ERR(cmdobj)) { kgsl_context_put(context); - return PTR_ERR(drawobj); + return PTR_ERR(cmdobj); } + drawobj = DRAWOBJ(cmdobj); + if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST) - result = kgsl_drawobj_add_ibdesc_list(device, drawobj, + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, (void __user *) param->ibdesc_addr, param->numibs); else { @@ -1533,12 +1536,12 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, ibdesc.sizedwords = param->numibs; ibdesc.ctrl = 0; - result = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc); + result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); } if (result == 0) - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - drawobj, ¶m->timestamp); + result = dev_priv->device->ftbl->queue_cmds(dev_priv, context, + &drawobj, 1, ¶m->timestamp); /* * -EPROTO is a "success" error - it just tells the user that the @@ -1551,59 +1554,101 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, return result; } +/* Returns 0 on failure. Returns command type(s) on success */ +static unsigned int _process_command_input(struct kgsl_device *device, + unsigned int flags, unsigned int numcmds, + unsigned int numobjs, unsigned int numsyncs) +{ + if (numcmds > KGSL_MAX_NUMIBS || + numobjs > KGSL_MAX_NUMIBS || + numsyncs > KGSL_MAX_SYNCPOINTS) + return 0; + + /* + * The SYNC bit is supposed to identify a dummy sync object + * so warn the user if they specified any IBs with it. + * A MARKER command can either have IBs or not but if the + * command has 0 IBs it is automatically assumed to be a marker. 
+ */ + + /* If they specify the flag, go with what they say */ + if (flags & KGSL_DRAWOBJ_MARKER) + return MARKEROBJ_TYPE; + else if (flags & KGSL_DRAWOBJ_SYNC) + return SYNCOBJ_TYPE; + + /* If not, deduce what they meant */ + if (numsyncs && numcmds) + return SYNCOBJ_TYPE | CMDOBJ_TYPE; + else if (numsyncs) + return SYNCOBJ_TYPE; + else if (numcmds) + return CMDOBJ_TYPE; + else if (numcmds == 0) + return MARKEROBJ_TYPE; + + return 0; +} + long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_submit_commands *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_drawobj *drawobj = NULL; - long result = -EINVAL; - - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. - */ - - if ((param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. 
They will be ignored\n"); - else if (!(param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds == 0) - param->flags |= KGSL_DRAWOBJ_MARKER; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - if (param->numcmds > KGSL_MAX_NUMIBS || - param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, 0, + param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - /* Create a drawobj */ - drawobj = kgsl_drawobj_create(device, context, param->flags); - if (IS_ERR(drawobj)) { - result = PTR_ERR(drawobj); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_syncpoints(device, syncobj, + param->synclist, param->numsyncs); + if (result) + goto done; } - result = kgsl_drawobj_add_ibdesc_list(device, drawobj, - param->cmdlist, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_drawobj_add_syncpoints(device, drawobj, - param->synclist, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); + + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, + param->cmdlist, param->numcmds); + if (result) + goto done; - /* If no profiling buffer was specified, clear the flag */ - if (drawobj->profiling_buf_entry == NULL) - drawobj->flags &= ~KGSL_DRAWOBJ_PROFILING; + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } - result = 
dev_priv->device->ftbl->issueibcmds(dev_priv, context, - drawobj, ¶m->timestamp); + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, ¶m->timestamp); done: /* @@ -1611,7 +1656,9 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - kgsl_drawobj_destroy(drawobj); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); + kgsl_context_put(context); return result; @@ -1623,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, struct kgsl_gpu_command *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_drawobj *drawobj = NULL; - - long result = -EINVAL; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - if ((param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. 
They will be ignored\n"); - else if (!(param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds == 0) - param->flags |= KGSL_DRAWOBJ_MARKER; - - /* Make sure that the memobj and syncpoint count isn't too big */ - if (param->numcmds > KGSL_MAX_NUMIBS || - param->numobjs > KGSL_MAX_NUMIBS || - param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, + param->numobjs, param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - drawobj = kgsl_drawobj_create(device, context, param->flags); - if (IS_ERR(drawobj)) { - result = PTR_ERR(drawobj); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_synclist(device, syncobj, + to_user_ptr(param->synclist), + param->syncsize, param->numsyncs); + if (result) + goto done; } - result = kgsl_drawobj_add_cmdlist(device, drawobj, - to_user_ptr(param->cmdlist), - param->cmdsize, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); - result = kgsl_drawobj_add_memlist(device, drawobj, - to_user_ptr(param->objlist), - param->objsize, param->numobjs); - if (result) - goto done; + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_drawobj_add_synclist(device, drawobj, - to_user_ptr(param->synclist), - param->syncsize, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); - /* If no profiling buffer was specified, clear the flag */ - if (drawobj->profiling_buf_entry == NULL) - drawobj->flags &= ~KGSL_DRAWOBJ_PROFILING; + result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj, + 
to_user_ptr(param->cmdlist), + param->cmdsize, param->numcmds); + if (result) + goto done; - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - drawobj, ¶m->timestamp); + result = kgsl_drawobj_cmd_add_memlist(device, cmdobj, + to_user_ptr(param->objlist), + param->objsize, param->numobjs); + if (result) + goto done; + + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } + + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, ¶m->timestamp); done: /* @@ -1687,7 +1740,8 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - kgsl_drawobj_destroy(drawobj); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); kgsl_context_put(context); return result; diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c index 62bb0146ba2e..3337570477f9 100644 --- a/drivers/gpu/msm/kgsl_cffdump.c +++ b/drivers/gpu/msm/kgsl_cffdump.c @@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device, */ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_drawobj *drawobj) + struct kgsl_drawobj_cmd *cmdobj) { int ret = 0; struct kgsl_memobj_node *ib; @@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, if (!device->cff_dump_enable) return 0; /* Dump CFF for IB and all objects in it */ - list_for_each_entry(ib, &drawobj->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { ret = kgsl_cffdump_capture_adreno_ib_cff( device, context->proc_priv, ib->gpuaddr, ib->size >> 2); diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h index 928cde1c8771..14bc397cb570 100644 --- a/drivers/gpu/msm/kgsl_cffdump.h +++ b/drivers/gpu/msm/kgsl_cffdump.h @@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val); int kgsl_cff_dump_enable_get(void *data, u64 *val); int 
kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_drawobj *drawobj); + struct kgsl_drawobj_cmd *cmdobj); void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2, uint op3, uint op4, uint op5); @@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device, static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_drawobj *drawobj) + struct kgsl_drawobj_cmd *cmdobj) { return 0; } diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index ef07a8594870..3307e0232bc4 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -127,9 +127,9 @@ struct kgsl_functable { unsigned int msecs); int (*readtimestamp) (struct kgsl_device *device, void *priv, enum kgsl_timestamp_type type, unsigned int *timestamp); - int (*issueibcmds) (struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_drawobj *drawobj, - uint32_t *timestamps); + int (*queue_cmds)(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void (*power_stats)(struct kgsl_device *device, struct kgsl_power_stats *stats); unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid); @@ -184,7 +184,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg, /** * struct kgsl_memobj_node - Memory object descriptor - * @node: Local list node for the drawobj + * @node: Local list node for the object * @id: GPU memory ID for the object * offset: Offset within the object * @gpuaddr: GPU address for the object diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index e19859945fa8..7840daa6a3e2 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -42,26 +42,43 @@ */ static struct kmem_cache *memobjs_cache; -/** - * 
kgsl_drawobj_put() - Decrement the refcount for a drawobj object - * @drawobj: Pointer to the drawobj object - */ -static inline void kgsl_drawobj_put(struct kgsl_drawobj *drawobj) +static void drawobj_destroy_object(struct kref *kref) +{ + struct kgsl_drawobj *drawobj = container_of(kref, + struct kgsl_drawobj, refcount); + struct kgsl_drawobj_sync *syncobj; + + kgsl_context_put(drawobj->context); + + switch (drawobj->type) { + case SYNCOBJ_TYPE: + syncobj = SYNCOBJ(drawobj); + kfree(syncobj->synclist); + kfree(syncobj); + break; + case CMDOBJ_TYPE: + case MARKEROBJ_TYPE: + kfree(CMDOBJ(drawobj)); + break; + } +} + +static inline void drawobj_put(struct kgsl_drawobj *drawobj) { if (drawobj) - kref_put(&drawobj->refcount, kgsl_drawobj_destroy_object); + kref_put(&drawobj->refcount, drawobj_destroy_object); } void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_drawobj *drawobj) + struct kgsl_drawobj_sync *syncobj) { struct kgsl_drawobj_sync_event *event; unsigned int i; - for (i = 0; i < drawobj->numsyncs; i++) { - event = &drawobj->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_drawobj_event_pending(drawobj, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -90,14 +107,15 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, } } -static void _kgsl_drawobj_timer(unsigned long data) +static void syncobj_timer(unsigned long data) { struct kgsl_device *device; - struct kgsl_drawobj *drawobj = (struct kgsl_drawobj *) data; + struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; unsigned int i; - if (drawobj == NULL || drawobj->context == NULL) + if (syncobj == NULL || drawobj->context == NULL) return; device = drawobj->context->device; @@ -106,16 +124,16 @@ static void _kgsl_drawobj_timer(unsigned long data) "kgsl: possible gpu syncpoint deadlock for 
context %d timestamp %d\n", drawobj->context->id, drawobj->timestamp); - set_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv); + set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); kgsl_context_dump(drawobj->context); - clear_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv); + clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); dev_err(device->dev, " pending events:\n"); - for (i = 0; i < drawobj->numsyncs; i++) { - event = &drawobj->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_drawobj_event_pending(drawobj, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -137,48 +155,31 @@ static void _kgsl_drawobj_timer(unsigned long data) dev_err(device->dev, "--gpu syncpoint deadlock print end--\n"); } -/** - * kgsl_drawobj_destroy_object() - Destroy a drawobj object - * @kref: Pointer to the kref structure for this object - * - * Actually destroy a drawobj object. Called from kgsl_drawobj_put - */ -void kgsl_drawobj_destroy_object(struct kref *kref) -{ - struct kgsl_drawobj *drawobj = container_of(kref, - struct kgsl_drawobj, refcount); - - kgsl_context_put(drawobj->context); - - kfree(drawobj->synclist); - kfree(drawobj); -} -EXPORT_SYMBOL(kgsl_drawobj_destroy_object); - /* * a generic function to retire a pending sync event and (possibly) * kick the dispatcher */ -static void kgsl_drawobj_sync_expire(struct kgsl_device *device, +static void drawobj_sync_expire(struct kgsl_device *device, struct kgsl_drawobj_sync_event *event) { + struct kgsl_drawobj_sync *syncobj = event->syncobj; /* * Clear the event from the pending mask - if it is already clear, then * leave without doing anything useful */ - if (!test_and_clear_bit(event->id, &event->drawobj->pending)) + if (!test_and_clear_bit(event->id, &syncobj->pending)) return; /* * If no more pending events, delete the timer and schedule the command * for dispatch */ - if (!kgsl_drawobj_events_pending(event->drawobj)) { - 
del_timer_sync(&event->drawobj->timer); + if (!kgsl_drawobj_events_pending(event->syncobj)) { + del_timer_sync(&syncobj->timer); if (device->ftbl->drawctxt_sched) device->ftbl->drawctxt_sched(device, - event->drawobj->context); + event->syncobj->base.context); } } @@ -186,20 +187,20 @@ static void kgsl_drawobj_sync_expire(struct kgsl_device *device, * This function is called by the GPU event when the sync event timestamp * expires */ -static void kgsl_drawobj_sync_func(struct kgsl_device *device, +static void drawobj_sync_func(struct kgsl_device *device, struct kgsl_event_group *group, void *priv, int result) { struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_timestamp_expire(event->drawobj, + trace_syncpoint_timestamp_expire(event->syncobj, event->context, event->timestamp); - kgsl_drawobj_sync_expire(device, event); + drawobj_sync_expire(device, event); kgsl_context_put(event->context); - kgsl_drawobj_put(event->drawobj); + drawobj_put(&event->syncobj->base); } -static inline void _free_memobj_list(struct list_head *list) +static inline void memobj_list_free(struct list_head *list) { struct kgsl_memobj_node *mem, *tmpmem; @@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list) } } -/** - * kgsl_drawobj_destroy() - Destroy a drawobj structure - * @drawobj: Pointer to the drawobj object to destroy - * - * Start the process of destroying a drawobj. Cancel any pending events - * and decrement the refcount. Asynchronous events can still signal after - * kgsl_drawobj_destroy has returned. 
- */ -void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) +static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) { - unsigned int i; + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); unsigned long pending; - - if (IS_ERR_OR_NULL(drawobj)) - return; + unsigned int i; /* Zap the canary timer */ - del_timer_sync(&drawobj->timer); + del_timer_sync(&syncobj->timer); /* * Copy off the pending list and clear all pending events - this will * render any subsequent asynchronous callback harmless */ - bitmap_copy(&pending, &drawobj->pending, KGSL_MAX_SYNCPOINTS); - bitmap_zero(&drawobj->pending, KGSL_MAX_SYNCPOINTS); + bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS); + bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS); /* * Clear all pending events - this will render any subsequent async * callbacks harmless */ - - for (i = 0; i < drawobj->numsyncs; i++) { - struct kgsl_drawobj_sync_event *event = &drawobj->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i]; /* Don't do anything if the event has already expired */ if (!test_bit(i, &pending)) @@ -252,29 +242,16 @@ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: kgsl_cancel_event(drawobj->device, &event->context->events, event->timestamp, - kgsl_drawobj_sync_func, event); + drawobj_sync_func, event); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: if (kgsl_sync_fence_async_cancel(event->handle)) - kgsl_drawobj_put(drawobj); + drawobj_put(drawobj); break; } } /* - * Release the the refcount on the mem entry associated with the - * drawobj profiling buffer - */ - if (drawobj->flags & KGSL_DRAWOBJ_PROFILING) - kgsl_mem_entry_put(drawobj->profiling_buf_entry); - - /* Destroy the cmdlist we created */ - _free_memobj_list(&drawobj->cmdlist); - - /* Destroy the memlist we created */ - _free_memobj_list(&drawobj->memlist); - - /* * If we cancelled an event, there's a good chance that the 
context is * on a dispatcher queue, so schedule to get it removed. */ @@ -283,92 +260,130 @@ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) drawobj->device->ftbl->drawctxt_sched(drawobj->device, drawobj->context); - kgsl_drawobj_put(drawobj); +} + +static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj) +{ + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); + + /* + * Release the refcount on the mem entry associated with the + * ib profiling buffer + */ + if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING) + kgsl_mem_entry_put(cmdobj->profiling_buf_entry); + + /* Destroy the cmdlist we created */ + memobj_list_free(&cmdobj->cmdlist); + + /* Destroy the memlist we created */ + memobj_list_free(&cmdobj->memlist); +} + +/** + * kgsl_drawobj_destroy() - Destroy a kgsl object structure + * @obj: Pointer to the kgsl object to destroy + * + * Start the process of destroying a command batch. Cancel any pending events + * and decrement the refcount. Asynchronous events can still signal after + * kgsl_drawobj_destroy has returned. + */ +void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) +{ + if (!drawobj) + return; + + if (drawobj->type & SYNCOBJ_TYPE) + drawobj_destroy_sync(drawobj); + else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) + drawobj_destroy_cmd(drawobj); + else + return; + + drawobj_put(drawobj); } EXPORT_SYMBOL(kgsl_drawobj_destroy); -/* - * A callback that gets registered with kgsl_sync_fence_async_wait and is fired - * when a fence is expired - */ -static void kgsl_drawobj_sync_fence_func(void *priv) +static void drawobj_sync_fence_func(void *priv) { struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_fence_expire(event->drawobj, + trace_syncpoint_fence_expire(event->syncobj, event->handle ? 
event->handle->name : "unknown"); - kgsl_drawobj_sync_expire(event->device, event); + drawobj_sync_expire(event->device, event); - kgsl_drawobj_put(event->drawobj); + drawobj_put(&event->syncobj->base); } -/* kgsl_drawobj_add_sync_fence() - Add a new sync fence syncpoint +/* drawobj_add_sync_fence() - Add a new sync fence syncpoint * @device: KGSL device - * @drawobj: KGSL drawobj to add the sync point to + * @syncobj: KGSL sync obj to add the sync point to * @priv: Private structure passed by the user * - * Add a new fence sync syncpoint to the drawobj. + * Add a new fence sync syncpoint to the sync obj. */ -static int kgsl_drawobj_add_sync_fence(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void *priv) +static int drawobj_add_sync_fence(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_fence *sync = priv; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; unsigned int id; kref_get(&drawobj->refcount); - id = drawobj->numsyncs++; + id = syncobj->numsyncs++; - event = &drawobj->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE; - event->drawobj = drawobj; + event->syncobj = syncobj; event->device = device; event->context = NULL; - set_bit(event->id, &drawobj->pending); + set_bit(event->id, &syncobj->pending); event->handle = kgsl_sync_fence_async_wait(sync->fd, - kgsl_drawobj_sync_fence_func, event); + drawobj_sync_fence_func, event); if (IS_ERR_OR_NULL(event->handle)) { int ret = PTR_ERR(event->handle); - clear_bit(event->id, &drawobj->pending); + clear_bit(event->id, &syncobj->pending); event->handle = NULL; - kgsl_drawobj_put(drawobj); + drawobj_put(drawobj); /* * If ret == 0 the fence was already signaled - print a trace * message so we can track that */ if (ret == 0) - trace_syncpoint_fence_expire(drawobj, "signaled"); + trace_syncpoint_fence_expire(syncobj, "signaled"); return ret; } - 
trace_syncpoint_fence(drawobj, event->handle->name); + trace_syncpoint_fence(syncobj, event->handle->name); return 0; } -/* kgsl_drawobj_add_sync_timestamp() - Add a new sync point for a drawobj +/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj * @device: KGSL device - * @drawobj: KGSL drawobj to add the sync point to + * @syncobj: KGSL sync obj to add the sync point to * @priv: Private structure passed by the user * - * Add a new sync point timestamp event to the drawobj. + * Add a new sync point timestamp event to the sync obj. */ -static int kgsl_drawobj_add_sync_timestamp(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void *priv) +static int drawobj_add_sync_timestamp(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_timestamp *sync = priv; - struct kgsl_context *context = kgsl_context_get(drawobj->device, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_context *context = kgsl_context_get(device, sync->context_id); struct kgsl_drawobj_sync_event *event; int ret = -EINVAL; @@ -400,27 +415,27 @@ static int kgsl_drawobj_add_sync_timestamp(struct kgsl_device *device, kref_get(&drawobj->refcount); - id = drawobj->numsyncs++; + id = syncobj->numsyncs++; - event = &drawobj->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP; - event->drawobj = drawobj; + event->syncobj = syncobj; event->context = context; event->timestamp = sync->timestamp; event->device = device; - set_bit(event->id, &drawobj->pending); + set_bit(event->id, &syncobj->pending); ret = kgsl_add_event(device, &context->events, sync->timestamp, - kgsl_drawobj_sync_func, event); + drawobj_sync_func, event); if (ret) { - clear_bit(event->id, &drawobj->pending); - kgsl_drawobj_put(drawobj); + clear_bit(event->id, &syncobj->pending); + drawobj_put(drawobj); } else { - trace_syncpoint_timestamp(drawobj, context, sync->timestamp); + 
trace_syncpoint_timestamp(syncobj, context, sync->timestamp); } done: @@ -431,31 +446,34 @@ done: } /** - * kgsl_drawobj_add_sync() - Add a sync point to a drawobj + * kgsl_drawobj_sync_add_sync() - Add a sync point to a command + * batch * @device: Pointer to the KGSL device struct for the GPU - * @drawobj: Pointer to the drawobj + * @syncobj: Pointer to the sync obj * @sync: Pointer to the user-specified struct defining the syncpoint * - * Create a new sync point in the drawobj based on the user specified - * parameters + * Create a new sync point in the sync obj based on the + * user specified parameters */ -int kgsl_drawobj_add_sync(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, struct kgsl_cmd_syncpoint *sync) { void *priv; int ret, psize; - int (*func)(struct kgsl_device *device, struct kgsl_drawobj *drawobj, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + int (*func)(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv); switch (sync->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); - func = kgsl_drawobj_add_sync_timestamp; + func = drawobj_add_sync_timestamp; break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: psize = sizeof(struct kgsl_cmd_syncpoint_fence); - func = kgsl_drawobj_add_sync_fence; + func = drawobj_add_sync_fence; break; default: KGSL_DRV_ERR(device, @@ -480,23 +498,25 @@ int kgsl_drawobj_add_sync(struct kgsl_device *device, return -EFAULT; } - ret = func(device, drawobj, priv); + ret = func(device, syncobj, priv); kfree(priv); return ret; } static void add_profiling_buffer(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, uint64_t gpuaddr, uint64_t size, + struct kgsl_drawobj_cmd *cmdobj, + uint64_t gpuaddr, uint64_t size, unsigned int id, uint64_t offset) { struct kgsl_mem_entry *entry; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); if 
(!(drawobj->flags & KGSL_DRAWOBJ_PROFILING)) return; /* Only the first buffer entry counts - ignore the rest */ - if (drawobj->profiling_buf_entry != NULL) + if (cmdobj->profiling_buf_entry != NULL) return; if (id != 0) @@ -520,29 +540,31 @@ static void add_profiling_buffer(struct kgsl_device *device, return; } - drawobj->profiling_buf_entry = entry; + cmdobj->profiling_buf_entry = entry; if (id != 0) - drawobj->profiling_buffer_gpuaddr = + cmdobj->profiling_buffer_gpuaddr = entry->memdesc.gpuaddr + offset; else - drawobj->profiling_buffer_gpuaddr = gpuaddr; + cmdobj->profiling_buffer_gpuaddr = gpuaddr; } /** - * kgsl_drawobj_add_ibdesc() - Add a legacy ibdesc to a drawobj - * @drawobj: Pointer to the drawobj + * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command + * batch + * @cmdobj: Pointer to the ib * @ibdesc: Pointer to the user-specified struct defining the memory or IB * - * Create a new memory entry in the drawobj based on the user specified - * parameters + * Create a new memory entry in the ib based on the + * user specified parameters */ -int kgsl_drawobj_add_ibdesc(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, struct kgsl_ibdesc *ibdesc) +int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc) { uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr; uint64_t size = (uint64_t) ibdesc->sizedwords << 2; struct kgsl_memobj_node *mem; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); /* sanitize the ibdesc ctrl flags */ ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER; @@ -550,13 +572,14 @@ int kgsl_drawobj_add_ibdesc(struct kgsl_device *device, if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) { - add_profiling_buffer(device, drawobj, + add_profiling_buffer(device, cmdobj, gpuaddr, size, 0, 0); return 0; } } - if (drawobj->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)) 
+ /* Ignore if SYNC or MARKER is specified */ + if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE)) return 0; mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL); @@ -571,74 +594,120 @@ int kgsl_drawobj_add_ibdesc(struct kgsl_device *device, mem->flags = 0; if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && - ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { + ibdesc->ctrl & KGSL_IBDESC_MEMLIST) /* add to the memlist */ - list_add_tail(&mem->node, &drawobj->memlist); - } else { + list_add_tail(&mem->node, &cmdobj->memlist); + else { /* set the preamble flag if directed to */ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE && - list_empty(&drawobj->cmdlist)) + list_empty(&cmdobj->cmdlist)) mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE; /* add to the cmd list */ - list_add_tail(&mem->node, &drawobj->cmdlist); + list_add_tail(&mem->node, &cmdobj->cmdlist); } return 0; } +static inline int drawobj_init(struct kgsl_device *device, + struct kgsl_context *context, struct kgsl_drawobj *drawobj, + unsigned int type) +{ + /* + * Increase the reference count on the context so it doesn't disappear + * during the lifetime of this object + */ + if (!_kgsl_context_get(context)) + return -ENOENT; + + kref_init(&drawobj->refcount); + + drawobj->device = device; + drawobj->context = context; + drawobj->type = type; + + return 0; +} + +/** + * kgsl_drawobj_sync_create() - Create a new sync obj + * structure + * @device: Pointer to a KGSL device struct + * @context: Pointer to a KGSL context struct + * + * Allocate an new kgsl_drawobj_sync structure + */ +struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, + struct kgsl_context *context) +{ + struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj), + GFP_KERNEL); + if (syncobj == NULL) + return ERR_PTR(-ENOMEM); + + if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) { + kfree(syncobj); + return ERR_PTR(-ENOENT); + } + + /* Add a timer to help debug sync deadlocks */ + setup_timer(&syncobj->timer, 
syncobj_timer, (unsigned long) syncobj); + + return syncobj; +} + /** - * kgsl_drawobj_create() - Create a new drawobj structure + * kgsl_drawobj_cmd_create() - Create a new command obj + * structure * @device: Pointer to a KGSL device struct * @context: Pointer to a KGSL context struct - * @flags: Flags for the drawobj + * @flags: Flags for the command obj + * @type: type of cmdobj MARKER/CMD * - * Allocate an new drawobj structure + * Allocate a new kgsl_drawobj_cmd structure */ -struct kgsl_drawobj *kgsl_drawobj_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags) +struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, + struct kgsl_context *context, unsigned int flags, + unsigned int type) { - struct kgsl_drawobj *drawobj = kzalloc(sizeof(*drawobj), GFP_KERNEL); + struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + struct kgsl_drawobj *drawobj; - if (drawobj == NULL) + if (cmdobj == NULL) return ERR_PTR(-ENOMEM); - /* - * Increase the reference count on the context so it doesn't disappear - * during the lifetime of this drawobj - */ + type &= CMDOBJ_TYPE | MARKEROBJ_TYPE; + if (type == 0) { + kfree(cmdobj); + return ERR_PTR(-EINVAL); + } + + drawobj = DRAWOBJ(cmdobj); - if (!_kgsl_context_get(context)) { - kfree(drawobj); + if (drawobj_init(device, context, drawobj, type)) { + kfree(cmdobj); return ERR_PTR(-ENOENT); } - kref_init(&drawobj->refcount); - INIT_LIST_HEAD(&drawobj->cmdlist); - INIT_LIST_HEAD(&drawobj->memlist); - - drawobj->device = device; - drawobj->context = context; /* sanitize our flags for drawobj's */ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH | KGSL_DRAWOBJ_MARKER | KGSL_DRAWOBJ_END_OF_FRAME - | KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_PWR_CONSTRAINT | KGSL_DRAWOBJ_MEMLIST | KGSL_DRAWOBJ_PROFILING | KGSL_DRAWOBJ_PROFILING_KTIME); - /* Add a timer to help debug sync deadlocks */ - setup_timer(&drawobj->timer, _kgsl_drawobj_timer, - (unsigned long) drawobj); + 
INIT_LIST_HEAD(&cmdobj->cmdlist); + INIT_LIST_HEAD(&cmdobj->memlist); - return drawobj; + return cmdobj; } #ifdef CONFIG_COMPAT static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { int i, ret = 0; struct kgsl_ibdesc_compat ibdesc32; @@ -656,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, ibdesc.sizedwords = (size_t) ibdesc32.sizedwords; ibdesc.ctrl = (unsigned int) ibdesc32.ctrl; - ret = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) break; @@ -667,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint_compat sync32; struct kgsl_cmd_syncpoint sync; @@ -685,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device, sync.priv = compat_ptr(sync32.priv); sync.size = (size_t) sync32.size; - ret = kgsl_drawobj_add_sync(device, drawobj, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) break; @@ -696,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device, } #else static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { return -EINVAL; } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { return -EINVAL; } #endif -int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) +/* Returns: + * -EINVAL: Bad data + * 0: All data 
fields are empty (nothing to do) + * 1: All list information is valid + */ +static int _verify_input_list(unsigned int count, void __user *ptr, + unsigned int size) +{ + /* Return early if nothing going on */ + if (count == 0 && ptr == NULL && size == 0) + return 0; + + /* Sanity check inputs */ + if (count == 0 || ptr == NULL || size == 0) + return -EINVAL; + + return 1; +} + +int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { struct kgsl_ibdesc ibdesc; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); int i, ret; + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) + return 0; + + ret = _verify_input_list(count, ptr, sizeof(ibdesc)); + if (ret <= 0) + return -EINVAL; + if (is_compat_task()) - return add_ibdesc_list_compat(device, drawobj, ptr, count); + return add_ibdesc_list_compat(device, cmdobj, ptr, count); for (i = 0; i < count; i++) { memset(&ibdesc, 0, sizeof(ibdesc)); @@ -723,7 +820,7 @@ int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device, if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc))) return -EFAULT; - ret = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) return ret; @@ -733,8 +830,8 @@ int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device, return 0; } -int kgsl_drawobj_add_syncpoints(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count) +int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint sync; int i, ret; @@ -742,17 +839,14 @@ int kgsl_drawobj_add_syncpoints(struct kgsl_device *device, if (count == 0) return 0; - if (count > KGSL_MAX_SYNCPOINTS) - return -EINVAL; - - drawobj->synclist = kcalloc(count, + syncobj->synclist = kcalloc(count, sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (drawobj->synclist 
== NULL) + if (syncobj->synclist == NULL) return -ENOMEM; if (is_compat_task()) - return add_syncpoints_compat(device, drawobj, ptr, count); + return add_syncpoints_compat(device, syncobj, ptr, count); for (i = 0; i < count; i++) { memset(&sync, 0, sizeof(sync)); @@ -760,7 +854,7 @@ int kgsl_drawobj_add_syncpoints(struct kgsl_device *device, if (copy_from_user(&sync, ptr, sizeof(sync))) return -EFAULT; - ret = kgsl_drawobj_add_sync(device, drawobj, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; @@ -770,7 +864,7 @@ int kgsl_drawobj_add_syncpoints(struct kgsl_device *device, return 0; } -static int kgsl_drawobj_add_object(struct list_head *head, +static int drawobj_add_object(struct list_head *head, struct kgsl_command_object *obj) { struct kgsl_memobj_node *mem; @@ -795,24 +889,22 @@ static int kgsl_drawobj_add_object(struct list_head *head, KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \ KGSL_CMDLIST_IB_PREAMBLE) -int kgsl_drawobj_add_cmdlist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, +/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */ +int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; - - /* Ignore all if SYNC or MARKER is specified */ - if (drawobj->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)) - return 0; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -825,12 +917,12 @@ int kgsl_drawobj_add_cmdlist(struct 
kgsl_device *device, if (!(obj.flags & CMDLIST_FLAGS)) { KGSL_DRV_ERR(device, "invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n", - drawobj->context->id, obj.flags, obj.id, + baseobj->context->id, obj.flags, obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } - ret = kgsl_drawobj_add_object(&drawobj->cmdlist, &obj); + ret = drawobj_add_object(&cmdobj->cmdlist, &obj); if (ret) return ret; @@ -840,20 +932,21 @@ int kgsl_drawobj_add_cmdlist(struct kgsl_device *device, return 0; } -int kgsl_drawobj_add_memlist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, +int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -865,17 +958,16 @@ int kgsl_drawobj_add_memlist(struct kgsl_device *device, if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) { KGSL_DRV_ERR(device, "invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n", - drawobj->context->id, obj.flags, obj.id, - obj.offset, obj.gpuaddr, obj.size); + DRAWOBJ(cmdobj)->context->id, obj.flags, + obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } if (obj.flags & KGSL_OBJLIST_PROFILE) - add_profiling_buffer(device, drawobj, obj.gpuaddr, + add_profiling_buffer(device, cmdobj, obj.gpuaddr, obj.size, obj.id, obj.offset); else { - ret = kgsl_drawobj_add_object(&drawobj->memlist, - &obj); + ret = drawobj_add_object(&cmdobj->memlist, &obj); 
if (ret) return ret; } @@ -886,29 +978,23 @@ int kgsl_drawobj_add_memlist(struct kgsl_device *device, return 0; } -int kgsl_drawobj_add_synclist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, +int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_syncpoint syncpoint; struct kgsl_cmd_syncpoint sync; - int i, ret = 0; - - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) - return 0; - - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + int i, ret; - if (count > KGSL_MAX_SYNCPOINTS) + /* If creating a sync and the data is not there or wrong then error */ + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) return -EINVAL; - drawobj->synclist = kcalloc(count, + syncobj->synclist = kcalloc(count, sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (drawobj->synclist == NULL) + if (syncobj->synclist == NULL) return -ENOMEM; for (i = 0; i < count; i++) { @@ -922,7 +1008,7 @@ int kgsl_drawobj_add_synclist(struct kgsl_device *device, sync.priv = to_user_ptr(syncpoint.priv); sync.size = syncpoint.size; - ret = kgsl_drawobj_add_sync(device, drawobj, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index 48aa7af24029..89ed944c539a 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -13,31 +13,49 @@ #ifndef __KGSL_DRAWOBJ_H #define __KGSL_DRAWOBJ_H -#define KGSL_DRAWOBJ_FLAGS \ - { KGSL_DRAWOBJ_MARKER, "MARKER" }, \ - { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \ - { KGSL_DRAWOBJ_SYNC, "SYNC" }, \ - { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \ - { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \ - { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" } +#define DRAWOBJ(obj) (&obj->base) +#define 
SYNCOBJ(obj) \ + container_of(obj, struct kgsl_drawobj_sync, base) +#define CMDOBJ(obj) \ + container_of(obj, struct kgsl_drawobj_cmd, base) + +#define CMDOBJ_TYPE BIT(0) +#define MARKEROBJ_TYPE BIT(1) +#define SYNCOBJ_TYPE BIT(2) /** - * struct kgsl_drawobj - KGSl command descriptor + * struct kgsl_drawobj - KGSL drawobj descriptor * @device: KGSL GPU device that the command was created for * @context: KGSL context that created the command + * @type: Object type * @timestamp: Timestamp assigned to the command * @flags: flags + * @refcount: kref structure to maintain the reference count + */ +struct kgsl_drawobj { + struct kgsl_device *device; + struct kgsl_context *context; + uint32_t type; + uint32_t timestamp; + unsigned long flags; + struct kref refcount; +}; + +/** + * struct kgsl_drawobj_cmd - KGSL command obj, This covers marker + * cmds also since markers are special form of cmds that do not + * need their cmds to be executed. + * @base: Base kgsl_drawobj * @priv: Internal flags + * @global_ts: The ringbuffer timestamp corresponding to this + * command obj * @fault_policy: Internal policy describing how to handle this command in case * of a fault * @fault_recovery: recovery actions actually tried for this batch + * be hung * @refcount: kref structure to maintain the reference count * @cmdlist: List of IBs to issue - * @memlist: List of all memory used in this drawobj - * @synclist: Array of context/timestamp tuples to wait for before issuing - * @numsyncs: Number of sync entries in the array - * @pending: Bitmask of sync events that are active - * @timer: a timer used to track possible sync timeouts for this drawobj + * @memlist: List of all memory used in this command batch * @marker_timestamp: For markers, the timestamp of the last "real" command that * was queued * @profiling_buf_entry: Mem entry containing the profiling buffer @@ -45,34 +63,42 @@ * for easy access * @profile_index: Index to store the start/stop ticks in the kernel profiling * buffer - * 
@submit_ticks: Variable to hold ticks at the time of drawobj submit. - * @global_ts: The ringbuffer timestamp corresponding to this drawobj - * @timeout_jiffies: For a syncpoint drawobj the jiffies at which the - * timer will expire - * This structure defines an atomic batch of command buffers issued from - * userspace. + * @submit_ticks: Variable to hold ticks at the time of + * command obj submit. + */ -struct kgsl_drawobj { - struct kgsl_device *device; - struct kgsl_context *context; - uint32_t timestamp; - uint32_t flags; +struct kgsl_drawobj_cmd { + struct kgsl_drawobj base; unsigned long priv; + unsigned int global_ts; unsigned long fault_policy; unsigned long fault_recovery; - struct kref refcount; struct list_head cmdlist; struct list_head memlist; - struct kgsl_drawobj_sync_event *synclist; - unsigned int numsyncs; - unsigned long pending; - struct timer_list timer; unsigned int marker_timestamp; struct kgsl_mem_entry *profiling_buf_entry; uint64_t profiling_buffer_gpuaddr; unsigned int profile_index; uint64_t submit_ticks; - unsigned int global_ts; +}; + +/** + * struct kgsl_drawobj_sync - KGSL sync object + * @base: Base kgsl_drawobj, this needs to be the first entry + * @synclist: Array of context/timestamp tuples to wait for before issuing + * @numsyncs: Number of sync entries in the array + * @pending: Bitmask of sync events that are active + * @timer: a timer used to track possible sync timeouts for this + * sync obj + * @timeout_jiffies: For a sync obj the jiffies at + * which the timer will expire + */ +struct kgsl_drawobj_sync { + struct kgsl_drawobj base; + struct kgsl_drawobj_sync_event *synclist; + unsigned int numsyncs; + unsigned long pending; + struct timer_list timer; unsigned long timeout_jiffies; }; @@ -80,8 +106,9 @@ struct kgsl_drawobj { * struct kgsl_drawobj_sync_event * @id: identifer (positiion within the pending bitmap) * @type: Syncpoint type - * @drawobj: Pointer to the drawobj that owns the sync event - * @context: Pointer to 
the KGSL context that owns the drawobj + * @syncobj: Pointer to the syncobj that owns the sync event + * @context: KGSL context for whose timestamp we want to + * register this event * @timestamp: Pending timestamp for the event * @handle: Pointer to a sync fence handle * @device: Pointer to the KGSL device @@ -89,80 +116,83 @@ struct kgsl_drawobj { struct kgsl_drawobj_sync_event { unsigned int id; int type; - struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_sync *syncobj; struct kgsl_context *context; unsigned int timestamp; struct kgsl_sync_fence_waiter *handle; struct kgsl_device *device; }; +#define KGSL_DRAWOBJ_FLAGS \ + { KGSL_DRAWOBJ_MARKER, "MARKER" }, \ + { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \ + { KGSL_DRAWOBJ_SYNC, "SYNC" }, \ + { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \ + { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \ + { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" } + /** - * enum kgsl_drawobj_priv - Internal drawobj flags - * @DRAWOBJ_FLAG_SKIP - skip the entire drawobj - * @DRAWOBJ_FLAG_FORCE_PREAMBLE - Force the preamble on for the drawobj - * @DRAWOBJ_FLAG_WFI - Force wait-for-idle for the submission - * @DRAWOBJ_FLAG_PROFILE - store the start / retire ticks for the drawobj - * in the profiling buffer - * @DRAWOBJ_FLAG_FENCE_LOG - Set if the drawobj is dumping fence logs via the - * drawobj timer - this is used to avoid recursion + * enum kgsl_drawobj_cmd_priv - Internal command obj flags + * @CMDOBJ_SKIP - skip the entire command obj + * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for + * command obj + * @CMDOBJ_WFI - Force wait-for-idle for the submission + * @CMDOBJ_PROFILE - store the start / retire ticks for + * the command obj in the profiling buffer */ - -enum kgsl_drawobj_priv { - DRAWOBJ_FLAG_SKIP = 0, - DRAWOBJ_FLAG_FORCE_PREAMBLE, - DRAWOBJ_FLAG_WFI, - DRAWOBJ_FLAG_PROFILE, - DRAWOBJ_FLAG_FENCE_LOG, +enum kgsl_drawobj_cmd_priv { + CMDOBJ_SKIP = 0, + CMDOBJ_FORCE_PREAMBLE, + CMDOBJ_WFI, + CMDOBJ_PROFILE, }; - -int 
kgsl_drawobj_add_memobj(struct kgsl_drawobj *drawobj, - struct kgsl_ibdesc *ibdesc); - -int kgsl_drawobj_add_sync(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, - struct kgsl_cmd_syncpoint *sync); - -struct kgsl_drawobj *kgsl_drawobj_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags); -int kgsl_drawobj_add_ibdesc(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, struct kgsl_ibdesc *ibdesc); -int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count); -int kgsl_drawobj_add_syncpoints(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, int count); -int kgsl_drawobj_add_cmdlist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, +struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, + struct kgsl_context *context, unsigned int flags, + unsigned int type); +int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc); +int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count); +int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count); -int kgsl_drawobj_add_memlist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, +int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count); -int kgsl_drawobj_add_synclist(struct kgsl_device *device, - struct kgsl_drawobj *drawobj, void __user *ptr, + +struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, + struct kgsl_context *context); +int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, + int count); +int 
kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, unsigned int size, unsigned int count); +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, + struct kgsl_cmd_syncpoint *sync); int kgsl_drawobj_init(void); void kgsl_drawobj_exit(void); void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_drawobj *drawobj); + struct kgsl_drawobj_sync *syncobj); void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj); -void kgsl_drawobj_destroy_object(struct kref *kref); - -static inline bool kgsl_drawobj_events_pending(struct kgsl_drawobj *drawobj) +static inline bool kgsl_drawobj_events_pending( + struct kgsl_drawobj_sync *syncobj) { - return !bitmap_empty(&drawobj->pending, KGSL_MAX_SYNCPOINTS); + return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS); } -static inline bool kgsl_drawobj_event_pending(struct kgsl_drawobj *drawobj, - unsigned int bit) +static inline bool kgsl_drawobj_event_pending( + struct kgsl_drawobj_sync *syncobj, unsigned int bit) { if (bit >= KGSL_MAX_SYNCPOINTS) return false; - return test_bit(bit, &drawobj->pending); + return test_bit(bit, &syncobj->pending); } - #endif /* __KGSL_DRAWOBJ_H */ diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 2dd462454565..6438c6e65b97 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds, TP_PROTO(struct kgsl_device *device, int drawctxt_id, - struct kgsl_drawobj *drawobj, unsigned int numibs, int timestamp, int flags, int result, unsigned int type), - TP_ARGS(device, drawctxt_id, drawobj, numibs, timestamp, + TP_ARGS(device, drawctxt_id, numibs, timestamp, flags, result, type), TP_STRUCT__entry( @@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy, ); DECLARE_EVENT_CLASS(syncpoint_timestamp_template, - TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context, + TP_PROTO(struct 
kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(drawobj, context, timestamp), + TP_ARGS(syncobj, context, timestamp), TP_STRUCT__entry( - __field(unsigned int, drawobj_context_id) + __field(unsigned int, syncobj_context_id) __field(unsigned int, context_id) __field(unsigned int, timestamp) ), TP_fast_assign( - __entry->drawobj_context_id = drawobj->context->id; + __entry->syncobj_context_id = syncobj->base.context->id; __entry->context_id = context->id; __entry->timestamp = timestamp; ), TP_printk("ctx=%d sync ctx=%d ts=%d", - __entry->drawobj_context_id, __entry->context_id, + __entry->syncobj_context_id, __entry->context_id, __entry->timestamp) ); DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp, - TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context, + TP_PROTO(struct kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(drawobj, context, timestamp) + TP_ARGS(syncobj, context, timestamp) ); DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire, - TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context, + TP_PROTO(struct kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(drawobj, context, timestamp) + TP_ARGS(syncobj, context, timestamp) ); DECLARE_EVENT_CLASS(syncpoint_fence_template, - TP_PROTO(struct kgsl_drawobj *drawobj, char *name), - TP_ARGS(drawobj, name), + TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name), + TP_ARGS(syncobj, name), TP_STRUCT__entry( __string(fence_name, name) - __field(unsigned int, drawobj_context_id) + __field(unsigned int, syncobj_context_id) ), TP_fast_assign( - __entry->drawobj_context_id = drawobj->context->id; + __entry->syncobj_context_id = syncobj->base.context->id; __assign_str(fence_name, name); ), TP_printk("ctx=%d fence=%s", - __entry->drawobj_context_id, __get_str(fence_name)) + __entry->syncobj_context_id, __get_str(fence_name)) 
); DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence, - TP_PROTO(struct kgsl_drawobj *drawobj, char *name), - TP_ARGS(drawobj, name) + TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name), + TP_ARGS(syncobj, name) ); DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire, - TP_PROTO(struct kgsl_drawobj *drawobj, char *name), - TP_ARGS(drawobj, name) + TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name), + TP_ARGS(syncobj, name) ); TRACE_EVENT(kgsl_msg, |