Diffstat (limited to 'drivers/gpu/msm/adreno_dispatch.c')
-rw-r--r--	drivers/gpu/msm/adreno_dispatch.c	| 749
1 file changed, 376 insertions(+), 373 deletions(-)
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 522c32743d3d..e42588004c50 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -25,7 +25,7 @@ #include "adreno_trace.h" #include "kgsl_sharedmem.h" -#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) +#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) /* Time in ms after which the dispatcher tries to schedule an unscheduled RB */ unsigned int adreno_dispatch_starvation_time = 2000; @@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25; unsigned int adreno_disp_preempt_fair_sched; /* Number of commands that can be queued in a context before it sleeps */ -static unsigned int _context_cmdqueue_size = 50; +static unsigned int _context_drawqueue_size = 50; /* Number of milliseconds to wait for the context queue to clear */ static unsigned int _context_queue_wait = 10000; -/* Number of command batches sent at a time from a single context */ -static unsigned int _context_cmdbatch_burst = 5; +/* Number of drawobjs sent at a time from a single context */ +static unsigned int _context_drawobj_burst = 5; /* * GFT throttle parameters. If GFT recovered more than @@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15; static unsigned int _dispatcher_q_inflight_lo = 4; /* Command batch timeout (in milliseconds) */ -unsigned int adreno_cmdbatch_timeout = 2000; +unsigned int adreno_drawobj_timeout = 2000; /* Interval for reading and comparing fault detection registers */ static unsigned int _fault_timer_interval = 200; -#define CMDQUEUE_RB(_cmdqueue) \ +#define DRAWQUEUE_RB(_drawqueue) \ ((struct adreno_ringbuffer *) \ - container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q)) + container_of((_drawqueue),\ + struct adreno_ringbuffer, dispatch_q)) -#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q) +#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q) -static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue); +static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue); -static inline bool cmdqueue_is_current( - struct adreno_dispatcher_cmdqueue *cmdqueue) +static inline bool drawqueue_is_current( + struct adreno_dispatcher_drawqueue *drawqueue) { - struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue); + struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue); struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); return (adreno_dev->cur_rb == rb); @@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data) return time_after(jiffies, expires) ? 0 : 1; } -static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data) +static int __count_drawqueue_context(struct adreno_context *drawctxt, + void *data) { unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100); @@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data) return 0; return (&drawctxt->rb->dispatch_q == - (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0; + (struct adreno_dispatcher_drawqueue *) data) ? 
1 : 0; } static int _adreno_count_active_contexts(struct adreno_device *adreno_dev, @@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev, } static void _track_context(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue, + struct adreno_dispatcher_drawqueue *drawqueue, struct adreno_context *drawctxt) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev, device->active_context_count = _adreno_count_active_contexts(adreno_dev, __count_context, NULL); - cmdqueue->active_context_count = + drawqueue->active_context_count = _adreno_count_active_contexts(adreno_dev, - __count_cmdqueue_context, cmdqueue); + __count_drawqueue_context, drawqueue); spin_unlock(&adreno_dev->active_list_lock); } @@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev, */ static inline int -_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue) +_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue) { - return (cmdqueue->active_context_count > 1) + return (drawqueue->active_context_count > 1) ? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi; } @@ -271,18 +273,18 @@ static void start_fault_timer(struct adreno_device *adreno_dev) } /** - * _retire_marker() - Retire a marker command batch without sending it to the + * _retire_marker() - Retire a marker drawobj without sending it to the * hardware - * @cmdbatch: Pointer to the cmdbatch to retire + * @drawobj: Pointer to the drawobj to retire * * In some cases marker commands can be retired by the software without going to * the GPU. In those cases, update the memstore from the CPU, kick off the - * event engine to handle expired events and destroy the command batch. + * event engine to handle expired events and destroy the drawobj. */ -static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) +static void _retire_marker(struct kgsl_drawobj *drawobj) { - struct kgsl_context *context = cmdbatch->context; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_context *context = drawobj->context; + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); struct kgsl_device *device = context->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); @@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) */ kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), - cmdbatch->timestamp); + drawobj->timestamp); kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), - cmdbatch->timestamp); + drawobj->timestamp); /* Retire pending GPU events for the object */ @@ -308,12 +310,12 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) * So avoid reading GPU register directly for A3xx. 
*/ if (adreno_is_a3xx(adreno_dev)) - trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb, + trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, 0); else - trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb, + trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, adreno_get_rptr(drawctxt->rb)); - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); } static int _check_context_queue(struct adreno_context *drawctxt) @@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt) if (kgsl_context_invalid(&drawctxt->base)) ret = 1; else - ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0; + ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0; spin_unlock(&drawctxt->lock); @@ -341,54 +343,54 @@ static int _check_context_queue(struct adreno_context *drawctxt) * return true if this is a marker command and the dependent timestamp has * retired */ -static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch) +static bool _marker_expired(struct kgsl_drawobj *drawobj) { - return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - kgsl_check_timestamp(cmdbatch->device, cmdbatch->context, - cmdbatch->marker_timestamp); + return (drawobj->flags & KGSL_DRAWOBJ_MARKER) && + kgsl_check_timestamp(drawobj->device, drawobj->context, + drawobj->marker_timestamp); } -static inline void _pop_cmdbatch(struct adreno_context *drawctxt) +static inline void _pop_drawobj(struct adreno_context *drawctxt) { - drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head, - ADRENO_CONTEXT_CMDQUEUE_SIZE); + drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head, + ADRENO_CONTEXT_DRAWQUEUE_SIZE); drawctxt->queued--; } /** - * Removes all expired marker and sync cmdbatches from + * Removes all expired marker and sync obj's from * the context queue when marker command and dependent * timestamp are retired. This function is recursive. - * returns cmdbatch if context has command, NULL otherwise. + * returns drawobj if context has command, NULL otherwise. 
*/ -static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt) +static struct kgsl_drawobj *_expire_markers(struct adreno_context *drawctxt) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; - if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail) + if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail) return NULL; - cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head]; + drawobj = drawctxt->drawqueue[drawctxt->drawqueue_head]; - if (cmdbatch == NULL) + if (drawobj == NULL) return NULL; /* Check to see if this is a marker we can skip over */ - if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - _marker_expired(cmdbatch)) { - _pop_cmdbatch(drawctxt); - _retire_marker(cmdbatch); + if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) && + _marker_expired(drawobj)) { + _pop_drawobj(drawctxt); + _retire_marker(drawobj); return _expire_markers(drawctxt); } - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { - if (!kgsl_cmdbatch_events_pending(cmdbatch)) { - _pop_cmdbatch(drawctxt); - kgsl_cmdbatch_destroy(cmdbatch); + if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { + if (!kgsl_drawobj_events_pending(drawobj)) { + _pop_drawobj(drawctxt); + kgsl_drawobj_destroy(drawobj); return _expire_markers(drawctxt); } } - return cmdbatch; + return drawobj; } static void expire_markers(struct adreno_context *drawctxt) @@ -398,14 +400,14 @@ static void expire_markers(struct adreno_context *drawctxt) spin_unlock(&drawctxt->lock); } -static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt) +static struct kgsl_drawobj *_get_drawobj(struct adreno_context *drawctxt) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; bool pending = false; - cmdbatch = _expire_markers(drawctxt); + drawobj = _expire_markers(drawctxt); - if (cmdbatch == NULL) + if (drawobj == NULL) return NULL; /* @@ -416,11 +418,11 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt) * correct. 
If skip isn't set then we block this queue * until the dependent timestamp expires */ - if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) + if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) && + (!test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv))) pending = true; - if (kgsl_cmdbatch_events_pending(cmdbatch)) + if (kgsl_drawobj_events_pending(drawobj)) pending = true; /* @@ -432,32 +434,32 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt) * If syncpoints are pending start the canary timer if * it hasn't already been started */ - if (!cmdbatch->timeout_jiffies) { - cmdbatch->timeout_jiffies = + if (!drawobj->timeout_jiffies) { + drawobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000); - mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies); + mod_timer(&drawobj->timer, drawobj->timeout_jiffies); } return ERR_PTR(-EAGAIN); } - _pop_cmdbatch(drawctxt); - return cmdbatch; + _pop_drawobj(drawctxt); + return drawobj; } /** - * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue + * adreno_dispatcher_get_drawobj() - Get a new command from a context queue * @drawctxt: Pointer to the adreno draw context * - * Dequeue a new command batch from the context list + * Dequeue a new drawobj from the context list */ -static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch( +static struct kgsl_drawobj *adreno_dispatcher_get_drawobj( struct adreno_context *drawctxt) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; spin_lock(&drawctxt->lock); - cmdbatch = _get_cmdbatch(drawctxt); + drawobj = _get_drawobj(drawctxt); spin_unlock(&drawctxt->lock); /* @@ -465,24 +467,24 @@ static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch( * on another core before queueing the buffer. We must do this * without holding any spin lock that the timer handler might be using */ - if (!IS_ERR_OR_NULL(cmdbatch)) - del_timer_sync(&cmdbatch->timer); + if (!IS_ERR_OR_NULL(drawobj)) + del_timer_sync(&drawobj->timer); - return cmdbatch; + return drawobj; } /** - * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context + * adreno_dispatcher_requeue_drawobj() - Put a command back on the context * queue * @drawctxt: Pointer to the adreno draw context - * @cmdbatch: Pointer to the KGSL cmdbatch to requeue + * @drawobj: Pointer to the KGSL drawobj to requeue * * Failure to submit a command to the ringbuffer isn't the fault of the command * being submitted so if a failure happens, push it back on the head of the the * context queue to be reconsidered again unless the context got detached. */ -static inline int adreno_dispatcher_requeue_cmdbatch( - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch) +static inline int adreno_dispatcher_requeue_drawobj( + struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj) { unsigned int prev; spin_lock(&drawctxt->lock); @@ -490,27 +492,27 @@ static inline int adreno_dispatcher_requeue_cmdbatch( if (kgsl_context_detached(&drawctxt->base) || kgsl_context_invalid(&drawctxt->base)) { spin_unlock(&drawctxt->lock); - /* get rid of this cmdbatch since the context is bad */ - kgsl_cmdbatch_destroy(cmdbatch); + /* get rid of this drawobj since the context is bad */ + kgsl_drawobj_destroy(drawobj); return -ENOENT; } - prev = drawctxt->cmdqueue_head == 0 ? - (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) : - (drawctxt->cmdqueue_head - 1); + prev = drawctxt->drawqueue_head == 0 ? 
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) : + (drawctxt->drawqueue_head - 1); /* * The maximum queue size always needs to be one less then the size of - * the ringbuffer queue so there is "room" to put the cmdbatch back in + * the ringbuffer queue so there is "room" to put the drawobj back in */ - BUG_ON(prev == drawctxt->cmdqueue_tail); + WARN_ON(prev == drawctxt->drawqueue_tail); - drawctxt->cmdqueue[prev] = cmdbatch; + drawctxt->drawqueue[prev] = drawobj; drawctxt->queued++; /* Reset the command queue head to reflect the newly requeued change */ - drawctxt->cmdqueue_head = prev; + drawctxt->drawqueue_head = prev; spin_unlock(&drawctxt->lock); return 0; } @@ -545,21 +547,21 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev, } /** - * sendcmd() - Send a command batch to the GPU hardware + * sendcmd() - Send a drawobj to the GPU hardware * @dispatcher: Pointer to the adreno dispatcher struct - * @cmdbatch: Pointer to the KGSL cmdbatch being sent + * @drawobj: Pointer to the KGSL drawobj being sent * - * Send a KGSL command batch to the GPU hardware + * Send a KGSL drawobj to the GPU hardware */ static int sendcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); - struct adreno_dispatcher_cmdqueue *dispatch_q = - ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); + struct adreno_dispatcher_drawqueue *dispatch_q = + ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj); struct adreno_submit_time time; uint64_t secs = 0; unsigned long nsecs = 0; @@ -588,15 +590,15 @@ static int sendcmd(struct adreno_device *adreno_dev, set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv); } - if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) { - set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv); - cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index; - adreno_dev->cmdbatch_profile_index = - (adreno_dev->cmdbatch_profile_index + 1) % - ADRENO_CMDBATCH_PROFILE_COUNT; + if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) { + set_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv); + drawobj->profile_index = adreno_dev->profile_index; + adreno_dev->profile_index = + (adreno_dev->profile_index + 1) % + ADRENO_DRAWOBJ_PROFILE_COUNT; } - ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time); + ret = adreno_ringbuffer_submitcmd(adreno_dev, drawobj, &time); /* * On the first command, if the submission was successful, then read the @@ -649,17 +651,17 @@ static int sendcmd(struct adreno_device *adreno_dev, secs = time.ktime; nsecs = do_div(secs, 1000000000); - trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight, + trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight, time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb, adreno_get_rptr(drawctxt->rb)); mutex_unlock(&device->mutex); - cmdbatch->submit_ticks = time.ticks; + drawobj->submit_ticks = time.ticks; - dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch; + dispatch_q->cmd_q[dispatch_q->tail] = drawobj; dispatch_q->tail = (dispatch_q->tail + 1) % - ADRENO_DISPATCH_CMDQUEUE_SIZE; + ADRENO_DISPATCH_DRAWQUEUE_SIZE; /* * For the first submission in any given command queue update the @@ -670,7 +672,7 @@ static int sendcmd(struct 
adreno_device *adreno_dev, if (dispatch_q->inflight == 1) dispatch_q->expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + msecs_to_jiffies(adreno_drawobj_timeout); /* * If we believe ourselves to be current and preemption isn't a thing, @@ -678,7 +680,7 @@ static int sendcmd(struct adreno_device *adreno_dev, * thing and the timer will be set up in due time */ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) { - if (cmdqueue_is_current(dispatch_q)) + if (drawqueue_is_current(dispatch_q)) mod_timer(&dispatcher->timer, dispatch_q->expires); } @@ -704,11 +706,11 @@ static int sendcmd(struct adreno_device *adreno_dev, static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, struct adreno_context *drawctxt) { - struct adreno_dispatcher_cmdqueue *dispatch_q = + struct adreno_dispatcher_drawqueue *dispatch_q = &(drawctxt->rb->dispatch_q); int count = 0; int ret = 0; - int inflight = _cmdqueue_inflight(dispatch_q); + int inflight = _drawqueue_inflight(dispatch_q); unsigned int timestamp; if (dispatch_q->inflight >= inflight) { @@ -717,27 +719,27 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, } /* - * Each context can send a specific number of command batches per cycle + * Each context can send a specific number of drawobjs per cycle */ - while ((count < _context_cmdbatch_burst) && + while ((count < _context_drawobj_burst) && (dispatch_q->inflight < inflight)) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; if (adreno_gpu_fault(adreno_dev) != 0) break; - cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt); + drawobj = adreno_dispatcher_get_drawobj(drawctxt); /* - * adreno_context_get_cmdbatch returns -EAGAIN if the current - * cmdbatch has pending sync points so no more to do here. + * adreno_context_get_drawobj returns -EAGAIN if the current + * drawobj has pending sync points so no more to do here. 
* When the sync points are satisfied then the context will get * reqeueued */ - if (IS_ERR_OR_NULL(cmdbatch)) { - if (IS_ERR(cmdbatch)) - ret = PTR_ERR(cmdbatch); + if (IS_ERR_OR_NULL(drawobj)) { + if (IS_ERR(drawobj)) + ret = PTR_ERR(drawobj); break; } @@ -748,31 +750,31 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, * against the burst for the context */ - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { - kgsl_cmdbatch_destroy(cmdbatch); + if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { + kgsl_drawobj_destroy(drawobj); continue; } - timestamp = cmdbatch->timestamp; + timestamp = drawobj->timestamp; - ret = sendcmd(adreno_dev, cmdbatch); + ret = sendcmd(adreno_dev, drawobj); /* - * On error from sendcmd() try to requeue the command batch + * On error from sendcmd() try to requeue the drawobj * unless we got back -ENOENT which means that the context has * been detached and there will be no more deliveries from here */ if (ret != 0) { - /* Destroy the cmdbatch on -ENOENT */ + /* Destroy the drawobj on -ENOENT */ if (ret == -ENOENT) - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); else { /* * If the requeue returns an error, return that * instead of whatever sendcmd() sent us */ - int r = adreno_dispatcher_requeue_cmdbatch( - drawctxt, cmdbatch); + int r = adreno_dispatcher_requeue_drawobj( + drawctxt, drawobj); if (r) ret = r; } @@ -934,17 +936,17 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) /** * get_timestamp() - Return the next timestamp for the context * @drawctxt - Pointer to an adreno draw context struct - * @cmdbatch - Pointer to a command batch + * @drawobj - Pointer to a drawobj * @timestamp - Pointer to a timestamp value possibly passed from the user * * Assign a timestamp based on the settings of the draw context and the command * batch. 
*/ static int get_timestamp(struct adreno_context *drawctxt, - struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp) + struct kgsl_drawobj *drawobj, unsigned int *timestamp) { /* Synchronization commands don't get a timestamp */ - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { + if (drawobj->flags & KGSL_DRAWOBJ_SYNC) { *timestamp = 0; return 0; } @@ -969,18 +971,18 @@ static int get_timestamp(struct adreno_context *drawctxt, * adreno_dispactcher_queue_cmd() - Queue a new command in the context * @adreno_dev: Pointer to the adreno device struct * @drawctxt: Pointer to the adreno draw context - * @cmdbatch: Pointer to the command batch being submitted + * @drawobj: Pointer to the drawobj being submitted * @timestamp: Pointer to the requested timestamp * * Queue a command in the context - if there isn't any room in the queue, then * block until there is */ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch, + struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, uint32_t *timestamp) { - struct adreno_dispatcher_cmdqueue *dispatch_q = - ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch); + struct adreno_dispatcher_drawqueue *dispatch_q = + ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj); int ret; spin_lock(&drawctxt->lock); @@ -997,36 +999,36 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv)) - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv); /* - * Force the premable if set from userspace in the context or cmdbatch + * Force the premable if set from userspace in the context or drawobj * flags */ if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) || - (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH)) - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + (drawobj->flags & KGSL_DRAWOBJ_CTX_SWITCH)) + set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv); - /* Skip this cmdbatch commands if IFH_NOP is enabled */ + /* Skip this drawobj commands if IFH_NOP is enabled */ if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP) - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); + set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); /* * If we are waiting for the end of frame and it hasn't appeared yet, - * then mark the command batch as skipped. It will still progress + * then mark the drawobj as skipped. 
It will still progress * through the pipeline but it won't actually send any commands */ if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) { - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); + set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); /* - * If this command batch represents the EOF then clear the way + * If this drawobj represents the EOF then clear the way * for the dispatcher to continue submitting */ - if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) { + if (drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME) { clear_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv); @@ -1041,7 +1043,7 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, /* Wait for room in the context queue */ - while (drawctxt->queued >= _context_cmdqueue_size) { + while (drawctxt->queued >= _context_drawqueue_size) { trace_adreno_drawctxt_sleep(drawctxt); spin_unlock(&drawctxt->lock); @@ -1071,27 +1073,27 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, return -ENOENT; } - ret = get_timestamp(drawctxt, cmdbatch, timestamp); + ret = get_timestamp(drawctxt, drawobj, timestamp); if (ret) { spin_unlock(&drawctxt->lock); return ret; } - cmdbatch->timestamp = *timestamp; + drawobj->timestamp = *timestamp; - if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) { + if (drawobj->flags & KGSL_DRAWOBJ_MARKER) { /* * See if we can fastpath this thing - if nothing is queued * and nothing is inflight retire without bothering the GPU */ - if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device, - cmdbatch->context, drawctxt->queued_timestamp)) { - trace_adreno_cmdbatch_queued(cmdbatch, + if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device, + drawobj->context, drawctxt->queued_timestamp)) { + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); - _retire_marker(cmdbatch); + _retire_marker(drawobj); spin_unlock(&drawctxt->lock); return 0; } @@ -1102,27 +1104,27 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, * comes along and forces the marker to execute) */ - cmdbatch->marker_timestamp = drawctxt->queued_timestamp; + drawobj->marker_timestamp = drawctxt->queued_timestamp; } /* SYNC commands have timestamp 0 and will get optimized out anyway */ - if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC)) + if (!(drawobj->flags & KGSL_CONTEXT_SYNC)) drawctxt->queued_timestamp = *timestamp; /* - * Set the fault tolerance policy for the command batch - assuming the + * Set the fault tolerance policy for the drawobj - assuming the * context hasn't disabled FT use the current device policy */ if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) - set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy); + set_bit(KGSL_FT_DISABLE, &drawobj->fault_policy); else - cmdbatch->fault_policy = adreno_dev->ft_policy; + drawobj->fault_policy = adreno_dev->ft_policy; /* Put the command into the queue */ - drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch; - drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) % - ADRENO_CONTEXT_CMDQUEUE_SIZE; + drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj; + drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) % + ADRENO_CONTEXT_DRAWQUEUE_SIZE; /* * If this is a real command then we need to force any markers queued @@ -1130,20 +1132,20 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, * the commands get NOPed. 
*/ - if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) { - unsigned int i = drawctxt->cmdqueue_head; + if (!(drawobj->flags & KGSL_DRAWOBJ_MARKER)) { + unsigned int i = drawctxt->drawqueue_head; - while (i != drawctxt->cmdqueue_tail) { - if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER) - set_bit(CMDBATCH_FLAG_SKIP, - &drawctxt->cmdqueue[i]->priv); + while (i != drawctxt->drawqueue_tail) { + if (drawctxt->drawqueue[i]->flags & KGSL_DRAWOBJ_MARKER) + set_bit(DRAWOBJ_FLAG_SKIP, + &drawctxt->drawqueue[i]->priv); - i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE); + i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE); } } drawctxt->queued++; - trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued); + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); _track_context(adreno_dev, dispatch_q, drawctxt); @@ -1163,7 +1165,7 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, * queue will try to schedule new commands anyway. */ - if (dispatch_q->inflight < _context_cmdbatch_burst) + if (dispatch_q->inflight < _context_drawobj_burst) adreno_dispatcher_issuecmds(adreno_dev); return 0; @@ -1208,15 +1210,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id) } /* - * If an IB inside of the command batch has a gpuaddr that matches the base + * If an IB inside of the drawobj has a gpuaddr that matches the base * passed in then zero the size which effectively skips it when it is submitted * in the ringbuffer. */ -static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base) +static void drawobj_skip_ib(struct kgsl_drawobj *drawobj, uint64_t base) { struct kgsl_memobj_node *ib; - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &drawobj->cmdlist, node) { if (ib->gpuaddr == base) { ib->priv |= MEMOBJ_SKIP; if (base) @@ -1225,10 +1227,10 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base) } } -static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmdbatch **replay, int count) +static void drawobj_skip_cmd(struct kgsl_drawobj *drawobj, + struct kgsl_drawobj **replay, int count) { - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int i; /* @@ -1243,9 +1245,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, * b) force preamble for next commandbatch */ for (i = 1; i < count; i++) { - if (replay[i]->context->id == cmdbatch->context->id) { + if (replay[i]->context->id == drawobj->context->id) { replay[i]->fault_policy = replay[0]->fault_policy; - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery); break; } @@ -1262,41 +1264,41 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, drawctxt->fault_policy = replay[0]->fault_policy; } - /* set the flags to skip this cmdbatch */ - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); - cmdbatch->fault_recovery = 0; + /* set the flags to skip this drawobj */ + set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv); + drawobj->fault_recovery = 0; } -static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmdbatch **replay, int count) +static void drawobj_skip_frame(struct kgsl_drawobj *drawobj, + struct kgsl_drawobj **replay, int count) { - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int skip = 1; 
int i; for (i = 0; i < count; i++) { /* - * Only operate on command batches that belong to the + * Only operate on drawobjs that belong to the * faulting context */ - if (replay[i]->context->id != cmdbatch->context->id) + if (replay[i]->context->id != drawobj->context->id) continue; /* - * Skip all the command batches in this context until + * Skip all the drawobjs in this context until * the EOF flag is seen. If the EOF flag is seen then * force the preamble for the next command. */ if (skip) { - set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv); + set_bit(DRAWOBJ_FLAG_SKIP, &replay[i]->priv); - if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME) + if (replay[i]->flags & KGSL_DRAWOBJ_END_OF_FRAME) skip = 0; } else { - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); return; } } @@ -1318,13 +1320,13 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch, set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv); } -static void remove_invalidated_cmdbatches(struct kgsl_device *device, - struct kgsl_cmdbatch **replay, int count) +static void remove_invalidated_drawobjs(struct kgsl_device *device, + struct kgsl_drawobj **replay, int count) { int i; for (i = 0; i < count; i++) { - struct kgsl_cmdbatch *cmd = replay[i]; + struct kgsl_drawobj *cmd = replay[i]; if (cmd == NULL) continue; @@ -1337,7 +1339,7 @@ static void remove_invalidated_cmdbatches(struct kgsl_device *device, &cmd->context->events, cmd->timestamp); mutex_unlock(&device->mutex); - kgsl_cmdbatch_destroy(cmd); + kgsl_drawobj_destroy(cmd); } } } @@ -1361,7 +1363,7 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context) static void adreno_fault_header(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch) + struct adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); unsigned int status, rptr, wptr, ib1sz, ib2sz; @@ -1377,22 +1379,22 @@ static void adreno_fault_header(struct kgsl_device *device, ADRENO_REG_CP_IB2_BASE_HI, &ib2base); adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz); - if (cmdbatch != NULL) { + if (drawobj != NULL) { struct adreno_context *drawctxt = - ADRENO_CONTEXT(cmdbatch->context); + ADRENO_CONTEXT(drawobj->context); - trace_adreno_gpu_fault(cmdbatch->context->id, - cmdbatch->timestamp, + trace_adreno_gpu_fault(drawobj->context->id, + drawobj->timestamp, status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz, drawctxt->rb->id); - pr_fault(device, cmdbatch, + pr_fault(device, drawobj, "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", - cmdbatch->context->id, cmdbatch->timestamp, status, + drawobj->context->id, drawobj->timestamp, status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz); if (rb != NULL) - pr_fault(device, cmdbatch, + pr_fault(device, drawobj, "gpu fault rb %d rb sw r/w %4.4x/%4.4x\n", rb->id, rptr, rb->wptr); } else { @@ -1411,33 +1413,33 @@ static void adreno_fault_header(struct kgsl_device *device, void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev, struct adreno_context *drawctxt, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) && kgsl_context_detached(&drawctxt->base)) { - pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context, - "gpu detached context %d\n", cmdbatch->context->id); + pr_context(KGSL_DEVICE(adreno_dev), drawobj->context, + "gpu detached context 
%d\n", drawobj->context->id); clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv); } } /** - * process_cmdbatch_fault() - Process a cmdbatch for fault policies - * @device: Device on which the cmdbatch caused a fault - * @replay: List of cmdbatches that are to be replayed on the device. The - * faulting cmdbatch is the first command in the replay list and the remaining - * cmdbatches in the list are commands that were submitted to the same queue + * process_drawobj_fault() - Process a drawobj for fault policies + * @device: Device on which the drawobj caused a fault + * @replay: List of drawobj's that are to be replayed on the device. The + * faulting drawobj is the first command in the replay list and the remaining + * drawobj's in the list are commands that were submitted to the same queue * as the faulting one. - * @count: Number of cmdbatches in replay + * @count: Number of drawobj's in replay * @base: The IB1 base at the time of fault * @fault: The fault type */ -static void process_cmdbatch_fault(struct kgsl_device *device, - struct kgsl_cmdbatch **replay, int count, +static void process_drawobj_fault(struct kgsl_device *device, + struct kgsl_drawobj **replay, int count, unsigned int base, int fault) { - struct kgsl_cmdbatch *cmdbatch = replay[0]; + struct kgsl_drawobj *drawobj = replay[0]; int i; char *state = "failed"; @@ -1451,18 +1453,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * where 1st and 4th gpu hang are more than 3 seconds apart we * won't disable GFT and invalidate the context. */ - if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) { - if (time_after(jiffies, (cmdbatch->context->fault_time + if (test_bit(KGSL_FT_THROTTLE, &drawobj->fault_policy)) { + if (time_after(jiffies, (drawobj->context->fault_time + msecs_to_jiffies(_fault_throttle_time)))) { - cmdbatch->context->fault_time = jiffies; - cmdbatch->context->fault_count = 1; + drawobj->context->fault_time = jiffies; + drawobj->context->fault_count = 1; } else { - cmdbatch->context->fault_count++; - if (cmdbatch->context->fault_count > + drawobj->context->fault_count++; + if (drawobj->context->fault_count > _fault_throttle_burst) { set_bit(KGSL_FT_DISABLE, - &cmdbatch->fault_policy); - pr_context(device, cmdbatch->context, + &drawobj->fault_policy); + pr_context(device, drawobj->context, "gpu fault threshold exceeded %d faults in %d msecs\n", _fault_throttle_burst, _fault_throttle_time); @@ -1471,45 +1473,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device, } /* - * If FT is disabled for this cmdbatch invalidate immediately + * If FT is disabled for this drawobj invalidate immediately */ - if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) || - test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) { + if (test_bit(KGSL_FT_DISABLE, &drawobj->fault_policy) || + test_bit(KGSL_FT_TEMP_DISABLE, &drawobj->fault_policy)) { state = "skipped"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); } /* If the context is detached do not run FT on context */ - if (kgsl_context_detached(cmdbatch->context)) { + if (kgsl_context_detached(drawobj->context)) { state = "detached"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); } /* - * Set a flag so we don't print another PM dump if the cmdbatch fails + * Set a flag so we don't print another PM dump if the drawobj fails * again on replay */ - set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy); + 
set_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy); /* * A hardware fault generally means something was deterministically - * wrong with the command batch - no point in trying to replay it + * wrong with the drawobj - no point in trying to replay it * Clear the replay bit and move on to the next policy level */ if (fault & ADRENO_HARD_FAULT) - clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy)); + clear_bit(KGSL_FT_REPLAY, &(drawobj->fault_policy)); /* * A timeout fault means the IB timed out - clear the policy and * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay - * because we won't see this cmdbatch again + * because we won't see this drawobj again */ if (fault & ADRENO_TIMEOUT_FAULT) - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG); /* * If the context had a GPU page fault then it is likely it would fault @@ -1517,83 +1519,83 @@ static void process_cmdbatch_fault(struct kgsl_device *device, */ if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, - &cmdbatch->context->priv)) { + &drawobj->context->priv)) { /* we'll need to resume the mmu later... */ - clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy); + clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy); clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, - &cmdbatch->context->priv); + &drawobj->context->priv); } /* - * Execute the fault tolerance policy. Each command batch stores the + * Execute the fault tolerance policy. Each drawobj stores the * current fault policy that was set when it was queued. * As the options are tried in descending priority * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared - * from the cmdbatch policy so the next thing can be tried if the + * from the drawobj policy so the next thing can be tried if the * change comes around again */ - /* Replay the hanging command batch again */ - if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY)); - set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery); + /* Replay the hanging drawobj again */ + if (test_and_clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_REPLAY)); + set_bit(KGSL_FT_REPLAY, &drawobj->fault_recovery); return; } /* * Skip the last IB1 that was played but replay everything else. - * Note that the last IB1 might not be in the "hung" command batch + * Note that the last IB1 might not be in the "hung" drawobj * because the CP may have caused a page-fault while it was prefetching * the next IB1/IB2. walk all outstanding commands and zap the * supposedly bad IB1 where ever it lurks. 
*/ - if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB)); - set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery); + if (test_and_clear_bit(KGSL_FT_SKIPIB, &drawobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPIB)); + set_bit(KGSL_FT_SKIPIB, &drawobj->fault_recovery); for (i = 0; i < count; i++) { if (replay[i] != NULL && - replay[i]->context->id == cmdbatch->context->id) - cmdbatch_skip_ib(replay[i], base); + replay[i]->context->id == drawobj->context->id) + drawobj_skip_ib(replay[i], base); } return; } - /* Skip the faulted command batch submission */ - if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD)); + /* Skip the faulted drawobj submission */ + if (test_and_clear_bit(KGSL_FT_SKIPCMD, &drawobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPCMD)); - /* Skip faulting command batch */ - cmdbatch_skip_cmd(cmdbatch, replay, count); + /* Skip faulting drawobj */ + drawobj_skip_cmd(drawobj, replay, count); return; } - if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, + if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPFRAME)); - set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery); + set_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_recovery); /* - * Skip all the pending command batches for this context until + * Skip all the pending drawobjs for this context until * the EOF frame is seen */ - cmdbatch_skip_frame(cmdbatch, replay, count); + drawobj_skip_frame(drawobj, replay, count); return; } /* If we get here then all the policies failed */ - pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n", - state, cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n", + state, drawobj->context->id, drawobj->timestamp); /* Mark the context as failed */ - mark_guilty_context(device, cmdbatch->context->id); + mark_guilty_context(device, drawobj->context->id); /* Invalidate the context */ - adreno_drawctxt_invalidate(device, cmdbatch->context); + adreno_drawctxt_invalidate(device, drawobj->context); } /** @@ -1605,12 +1607,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * @base: The IB1 base during the fault */ static void recover_dispatch_q(struct kgsl_device *device, - struct adreno_dispatcher_cmdqueue *dispatch_q, + struct adreno_dispatcher_drawqueue *dispatch_q, int fault, unsigned int base) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct kgsl_cmdbatch **replay = NULL; + struct kgsl_drawobj **replay = NULL; unsigned int ptr; int first = 0; int count = 0; @@ -1629,9 +1631,10 @@ static void recover_dispatch_q(struct kgsl_device *device, mark_guilty_context(device, context->id); adreno_drawctxt_invalidate(device, context); - kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]); + kgsl_drawobj_destroy(dispatch_q->cmd_q[ptr]); - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, + ADRENO_DISPATCH_DRAWQUEUE_SIZE); } /* @@ -1643,22 +1646,22 @@ static void recover_dispatch_q(struct kgsl_device *device, goto replay; } - /* Copy the inflight command batches into the temporary storage */ + /* Copy the inflight drawobjs into the temporary storage */ ptr = dispatch_q->head; while (ptr != dispatch_q->tail) { replay[count++] = 
dispatch_q->cmd_q[ptr]; - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE); } if (fault && count) - process_cmdbatch_fault(device, replay, + process_drawobj_fault(device, replay, count, base, fault); replay: dispatch_q->inflight = 0; dispatch_q->head = dispatch_q->tail = 0; - /* Remove any pending command batches that have been invalidated */ - remove_invalidated_cmdbatches(device, replay, count); + /* Remove any pending drawobjs that have been invalidated */ + remove_invalidated_drawobjs(device, replay, count); /* Replay the pending command buffers */ for (i = 0; i < count; i++) { @@ -1674,16 +1677,16 @@ replay: */ if (first == 0) { - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv); first = 1; } /* - * Force each command batch to wait for idle - this avoids weird + * Force each drawobj to wait for idle - this avoids weird * CP parse issues */ - set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv); + set_bit(DRAWOBJ_FLAG_WFI, &replay[i]->priv); ret = sendcmd(adreno_dev, replay[i]); @@ -1701,7 +1704,7 @@ replay: mark_guilty_context(device, replay[i]->context->id); adreno_drawctxt_invalidate(device, replay[i]->context); - remove_invalidated_cmdbatches(device, &replay[i], + remove_invalidated_drawobjs(device, &replay[i], count - i); } } @@ -1713,36 +1716,36 @@ replay: } static void do_header_and_snapshot(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch) + struct adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj) { - /* Always dump the snapshot on a non-cmdbatch failure */ - if (cmdbatch == NULL) { + /* Always dump the snapshot on a non-drawobj failure */ + if (drawobj == NULL) { adreno_fault_header(device, rb, NULL); kgsl_device_snapshot(device, NULL); return; } /* Skip everything if the PMDUMP flag is set */ - if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) + if (test_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy)) return; /* Print the fault header */ - adreno_fault_header(device, rb, cmdbatch); + adreno_fault_header(device, rb, drawobj); - if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) - kgsl_device_snapshot(device, cmdbatch->context); + if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) + kgsl_device_snapshot(device, drawobj->context); } static int dispatcher_do_fault(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp; + struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp; struct adreno_ringbuffer *rb; struct adreno_ringbuffer *hung_rb = NULL; unsigned int reg; uint64_t base; - struct kgsl_cmdbatch *cmdbatch = NULL; + struct kgsl_drawobj *drawobj = NULL; int ret, i; int fault; int halt; @@ -1792,10 +1795,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg); } /* - * retire cmdbatches from all the dispatch_q's before starting recovery + * retire drawobj's from all the dispatch_q's before starting recovery */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - adreno_dispatch_retire_cmdqueue(adreno_dev, + adreno_dispatch_retire_drawqueue(adreno_dev, &(rb->dispatch_q)); /* Select the active dispatch_q */ if (base == rb->buffer_desc.gpuaddr) { @@ -1814,15 +1817,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) } } - 
if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) { - cmdbatch = dispatch_q->cmd_q[dispatch_q->head]; - trace_adreno_cmdbatch_fault(cmdbatch, fault); + if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) { + drawobj = dispatch_q->cmd_q[dispatch_q->head]; + trace_adreno_cmdbatch_fault(drawobj, fault); } adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, ADRENO_REG_CP_IB1_BASE_HI, &base); - do_header_and_snapshot(device, hung_rb, cmdbatch); + do_header_and_snapshot(device, hung_rb, drawobj); /* Terminate the stalled transaction and resume the IOMMU */ if (fault & ADRENO_IOMMU_PAGE_FAULT) @@ -1876,22 +1879,22 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) return 1; } -static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch, +static inline int drawobj_consumed(struct kgsl_drawobj *drawobj, unsigned int consumed, unsigned int retired) { - return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) && - (timestamp_cmp(retired, cmdbatch->timestamp) < 0)); + return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) && + (timestamp_cmp(retired, drawobj->timestamp) < 0)); } static void _print_recovery(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { static struct { unsigned int mask; const char *str; } flags[] = { ADRENO_FT_TYPES }; - int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG); + int i, nr = find_first_bit(&drawobj->fault_recovery, BITS_PER_LONG); char *result = "unknown"; for (i = 0; i < ARRAY_SIZE(flags); i++) { @@ -1901,40 +1904,40 @@ static void _print_recovery(struct kgsl_device *device, } } - pr_context(device, cmdbatch->context, + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d policy %lX\n", - result, cmdbatch->context->id, cmdbatch->timestamp, - cmdbatch->fault_recovery); + result, drawobj->context->id, drawobj->timestamp, + drawobj->fault_recovery); } -static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire) +static void drawobj_profile_ticks(struct adreno_device *adreno_dev, + struct kgsl_drawobj *drawobj, uint64_t *start, uint64_t *retire) { - void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr; - struct adreno_cmdbatch_profile_entry *entry; + void *ptr = adreno_dev->profile_buffer.hostptr; + struct adreno_drawobj_profile_entry *entry; - entry = (struct adreno_cmdbatch_profile_entry *) - (ptr + (cmdbatch->profile_index * sizeof(*entry))); + entry = (struct adreno_drawobj_profile_entry *) + (ptr + (drawobj->profile_index * sizeof(*entry))); rmb(); *start = entry->started; *retire = entry->retired; } -static void retire_cmdbatch(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch) +static void retire_drawobj(struct adreno_device *adreno_dev, + struct kgsl_drawobj *drawobj) { struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); uint64_t start = 0, end = 0; - if (cmdbatch->fault_recovery != 0) { - set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv); - _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch); + if (drawobj->fault_recovery != 0) { + set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv); + _print_recovery(KGSL_DEVICE(adreno_dev), drawobj); } - if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) - cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end); + if (test_bit(DRAWOBJ_FLAG_PROFILE, 
&drawobj->priv)) + drawobj_profile_ticks(adreno_dev, drawobj, &start, &end); /* * For A3xx we still get the rptr from the CP_RB_RPTR instead of @@ -1942,48 +1945,48 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev, * So avoid reading GPU register directly for A3xx. */ if (adreno_is_a3xx(adreno_dev)) - trace_adreno_cmdbatch_retired(cmdbatch, + trace_adreno_cmdbatch_retired(drawobj, (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), 0); + ADRENO_DRAWOBJ_RB(drawobj), 0); else - trace_adreno_cmdbatch_retired(cmdbatch, + trace_adreno_cmdbatch_retired(drawobj, (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), + ADRENO_DRAWOBJ_RB(drawobj), adreno_get_rptr(drawctxt->rb)); drawctxt->submit_retire_ticks[drawctxt->ticks_index] = - end - cmdbatch->submit_ticks; + end - drawobj->submit_ticks; drawctxt->ticks_index = (drawctxt->ticks_index + 1) % SUBMIT_RETIRE_TICKS_SIZE; - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); } -static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; int count = 0; - while (!adreno_cmdqueue_is_empty(cmdqueue)) { - struct kgsl_cmdbatch *cmdbatch = - cmdqueue->cmd_q[cmdqueue->head]; + while (!adreno_drawqueue_is_empty(drawqueue)) { + struct kgsl_drawobj *drawobj = + drawqueue->cmd_q[drawqueue->head]; - if (!kgsl_check_timestamp(device, cmdbatch->context, - cmdbatch->timestamp)) + if (!kgsl_check_timestamp(device, drawobj->context, + drawobj->timestamp)) break; - retire_cmdbatch(adreno_dev, cmdbatch); + retire_drawobj(adreno_dev, drawobj); dispatcher->inflight--; - cmdqueue->inflight--; + drawqueue->inflight--; - cmdqueue->cmd_q[cmdqueue->head] = NULL; + drawqueue->cmd_q[drawqueue->head] = NULL; - cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head, - ADRENO_DISPATCH_CMDQUEUE_SIZE); + drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head, + ADRENO_DISPATCH_DRAWQUEUE_SIZE); count++; } @@ -1992,13 +1995,13 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, } static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head]; + struct kgsl_drawobj *drawobj = drawqueue->cmd_q[drawqueue->head]; /* Don't timeout if the timer hasn't expired yet (duh) */ - if (time_is_after_jiffies(cmdqueue->expires)) + if (time_is_after_jiffies(drawqueue->expires)) return; /* Don't timeout if the IB timeout is disabled globally */ @@ -2006,30 +2009,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, return; /* Don't time out if the context has disabled it */ - if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) + if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) return; - pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n", - cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n", + drawobj->context->id, drawobj->timestamp); adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT); } -static int adreno_dispatch_process_cmdqueue(struct adreno_device 
*adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { - int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue); + int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue); /* Nothing to do if there are no pending commands */ - if (adreno_cmdqueue_is_empty(cmdqueue)) + if (adreno_drawqueue_is_empty(drawqueue)) return count; - /* Don't update the cmdqueue timeout if we are about to preempt out */ + /* Don't update the drawqueue timeout if we are about to preempt out */ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) return count; - /* Don't update the cmdqueue timeout if it isn't active */ - if (!cmdqueue_is_current(cmdqueue)) + /* Don't update the drawqueue timeout if it isn't active */ + if (!drawqueue_is_current(drawqueue)) return count; /* @@ -2038,17 +2041,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, */ if (count) { - cmdqueue->expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + drawqueue->expires = jiffies + + msecs_to_jiffies(adreno_drawobj_timeout); return count; } /* * If we get here then 1) the ringbuffer is current and 2) we haven't * retired anything. Check to see if the timeout if valid for the - * current cmdbatch and fault if it has expired + * current drawobj and fault if it has expired */ - _adreno_dispatch_check_timeout(adreno_dev, cmdqueue); + _adreno_dispatch_check_timeout(adreno_dev, drawqueue); return 0; } @@ -2067,11 +2070,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev) /* Check to see if we need to update the command timer */ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) { - struct adreno_dispatcher_cmdqueue *cmdqueue = - CMDQUEUE(adreno_dev->cur_rb); + struct adreno_dispatcher_drawqueue *drawqueue = + DRAWQUEUE(adreno_dev->cur_rb); - if (!adreno_cmdqueue_is_empty(cmdqueue)) - mod_timer(&dispatcher->timer, cmdqueue->expires); + if (!adreno_drawqueue_is_empty(drawqueue)) + mod_timer(&dispatcher->timer, drawqueue->expires); } } @@ -2111,14 +2114,14 @@ static void adreno_dispatcher_work(struct work_struct *work) /* * As long as there are inflight commands, process retired comamnds from - * all cmdqueues + * all drawqueues */ for (i = 0; i < adreno_dev->num_ringbuffers; i++) { - struct adreno_dispatcher_cmdqueue *cmdqueue = - CMDQUEUE(&adreno_dev->ringbuffers[i]); + struct adreno_dispatcher_drawqueue *drawqueue = + DRAWQUEUE(&adreno_dev->ringbuffers[i]); - count += adreno_dispatch_process_cmdqueue(adreno_dev, - cmdqueue); + count += adreno_dispatch_process_drawqueue(adreno_dev, + drawqueue); if (dispatcher->inflight == 0) break; } @@ -2178,7 +2181,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device, } /* - * This is called on a regular basis while command batches are inflight. Fault + * This is called on a regular basis while drawobjs are inflight. Fault * detection registers are read and compared to the existing values - if they * changed then the GPU is still running. 
If they are the same between * subsequent calls then the GPU may have faulted @@ -2230,7 +2233,7 @@ static void adreno_dispatcher_timer(unsigned long data) */ void adreno_dispatcher_start(struct kgsl_device *device) { - complete_all(&device->cmdbatch_gate); + complete_all(&device->halt_gate); /* Schedule the work loop to get things going */ adreno_dispatcher_schedule(device); @@ -2267,13 +2270,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev) del_timer_sync(&dispatcher->fault_timer); FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - struct adreno_dispatcher_cmdqueue *dispatch_q = + struct adreno_dispatcher_drawqueue *dispatch_q = &(rb->dispatch_q); - while (!adreno_cmdqueue_is_empty(dispatch_q)) { - kgsl_cmdbatch_destroy( + while (!adreno_drawqueue_is_empty(dispatch_q)) { + kgsl_drawobj_destroy( dispatch_q->cmd_q[dispatch_q->head]); dispatch_q->head = (dispatch_q->head + 1) - % ADRENO_DISPATCH_CMDQUEUE_SIZE; + % ADRENO_DISPATCH_DRAWQUEUE_SIZE; } } @@ -2332,23 +2335,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher, *((unsigned int *) attr->value)); } -static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE, +static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_hi); static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644, - ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo); + ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo); /* * Our code that "puts back" a command from the context is much cleaner * if we are sure that there will always be enough room in the * ringbuffer so restrict the maximum size of the context queue to - * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1 + * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1 */ -static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644, - ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size); +static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644, + ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size); static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0, - _context_cmdbatch_burst); -static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, - adreno_cmdbatch_timeout); + _context_drawobj_burst); +static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0, + adreno_drawobj_timeout); static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait); static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0, _fault_timer_interval); @@ -2366,9 +2369,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0, static struct attribute *dispatcher_attrs[] = { &dispatcher_attr_inflight.attr, &dispatcher_attr_inflight_low_latency.attr, - &dispatcher_attr_context_cmdqueue_size.attr, + &dispatcher_attr_context_drawqueue_size.attr, &dispatcher_attr_context_burst_count.attr, - &dispatcher_attr_cmdbatch_timeout.attr, + &dispatcher_attr_drawobj_timeout.attr, &dispatcher_attr_context_queue_wait.attr, &dispatcher_attr_fault_detect_interval.attr, &dispatcher_attr_fault_throttle_time.attr, |
