| field | value | date |
|---|---|---|
| author | Linux Build Service Account <lnxbuild@localhost> | 2016-10-10 18:28:36 -0700 |
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2016-10-10 18:28:35 -0700 |
| commit | 6a9cd0c699936bda26c8154834b99e7a39e1be74 (patch) | |
| tree | e0193abd2c271256b689eaea8296b1177d18e832 | |
| parent | 664fd91a1737044ae7c2f09a91e82282a86ad7bb (diff) | |
| parent | 1f2662704f5bf1f4902b73d9fb48840a80f80ef5 (diff) | |
Merge "msm: kgsl: Modify dispatcher to accept generic objects"
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/msm/Makefile | 2 |
| -rw-r--r-- | drivers/gpu/msm/adreno.c | 21 |
| -rw-r--r-- | drivers/gpu/msm/adreno.h | 42 |
| -rw-r--r-- | drivers/gpu/msm/adreno_a5xx_preempt.c | 2 |
| -rw-r--r-- | drivers/gpu/msm/adreno_debugfs.c | 76 |
| -rw-r--r-- | drivers/gpu/msm/adreno_dispatch.c | 1244 |
| -rw-r--r-- | drivers/gpu/msm/adreno_dispatch.h | 38 |
| -rw-r--r-- | drivers/gpu/msm/adreno_drawctxt.c | 54 |
| -rw-r--r-- | drivers/gpu/msm/adreno_drawctxt.h | 27 |
| -rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.c | 178 |
| -rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.h | 6 |
| -rw-r--r-- | drivers/gpu/msm/adreno_trace.h | 64 |
| -rw-r--r-- | drivers/gpu/msm/kgsl.c | 268 |
| -rw-r--r-- | drivers/gpu/msm/kgsl.h | 19 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_cffdump.c | 4 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_cffdump.h | 6 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_cmdbatch.h | 168 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_compat.h | 8 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_device.h | 14 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c) | 642 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_drawobj.h | 198 |
| -rw-r--r-- | drivers/gpu/msm/kgsl_trace.h | 44 |
22 files changed, 1713 insertions, 1412 deletions
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
 msm_kgsl_core-y = \
     kgsl.o \
     kgsl_trace.o \
-    kgsl_cmdbatch.o \
+    kgsl_drawobj.o \
     kgsl_ioctl.o \
     kgsl_sharedmem.o \
     kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 2e2a0501c20a..e9d16426d4a5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -40,6 +40,7 @@
 /* Include the master list of GPU cores that are supported */
 #include "adreno-gpulist.h"
+#include "adreno_dispatch.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "adreno."
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
 {
     struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-    if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
-        kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+    if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+        kgsl_free_global(device, &adreno_dev->profile_buffer);
 
     /* Free local copies of firmware and other command streams */
     kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device)
     }
 
     /*
-     * Allocate a small chunk of memory for precise cmdbatch profiling for
+     * Allocate a small chunk of memory for precise drawobj profiling for
      * those targets that have the always on timer
      */
     if (!adreno_is_a3xx(adreno_dev)) {
         int r = kgsl_allocate_global(device,
-            &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+            &adreno_dev->profile_buffer, PAGE_SIZE,
             0, 0, "alwayson");
 
-        adreno_dev->cmdbatch_profile_index = 0;
+        adreno_dev->profile_index = 0;
 
         if (r == 0) {
-            set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+            set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
                 &adreno_dev->priv);
             kgsl_sharedmem_set(device,
-                &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+                &adreno_dev->profile_buffer, 0, 0,
                 PAGE_SIZE);
         }
@@ -2335,12 +2336,12 @@ int adreno_idle(struct kgsl_device *device)
  * adreno_drain() - Drain the dispatch queue
  * @device: Pointer to the KGSL device structure for the GPU
  *
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
  * additional commands from being issued until the gate is completed.
  */
 static int adreno_drain(struct kgsl_device *device)
 {
-    reinit_completion(&device->cmdbatch_gate);
+    reinit_completion(&device->halt_gate);
 
     return 0;
 }
@@ -2820,7 +2821,7 @@ static const struct kgsl_functable adreno_functable = {
     .getproperty_compat = adreno_getproperty_compat,
     .waittimestamp = adreno_waittimestamp,
     .readtimestamp = adreno_readtimestamp,
-    .issueibcmds = adreno_ringbuffer_issueibcmds,
+    .queue_cmds = adreno_dispatcher_queue_cmds,
     .ioctl = adreno_ioctl,
     .compat_ioctl = adreno_compat_ioctl,
     .power_stats = adreno_power_stats,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b2e1ce148b66..295a3d80d476 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@ KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
 
 /*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
  * be submitted
  */
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
     (&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
 
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
     ((ADRENO_CONTEXT(c->context))->rb)
 
 /* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
  * @halt: Atomic variable to check whether the GPU is currently halted
  * @ctx_d_debugfs: Context debugfs node
  * @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
  * buffer
  * @sp_local_gpuaddr: Base GPU virtual address for SP local memory
  * @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
     struct dentry *ctx_d_debugfs;
     unsigned long pwrctrl_flag;
 
-    struct kgsl_memdesc cmdbatch_profile_buffer;
-    unsigned int cmdbatch_profile_index;
+    struct kgsl_memdesc profile_buffer;
+    unsigned int profile_index;
     uint64_t sp_local_gpuaddr;
     uint64_t sp_pvt_gpuaddr;
     const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
  * @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
  * @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
  * send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
  * profiling via the ALWAYSON counter
  * @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
  * @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
     ADRENO_DEVICE_HANG_INTR = 4,
     ADRENO_DEVICE_STARTED = 5,
     ADRENO_DEVICE_FAULT = 6,
-    ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+    ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
     ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
     ADRENO_DEVICE_PREEMPTION = 9,
     ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
 };
 
 /**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
  * kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
  */
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
     uint64_t started;
     uint64_t retired;
 };
 
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
-    (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+    (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
 
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
-    ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
-    + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+    ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+    + offsetof(struct adreno_drawobj_profile_entry, _member))
 
 /**
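The renamed profiling buffer above is nothing more than fixed-size records packed into one page, indexed with the two macros. A standalone sketch of that indexing (plain userspace C; the hardcoded `PAGE_SIZE` and the heap page standing in for the GPU-shared memdesc are this sketch's assumptions, not driver code):

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096 /* assumption for the sketch */

struct adreno_drawobj_profile_entry {
    uint64_t started;
    uint64_t retired;
};

#define ADRENO_DRAWOBJ_PROFILE_COUNT \
    (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))

#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
    ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
    + offsetof(struct adreno_drawobj_profile_entry, _member))

int main(void)
{
    void *page = calloc(1, PAGE_SIZE); /* stand-in for profile_buffer */
    unsigned int index = 7;

    /* 16-byte entries, so 256 per page; profile_index wraps at COUNT */
    printf("%zu entries per page\n", ADRENO_DRAWOBJ_PROFILE_COUNT);
    printf("entry 7: started @ byte %zu, retired @ byte %zu\n",
        ADRENO_DRAWOBJ_PROFILE_OFFSET(index, started),
        ADRENO_DRAWOBJ_PROFILE_OFFSET(index, retired));
    free(page);
    return 0;
}
```

This is why `sendcmd()` later in the patch can advance `adreno_dev->profile_index` with a simple modulo by `ADRENO_DRAWOBJ_PROFILE_COUNT`.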
@@ -775,7 +775,7 @@ struct adreno_gpudev {
  * @KGSL_FT_REPLAY: Replay the faulting command
  * @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
  * @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
  * @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
  * @KGSL_FT_THROTTLE: Disable the context if it faults too often
  * @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -792,7 +792,7 @@ enum kgsl_ft_policy_bits {
     /* KGSL_FT_MAX_BITS is used to calculate the mask */
     KGSL_FT_MAX_BITS,
     /* Internal bits - set during GFT */
-    /* Skip the PM dump on replayed command batches */
+    /* Skip the PM dump on replayed command obj's */
     KGSL_FT_SKIP_PMDUMP = 31,
 };
@@ -881,7 +881,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
 void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
     struct adreno_context *drawctxt,
-    struct kgsl_cmdbatch *cmdbatch);
+    struct kgsl_drawobj *drawobj);
 
 int adreno_coresight_init(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
             rb->wptr);
 
         rb->dispatch_q.expires = jiffies +
-            msecs_to_jiffies(adreno_cmdbatch_timeout);
+            msecs_to_jiffies(adreno_drawobj_timeout);
     }
 
     spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..fffe08038bcd 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
 
 static void sync_event_print(struct seq_file *s,
-    struct kgsl_cmdbatch_sync_event *sync_event)
+    struct kgsl_drawobj_sync_event *sync_event)
 {
     switch (sync_event->type) {
     case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
     const char *str;
 };
 
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
 
-static const struct flag_entry cmdbatch_priv[] = {
-    { CMDBATCH_FLAG_SKIP, "skip"},
-    { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
-    { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry cmdobj_priv[] = {
+    { CMDOBJ_SKIP, "skip"},
+    { CMDOBJ_FORCE_PREAMBLE, "force_preamble"},
+    { CMDOBJ_WFI, "wait_for_idle" },
 };
 
 static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
         seq_puts(s, "None");
 }
 
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void syncobj_print(struct seq_file *s,
+    struct kgsl_drawobj_sync *syncobj)
 {
-    struct kgsl_cmdbatch_sync_event *event;
+    struct kgsl_drawobj_sync_event *event;
     unsigned int i;
 
-    /* print fences first, since they block this cmdbatch */
+    seq_puts(s, " syncobj ");
 
-    for (i = 0; i < cmdbatch->numsyncs; i++) {
-        event = &cmdbatch->synclist[i];
+    for (i = 0; i < syncobj->numsyncs; i++) {
+        event = &syncobj->synclist[i];
 
-        if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+        if (!kgsl_drawobj_event_pending(syncobj, i))
             continue;
 
-        /*
-         * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
-         * so that it is clear if the fence was a separate submit
-         * or part of an IB submit.
-         */
-        seq_printf(s, "\t%d ", cmdbatch->timestamp);
         sync_event_print(s, event);
         seq_puts(s, "\n");
     }
+}
 
-    /* if this flag is set, there won't be an IB */
-    if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
-        return;
+static void cmdobj_print(struct seq_file *s,
+    struct kgsl_drawobj_cmd *cmdobj)
+{
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
 
-    seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+    if (drawobj->type == CMDOBJ_TYPE)
+        seq_puts(s, " cmdobj ");
+    else
+        seq_puts(s, " markerobj ");
 
-    seq_puts(s, " flags: ");
-    print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
-        cmdbatch->flags);
+    seq_printf(s, "\t %d ", drawobj->timestamp);
 
     seq_puts(s, " priv: ");
-    print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
-        cmdbatch->priv);
+    print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv),
+        cmdobj->priv);
+}
+
+static void drawobj_print(struct seq_file *s,
+    struct kgsl_drawobj *drawobj)
+{
+    if (drawobj->type == SYNCOBJ_TYPE)
+        syncobj_print(s, SYNCOBJ(drawobj));
+    else if ((drawobj->type == CMDOBJ_TYPE) ||
+        (drawobj->type == MARKEROBJ_TYPE))
+        cmdobj_print(s, CMDOBJ(drawobj));
+
+    seq_puts(s, " flags: ");
+    print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+        drawobj->flags);
 
     seq_puts(s, "\n");
 }
@@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused)
         queued, consumed, retired,
         drawctxt->internal_timestamp);
 
-    seq_puts(s, "cmdqueue:\n");
+    seq_puts(s, "drawqueue:\n");
 
     spin_lock(&drawctxt->lock);
-    for (i = drawctxt->cmdqueue_head;
-        i != drawctxt->cmdqueue_tail;
-        i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
-        cmdbatch_print(s, drawctxt->cmdqueue[i]);
+    for (i = drawctxt->drawqueue_head;
+        i != drawctxt->drawqueue_tail;
+        i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+        drawobj_print(s, drawctxt->drawqueue[i]);
     spin_unlock(&drawctxt->lock);
 
     seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 522c32743d3d..cb4108b4e1f9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
 #include "adreno_trace.h"
 #include "kgsl_sharedmem.h"
 
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
 
 /* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
 unsigned int adreno_dispatch_starvation_time = 2000;
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
 unsigned int adreno_disp_preempt_fair_sched;
 
 /* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
 
 /* Number of milliseconds to wait for the context queue to clear */
 static unsigned int _context_queue_wait = 10000;
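`DRAWQUEUE_NEXT` is the usual ring-buffer advance: add one and wrap by modulo. A minimal sketch of the head/tail discipline the dispatcher builds on (the array size and payload type are arbitrary here; one slot is kept free so `head == tail` always means empty, an invariant the requeue path later in this patch relies on):

```c
#include <stdio.h>

#define QUEUE_SIZE 8 /* arbitrary for the sketch */
#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))

struct ring {
    int slots[QUEUE_SIZE];
    unsigned int head; /* next entry to consume */
    unsigned int tail; /* next free slot */
};

/* head == tail means empty; the slot just before head stays free */
static int ring_is_full(struct ring *r)
{
    return DRAWQUEUE_NEXT(r->tail, QUEUE_SIZE) == r->head;
}

static int ring_push(struct ring *r, int v)
{
    if (ring_is_full(r))
        return -1;
    r->slots[r->tail] = v;
    r->tail = DRAWQUEUE_NEXT(r->tail, QUEUE_SIZE);
    return 0;
}

static int ring_pop(struct ring *r, int *v)
{
    if (r->head == r->tail)
        return -1;
    *v = r->slots[r->head];
    r->head = DRAWQUEUE_NEXT(r->head, QUEUE_SIZE);
    return 0;
}

int main(void)
{
    struct ring r = { .head = 0, .tail = 0 };
    int v;

    for (int i = 0; i < 10; i++)
        if (ring_push(&r, i))
            printf("queue full at %d\n", i); /* full after 7 pushes */
    while (!ring_pop(&r, &v))
        printf("popped %d\n", v);
    return 0;
}
```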
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
 
 /*
  * GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
 static unsigned int _dispatcher_q_inflight_lo = 4;
 
 /* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
 
 /* Interval for reading and comparing fault detection registers */
 static unsigned int _fault_timer_interval = 200;
 
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
     ((struct adreno_ringbuffer *) \
-    container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+    container_of((_drawqueue),\
+    struct adreno_ringbuffer, dispatch_q))
 
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
 
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
-    struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+    struct adreno_dispatcher_drawqueue *drawqueue);
 
-static inline bool cmdqueue_is_current(
-    struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+    struct adreno_dispatcher_drawqueue *drawqueue)
 {
-    struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+    struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
     struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
 
     return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
     return time_after(jiffies, expires) ? 0 : 1;
 }
 
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+    void *data)
 {
     unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
 
@@ -122,7 +124,7 @@ static int __count_drawqueue_context(struct adreno_context *drawctxt,
         return 0;
 
     return (&drawctxt->rb->dispatch_q ==
-        (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+        (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
 }
 
 static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
 }
 
 static void _track_context(struct adreno_device *adreno_dev,
-    struct adreno_dispatcher_cmdqueue *cmdqueue,
+    struct adreno_dispatcher_drawqueue *drawqueue,
     struct adreno_context *drawctxt)
 {
     struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
     device->active_context_count =
         _adreno_count_active_contexts(adreno_dev,
             __count_context, NULL);
-    cmdqueue->active_context_count =
+    drawqueue->active_context_count =
         _adreno_count_active_contexts(adreno_dev,
-            __count_cmdqueue_context, cmdqueue);
+            __count_drawqueue_context, drawqueue);
 
     spin_unlock(&adreno_dev->active_list_lock);
 }
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
  */
 static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
 {
-    return (cmdqueue->active_context_count > 1)
+    return (drawqueue->active_context_count > 1)
         ? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
 }
@@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
 }
 
 /**
- * _retire_marker() - Retire a marker command batch without sending it to the
- * hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * _retire_timestamp() - Retire object without sending it
+ * to the hardware
+ * @drawobj: Pointer to the object to retire
  *
- * In some cases marker commands can be retired by the software without going to
- * the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * In some cases ibs can be retired by the software
+ * without going to the GPU. In those cases, update the
+ * memstore from the CPU, kick off the event engine to handle
+ * expired events and destroy the ib.
  */
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_timestamp(struct kgsl_drawobj *drawobj)
 {
-    struct kgsl_context *context = cmdbatch->context;
-    struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+    struct kgsl_context *context = drawobj->context;
+    struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
     struct kgsl_device *device = context->device;
-    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
     /*
      * Write the start and end timestamp to the memstore to keep the
@@ -292,11 +294,11 @@ static void _retire_timestamp(struct kgsl_drawobj *drawobj)
      */
     kgsl_sharedmem_writel(device, &device->memstore,
         KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
-        cmdbatch->timestamp);
+        drawobj->timestamp);
 
     kgsl_sharedmem_writel(device, &device->memstore,
         KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
-        cmdbatch->timestamp);
+        drawobj->timestamp);
 
     /* Retire pending GPU events for the object */
@@ -307,13 +309,13 @@ static void _retire_timestamp(struct kgsl_drawobj *drawobj)
      * rptr scratch out address. At this point GPU clocks turned off.
      * So avoid reading GPU register directly for A3xx.
      */
-    if (adreno_is_a3xx(adreno_dev))
-        trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
-            0);
+    if (adreno_is_a3xx(ADRENO_DEVICE(device)))
+        trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+            0, 0);
     else
-        trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
-            adreno_get_rptr(drawctxt->rb));
-    kgsl_cmdbatch_destroy(cmdbatch);
+        trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+            adreno_get_rptr(drawctxt->rb), 0);
+    kgsl_drawobj_destroy(drawobj);
 }
 
 static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
     if (kgsl_context_invalid(&drawctxt->base))
         ret = 1;
     else
-        ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+        ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
 
     spin_unlock(&drawctxt->lock);
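The watermark choice in `_drawqueue_inflight()` is simple but worth spelling out: a queue serving a single context may pipeline deeply (15 in flight), while a shared queue is capped low (4) so no context monopolizes the ringbuffer. A tiny model, with the constants mirroring `_dispatcher_q_inflight_hi`/`_lo` and the struct as a stand-in:

```c
#include <stdio.h>

static unsigned int inflight_hi = 15; /* single context: keep the GPU loaded */
static unsigned int inflight_lo = 4;  /* shared queue: keep latency fair */

struct drawqueue {
    int active_context_count;
    int inflight;
};

static unsigned int drawqueue_inflight_limit(const struct drawqueue *q)
{
    return (q->active_context_count > 1) ? inflight_lo : inflight_hi;
}

int main(void)
{
    struct drawqueue solo = { .active_context_count = 1, .inflight = 10 };
    struct drawqueue shared = { .active_context_count = 3, .inflight = 10 };

    /* the solo queue may still submit; the shared one is over its limit */
    printf("solo: %s\n", (unsigned int)solo.inflight <
        drawqueue_inflight_limit(&solo) ? "submit" : "throttle");
    printf("shared: %s\n", (unsigned int)shared.inflight <
        drawqueue_inflight_limit(&shared) ? "submit" : "throttle");
    return 0;
}
```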
@@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt)
 /*
  * return true if this is a marker command and the dependent timestamp has
  * retired
  */
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
-{
-    return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
-        kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
-            cmdbatch->marker_timestamp);
-}
-
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj)
 {
-    drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
-        ADRENO_CONTEXT_CMDQUEUE_SIZE);
-    drawctxt->queued--;
-}
-/**
- * Removes all expired marker and sync cmdbatches from
- * the context queue when marker command and dependent
- * timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
- */
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
-{
-    struct kgsl_cmdbatch *cmdbatch;
-
-    if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
-        return NULL;
-
-    cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
-    if (cmdbatch == NULL)
-        return NULL;
+    struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
 
-    /* Check to see if this is a marker we can skip over */
-    if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
-        _marker_expired(cmdbatch)) {
-        _pop_cmdbatch(drawctxt);
-        _retire_marker(cmdbatch);
-        return _expire_markers(drawctxt);
-    }
-
-    if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
-        if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
-            _pop_cmdbatch(drawctxt);
-            kgsl_cmdbatch_destroy(cmdbatch);
-            return _expire_markers(drawctxt);
-        }
-    }
-
-    return cmdbatch;
+    return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+        kgsl_check_timestamp(drawobj->device, drawobj->context,
+            markerobj->marker_timestamp);
 }
 
-static void expire_markers(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
 {
-    spin_lock(&drawctxt->lock);
-    _expire_markers(drawctxt);
-    spin_unlock(&drawctxt->lock);
+    drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+        ADRENO_CONTEXT_DRAWQUEUE_SIZE);
+    drawctxt->queued--;
 }
 
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
+    struct adreno_context *drawctxt)
 {
-    struct kgsl_cmdbatch *cmdbatch;
-    bool pending = false;
-
-    cmdbatch = _expire_markers(drawctxt);
-
-    if (cmdbatch == NULL)
-        return NULL;
+    if (_marker_expired(cmdobj)) {
+        _pop_drawobj(drawctxt);
+        _retire_timestamp(DRAWOBJ(cmdobj));
+        return 0;
+    }
 
     /*
-     * If the marker isn't expired but the SKIP bit is set
-     * then there are real commands following this one in
-     * the queue. This means that we need to dispatch the
-     * command so that we can keep the timestamp accounting
-     * correct. If skip isn't set then we block this queue
+     * If the marker isn't expired but the SKIP bit
+     * is set then there are real commands following
+     * this one in the queue. This means that we
+     * need to dispatch the command so that we can
+     * keep the timestamp accounting correct. If
+     * skip isn't set then we block this queue
      * until the dependent timestamp expires
      */
-    if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
-        (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
-        pending = true;
+    return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN;
+}
 
-    if (kgsl_cmdbatch_events_pending(cmdbatch))
-        pending = true;
+static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj,
+    struct adreno_context *drawctxt)
+{
+    if (!kgsl_drawobj_events_pending(syncobj)) {
+        _pop_drawobj(drawctxt);
+        kgsl_drawobj_destroy(DRAWOBJ(syncobj));
+        return 0;
+    }
 
     /*
-     * If changes are pending and the canary timer hasn't been
-     * started yet, start it
+     * If we got here, there are pending events for sync object.
+     * Start the canary timer if it hasnt been started already.
      */
-    if (pending) {
-        /*
-         * If syncpoints are pending start the canary timer if
-         * it hasn't already been started
-         */
-        if (!cmdbatch->timeout_jiffies) {
-            cmdbatch->timeout_jiffies =
-                jiffies + msecs_to_jiffies(5000);
-            mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
-        }
-
-        return ERR_PTR(-EAGAIN);
+    if (!syncobj->timeout_jiffies) {
+        syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
+        mod_timer(&syncobj->timer, syncobj->timeout_jiffies);
     }
 
-    _pop_cmdbatch(drawctxt);
-    return cmdbatch;
+    return -EAGAIN;
 }
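Taken together, the two retire helpers above form a small state machine over the context queue: finished markers and drained syncobjs are popped on the CPU, a skip-flagged marker must still go to the GPU as a NOP, and anything still pending parks the queue with -EAGAIN. A condensed userspace model of the scan loop that the new `_process_drawqueue_get_next_drawobj()` implements (all types here are invented stand-ins for the kgsl structs):

```c
#include <stdio.h>
#include <errno.h>

enum obj_type { CMDOBJ, MARKEROBJ, SYNCOBJ };

struct drawobj {
    enum obj_type type;
    int expired;  /* marker: dependent timestamp retired */
    int skip;     /* marker: real commands queued behind it */
    int pending;  /* syncobj: sync points still outstanding */
};

#define QSIZE 8
static struct drawobj *queue[QSIZE];
static unsigned int head, tail;

/* scan from head: pop finished markers/syncobjs, return the next real
 * command, or NULL with *err = -EAGAIN when the queue is blocked */
static struct drawobj *next_drawobj(int *err)
{
    *err = 0;
    while (head != tail) {
        struct drawobj *obj = queue[head];

        if (obj->type == CMDOBJ)
            return obj;                      /* real work: hand it out */
        if (obj->type == MARKEROBJ) {
            if (obj->expired) {              /* retire on the CPU */
                head = (head + 1) % QSIZE;
                continue;
            }
            if (obj->skip)                   /* must reach the GPU as a NOP */
                return obj;
            *err = -EAGAIN;                  /* block until ts expires */
            return NULL;
        }
        if (!obj->pending) {                 /* syncobj fully satisfied */
            head = (head + 1) % QSIZE;
            continue;
        }
        *err = -EAGAIN;
        return NULL;
    }
    return NULL;                             /* queue drained */
}

int main(void)
{
    struct drawobj m = { MARKEROBJ, .expired = 1 };
    struct drawobj c = { CMDOBJ };
    int err;

    queue[0] = &m; queue[1] = &c; head = 0; tail = 2;
    printf("got %s (err %d)\n",
        next_drawobj(&err) == &c ? "cmdobj" : "other", err);
    return 0;
}
```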
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
+/*
+ * Retires all expired marker and sync objs from the context
+ * queue and returns one of the below
+ * a) next drawobj that needs to be sent to ringbuffer
+ * b) -EAGAIN for syncobj with syncpoints pending.
+ * c) -EAGAIN for markerobj whose marker timestamp has not expired yet.
+ * c) NULL for no commands remaining in drawqueue.
  */
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
-    struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
+    struct adreno_context *drawctxt)
 {
-    struct kgsl_cmdbatch *cmdbatch;
+    struct kgsl_drawobj *drawobj;
+    unsigned int i = drawctxt->drawqueue_head;
+    int ret = 0;
 
-    spin_lock(&drawctxt->lock);
-    cmdbatch = _get_cmdbatch(drawctxt);
-    spin_unlock(&drawctxt->lock);
+    if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+        return NULL;
 
-    /*
-     * Delete the timer and wait for timer handler to finish executing
-     * on another core before queueing the buffer. We must do this
-     * without holding any spin lock that the timer handler might be using
-     */
-    if (!IS_ERR_OR_NULL(cmdbatch))
-        del_timer_sync(&cmdbatch->timer);
+    for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+        i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+        drawobj = drawctxt->drawqueue[i];
+
+        if (drawobj == NULL)
+            return NULL;
+
+        if (drawobj->type == CMDOBJ_TYPE)
+            return drawobj;
+        else if (drawobj->type == MARKEROBJ_TYPE) {
+            ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt);
+            /* Special case where marker needs to be sent to GPU */
+            if (ret == 1)
+                return drawobj;
+        } else if (drawobj->type == SYNCOBJ_TYPE)
+            ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+
+        if (ret == -EAGAIN)
+            return ERR_PTR(-EAGAIN);
+
+        continue;
+    }
 
-    return cmdbatch;
+    return NULL;
 }
 
 /**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context
  * queue
  * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @cmdobj: Pointer to the KGSL command object to requeue
  *
  * Failure to submit a command to the ringbuffer isn't the fault of the command
  * being submitted so if a failure happens, push it back on the head of the the
  * context queue to be reconsidered again unless the context got detached.
  */
-static inline int adreno_dispatcher_requeue_cmdbatch(
-    struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_cmdobj(
+    struct adreno_context *drawctxt,
+    struct kgsl_drawobj_cmd *cmdobj)
 {
     unsigned int prev;
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
 
     spin_lock(&drawctxt->lock);
 
     if (kgsl_context_detached(&drawctxt->base) ||
         kgsl_context_invalid(&drawctxt->base)) {
         spin_unlock(&drawctxt->lock);
-        /* get rid of this cmdbatch since the context is bad */
-        kgsl_cmdbatch_destroy(cmdbatch);
+        /* get rid of this drawobj since the context is bad */
+        kgsl_drawobj_destroy(drawobj);
         return -ENOENT;
     }
 
-    prev = drawctxt->cmdqueue_head == 0 ?
-        (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
-        (drawctxt->cmdqueue_head - 1);
+    prev = drawctxt->drawqueue_head == 0 ?
+        (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+        (drawctxt->drawqueue_head - 1);
 
     /*
      * The maximum queue size always needs to be one less then the size of
-     * the ringbuffer queue so there is "room" to put the cmdbatch back in
+     * the ringbuffer queue so there is "room" to put the drawobj back in
      */
-    BUG_ON(prev == drawctxt->cmdqueue_tail);
+    WARN_ON(prev == drawctxt->drawqueue_tail);
 
-    drawctxt->cmdqueue[prev] = cmdbatch;
+    drawctxt->drawqueue[prev] = drawobj;
     drawctxt->queued++;
 
     /* Reset the command queue head to reflect the newly requeued change */
-    drawctxt->cmdqueue_head = prev;
+    drawctxt->drawqueue_head = prev;
     spin_unlock(&drawctxt->lock);
     return 0;
 }
@@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
 }
 
 /**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
  * @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @drawobj: Pointer to the KGSL drawobj being sent
  *
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
  */
 static int sendcmd(struct adreno_device *adreno_dev,
-    struct kgsl_cmdbatch *cmdbatch)
+    struct kgsl_drawobj_cmd *cmdobj)
 {
     struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
     struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
     struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-    struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
-    struct adreno_dispatcher_cmdqueue *dispatch_q =
-        ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+    struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+    struct adreno_dispatcher_drawqueue *dispatch_q =
+        ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
     struct adreno_submit_time time;
     uint64_t secs = 0;
     unsigned long nsecs = 0;
@@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
         set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
     }
 
-    if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
-        set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
-        cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
-        adreno_dev->cmdbatch_profile_index =
-            (adreno_dev->cmdbatch_profile_index + 1) %
-            ADRENO_CMDBATCH_PROFILE_COUNT;
+    if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+        set_bit(CMDOBJ_PROFILE, &cmdobj->priv);
+        cmdobj->profile_index = adreno_dev->profile_index;
+        adreno_dev->profile_index =
+            (adreno_dev->profile_index + 1) %
+            ADRENO_DRAWOBJ_PROFILE_COUNT;
     }
 
-    ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+    ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
 
     /*
      * On the first command, if the submission was successful, then read the
@@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
     secs = time.ktime;
     nsecs = do_div(secs, 1000000000);
 
-    trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+    trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
         time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
         adreno_get_rptr(drawctxt->rb));
 
     mutex_unlock(&device->mutex);
 
-    cmdbatch->submit_ticks = time.ticks;
+    cmdobj->submit_ticks = time.ticks;
 
-    dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+    dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
     dispatch_q->tail = (dispatch_q->tail + 1) %
-        ADRENO_DISPATCH_CMDQUEUE_SIZE;
+        ADRENO_DISPATCH_DRAWQUEUE_SIZE;
 
     /*
      * For the first submission in any given command queue update the
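The requeue path above is the reason the queue keeps one slot free: a failed submission goes back into the slot it was just popped from, computed by wrapping `head` backwards, and the spare slot guarantees `prev` never collides with `tail` (now a `WARN_ON` rather than a `BUG_ON`). A sketch of that arithmetic, with an arbitrary size:

```c
#include <assert.h>
#include <stdio.h>

#define QSIZE 8
static int queue[QSIZE];
static unsigned int head, tail, queued;

static void requeue_at_head(int obj)
{
    unsigned int prev = (head == 0) ? (QSIZE - 1) : (head - 1);

    /* one slot is always kept free, so prev cannot collide with tail */
    assert(prev != tail);

    queue[prev] = obj;
    head = prev; /* the requeued object is consumed first next time */
    queued++;
}

int main(void)
{
    head = tail = 3;                                  /* empty, arbitrary spot */
    queue[tail] = 42; tail = (tail + 1) % QSIZE; queued = 1;   /* push */
    int obj = queue[head]; head = (head + 1) % QSIZE; queued--; /* pop */

    requeue_at_head(obj); /* submission failed: put it back */
    printf("head=%u tail=%u queued=%u front=%d\n",
        head, tail, queued, queue[head]);
    return 0;
}
```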
@@ -670,7 +648,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
 
     if (dispatch_q->inflight == 1)
         dispatch_q->expires = jiffies +
-            msecs_to_jiffies(adreno_cmdbatch_timeout);
+            msecs_to_jiffies(adreno_drawobj_timeout);
 
     /*
      * If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
      * thing and the timer will be set up in due time
      */
     if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
-        if (cmdqueue_is_current(dispatch_q))
+        if (drawqueue_is_current(dispatch_q))
             mod_timer(&dispatcher->timer, dispatch_q->expires);
     }
@@ -704,75 +682,70 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
     struct adreno_context *drawctxt)
 {
-    struct adreno_dispatcher_cmdqueue *dispatch_q =
+    struct adreno_dispatcher_drawqueue *dispatch_q =
         &(drawctxt->rb->dispatch_q);
     int count = 0;
     int ret = 0;
-    int inflight = _cmdqueue_inflight(dispatch_q);
+    int inflight = _drawqueue_inflight(dispatch_q);
     unsigned int timestamp;
 
     if (dispatch_q->inflight >= inflight) {
-        expire_markers(drawctxt);
+        spin_lock(&drawctxt->lock);
+        _process_drawqueue_get_next_drawobj(drawctxt);
+        spin_unlock(&drawctxt->lock);
         return -EBUSY;
     }
 
     /*
-     * Each context can send a specific number of command batches per cycle
+     * Each context can send a specific number of drawobjs per cycle
      */
-    while ((count < _context_cmdbatch_burst) &&
+    while ((count < _context_drawobj_burst) &&
         (dispatch_q->inflight < inflight)) {
-        struct kgsl_cmdbatch *cmdbatch;
+        struct kgsl_drawobj *drawobj;
+        struct kgsl_drawobj_cmd *cmdobj;
 
         if (adreno_gpu_fault(adreno_dev) != 0)
             break;
 
-        cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+        spin_lock(&drawctxt->lock);
+        drawobj = _process_drawqueue_get_next_drawobj(drawctxt);
 
         /*
-         * adreno_context_get_cmdbatch returns -EAGAIN if the current
-         * cmdbatch has pending sync points so no more to do here.
+         * adreno_context_get_drawobj returns -EAGAIN if the current
+         * drawobj has pending sync points so no more to do here.
          * When the sync points are satisfied then the context will get
         * reqeueued
         */
 
-        if (IS_ERR_OR_NULL(cmdbatch)) {
-            if (IS_ERR(cmdbatch))
-                ret = PTR_ERR(cmdbatch);
+        if (IS_ERR_OR_NULL(drawobj)) {
+            if (IS_ERR(drawobj))
+                ret = PTR_ERR(drawobj);
+            spin_unlock(&drawctxt->lock);
             break;
         }
+        _pop_drawobj(drawctxt);
+        spin_unlock(&drawctxt->lock);
 
-        /*
-         * If this is a synchronization submission then there are no
-         * commands to submit. Discard it and get the next item from
-         * the queue. Decrement count so this packet doesn't count
-         * against the burst for the context
-         */
-
-        if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
-            kgsl_cmdbatch_destroy(cmdbatch);
-            continue;
-        }
-
-        timestamp = cmdbatch->timestamp;
-
-        ret = sendcmd(adreno_dev, cmdbatch);
+        timestamp = drawobj->timestamp;
+        cmdobj = CMDOBJ(drawobj);
+        ret = sendcmd(adreno_dev, cmdobj);
 
         /*
-         * On error from sendcmd() try to requeue the command batch
+         * On error from sendcmd() try to requeue the cmdobj
         * unless we got back -ENOENT which means that the context has
         * been detached and there will be no more deliveries from here
         */
         if (ret != 0) {
-            /* Destroy the cmdbatch on -ENOENT */
+            /* Destroy the cmdobj on -ENOENT */
             if (ret == -ENOENT)
-                kgsl_cmdbatch_destroy(cmdbatch);
+                kgsl_drawobj_destroy(drawobj);
             else {
                 /*
                 * If the requeue returns an error, return that
                 * instead of whatever sendcmd() sent us
                 */
-                int r = adreno_dispatcher_requeue_cmdbatch(
-                    drawctxt, cmdbatch);
+                int r = adreno_dispatcher_requeue_cmdobj(
+                    drawctxt, cmdobj);
                 if (r)
                     ret = r;
             }
@@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
 /**
  * get_timestamp() - Return the next timestamp for the context
  * @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
  * @timestamp - Pointer to a timestamp value possibly passed from the user
+ * @user_ts - user generated timestamp
  *
  * Assign a timestamp based on the settings of the draw context and the command
  * batch.
  */
 static int get_timestamp(struct adreno_context *drawctxt,
-    struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+    struct kgsl_drawobj *drawobj, unsigned int *timestamp,
+    unsigned int user_ts)
 {
-    /* Synchronization commands don't get a timestamp */
-    if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
-        *timestamp = 0;
-        return 0;
-    }
 
     if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) {
         /*
         * User specified timestamps need to be greater than the last
         * issued timestamp in the context
         */
-        if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+        if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0)
             return -ERANGE;
 
-        drawctxt->timestamp = *timestamp;
+        drawctxt->timestamp = user_ts;
     } else
         drawctxt->timestamp++;
 
     *timestamp = drawctxt->timestamp;
+    drawobj->timestamp = *timestamp;
     return 0;
 }
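`get_timestamp()` only accepts a user-generated timestamp that is strictly newer than the context's last issued one, where "newer" must tolerate 32-bit rollover. A sketch in the spirit of the driver's `timestamp_cmp()` — the half-range window constant below is this sketch's assumption, not quoted from the kgsl source:

```c
#include <stdio.h>

#define TS_WINDOW 0x80000000u /* assumption: half the counter space */

/* > 0 if a is newer than b, 0 if equal, < 0 if older (rollover-safe) */
static int timestamp_cmp(unsigned int a, unsigned int b)
{
    if (a == b)
        return 0;
    return ((a - b) < TS_WINDOW) ? 1 : -1;
}

/* model of the get_timestamp() rule: reject user timestamps that are
 * not strictly newer than the last issued one (-ERANGE in the driver) */
static int accept_user_ts(unsigned int *ctx_ts, unsigned int user_ts)
{
    if (timestamp_cmp(*ctx_ts, user_ts) >= 0)
        return -1;
    *ctx_ts = user_ts;
    return 0;
}

int main(void)
{
    unsigned int ctx = 0xfffffffe;

    printf("%d\n", accept_user_ts(&ctx, 0xffffffff)); /* ok: newer */
    printf("%d\n", accept_user_ts(&ctx, 5));          /* ok: rolled over */
    printf("%d\n", accept_user_ts(&ctx, 4));          /* rejected: older */
    return 0;
}
```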
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
-    struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
-    uint32_t *timestamp)
+static void _set_ft_policy(struct adreno_device *adreno_dev,
+    struct adreno_context *drawctxt,
+    struct kgsl_drawobj_cmd *cmdobj)
 {
-    struct adreno_dispatcher_cmdqueue *dispatch_q =
-        ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
-    int ret;
-
-    spin_lock(&drawctxt->lock);
-
-    if (kgsl_context_detached(&drawctxt->base)) {
-        spin_unlock(&drawctxt->lock);
-        return -ENOENT;
-    }
+    /*
+     * Set the fault tolerance policy for the command batch - assuming the
+     * context hasn't disabled FT use the current device policy
+     */
+    if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+        set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+    else
+        cmdobj->fault_policy = adreno_dev->ft_policy;
+}
 
+static void _cmdobj_set_flags(struct adreno_context *drawctxt,
+    struct kgsl_drawobj_cmd *cmdobj)
+{
     /*
      * Force the preamble for this submission only - this is usually
      * requested by the dispatcher as part of fault recovery
      */
-    if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv))
-        set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+    if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
+        &drawctxt->base.priv))
+        set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
 
     /*
-     * Force the premable if set from userspace in the context or cmdbatch
-     * flags
+     * Force the premable if set from userspace in the context or
+     * command obj flags
      */
-    if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
-        (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
-        set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+    if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
+        (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH))
+        set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
 
-    /* Skip this cmdbatch commands if IFH_NOP is enabled */
+    /* Skip this ib if IFH_NOP is enabled */
     if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
-        set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+        set_bit(CMDOBJ_SKIP, &cmdobj->priv);
 
     /*
      * If we are waiting for the end of frame and it hasn't appeared yet,
-     * then mark the command batch as skipped. It will still progress
+     * then mark the command obj as skipped. It will still progress
      * through the pipeline but it won't actually send any commands
      */
     if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
-        set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+        set_bit(CMDOBJ_SKIP, &cmdobj->priv);
 
         /*
-         * If this command batch represents the EOF then clear the way
+         * If this command obj represents the EOF then clear the way
          * for the dispatcher to continue submitting
          */
-        if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+        if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) {
             clear_bit(ADRENO_CONTEXT_SKIP_EOF,
                 &drawctxt->base.priv);
@@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
                 &drawctxt->base.priv);
         }
     }
+}
 
-    /* Wait for room in the context queue */
+static inline int _check_context_state(struct kgsl_context *context)
+{
+    if (kgsl_context_invalid(context))
+        return -EDEADLK;
+
+    if (kgsl_context_detached(context))
+        return -ENOENT;
+
+    return 0;
+}
+
+static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
+    struct kgsl_context *context, struct kgsl_memobj_node *ib)
+{
+    struct kgsl_device *device = dev_priv->device;
+    struct kgsl_process_private *private = dev_priv->process_priv;
+
+    /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
+    if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
+        pr_context(device, context, "ctxt %d invalid ib size %lld\n",
+            context->id, ib->size);
+        return false;
+    }
+
+    /* Make sure that the address is mapped */
+    if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+        pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
+            context->id, ib->gpuaddr);
+        return false;
+    }
+
+    return true;
+}
+
+static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
+    struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+    uint32_t count)
+{
+    struct kgsl_device *device = dev_priv->device;
+    struct kgsl_memobj_node *ib;
+    unsigned int i;
+
+    for (i = 0; i < count; i++) {
+        /* Verify the IBs before they get queued */
+        if (drawobj[i]->type == CMDOBJ_TYPE) {
+            struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);
+
+            list_for_each_entry(ib, &cmdobj->cmdlist, node)
+                if (_verify_ib(dev_priv,
+                    &ADRENO_CONTEXT(context)->base, ib)
+                    == false)
+                    return -EINVAL;
+            /*
+             * Clear the wake on touch bit to indicate an IB has
+             * been submitted since the last time we set it.
+             * But only clear it when we have rendering commands.
+             */
+            device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+        }
+
+        /* A3XX does not have support for drawobj profiling */
+        if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
+            (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
+            return -EOPNOTSUPP;
+    }
 
-    while (drawctxt->queued >= _context_cmdqueue_size) {
+    return 0;
+}
+
+static inline int _wait_for_room_in_context_queue(
+    struct adreno_context *drawctxt)
+{
+    int ret = 0;
+
+    /* Wait for room in the context queue */
+    while (drawctxt->queued >= _context_drawqueue_size) {
         trace_adreno_drawctxt_sleep(drawctxt);
         spin_unlock(&drawctxt->lock);
@@ -1052,98 +1087,210 @@ static inline int _wait_for_room_in_context_queue(
         spin_lock(&drawctxt->lock);
         trace_adreno_drawctxt_wake(drawctxt);
 
-        if (ret <= 0) {
-            spin_unlock(&drawctxt->lock);
+        if (ret <= 0)
             return (ret == 0) ? -ETIMEDOUT : (int) ret;
-        }
     }
+
+    return 0;
+}
+
+static unsigned int _check_context_state_to_queue_cmds(
+    struct adreno_context *drawctxt)
+{
+    int ret = _check_context_state(&drawctxt->base);
+
+    if (ret)
+        return ret;
+
+    ret = _wait_for_room_in_context_queue(drawctxt);
+    if (ret)
+        return ret;
+
     /*
      * Account for the possiblity that the context got invalidated
      * while we were sleeping
      */
+    return _check_context_state(&drawctxt->base);
+}
 
-    if (kgsl_context_invalid(&drawctxt->base)) {
-        spin_unlock(&drawctxt->lock);
-        return -EDEADLK;
-    }
-    if (kgsl_context_detached(&drawctxt->base)) {
-        spin_unlock(&drawctxt->lock);
-        return -ENOENT;
-    }
+static void _queue_drawobj(struct adreno_context *drawctxt,
+    struct kgsl_drawobj *drawobj)
+{
+    /* Put the command into the queue */
+    drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+    drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+        ADRENO_CONTEXT_DRAWQUEUE_SIZE;
+    drawctxt->queued++;
+    trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+}
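The `_verify_ib()` checks above gate every command IB at submission time: a zero-size IB or one over 0xFFFFF dwords cannot be expressed to the CP, and the address must be mapped in the process pagetable. A standalone model of the same checks (the address-range test stands in for `kgsl_mmu_gpuaddr_in_range()`, and the range bounds are invented for the sketch):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct memobj {
    uint64_t gpuaddr;
    uint64_t size; /* bytes */
};

/* sketch's stand-in for the process pagetable range */
static const uint64_t va_start = 0x10000000ull, va_end = 0x100000000ull;

static bool verify_ib(const struct memobj *ib)
{
    /* empty IBs, or IBs over 0xFFFFF dwords, can't be encoded for the CP */
    if (ib->size == 0 || (ib->size >> 2) > 0xFFFFF)
        return false;

    /* the IB must lie inside the mapped GPU address range */
    if (ib->gpuaddr < va_start || ib->gpuaddr >= va_end)
        return false;

    return true;
}

int main(void)
{
    struct memobj ok = { 0x20000000ull, 4096 };
    struct memobj too_big = { 0x20000000ull, (0x100000ull << 2) };

    printf("%d %d\n", verify_ib(&ok), verify_ib(&too_big)); /* 1 0 */
    return 0;
}
```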
-    ret = get_timestamp(drawctxt, cmdbatch, timestamp);
-    if (ret) {
-        spin_unlock(&drawctxt->lock);
+static int _queue_markerobj(struct adreno_device *adreno_dev,
+    struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
+    uint32_t *timestamp, unsigned int user_ts)
+{
+    struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
+    int ret;
+
+    ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+    if (ret)
         return ret;
+
+    /*
+     * See if we can fastpath this thing - if nothing is queued
+     * and nothing is inflight retire without bothering the GPU
+     */
+    if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+        drawobj->context, drawctxt->queued_timestamp)) {
+        trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+        _retire_timestamp(drawobj);
+        return 1;
     }
 
-    cmdbatch->timestamp = *timestamp;
+    /*
+     * Remember the last queued timestamp - the marker will block
+     * until that timestamp is expired (unless another command
+     * comes along and forces the marker to execute)
+     */
 
-    if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+    markerobj->marker_timestamp = drawctxt->queued_timestamp;
+    drawctxt->queued_timestamp = *timestamp;
+    _set_ft_policy(adreno_dev, drawctxt, markerobj);
+    _cmdobj_set_flags(drawctxt, markerobj);
 
-        /*
-         * See if we can fastpath this thing - if nothing is queued
-         * and nothing is inflight retire without bothering the GPU
-         */
+    _queue_drawobj(drawctxt, drawobj);
 
-        if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
-            cmdbatch->context, drawctxt->queued_timestamp)) {
-            trace_adreno_cmdbatch_queued(cmdbatch,
-                drawctxt->queued);
+    return 0;
+}
 
-            _retire_marker(cmdbatch);
-            spin_unlock(&drawctxt->lock);
-            return 0;
-        }
+static int _queue_cmdobj(struct adreno_device *adreno_dev,
+    struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj,
+    uint32_t *timestamp, unsigned int user_ts)
+{
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+    unsigned int j;
+    int ret;
 
-        /*
-         * Remember the last queued timestamp - the marker will block
-         * until that timestamp is expired (unless another command
-         * comes along and forces the marker to execute)
-         */
+    ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+    if (ret)
+        return ret;
+
+    /*
+     * If this is a real command then we need to force any markers
+     * queued before it to dispatch to keep time linear - set the
+     * skip bit so the commands get NOPed.
+     */
+    j = drawctxt->drawqueue_head;
+
+    while (j != drawctxt->drawqueue_tail) {
+        if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) {
+            struct kgsl_drawobj_cmd *markerobj =
+                CMDOBJ(drawctxt->drawqueue[j]);
+            set_bit(CMDOBJ_SKIP, &markerobj->priv);
+        }
 
-        cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+        j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
     }
 
-    /* SYNC commands have timestamp 0 and will get optimized out anyway */
-    if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
-        drawctxt->queued_timestamp = *timestamp;
+    drawctxt->queued_timestamp = *timestamp;
+    _set_ft_policy(adreno_dev, drawctxt, cmdobj);
+    _cmdobj_set_flags(drawctxt, cmdobj);
 
-    /*
-     * Set the fault tolerance policy for the command batch - assuming the
-     * context hasn't disabled FT use the current device policy
-     */
+    _queue_drawobj(drawctxt, drawobj);
 
-    if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
-        set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
-    else
-        cmdbatch->fault_policy = adreno_dev->ft_policy;
+    return 0;
+}
 
-    /* Put the command into the queue */
-    drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
-    drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
-        ADRENO_CONTEXT_CMDQUEUE_SIZE;
+static void _queue_syncobj(struct adreno_context *drawctxt,
+    struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp)
+{
+    struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
 
-    /*
-     * If this is a real command then we need to force any markers queued
-     * before it to dispatch to keep time linear - set the skip bit so
-     * the commands get NOPed.
-     */
+    *timestamp = 0;
+    drawobj->timestamp = 0;
 
-    if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
-        unsigned int i = drawctxt->cmdqueue_head;
+    _queue_drawobj(drawctxt, drawobj);
+}
 
-        while (i != drawctxt->cmdqueue_tail) {
-            if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
-                set_bit(CMDBATCH_FLAG_SKIP,
-                    &drawctxt->cmdqueue[i]->priv);
+/**
+ * adreno_dispactcher_queue_drawobj() - Queue a new draw object in the context
+ * @dev_priv: Pointer to the device private struct
+ * @context: Pointer to the kgsl draw context
+ * @drawobj: Pointer to the array of drawobj's being submitted
+ * @count: Number of drawobj's being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+    struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+    uint32_t count, uint32_t *timestamp)
 
-            i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+{
+    struct kgsl_device *device = dev_priv->device;
+    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+    struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+    struct adreno_dispatcher_drawqueue *dispatch_q;
+    int ret;
+    unsigned int i, user_ts;
+
+    ret = _check_context_state(&drawctxt->base);
+    if (ret)
+        return ret;
+
+    ret = _verify_cmdobj(dev_priv, context, drawobj, count);
+    if (ret)
+        return ret;
+
+    /* wait for the suspend gate */
+    wait_for_completion(&device->halt_gate);
+
+    spin_lock(&drawctxt->lock);
+
+    ret = _check_context_state_to_queue_cmds(drawctxt);
+    if (ret) {
+        spin_unlock(&drawctxt->lock);
+        return ret;
+    }
+
+    user_ts = *timestamp;
+
+    for (i = 0; i < count; i++) {
+
+        switch (drawobj[i]->type) {
+        case MARKEROBJ_TYPE:
+            ret = _queue_markerobj(adreno_dev, drawctxt,
+                CMDOBJ(drawobj[i]),
+                timestamp, user_ts);
+            if (ret == 1) {
+                spin_unlock(&drawctxt->lock);
+                goto done;
+            } else if (ret) {
+                spin_unlock(&drawctxt->lock);
+                return ret;
+            }
+            break;
+        case CMDOBJ_TYPE:
+            ret = _queue_cmdobj(adreno_dev, drawctxt,
+                CMDOBJ(drawobj[i]),
+                timestamp, user_ts);
+            if (ret) {
+                spin_unlock(&drawctxt->lock);
+                return ret;
+            }
+            break;
+        case SYNCOBJ_TYPE:
+            _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
+                timestamp);
+            break;
+        default:
+            spin_unlock(&drawctxt->lock);
+            return -EINVAL;
         }
+    }
 
-        drawctxt->queued++;
-        trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+    dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]);
 
     _track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
      * queue will try to schedule new commands anyway.
      */
 
-    if (dispatch_q->inflight < _context_cmdbatch_burst)
+    if (dispatch_q->inflight < _context_drawobj_burst)
         adreno_dispatcher_issuecmds(adreno_dev);
+done:
+    if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
+        return -EPROTO;
 
     return 0;
 }
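The marker-flush rule inside `_queue_cmdobj()` above is the key to keeping timestamps linear: once a real command arrives, every marker already waiting must be forced through the GPU as a NOP rather than left blocking. A standalone model of that queue walk (stand-in types only):

```c
#include <stdio.h>

#define QSIZE 8
enum obj_type { CMDOBJ, MARKEROBJ, SYNCOBJ };

struct drawobj { enum obj_type type; int skip; };

static struct drawobj *queue[QSIZE];
static unsigned int head, tail;

/* walk head..tail and set the skip bit on every queued marker, the
 * equivalent of set_bit(CMDOBJ_SKIP, ...) in the patch */
static void force_pending_markers_to_skip(void)
{
    for (unsigned int j = head; j != tail; j = (j + 1) % QSIZE)
        if (queue[j]->type == MARKEROBJ)
            queue[j]->skip = 1; /* dispatch as NOP, just retire the ts */
}

int main(void)
{
    struct drawobj m1 = { MARKEROBJ, 0 }, s = { SYNCOBJ, 0 },
        m2 = { MARKEROBJ, 0 };

    queue[0] = &m1; queue[1] = &s; queue[2] = &m2;
    head = 0; tail = 3;

    force_pending_markers_to_skip(); /* a CMDOBJ is about to be queued */
    printf("m1.skip=%d s.skip=%d m2.skip=%d\n", m1.skip, s.skip, m2.skip);
    return 0;
}
```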
@@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
 }
 
 /*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
  * passed in then zero the size which effectively skips it when it is submitted
  * in the ringbuffer.
  */
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
 {
     struct kgsl_memobj_node *ib;
 
-    list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+    list_for_each_entry(ib, &cmdobj->cmdlist, node) {
         if (ib->gpuaddr == base) {
             ib->priv |= MEMOBJ_SKIP;
             if (base)
@@ -1225,10 +1375,11 @@ static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
     }
 }
 
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
-    struct kgsl_cmdbatch **replay, int count)
+static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
+    struct kgsl_drawobj_cmd **replay, int count)
 {
-    struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+    struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
     int i;
 
     /*
@@ -1243,9 +1394,9 @@ static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
      * b) force preamble for next commandbatch
      */
     for (i = 1; i < count; i++) {
-        if (replay[i]->context->id == cmdbatch->context->id) {
+        if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) {
             replay[i]->fault_policy = replay[0]->fault_policy;
-            set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+            set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
             set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
             break;
         }
@@ -1262,41 +1413,44 @@ static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
         drawctxt->fault_policy = replay[0]->fault_policy;
     }
 
-    /* set the flags to skip this cmdbatch */
-    set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
-    cmdbatch->fault_recovery = 0;
+    /* set the flags to skip this cmdobj */
+    set_bit(CMDOBJ_SKIP, &cmdobj->priv);
+    cmdobj->fault_recovery = 0;
 }
 
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
-    struct kgsl_cmdbatch **replay, int count)
+static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
+    struct kgsl_drawobj_cmd **replay, int count)
 {
-    struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+    struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
     int skip = 1;
     int i;
 
     for (i = 0; i < count; i++) {
+        struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]);
+
         /*
-         * Only operate on command batches that belong to the
+         * Only operate on drawobj's that belong to the
          * faulting context
          */
-        if (replay[i]->context->id != cmdbatch->context->id)
+        if (replay_obj->context->id != drawobj->context->id)
             continue;
 
         /*
-         * Skip all the command batches in this context until
+         * Skip all the drawobjs in this context until
          * the EOF flag is seen. If the EOF flag is seen then
         * force the preamble for the next command.
         */
         if (skip) {
-            set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+            set_bit(CMDOBJ_SKIP, &replay[i]->priv);
 
-            if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+            if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
                 skip = 0;
         } else {
-            set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+            set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
             return;
         }
     }
@@ -1318,26 +1472,28 @@ static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
     set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
 }
 
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
-    struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_cmdobjs(struct kgsl_device *device,
+    struct kgsl_drawobj_cmd **replay, int count)
 {
     int i;
 
     for (i = 0; i < count; i++) {
-        struct kgsl_cmdbatch *cmd = replay[i];
-        if (cmd == NULL)
+        struct kgsl_drawobj_cmd *cmdobj = replay[i];
+        struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+        if (cmdobj == NULL)
             continue;
 
-        if (kgsl_context_detached(cmd->context) ||
-            kgsl_context_invalid(cmd->context)) {
+        if (kgsl_context_detached(drawobj->context) ||
+            kgsl_context_invalid(drawobj->context)) {
             replay[i] = NULL;
 
             mutex_lock(&device->mutex);
             kgsl_cancel_events_timestamp(device,
-                &cmd->context->events, cmd->timestamp);
+                &drawobj->context->events, drawobj->timestamp);
             mutex_unlock(&device->mutex);
 
-            kgsl_cmdbatch_destroy(cmd);
+            kgsl_drawobj_destroy(drawobj);
         }
     }
 }
@@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
 
 static void adreno_fault_header(struct kgsl_device *device,
-    struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+    struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
 {
     struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+    struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
     unsigned int status, rptr, wptr, ib1sz, ib2sz;
     uint64_t ib1base, ib2base;
 
@@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device,
         ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
     adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
 
-    if (cmdbatch != NULL) {
+    if (drawobj != NULL) {
         struct adreno_context *drawctxt =
-            ADRENO_CONTEXT(cmdbatch->context);
+            ADRENO_CONTEXT(drawobj->context);
 
-        trace_adreno_gpu_fault(cmdbatch->context->id,
-            cmdbatch->timestamp,
+        trace_adreno_gpu_fault(drawobj->context->id,
+            drawobj->timestamp,
             status, rptr, wptr, ib1base, ib1sz,
             ib2base, ib2sz, drawctxt->rb->id);
 
-        pr_fault(device, cmdbatch,
+        pr_fault(device, drawobj,
             "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-            cmdbatch->context->id, cmdbatch->timestamp, status,
+            drawobj->context->id, drawobj->timestamp, status,
             rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
 
         if (rb != NULL)
-            pr_fault(device, cmdbatch,
+            pr_fault(device, drawobj,
                 "gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
                 rb->id, rptr, rb->wptr);
     } else {
@@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device,
 
 void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
     struct adreno_context *drawctxt,
-    struct kgsl_cmdbatch *cmdbatch)
+    struct kgsl_drawobj *drawobj)
 {
     if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
         kgsl_context_detached(&drawctxt->base)) {
-        pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
-            "gpu detached context %d\n", cmdbatch->context->id);
+        pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+            "gpu detached context %d\n", drawobj->context->id);
         clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
     }
 }
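`_skip_ib()` above neutralizes a faulting IB without disturbing the rest of the replay set: any entry whose gpuaddr matches the faulting IB1 base gets its size zeroed, so the CP has nothing to execute for it. A standalone model of that list walk (a plain singly linked list stands in for the kernel `list_head` cmdlist):

```c
#include <stdint.h>
#include <stdio.h>

struct memobj {
    uint64_t gpuaddr;
    uint64_t size;
    int skip;              /* MEMOBJ_SKIP in the driver */
    struct memobj *next;
};

static void skip_ib(struct memobj *cmdlist, uint64_t base)
{
    for (struct memobj *ib = cmdlist; ib; ib = ib->next) {
        if (ib->gpuaddr == base) {
            ib->skip = 1;
            if (base)
                ib->size = 0; /* zero size: nothing for the CP to run */
        }
    }
}

int main(void)
{
    struct memobj b = { 0x2000, 256, 0, NULL };
    struct memobj a = { 0x1000, 512, 0, &b };

    skip_ib(&a, 0x2000); /* pretend IB1 base 0x2000 faulted */
    printf("a: size=%llu skip=%d, b: size=%llu skip=%d\n",
        (unsigned long long)a.size, a.skip,
        (unsigned long long)b.size, b.skip);
    return 0;
}
```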
process_cmdbatch_fault() - Process a cmdbatch for fault policies - * @device: Device on which the cmdbatch caused a fault - * @replay: List of cmdbatches that are to be replayed on the device. The - * faulting cmdbatch is the first command in the replay list and the remaining - * cmdbatches in the list are commands that were submitted to the same queue + * process_cmdobj_fault() - Process a cmdobj for fault policies + * @device: Device on which the cmdobj caused a fault + * @replay: List of cmdobj's that are to be replayed on the device. The + * first command in the replay list is the faulting command and the remaining + * cmdobj's in the list are commands that were submitted to the same queue * as the faulting one. - * @count: Number of cmdbatches in replay + * @count: Number of cmdobj's in replay * @base: The IB1 base at the time of fault * @fault: The fault type */ -static void process_cmdbatch_fault(struct kgsl_device *device, - struct kgsl_cmdbatch **replay, int count, +static void process_cmdobj_fault(struct kgsl_device *device, + struct kgsl_drawobj_cmd **replay, int count, unsigned int base, int fault) { - struct kgsl_cmdbatch *cmdbatch = replay[0]; + struct kgsl_drawobj_cmd *cmdobj = replay[0]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); int i; char *state = "failed"; @@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * where 1st and 4th gpu hang are more than 3 seconds apart we * won't disable GFT and invalidate the context. */ - if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) { - if (time_after(jiffies, (cmdbatch->context->fault_time + if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) { + if (time_after(jiffies, (drawobj->context->fault_time + msecs_to_jiffies(_fault_throttle_time)))) { - cmdbatch->context->fault_time = jiffies; - cmdbatch->context->fault_count = 1; + drawobj->context->fault_time = jiffies; + drawobj->context->fault_count = 1; } else { - cmdbatch->context->fault_count++; - if (cmdbatch->context->fault_count > + drawobj->context->fault_count++; + if (drawobj->context->fault_count > _fault_throttle_burst) { set_bit(KGSL_FT_DISABLE, - &cmdbatch->fault_policy); - pr_context(device, cmdbatch->context, + &cmdobj->fault_policy); + pr_context(device, drawobj->context, "gpu fault threshold exceeded %d faults in %d msecs\n", _fault_throttle_burst, _fault_throttle_time); @@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device, } /* - * If FT is disabled for this cmdbatch invalidate immediately + * If FT is disabled for this cmdobj invalidate immediately */ - if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) || - test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) { + if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) || + test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) { state = "skipped"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* If the context is detached do not run FT on context */ - if (kgsl_context_detached(cmdbatch->context)) { + if (kgsl_context_detached(drawobj->context)) { state = "detached"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* - * Set a flag so we don't print another PM dump if the cmdbatch fails + * Set a flag so we don't print another PM dump if the cmdobj fails * again on replay */ - set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy); + set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy); /* * A hardware 
fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the cmdobj - no point in trying to replay it
 * Clear the replay bit and move on to the next policy level
 */
 if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy));
 /*
 * A timeout fault means the IB timed out - clear the policy and
 * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this cmdobj again
 */
 if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
 /*
 * If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1675,84 @@
 */
 if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
 /* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
 clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
 }
 /*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each cmdobj stores the
 * current fault policy that was set when it was queued.
 * As the options are tried in descending priority
 * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the cmdobj policy so the next thing can be tried if the
 * change comes around again
 */
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging cmdobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery);
 return;
 }
 /*
 * Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" cmdobj
 * because the CP may have caused a page-fault while it was prefetching
 * the next IB1/IB2. Walk all outstanding commands and zap the
 * supposedly bad IB1 wherever it lurks.
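The descent described above is driven entirely by bits in fault_policy, each consumed with test_and_clear_bit() so a repeat offender falls through to the next level. Below is a minimal userspace model of that cascade; the bit positions and the bit helper are assumed stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Hypothetical bit positions mirroring the KGSL_FT_* policy order */
enum { FT_REPLAY, FT_SKIPIB, FT_SKIPCMD, FT_SKIPFRAME };

/* Minimal userspace stand-in for the kernel's test_and_clear_bit() */
static int test_and_clear_bit_ul(int nr, unsigned long *addr)
{
	int was_set = (*addr >> nr) & 1;

	*addr &= ~(1UL << nr);
	return was_set;
}

int main(void)
{
	/* Policy queued with every recovery option enabled */
	unsigned long policy = (1UL << FT_REPLAY) | (1UL << FT_SKIPIB) |
			       (1UL << FT_SKIPCMD) | (1UL << FT_SKIPFRAME);
	const char *names[] = { "replay", "skip-ib", "skip-cmd", "skip-frame" };
	int fault, nr;

	/* One simulated fault per iteration: each fault consumes one level */
	for (fault = 0; policy != 0; fault++) {
		for (nr = FT_REPLAY; nr <= FT_SKIPFRAME; nr++) {
			if (test_and_clear_bit_ul(nr, &policy)) {
				printf("fault %d -> try %s\n", fault, names[nr]);
				break;
			}
		}
	}
	printf("policy exhausted -> invalidate context\n");
	return 0;
}

Each simulated fault consumes one level, which is why a cmdobj that keeps faulting marches from replay through the skip options toward context invalidation.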
*/ - if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB)); - set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery); + if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB)); + set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery); for (i = 0; i < count; i++) { if (replay[i] != NULL && - replay[i]->context->id == cmdbatch->context->id) - cmdbatch_skip_ib(replay[i], base); + DRAWOBJ(replay[i])->context->id == + drawobj->context->id) + _skip_ib(replay[i], base); } return; } - /* Skip the faulted command batch submission */ - if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD)); + /* Skip the faulted cmdobj submission */ + if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD)); - /* Skip faulting command batch */ - cmdbatch_skip_cmd(cmdbatch, replay, count); + /* Skip faulting cmdobj */ + _skip_cmd(cmdobj, replay, count); return; } - if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, + if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPFRAME)); - set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery); + set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery); /* - * Skip all the pending command batches for this context until + * Skip all the pending cmdobj's for this context until * the EOF frame is seen */ - cmdbatch_skip_frame(cmdbatch, replay, count); + _skip_frame(cmdobj, replay, count); return; } /* If we get here then all the policies failed */ - pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n", - state, cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n", + state, drawobj->context->id, drawobj->timestamp); /* Mark the context as failed */ - mark_guilty_context(device, cmdbatch->context->id); + mark_guilty_context(device, drawobj->context->id); /* Invalidate the context */ - adreno_drawctxt_invalidate(device, cmdbatch->context); + adreno_drawctxt_invalidate(device, drawobj->context); } /** @@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * @base: The IB1 base during the fault */ static void recover_dispatch_q(struct kgsl_device *device, - struct adreno_dispatcher_cmdqueue *dispatch_q, + struct adreno_dispatcher_drawqueue *dispatch_q, int fault, unsigned int base) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct kgsl_cmdbatch **replay = NULL; + struct kgsl_drawobj_cmd **replay; unsigned int ptr; int first = 0; int count = 0; @@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device, /* Recovery failed - mark everybody on this q guilty */ while (ptr != dispatch_q->tail) { - struct kgsl_context *context = - dispatch_q->cmd_q[ptr]->context; + struct kgsl_drawobj_cmd *cmdobj = + dispatch_q->cmd_q[ptr]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - mark_guilty_context(device, context->id); - adreno_drawctxt_invalidate(device, context); - kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]); + mark_guilty_context(device, drawobj->context->id); + adreno_drawctxt_invalidate(device, drawobj->context); + kgsl_drawobj_destroy(drawobj); - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, + 
ADRENO_DISPATCH_DRAWQUEUE_SIZE); } /* @@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device, goto replay; } - /* Copy the inflight command batches into the temporary storage */ + /* Copy the inflight cmdobj's into the temporary storage */ ptr = dispatch_q->head; while (ptr != dispatch_q->tail) { replay[count++] = dispatch_q->cmd_q[ptr]; - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE); } if (fault && count) - process_cmdbatch_fault(device, replay, + process_cmdobj_fault(device, replay, count, base, fault); replay: dispatch_q->inflight = 0; dispatch_q->head = dispatch_q->tail = 0; - /* Remove any pending command batches that have been invalidated */ - remove_invalidated_cmdbatches(device, replay, count); + /* Remove any pending cmdobj's that have been invalidated */ + remove_invalidated_cmdobjs(device, replay, count); /* Replay the pending command buffers */ for (i = 0; i < count; i++) { @@ -1674,16 +1835,16 @@ replay: */ if (first == 0) { - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); first = 1; } /* - * Force each command batch to wait for idle - this avoids weird + * Force each cmdobj to wait for idle - this avoids weird * CP parse issues */ - set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv); + set_bit(CMDOBJ_WFI, &replay[i]->priv); ret = sendcmd(adreno_dev, replay[i]); @@ -1693,15 +1854,18 @@ replay: */ if (ret) { - pr_context(device, replay[i]->context, + pr_context(device, replay[i]->base.context, "gpu reset failed ctx %d ts %d\n", - replay[i]->context->id, replay[i]->timestamp); + replay[i]->base.context->id, + replay[i]->base.timestamp); /* Mark this context as guilty (failed recovery) */ - mark_guilty_context(device, replay[i]->context->id); + mark_guilty_context(device, + replay[i]->base.context->id); - adreno_drawctxt_invalidate(device, replay[i]->context); - remove_invalidated_cmdbatches(device, &replay[i], + adreno_drawctxt_invalidate(device, + replay[i]->base.context); + remove_invalidated_cmdobjs(device, &replay[i], count - i); } } @@ -1713,36 +1877,38 @@ replay: } static void do_header_and_snapshot(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch) + struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj) { - /* Always dump the snapshot on a non-cmdbatch failure */ - if (cmdbatch == NULL) { + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + + /* Always dump the snapshot on a non-drawobj failure */ + if (cmdobj == NULL) { adreno_fault_header(device, rb, NULL); kgsl_device_snapshot(device, NULL); return; } /* Skip everything if the PMDUMP flag is set */ - if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) + if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy)) return; /* Print the fault header */ - adreno_fault_header(device, rb, cmdbatch); + adreno_fault_header(device, rb, cmdobj); - if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) - kgsl_device_snapshot(device, cmdbatch->context); + if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) + kgsl_device_snapshot(device, drawobj->context); } static int dispatcher_do_fault(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp; + struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp; struct adreno_ringbuffer *rb; 
struct adreno_ringbuffer *hung_rb = NULL; unsigned int reg; uint64_t base; - struct kgsl_cmdbatch *cmdbatch = NULL; + struct kgsl_drawobj_cmd *cmdobj = NULL; int ret, i; int fault; int halt; @@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg); } /* - * retire cmdbatches from all the dispatch_q's before starting recovery + * retire cmdobj's from all the dispatch_q's before starting recovery */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - adreno_dispatch_retire_cmdqueue(adreno_dev, + adreno_dispatch_retire_drawqueue(adreno_dev, &(rb->dispatch_q)); /* Select the active dispatch_q */ if (base == rb->buffer_desc.gpuaddr) { @@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) } } - if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) { - cmdbatch = dispatch_q->cmd_q[dispatch_q->head]; - trace_adreno_cmdbatch_fault(cmdbatch, fault); + if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) { + cmdobj = dispatch_q->cmd_q[dispatch_q->head]; + trace_adreno_cmdbatch_fault(cmdobj, fault); } adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, ADRENO_REG_CP_IB1_BASE_HI, &base); - do_header_and_snapshot(device, hung_rb, cmdbatch); + do_header_and_snapshot(device, hung_rb, cmdobj); /* Terminate the stalled transaction and resume the IOMMU */ if (fault & ADRENO_IOMMU_PAGE_FAULT) @@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) return 1; } -static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch, +static inline int drawobj_consumed(struct kgsl_drawobj *drawobj, unsigned int consumed, unsigned int retired) { - return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) && - (timestamp_cmp(retired, cmdbatch->timestamp) < 0)); + return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) && + (timestamp_cmp(retired, drawobj->timestamp) < 0)); } static void _print_recovery(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { static struct { unsigned int mask; const char *str; } flags[] = { ADRENO_FT_TYPES }; - int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG); + int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG); char *result = "unknown"; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); for (i = 0; i < ARRAY_SIZE(flags); i++) { if (flags[i].mask == BIT(nr)) { @@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device, } } - pr_context(device, cmdbatch->context, + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d policy %lX\n", - result, cmdbatch->context->id, cmdbatch->timestamp, - cmdbatch->fault_recovery); + result, drawobj->context->id, drawobj->timestamp, + cmdobj->fault_recovery); } -static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire) +static void cmdobj_profile_ticks(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire) { - void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr; - struct adreno_cmdbatch_profile_entry *entry; + void *ptr = adreno_dev->profile_buffer.hostptr; + struct adreno_drawobj_profile_entry *entry; - entry = (struct adreno_cmdbatch_profile_entry *) - (ptr + (cmdbatch->profile_index * sizeof(*entry))); + entry = (struct adreno_drawobj_profile_entry *) + (ptr + (cmdobj->profile_index * sizeof(*entry))); rmb(); *start = entry->started; *retire = entry->retired; } 
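cmdobj_profile_ticks() reads two GPU-written counters out of a shared page, and the rmb() keeps those loads from being reordered ahead of the check that made them safe to read. Here is a self-contained sketch of the same access pattern, with the entry layout assumed from the snippet above and a userspace fence standing in for the kernel's rmb():

#include <stdint.h>
#include <stdio.h>

/* Layout assumed from the diff above; the real struct lives in the
 * adreno headers as adreno_drawobj_profile_entry. */
struct profile_entry {
	uint64_t started;	/* GPU ticks when the IBs began executing */
	uint64_t retired;	/* GPU ticks when they finished */
};

/* Userspace stand-in for the kernel's rmb() read barrier */
#define rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

static void profile_ticks(void *buffer, unsigned int index,
		uint64_t *start, uint64_t *retire)
{
	struct profile_entry *entry = (struct profile_entry *)
		((char *)buffer + index * sizeof(*entry));

	rmb();	/* order the loads after the preceding retirement check */
	*start = entry->started;
	*retire = entry->retired;
}

int main(void)
{
	/* One page worth of entries, as the driver allocates */
	static struct profile_entry page[4096 / sizeof(struct profile_entry)];
	uint64_t s, r;

	page[3].started = 1000;
	page[3].retired = 1750;
	profile_ticks(page, 3, &s, &r);
	printf("slot 3: started=%llu retired=%llu delta=%llu\n",
		(unsigned long long)s, (unsigned long long)r,
		(unsigned long long)(r - s));
	return 0;
}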
-static void retire_cmdbatch(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch) +static void retire_cmdobj(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj) { struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); uint64_t start = 0, end = 0; - if (cmdbatch->fault_recovery != 0) { - set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv); - _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch); + if (cmdobj->fault_recovery != 0) { + set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv); + _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj); } - if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) - cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end); + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) + cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end); /* * For A3xx we still get the rptr from the CP_RB_RPTR instead of @@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev, * So avoid reading GPU register directly for A3xx. */ if (adreno_is_a3xx(adreno_dev)) - trace_adreno_cmdbatch_retired(cmdbatch, - (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), 0); + trace_adreno_cmdbatch_retired(drawobj, + (int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery); else - trace_adreno_cmdbatch_retired(cmdbatch, - (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), - adreno_get_rptr(drawctxt->rb)); + trace_adreno_cmdbatch_retired(drawobj, + (int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), + adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery); drawctxt->submit_retire_ticks[drawctxt->ticks_index] = - end - cmdbatch->submit_ticks; + end - cmdobj->submit_ticks; drawctxt->ticks_index = (drawctxt->ticks_index + 1) % SUBMIT_RETIRE_TICKS_SIZE; - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); } -static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; int count = 0; - while (!adreno_cmdqueue_is_empty(cmdqueue)) { - struct kgsl_cmdbatch *cmdbatch = - cmdqueue->cmd_q[cmdqueue->head]; + while (!adreno_drawqueue_is_empty(drawqueue)) { + struct kgsl_drawobj_cmd *cmdobj = + drawqueue->cmd_q[drawqueue->head]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - if (!kgsl_check_timestamp(device, cmdbatch->context, - cmdbatch->timestamp)) + if (!kgsl_check_timestamp(device, drawobj->context, + drawobj->timestamp)) break; - retire_cmdbatch(adreno_dev, cmdbatch); + retire_cmdobj(adreno_dev, cmdobj); dispatcher->inflight--; - cmdqueue->inflight--; + drawqueue->inflight--; - cmdqueue->cmd_q[cmdqueue->head] = NULL; + drawqueue->cmd_q[drawqueue->head] = NULL; - cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head, - ADRENO_DISPATCH_CMDQUEUE_SIZE); + drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head, + ADRENO_DISPATCH_DRAWQUEUE_SIZE); count++; } @@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, } static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, - struct 
adreno_dispatcher_cmdqueue *cmdqueue) + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head]; + struct kgsl_drawobj *drawobj = + DRAWOBJ(drawqueue->cmd_q[drawqueue->head]); /* Don't timeout if the timer hasn't expired yet (duh) */ - if (time_is_after_jiffies(cmdqueue->expires)) + if (time_is_after_jiffies(drawqueue->expires)) return; /* Don't timeout if the IB timeout is disabled globally */ @@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, return; /* Don't time out if the context has disabled it */ - if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) + if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) return; - pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n", - cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n", + drawobj->context->id, drawobj->timestamp); adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT); } -static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { - int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue); + int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue); /* Nothing to do if there are no pending commands */ - if (adreno_cmdqueue_is_empty(cmdqueue)) + if (adreno_drawqueue_is_empty(drawqueue)) return count; - /* Don't update the cmdqueue timeout if we are about to preempt out */ + /* Don't update the drawqueue timeout if we are about to preempt out */ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) return count; - /* Don't update the cmdqueue timeout if it isn't active */ - if (!cmdqueue_is_current(cmdqueue)) + /* Don't update the drawqueue timeout if it isn't active */ + if (!drawqueue_is_current(drawqueue)) return count; /* @@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, */ if (count) { - cmdqueue->expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + drawqueue->expires = jiffies + + msecs_to_jiffies(adreno_drawobj_timeout); return count; } /* * If we get here then 1) the ringbuffer is current and 2) we haven't * retired anything. 
Check to see if the timeout is valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
 */
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
 return 0;
}
@@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
 /* Check to see if we need to update the command timer */
 if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
 }
}
@@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
 /*
 * As long as there are inflight commands, process retired commands from
- * all cmdqueues
+ * all drawqueues
 */
 for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
 if (dispatcher->inflight == 0)
 break;
 }
@@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
 }
 /*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while cmdobjs are inflight. Fault
 * detection registers are read and compared to the existing values - if they
 * changed then the GPU is still running.
If they are the same between * subsequent calls then the GPU may have faulted @@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data) */ void adreno_dispatcher_start(struct kgsl_device *device) { - complete_all(&device->cmdbatch_gate); + complete_all(&device->halt_gate); /* Schedule the work loop to get things going */ adreno_dispatcher_schedule(device); @@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev) del_timer_sync(&dispatcher->fault_timer); FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - struct adreno_dispatcher_cmdqueue *dispatch_q = + struct adreno_dispatcher_drawqueue *dispatch_q = &(rb->dispatch_q); - while (!adreno_cmdqueue_is_empty(dispatch_q)) { - kgsl_cmdbatch_destroy( - dispatch_q->cmd_q[dispatch_q->head]); + while (!adreno_drawqueue_is_empty(dispatch_q)) { + kgsl_drawobj_destroy( + DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head])); dispatch_q->head = (dispatch_q->head + 1) - % ADRENO_DISPATCH_CMDQUEUE_SIZE; + % ADRENO_DISPATCH_DRAWQUEUE_SIZE; } } @@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher, *((unsigned int *) attr->value)); } -static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE, +static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_hi); static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644, - ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo); + ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo); /* * Our code that "puts back" a command from the context is much cleaner * if we are sure that there will always be enough room in the * ringbuffer so restrict the maximum size of the context queue to - * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1 + * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1 */ -static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644, - ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size); +static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644, + ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size); static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0, - _context_cmdbatch_burst); -static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, - adreno_cmdbatch_timeout); + _context_drawobj_burst); +static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0, + adreno_drawobj_timeout); static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait); static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0, _fault_timer_interval); @@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0, static struct attribute *dispatcher_attrs[] = { &dispatcher_attr_inflight.attr, &dispatcher_attr_inflight_low_latency.attr, - &dispatcher_attr_context_cmdqueue_size.attr, + &dispatcher_attr_context_drawqueue_size.attr, &dispatcher_attr_context_burst_count.attr, - &dispatcher_attr_cmdbatch_timeout.attr, + &dispatcher_attr_drawobj_timeout.attr, &dispatcher_attr_context_queue_wait.attr, &dispatcher_attr_fault_detect_interval.attr, &dispatcher_attr_fault_throttle_time.attr, diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index 699c3e4adb27..cb9106fedc82 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -15,7 +15,7 @@ #define ____ADRENO_DISPATCHER_H extern unsigned int adreno_disp_preempt_fair_sched; -extern unsigned int adreno_cmdbatch_timeout; +extern unsigned int adreno_drawobj_timeout; extern unsigned int adreno_dispatch_starvation_time; extern unsigned int adreno_dispatch_time_slice; @@ -44,21 +44,21 @@ enum 
adreno_dispatcher_starve_timer_states { * sizes that can be chosen at runtime */ -#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128 +#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128 -#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) +#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) /** - * struct adreno_dispatcher_cmdqueue - List of commands for a RB level - * @cmd_q: List of command batches submitted to dispatcher + * struct adreno_dispatcher_drawqueue - List of commands for a RB level + * @cmd_q: List of command obj's submitted to dispatcher * @inflight: Number of commands inflight in this q * @head: Head pointer to the q * @tail: Queues tail pointer - * @active_context_count: Number of active contexts seen in this rb cmdqueue - * @expires: The jiffies value at which this cmdqueue has run too long + * @active_context_count: Number of active contexts seen in this rb drawqueue + * @expires: The jiffies value at which this drawqueue has run too long */ -struct adreno_dispatcher_cmdqueue { - struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE]; +struct adreno_dispatcher_drawqueue { + struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE]; unsigned int inflight; unsigned int head; unsigned int tail; @@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue { * struct adreno_dispatcher - container for the adreno GPU dispatcher * @mutex: Mutex to protect the structure * @state: Current state of the dispatcher (active or paused) - * @timer: Timer to monitor the progress of the command batches - * @inflight: Number of command batch operations pending in the ringbuffer + * @timer: Timer to monitor the progress of the drawobjs + * @inflight: Number of drawobj operations pending in the ringbuffer * @fault: Non-zero if a fault was detected. - * @pending: Priority list of contexts waiting to submit command batches + * @pending: Priority list of contexts waiting to submit drawobjs * @plist_lock: Spin lock to protect the pending queue * @work: work_struct to put the dispatcher in a work queue * @kobj: kobject for the dispatcher directory in the device sysfs node @@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev); void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev); void adreno_dispatcher_stop(struct adreno_device *adreno_dev); -int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamp); +int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void adreno_dispatcher_schedule(struct kgsl_device *device); void adreno_dispatcher_pause(struct adreno_device *adreno_dev); @@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device, void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev, int bit); void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *dispatch_q); + struct adreno_dispatcher_drawqueue *dispatch_q); -static inline bool adreno_cmdqueue_is_empty( - struct adreno_dispatcher_cmdqueue *cmdqueue) +static inline bool adreno_drawqueue_is_empty( + struct adreno_dispatcher_drawqueue *drawqueue) { - return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail); + return (drawqueue != NULL && drawqueue->head == drawqueue->tail); } #endif /* __ADRENO_DISPATCHER_H */ diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index 
d9ebe37d0cf0..3a110ed221a8 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire); /* - * We may have cmdbatch timer running, which also uses same + * We may have kgsl sync obj timer running, which also uses same * lock, take a lock with software interrupt disabled (bh) * to avoid spin lock recursion. * * Use Spin trylock because dispatcher can acquire drawctxt->lock * if context is pending and the fence it is waiting on just got * signalled. Dispatcher acquires drawctxt->lock and tries to - * delete the cmdbatch timer using del_timer_sync(). + * delete the sync obj timer using del_timer_sync(). * del_timer_sync() waits till timer and its pending handlers * are deleted. But if the timer expires at the same time, * timer handler could be waiting on drawctxt->lock leading to a @@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device, context->id, queue, drawctxt->submitted_timestamp, start, retire); - if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) { - struct kgsl_cmdbatch *cmdbatch = - drawctxt->cmdqueue[drawctxt->cmdqueue_head]; + if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) { + struct kgsl_drawobj *drawobj = + drawctxt->drawqueue[drawctxt->drawqueue_head]; - if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) { + if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) { dev_err(device->dev, " possible deadlock. Context %d might be blocked for itself\n", context->id); goto stats; } - if (kgsl_cmdbatch_events_pending(cmdbatch)) { - dev_err(device->dev, - " context[%d] (ts=%d) Active sync points:\n", - context->id, cmdbatch->timestamp); + if (drawobj->type == SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); + + if (kgsl_drawobj_events_pending(syncobj)) { + dev_err(device->dev, + " context[%d] (ts=%d) Active sync points:\n", + context->id, drawobj->timestamp); - kgsl_dump_syncpoints(device, cmdbatch); + kgsl_dump_syncpoints(device, syncobj); + } } } @@ -229,19 +233,19 @@ done: return ret; } -static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt, - struct kgsl_cmdbatch **list) +static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt, + struct kgsl_drawobj **list) { int count = 0; - while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) { - struct kgsl_cmdbatch *cmdbatch = - drawctxt->cmdqueue[drawctxt->cmdqueue_head]; + while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) { + struct kgsl_drawobj *drawobj = + drawctxt->drawqueue[drawctxt->drawqueue_head]; - drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) % - ADRENO_CONTEXT_CMDQUEUE_SIZE; + drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) % + ADRENO_CONTEXT_DRAWQUEUE_SIZE; - list[count++] = cmdbatch; + list[count++] = drawobj; } return count; @@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, struct kgsl_context *context) { struct adreno_context *drawctxt = ADRENO_CONTEXT(context); - struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE]; + struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; int i, count; trace_adreno_drawctxt_invalidate(drawctxt); @@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, drawctxt->timestamp); /* Get rid of commands still waiting in the queue */ - count = drawctxt_detach_cmdbatches(drawctxt, list); + count = drawctxt_detach_drawobjs(drawctxt, list); 
spin_unlock(&drawctxt->lock); for (i = 0; i < count; i++) { kgsl_cancel_events_timestamp(device, &context->events, list[i]->timestamp); - kgsl_cmdbatch_destroy(list[i]); + kgsl_drawobj_destroy(list[i]); } /* Make sure all pending events are processed or cancelled */ @@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) struct adreno_context *drawctxt; struct adreno_ringbuffer *rb; int ret, count, i; - struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE]; + struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; if (context == NULL) return; @@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) spin_unlock(&adreno_dev->active_list_lock); spin_lock(&drawctxt->lock); - count = drawctxt_detach_cmdbatches(drawctxt, list); + count = drawctxt_detach_drawobjs(drawctxt, list); spin_unlock(&drawctxt->lock); for (i = 0; i < count; i++) { @@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) * detached status here. */ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]); - kgsl_cmdbatch_destroy(list[i]); + kgsl_drawobj_destroy(list[i]); } /* diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index 5ea911954991..0578f16ae9e1 100644 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -18,7 +18,7 @@ struct adreno_context_type { const char *str; }; -#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128 +#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128 #define SUBMIT_RETIRE_TICKS_SIZE 7 struct kgsl_device; @@ -32,20 +32,21 @@ struct kgsl_context; * @internal_timestamp: Global timestamp of the last issued command * NOTE: guarded by device->mutex, not drawctxt->mutex! * @type: Context type (GL, CL, RS) - * @mutex: Mutex to protect the cmdqueue - * @cmdqueue: Queue of command batches waiting to be dispatched for this context - * @cmdqueue_head: Head of the cmdqueue queue - * @cmdqueue_tail: Tail of the cmdqueue queue + * @mutex: Mutex to protect the drawqueue + * @drawqueue: Queue of drawobjs waiting to be dispatched for this + * context + * @drawqueue_head: Head of the drawqueue queue + * @drawqueue_tail: Tail of the drawqueue queue * @pending: Priority list node for the dispatcher list of pending contexts * @wq: Workqueue structure for contexts to sleep pending room in the queue * @waiting: Workqueue structure for contexts waiting for a timestamp or event - * @queued: Number of commands queued in the cmdqueue - * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd(); + * @queued: Number of commands queued in the drawqueue + * @fault_policy: GFT fault policy set in _skip_cmd(); * @debug_root: debugfs entry for this context. * @queued_timestamp: The last timestamp that was queued on this context * @rb: The ringbuffer in which this context submits commands. * @submitted_timestamp: The last timestamp that was submitted for this context - * @submit_retire_ticks: Array to hold cmdbatch execution times from submit + * @submit_retire_ticks: Array to hold command obj execution times from submit * to retire * @ticks_index: The index into submit_retire_ticks[] where the new delta will * be written. 
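The submit_retire_ticks/ticks_index pair documented above is a small rolling window: each retired command overwrites the oldest of the last SUBMIT_RETIRE_TICKS_SIZE submit-to-retire deltas. A sketch of that bookkeeping follows; the averaging consumer is illustrative only, since the array's reader sits outside the hunks shown here.

#include <stdint.h>
#include <stdio.h>

#define SUBMIT_RETIRE_TICKS_SIZE 7	/* matches adreno_drawctxt.h */

struct ticks_window {
	uint64_t samples[SUBMIT_RETIRE_TICKS_SIZE];
	unsigned int index;	/* next slot to overwrite, like ticks_index */
};

/* Record one submit-to-retire delta, overwriting the oldest sample */
static void record_delta(struct ticks_window *w, uint64_t submit,
		uint64_t retire)
{
	w->samples[w->index] = retire - submit;
	w->index = (w->index + 1) % SUBMIT_RETIRE_TICKS_SIZE;
}

/* Illustrative consumer: average of the samples in the window */
static uint64_t average_delta(const struct ticks_window *w)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < SUBMIT_RETIRE_TICKS_SIZE; i++)
		sum += w->samples[i];
	return sum / SUBMIT_RETIRE_TICKS_SIZE;
}

int main(void)
{
	struct ticks_window w = { {0}, 0 };
	uint64_t t;

	/* Ten retirements; only the last seven deltas survive */
	for (t = 0; t < 10; t++)
		record_delta(&w, t * 100, t * 100 + 40 + t);
	printf("rolling average: %llu ticks\n",
		(unsigned long long)average_delta(&w));
	return 0;
}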
@@ -60,9 +61,9 @@ struct adreno_context { spinlock_t lock; /* Dispatcher */ - struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE]; - unsigned int cmdqueue_head; - unsigned int cmdqueue_tail; + struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; + unsigned int drawqueue_head; + unsigned int drawqueue_tail; struct plist_node pending; wait_queue_head_t wq; @@ -92,8 +93,9 @@ struct adreno_context { * @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame * marker. * @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission. - * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during + * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj's skipped during fault tolerance. + * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context. */ enum adreno_context_priv { ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC, @@ -102,6 +104,7 @@ enum adreno_context_priv { ADRENO_CONTEXT_SKIP_EOF, ADRENO_CONTEXT_FORCE_PREAMBLE, ADRENO_CONTEXT_SKIP_CMD, + ADRENO_CONTEXT_FENCE_LOG, }; /* Flags for adreno_drawctxt_switch() */ diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 07ef09034d7c..fc0602a60ac1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb, sizedwords, 0, NULL); } -/** - * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit - * @device: The kgsl device pointer - * @ibdesc: Pointer to the IB descriptor - */ -static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_memobj_node *ib) -{ - struct kgsl_device *device = dev_priv->device; - struct kgsl_process_private *private = dev_priv->process_priv; - - /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */ - if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) { - pr_context(device, context, "ctxt %d invalid ib size %lld\n", - context->id, ib->size); - return false; - } - - /* Make sure that the address is mapped */ - if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) { - pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n", - context->id, ib->gpuaddr); - return false; - } - - return true; -} - -int -adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamp) -{ - struct kgsl_device *device = dev_priv->device; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct adreno_context *drawctxt = ADRENO_CONTEXT(context); - struct kgsl_memobj_node *ib; - int ret; - - if (kgsl_context_invalid(context)) - return -EDEADLK; - - /* Verify the IBs before they get queued */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) - if (_ringbuffer_verify_ib(dev_priv, context, ib) == false) - return -EINVAL; - - /* wait for the suspend gate */ - wait_for_completion(&device->cmdbatch_gate); - - /* - * Clear the wake on touch bit to indicate an IB has been - * submitted since the last time we set it. But only clear - * it when we have rendering commands. 
- */ - if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER) - && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC)) - device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; - - /* A3XX does not have support for command batch profiling */ - if (adreno_is_a3xx(adreno_dev) && - (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)) - return -EOPNOTSUPP; - - /* Queue the command in the ringbuffer */ - ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch, - timestamp); - - /* - * Return -EPROTO if the device has faulted since the last time we - * checked - userspace uses this to perform post-fault activities - */ - if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv)) - ret = -EPROTO; - - return ret; -} - static void adreno_ringbuffer_set_constraint(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { - struct kgsl_context *context = cmdbatch->context; + struct kgsl_context *context = drawobj->context; /* * Check if the context has a constraint and constraint flags are * set. */ if (context->pwr_constraint.type && ((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) || - (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) + (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint, context->id); } @@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev, /* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time) + struct kgsl_drawobj_cmd *cmdobj, + struct adreno_submit_time *time) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int *link; @@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_context *context; struct adreno_context *drawctxt; bool use_preamble = true; - bool cmdbatch_user_profiling = false; - bool cmdbatch_kernel_profiling = false; + bool user_profiling = false; + bool kernel_profiling = false; int flags = KGSL_CMD_FLAGS_NONE; int ret; struct adreno_ringbuffer *rb; - struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL; + struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL; unsigned int dwords = 0; struct adreno_submit_time local; - struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry; + struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry; if (entry) profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc, - cmdbatch->profiling_buffer_gpuaddr); + cmdobj->profiling_buffer_gpuaddr); - context = cmdbatch->context; + context = drawobj->context; drawctxt = ADRENO_CONTEXT(context); /* Get the total IBs in the list */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) + list_for_each_entry(ib, &cmdobj->cmdlist, node) numibs++; rb = drawctxt->rb; @@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * c) force preamble for commandbatch */ if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) && - (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) { + (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) { - set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery); - cmdbatch->fault_policy = drawctxt->fault_policy; - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery); + cmdobj->fault_policy = 
drawctxt->fault_policy; + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); /* if context is detached print fault recovery */ - adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch); + adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj); /* clear the drawctxt flags */ clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv); @@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if a context switch hasn't occured */ if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) && - !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) && + !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) && (rb->drawctxt_active == drawctxt)) use_preamble = false; @@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * the accounting sane. Set start_index and numibs to 0 to just * generate the start and end markers and skip everything else */ - if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) { + if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) { use_preamble = false; numibs = 0; } @@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, /* Each IB takes up 30 dwords in worst case */ dwords += (numibs * 30); - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING && + if (drawobj->flags & KGSL_DRAWOBJ_PROFILING && !adreno_is_a3xx(adreno_dev) && profile_buffer) { - cmdbatch_user_profiling = true; + user_profiling = true; dwords += 6; /* @@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, time = &local; } - if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) { - cmdbatch_kernel_profiling = true; + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) { + kernel_profiling = true; dwords += 6; if (adreno_is_a5xx(adreno_dev)) dwords += 2; @@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, *cmds++ = cp_packet(adreno_dev, CP_NOP, 1); *cmds++ = KGSL_START_OF_IB_IDENTIFIER; - if (cmdbatch_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - adreno_dev->cmdbatch_profile_buffer.gpuaddr + - ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index, + adreno_dev->profile_buffer.gpuaddr + + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, started)); } /* - * Add cmds to read the GPU ticks at the start of the cmdbatch and - * write it into the appropriate cmdbatch profiling buffer offset + * Add cmds to read the GPU ticks at the start of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (cmdbatch_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - cmdbatch->profiling_buffer_gpuaddr + - offsetof(struct kgsl_cmdbatch_profiling_buffer, + cmdobj->profiling_buffer_gpuaddr + + offsetof(struct kgsl_drawobj_profiling_buffer, gpu_ticks_submitted)); } if (numibs) { - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { /* * Skip 0 sized IBs - these are presumed to have been * removed from consideration by the FT policy @@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, adreno_is_preemption_enabled(adreno_dev)) cmds += gpudev->preemption_yield_enable(cmds); - if (cmdbatch_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - adreno_dev->cmdbatch_profile_buffer.gpuaddr + - ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index, + adreno_dev->profile_buffer.gpuaddr + + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, retired)); } /* 
- * Add cmds to read the GPU ticks at the end of the cmdbatch and - * write it into the appropriate cmdbatch profiling buffer offset + * Add cmds to read the GPU ticks at the end of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (cmdbatch_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - cmdbatch->profiling_buffer_gpuaddr + - offsetof(struct kgsl_cmdbatch_profiling_buffer, + cmdobj->profiling_buffer_gpuaddr + + offsetof(struct kgsl_drawobj_profiling_buffer, gpu_ticks_retired)); } @@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, goto done; } - if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv)) + if (test_bit(CMDOBJ_WFI, &cmdobj->priv)) flags = KGSL_CMD_FLAGS_WFI; /* @@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, flags |= KGSL_CMD_FLAGS_PWRON_FIXUP; /* Set the constraints before adding to ringbuffer */ - adreno_ringbuffer_set_constraint(device, cmdbatch); + adreno_ringbuffer_set_constraint(device, drawobj); /* CFF stuff executed only if CFF is enabled */ - kgsl_cffdump_capture_ib_desc(device, context, cmdbatch); + kgsl_cffdump_capture_ib_desc(device, context, cmdobj); ret = adreno_ringbuffer_addcmds(rb, flags, &link[0], (cmds - link), - cmdbatch->timestamp, time); + drawobj->timestamp, time); if (!ret) { - cmdbatch->global_ts = drawctxt->internal_timestamp; + cmdobj->global_ts = drawctxt->internal_timestamp; /* Put the timevalues in the profiling buffer */ - if (cmdbatch_user_profiling) { + if (user_profiling) { /* * Return kernel clock time to the the client * if requested */ - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) { + if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) { uint64_t secs = time->ktime; profile_buffer->wall_clock_ns = @@ -1069,9 +992,8 @@ done: kgsl_memdesc_unmap(&entry->memdesc); - trace_kgsl_issueibcmds(device, context->id, cmdbatch, - numibs, cmdbatch->timestamp, - cmdbatch->flags, ret, drawctxt->type); + trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp, + drawobj->flags, ret, drawctxt->type); kfree(link); return ret; diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index b126f710b5e6..63374af1e3f7 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -119,7 +119,7 @@ struct adreno_ringbuffer { struct adreno_context *drawctxt_active; struct kgsl_memdesc preemption_desc; struct kgsl_memdesc pagetable_desc; - struct adreno_dispatcher_cmdqueue dispatch_q; + struct adreno_dispatcher_drawqueue dispatch_q; wait_queue_head_t ts_expire_waitq; unsigned int wptr_preempt_end; unsigned int gpr11; @@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set); int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj *drawobj, uint32_t *timestamp); int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj_cmd *cmdobj, struct adreno_submit_time *time); int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt); diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index f52ddfa894d5..16ca0980cfbe 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -27,8 +27,8 @@ #include "adreno_a5xx.h" TRACE_EVENT(adreno_cmdbatch_queued, - TP_PROTO(struct 
kgsl_cmdbatch *cmdbatch, unsigned int queued), - TP_ARGS(cmdbatch, queued), + TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued), + TP_ARGS(drawobj, queued), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued, __field(unsigned int, prio) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->queued = queued; - __entry->flags = cmdbatch->flags; - __entry->prio = cmdbatch->context->priority; + __entry->flags = drawobj->flags; + __entry->prio = drawobj->context->priority; ), TP_printk( "ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s", __entry->id, __entry->prio, __entry->timestamp, __entry->queued, __entry->flags ? __print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none" + KGSL_DRAWOBJ_FLAGS) : "none" ) ); TRACE_EVENT(adreno_cmdbatch_submitted, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks, + TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks, unsigned long secs, unsigned long usecs, struct adreno_ringbuffer *rb, unsigned int rptr), - TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr), + TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted, __field(int, q_inflight) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->inflight = inflight; - __entry->flags = cmdbatch->flags; + __entry->flags = drawobj->flags; __entry->ticks = ticks; __entry->secs = secs; __entry->usecs = usecs; - __entry->prio = cmdbatch->context->priority; + __entry->prio = drawobj->context->priority; __entry->rb_id = rb->id; __entry->rptr = rptr; __entry->wptr = rb->wptr; @@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted, __entry->id, __entry->prio, __entry->timestamp, __entry->inflight, __entry->flags ? 
__print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none", + KGSL_DRAWOBJ_FLAGS) : "none", __entry->ticks, __entry->secs, __entry->usecs, __entry->rb_id, __entry->rptr, __entry->wptr, __entry->q_inflight @@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted, ); TRACE_EVENT(adreno_cmdbatch_retired, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, + TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t start, uint64_t retire, - struct adreno_ringbuffer *rb, unsigned int rptr), - TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr), + struct adreno_ringbuffer *rb, unsigned int rptr, + unsigned long fault_recovery), + TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired, __field(unsigned int, rptr) __field(unsigned int, wptr) __field(int, q_inflight) + __field(unsigned long, fault_recovery) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->inflight = inflight; - __entry->recovery = cmdbatch->fault_recovery; - __entry->flags = cmdbatch->flags; + __entry->recovery = fault_recovery; + __entry->flags = drawobj->flags; __entry->start = start; __entry->retire = retire; - __entry->prio = cmdbatch->context->priority; + __entry->prio = drawobj->context->priority; __entry->rb_id = rb->id; __entry->rptr = rptr; __entry->wptr = rb->wptr; @@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired, __print_flags(__entry->recovery, "|", ADRENO_FT_TYPES) : "none", __entry->flags ? __print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none", + KGSL_DRAWOBJ_FLAGS) : "none", __entry->start, __entry->retire, __entry->rb_id, __entry->rptr, __entry->wptr, @@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired, ); TRACE_EVENT(adreno_cmdbatch_fault, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault), - TP_ARGS(cmdbatch, fault), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault), + TP_ARGS(cmdobj, fault), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, fault) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->fault = fault; ), TP_printk( @@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault, ); TRACE_EVENT(adreno_cmdbatch_recovery, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action), - TP_ARGS(cmdbatch, action), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action), + TP_ARGS(cmdobj, action), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, action) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->action = action; ), TP_printk( diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 88581b079246..add4590bbb90 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -36,7 +36,7 @@ #include "kgsl_cffdump.h" #include "kgsl_log.h" #include "kgsl_sharedmem.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #include "kgsl_device.h" #include "kgsl_trace.h" #include "kgsl_sync.h" @@ -1497,11 +1497,17 @@ long 
kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_ringbuffer_issueibcmds *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; + struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_cmd *cmdobj; long result = -EINVAL; /* The legacy functions don't support synchronization commands */ - if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))) + if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER))) + return -EINVAL; + + /* Sanity check the number of IBs */ + if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST && + (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS)) return -EINVAL; /* Get the context */ @@ -1509,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, if (context == NULL) return -EINVAL; - /* Create a command batch */ - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags, + CMDOBJ_TYPE); + if (IS_ERR(cmdobj)) { + kgsl_context_put(context); + return PTR_ERR(cmdobj); } - if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) { - /* Sanity check the number of IBs */ - if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) { - result = -EINVAL; - goto done; - } - result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch, + drawobj = DRAWOBJ(cmdobj); + + if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST) + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, (void __user *) param->ibdesc_addr, param->numibs); - } else { + else { struct kgsl_ibdesc ibdesc; /* Ultra legacy path */ @@ -1533,83 +1536,119 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, ibdesc.sizedwords = param->numibs; ibdesc.ctrl = 0; - result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); } - if (result) - goto done; - - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, ¶m->timestamp); + if (result == 0) + result = dev_priv->device->ftbl->queue_cmds(dev_priv, context, + &drawobj, 1, ¶m->timestamp); -done: /* * -EPROTO is a "success" error - it just tells the user that the * context had previously faulted */ if (result && result != -EPROTO) - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); kgsl_context_put(context); return result; } +/* Returns 0 on failure. Returns command type(s) on success */ +static unsigned int _process_command_input(struct kgsl_device *device, + unsigned int flags, unsigned int numcmds, + unsigned int numobjs, unsigned int numsyncs) +{ + if (numcmds > KGSL_MAX_NUMIBS || + numobjs > KGSL_MAX_NUMIBS || + numsyncs > KGSL_MAX_SYNCPOINTS) + return 0; + + /* + * The SYNC bit is supposed to identify a dummy sync object + * so warn the user if they specified any IBs with it. + * A MARKER command can either have IBs or not but if the + * command has 0 IBs it is automatically assumed to be a marker. 
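Concretely, the deduction below behaves as follows (a sketch of representative cases; the device argument is not consulted by these checks):

	/*
	 * flags=0,                 numcmds>0, numsyncs=0  -> CMDOBJ_TYPE
	 * flags=0,                 numcmds=0, numsyncs=0  -> MARKEROBJ_TYPE
	 * flags=0,                 numcmds>0, numsyncs>0  -> SYNCOBJ_TYPE | CMDOBJ_TYPE
	 * flags=KGSL_DRAWOBJ_SYNC, any numcmds            -> SYNCOBJ_TYPE (explicit flag wins)
	 * any count over KGSL_MAX_NUMIBS/KGSL_MAX_SYNCPOINTS -> 0 (rejected)
	 */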
+ */ + + /* If they specify the flag, go with what they say */ + if (flags & KGSL_DRAWOBJ_MARKER) + return MARKEROBJ_TYPE; + else if (flags & KGSL_DRAWOBJ_SYNC) + return SYNCOBJ_TYPE; + + /* If not, deduce what they meant */ + if (numsyncs && numcmds) + return SYNCOBJ_TYPE | CMDOBJ_TYPE; + else if (numsyncs) + return SYNCOBJ_TYPE; + else if (numcmds) + return CMDOBJ_TYPE; + else if (numcmds == 0) + return MARKEROBJ_TYPE; + + return 0; +} + long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_submit_commands *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; - long result = -EINVAL; - - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - - if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. They will be ignored\n"); - else if (param->numcmds > KGSL_MAX_NUMIBS) - return -EINVAL; - else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0) - param->flags |= KGSL_CMDBATCH_MARKER; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* Make sure that we don't have too many syncpoints */ - if (param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, 0, + param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - /* Create a command batch */ - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_syncpoints(device, syncobj, + param->synclist, param->numsyncs); + if (result) + goto done; } - result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch, - param->cmdlist, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch, - param->synclist, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); - /* If no profiling buffer was specified, clear the flag */ - if (cmdbatch->profiling_buf_entry == NULL) - cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING; + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, + param->cmdlist, param->numcmds); + if (result) + goto done; - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, ¶m->timestamp); + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } + + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, ¶m->timestamp); done: /* @@ -1617,7 +1656,9 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - 
kgsl_cmdbatch_destroy(cmdbatch); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); + kgsl_context_put(context); return result; @@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, struct kgsl_gpu_command *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; - - long result = -EINVAL; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. They will be ignored\n"); - else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0) - param->flags |= KGSL_CMDBATCH_MARKER; - - /* Make sure that the memobj and syncpoint count isn't too big */ - if (param->numcmds > KGSL_MAX_NUMIBS || - param->numobjs > KGSL_MAX_NUMIBS || - param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, + param->numobjs, param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_synclist(device, syncobj, + to_user_ptr(param->synclist), + param->syncsize, param->numsyncs); + if (result) + goto done; } - result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch, - to_user_ptr(param->cmdlist), - param->cmdsize, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); - result = kgsl_cmdbatch_add_memlist(device, cmdbatch, - to_user_ptr(param->objlist), - param->objsize, param->numobjs); - if (result) - goto done; + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_cmdbatch_add_synclist(device, cmdbatch, - to_user_ptr(param->synclist), - param->syncsize, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); + + result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj, + to_user_ptr(param->cmdlist), + param->cmdsize, param->numcmds); + if (result) + goto done; - /* If no profiling buffer was specified, clear the flag */ - if (cmdbatch->profiling_buf_entry == NULL) - cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING; + result = kgsl_drawobj_cmd_add_memlist(device, cmdobj, + to_user_ptr(param->objlist), + param->objsize, param->numobjs); + if (result) + goto done; + + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, ¶m->timestamp); + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, ¶m->timestamp); done: /* @@ -1693,7 
+1740,8 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - kgsl_cmdbatch_destroy(cmdbatch); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); kgsl_context_put(context); return result; @@ -4600,7 +4648,7 @@ static void kgsl_core_exit(void) kgsl_driver.class = NULL; } - kgsl_cmdbatch_exit(); + kgsl_drawobj_exit(); kgsl_memfree_exit(); unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX); @@ -4676,7 +4724,7 @@ static int __init kgsl_core_init(void) kgsl_events_init(); - result = kgsl_cmdbatch_init(); + result = kgsl_drawobj_init(); if (result) goto err; diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 7ac84b777051..826c4edb3582 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -28,6 +28,25 @@ #include <linux/uaccess.h> #include <asm/cacheflush.h> +/* + * --- kgsl drawobj flags --- + * These flags are the same as the --- cmdbatch flags --- + * but renamed to reflect that cmdbatch is renamed to drawobj. + */ +#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST +#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER +#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST +#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH +#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING +#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME +#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME +#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC +#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT +#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE + +#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer + + /* The number of memstore arrays limits the number of contexts allowed. * If more contexts are needed, update multiple for MEMSTORE_SIZE */ diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c index 8e783f8ce017..3337570477f9 100644 --- a/drivers/gpu/msm/kgsl_cffdump.c +++ b/drivers/gpu/msm/kgsl_cffdump.c @@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device, */ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { int ret = 0; struct kgsl_memobj_node *ib; @@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, if (!device->cff_dump_enable) return 0; /* Dump CFF for IB and all objects in it */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { ret = kgsl_cffdump_capture_adreno_ib_cff( device, context->proc_priv, ib->gpuaddr, ib->size >> 2); diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h index 315a097ba817..14bc397cb570 100644 --- a/drivers/gpu/msm/kgsl_cffdump.h +++ b/drivers/gpu/msm/kgsl_cffdump.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
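The cffdump hunk above shows the standard consumer pattern for a command object's IB list: each kgsl_memobj_node on cmdobj->cmdlist carries the GPU address and byte size of one IB. A minimal sketch of that pattern (process_ib() is a hypothetical stand-in for the per-IB work):

	struct kgsl_memobj_node *ib;

	list_for_each_entry(ib, &cmdobj->cmdlist, node)
		process_ib(ib->gpuaddr, ib->size >> 2);	/* size in bytes -> dwords */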
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val); int kgsl_cff_dump_enable_get(void *data, u64 *val); int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch); + struct kgsl_drawobj_cmd *cmdobj); void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2, uint op3, uint op4, uint op5); @@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device, static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { return 0; } diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h deleted file mode 100644 index d5cbf375b5d3..000000000000 --- a/drivers/gpu/msm/kgsl_cmdbatch.h +++ /dev/null @@ -1,168 +0,0 @@ -/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __KGSL_CMDBATCH_H -#define __KGSL_CMDBATCH_H - -#define KGSL_CMDBATCH_FLAGS \ - { KGSL_CMDBATCH_MARKER, "MARKER" }, \ - { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \ - { KGSL_CMDBATCH_SYNC, "SYNC" }, \ - { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \ - { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \ - { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" } - -/** - * struct kgsl_cmdbatch - KGSl command descriptor - * @device: KGSL GPU device that the command was created for - * @context: KGSL context that created the command - * @timestamp: Timestamp assigned to the command - * @flags: flags - * @priv: Internal flags - * @fault_policy: Internal policy describing how to handle this command in case - * of a fault - * @fault_recovery: recovery actions actually tried for this batch - * @refcount: kref structure to maintain the reference count - * @cmdlist: List of IBs to issue - * @memlist: List of all memory used in this command batch - * @synclist: Array of context/timestamp tuples to wait for before issuing - * @numsyncs: Number of sync entries in the array - * @pending: Bitmask of sync events that are active - * @timer: a timer used to track possible sync timeouts for this cmdbatch - * @marker_timestamp: For markers, the timestamp of the last "real" command that - * was queued - * @profiling_buf_entry: Mem entry containing the profiling buffer - * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here - * for easy access - * @profile_index: Index to store the start/stop ticks in the kernel profiling - * buffer - * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit. - * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch - * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the - * timer will expire - * This structure defines an atomic batch of command buffers issued from - * userspace. 
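As a contrast with this monolithic structure, the replacement scheme (kgsl_drawobj.h, added later in this patch) embeds a common kgsl_drawobj base in per-type structs and downcasts with container_of(). A sketch of the guarded downcast idiom (the helper name is hypothetical; CMDOBJ() is the macro from the new header):

	static struct kgsl_drawobj_cmd *to_cmdobj(struct kgsl_drawobj *obj)
	{
		/* CMDOBJ(obj) is container_of(obj, struct kgsl_drawobj_cmd, base) */
		if (!(obj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)))
			return NULL;
		return CMDOBJ(obj);
	}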
- */ -struct kgsl_cmdbatch { - struct kgsl_device *device; - struct kgsl_context *context; - uint32_t timestamp; - uint32_t flags; - unsigned long priv; - unsigned long fault_policy; - unsigned long fault_recovery; - struct kref refcount; - struct list_head cmdlist; - struct list_head memlist; - struct kgsl_cmdbatch_sync_event *synclist; - unsigned int numsyncs; - unsigned long pending; - struct timer_list timer; - unsigned int marker_timestamp; - struct kgsl_mem_entry *profiling_buf_entry; - uint64_t profiling_buffer_gpuaddr; - unsigned int profile_index; - uint64_t submit_ticks; - unsigned int global_ts; - unsigned long timeout_jiffies; -}; - -/** - * struct kgsl_cmdbatch_sync_event - * @id: identifer (positiion within the pending bitmap) - * @type: Syncpoint type - * @cmdbatch: Pointer to the cmdbatch that owns the sync event - * @context: Pointer to the KGSL context that owns the cmdbatch - * @timestamp: Pending timestamp for the event - * @handle: Pointer to a sync fence handle - * @device: Pointer to the KGSL device - */ -struct kgsl_cmdbatch_sync_event { - unsigned int id; - int type; - struct kgsl_cmdbatch *cmdbatch; - struct kgsl_context *context; - unsigned int timestamp; - struct kgsl_sync_fence_waiter *handle; - struct kgsl_device *device; -}; - -/** - * enum kgsl_cmdbatch_priv - Internal cmdbatch flags - * @CMDBATCH_FLAG_SKIP - skip the entire command batch - * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch - * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission - * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch - * in the profiling buffer - * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the - * cmdbatch timer - this is used to avoid recursion - */ - -enum kgsl_cmdbatch_priv { - CMDBATCH_FLAG_SKIP = 0, - CMDBATCH_FLAG_FORCE_PREAMBLE, - CMDBATCH_FLAG_WFI, - CMDBATCH_FLAG_PROFILE, - CMDBATCH_FLAG_FENCE_LOG, -}; - - -int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_ibdesc *ibdesc); - -int kgsl_cmdbatch_add_sync(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmd_syncpoint *sync); - -struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags); -int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc); -int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count); -int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count); -int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); -int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); -int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); - -int kgsl_cmdbatch_init(void); -void kgsl_cmdbatch_exit(void); - -void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch); - -void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch); - -void kgsl_cmdbatch_destroy_object(struct kref *kref); - -static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch) -{ - return !bitmap_empty(&cmdbatch->pending, 
KGSL_MAX_SYNCPOINTS); -} - -static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch, - unsigned int bit) -{ - if (bit >= KGSL_MAX_SYNCPOINTS) - return false; - - return test_bit(bit, &cmdbatch->pending); -} - -#endif /* __KGSL_CMDBATCH_H */ diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h index ca1685e5fcf5..7681d74fb108 100644 --- a/drivers/gpu/msm/kgsl_compat.h +++ b/drivers/gpu/msm/kgsl_compat.h @@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size) return (compat_size_t)size; } -int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags, - struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist, +int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags, + struct kgsl_drawobj *drawobj, void __user *cmdlist, unsigned int numcmds, void __user *synclist, unsigned int numsyncs); @@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); #else -static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device, - unsigned int flags, struct kgsl_cmdbatch *cmdbatch, +static inline int kgsl_drawobj_create_compat(struct kgsl_device *device, + unsigned int flags, struct kgsl_drawobj *drawobj, void __user *cmdlist, unsigned int numcmds, void __user *synclist, unsigned int numsyncs) { diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index 24511a4de6f1..04935e8d0019 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -25,7 +25,7 @@ #include "kgsl_pwrscale.h" #include "kgsl_snapshot.h" #include "kgsl_sharedmem.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #define KGSL_IOCTL_FUNC(_cmd, _func) \ [_IOC_NR((_cmd))] = \ @@ -127,9 +127,9 @@ struct kgsl_functable { unsigned int msecs); int (*readtimestamp) (struct kgsl_device *device, void *priv, enum kgsl_timestamp_type type, unsigned int *timestamp); - int (*issueibcmds) (struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamps); + int (*queue_cmds)(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void (*power_stats)(struct kgsl_device *device, struct kgsl_power_stats *stats); unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid); @@ -186,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg, /** * struct kgsl_memobj_node - Memory object descriptor - * @node: Local list node for the cmdbatch + * @node: Local list node for the object * @id: GPU memory ID for the object * offset: Offset within the object * @gpuaddr: GPU address for the object @@ -235,7 +235,7 @@ struct kgsl_device { struct kgsl_mmu mmu; struct completion hwaccess_gate; - struct completion cmdbatch_gate; + struct completion halt_gate; const struct kgsl_functable *ftbl; struct work_struct idle_check_ws; struct timer_list idle_timer; @@ -292,7 +292,7 @@ struct kgsl_device { #define KGSL_DEVICE_COMMON_INIT(_dev) \ .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\ - .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\ + .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\ .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\ kgsl_idle_check),\ .context_idr = IDR_INIT((_dev).context_idr),\ diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c index 6272410ce544..7840daa6a3e2 100644 --- 
a/drivers/gpu/msm/kgsl_cmdbatch.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -11,17 +11,17 @@ */ /* - * KGSL command batch management - * A command batch is a single submission from userland. The cmdbatch + * KGSL drawobj management + * A drawobj is a single submission from userland. The drawobj * encapsulates everything about the submission: command buffers, flags and * sync points. * * Sync points are events that need to expire before the - * cmdbatch can be queued to the hardware. All synpoints are contained in an - * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be + * drawobj can be queued to the hardware. All syncpoints are contained in an + * array of kgsl_drawobj_sync_event structs in the drawobj. There can be * multiple types of events both internal ones (GPU events) and external * triggers. As the events expire bits are cleared in a pending bitmap stored - * in the command batch. The GPU will submit the command as soon as the bitmap + * in the drawobj. The GPU will submit the command as soon as the bitmap * goes to zero indicating no more pending events. */ @@ -31,7 +31,7 @@ #include "kgsl.h" #include "kgsl_device.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #include "kgsl_sync.h" #include "kgsl_trace.h" #include "kgsl_compat.h" @@ -42,26 +42,43 @@ */ static struct kmem_cache *memobjs_cache; -/** - * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object - * @cmdbatch: Pointer to the command batch object - */ -static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch) +static void drawobj_destroy_object(struct kref *kref) { - if (cmdbatch) - kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object); + struct kgsl_drawobj *drawobj = container_of(kref, + struct kgsl_drawobj, refcount); + struct kgsl_drawobj_sync *syncobj; + + kgsl_context_put(drawobj->context); + + switch (drawobj->type) { + case SYNCOBJ_TYPE: + syncobj = SYNCOBJ(drawobj); + kfree(syncobj->synclist); + kfree(syncobj); + break; + case CMDOBJ_TYPE: + case MARKEROBJ_TYPE: + kfree(CMDOBJ(drawobj)); + break; + } +} + +static inline void drawobj_put(struct kgsl_drawobj *drawobj) +{ + if (drawobj) + kref_put(&drawobj->refcount, drawobj_destroy_object); +} void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_sync *syncobj) { - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync_event *event; unsigned int i; - for (i = 0; i < cmdbatch->numsyncs; i++) { - event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_cmdbatch_event_pending(cmdbatch, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, } } -static void _kgsl_cmdbatch_timer(unsigned long data) +static void syncobj_timer(unsigned long data) { struct kgsl_device *device; - struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data; - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_drawobj_sync_event *event; unsigned int i; - if (cmdbatch == 
NULL || cmdbatch->context == NULL) + if (syncobj == NULL || drawobj->context == NULL) return; - device = cmdbatch->context->device; + device = drawobj->context->device; dev_err(device->dev, "kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n", - cmdbatch->context->id, cmdbatch->timestamp); + drawobj->context->id, drawobj->timestamp); - set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv); - kgsl_context_dump(cmdbatch->context); - clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv); + set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); + kgsl_context_dump(drawobj->context); + clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); dev_err(device->dev, " pending events:\n"); - for (i = 0; i < cmdbatch->numsyncs; i++) { - event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_cmdbatch_event_pending(cmdbatch, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data) dev_err(device->dev, "--gpu syncpoint deadlock print end--\n"); } -/** - * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object - * @kref: Pointer to the kref structure for this object - * - * Actually destroy a command batch object. Called from kgsl_cmdbatch_put - */ -void kgsl_cmdbatch_destroy_object(struct kref *kref) -{ - struct kgsl_cmdbatch *cmdbatch = container_of(kref, - struct kgsl_cmdbatch, refcount); - - kgsl_context_put(cmdbatch->context); - - kfree(cmdbatch->synclist); - kfree(cmdbatch); -} -EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object); - /* * a generic function to retire a pending sync event and (possibly) * kick the dispatcher */ -static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device, - struct kgsl_cmdbatch_sync_event *event) +static void drawobj_sync_expire(struct kgsl_device *device, + struct kgsl_drawobj_sync_event *event) { + struct kgsl_drawobj_sync *syncobj = event->syncobj; /* * Clear the event from the pending mask - if it is already clear, then * leave without doing anything useful */ - if (!test_and_clear_bit(event->id, &event->cmdbatch->pending)) + if (!test_and_clear_bit(event->id, &syncobj->pending)) return; /* * If no more pending events, delete the timer and schedule the command * for dispatch */ - if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) { - del_timer_sync(&event->cmdbatch->timer); + if (!kgsl_drawobj_events_pending(event->syncobj)) { + del_timer_sync(&syncobj->timer); if (device->ftbl->drawctxt_sched) device->ftbl->drawctxt_sched(device, - event->cmdbatch->context); + event->syncobj->base.context); } } @@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device, * This function is called by the GPU event when the sync event timestamp * expires */ -static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, +static void drawobj_sync_func(struct kgsl_device *device, struct kgsl_event_group *group, void *priv, int result) { - struct kgsl_cmdbatch_sync_event *event = priv; + struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_timestamp_expire(event->cmdbatch, + trace_syncpoint_timestamp_expire(event->syncobj, event->context, event->timestamp); - kgsl_cmdbatch_sync_expire(device, event); + drawobj_sync_expire(device, event); kgsl_context_put(event->context); - kgsl_cmdbatch_put(event->cmdbatch); + drawobj_put(&event->syncobj->base); } -static inline void _free_memobj_list(struct list_head *list) +static inline void memobj_list_free(struct 
list_head *list) { struct kgsl_memobj_node *mem, *tmpmem; @@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list) } } -/** - * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure - * @cmdbatch: Pointer to the command batch object to destroy - * - * Start the process of destroying a command batch. Cancel any pending events - * and decrement the refcount. Asynchronous events can still signal after - * kgsl_cmdbatch_destroy has returned. - */ -void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch) +static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) { - unsigned int i; + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); unsigned long pending; - - if (IS_ERR_OR_NULL(cmdbatch)) - return; + unsigned int i; /* Zap the canary timer */ - del_timer_sync(&cmdbatch->timer); + del_timer_sync(&syncobj->timer); /* * Copy off the pending list and clear all pending events - this will * render any subsequent asynchronous callback harmless */ - bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS); - bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS); + bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS); + bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS); /* * Clear all pending events - this will render any subsequent async * callbacks harmless */ - - for (i = 0; i < cmdbatch->numsyncs; i++) { - struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i]; /* Don't do anything if the event has already expired */ if (!test_bit(i, &pending)) @@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch) switch (event->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: - kgsl_cancel_event(cmdbatch->device, + kgsl_cancel_event(drawobj->device, &event->context->events, event->timestamp, - kgsl_cmdbatch_sync_func, event); + drawobj_sync_func, event); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: if (kgsl_sync_fence_async_cancel(event->handle)) - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); break; } } /* - * Release the the refcount on the mem entry associated with the - * cmdbatch profiling buffer + * If we cancelled an event, there's a good chance that the context is + * on a dispatcher queue, so schedule to get it removed. + */ + if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) && + drawobj->device->ftbl->drawctxt_sched) + drawobj->device->ftbl->drawctxt_sched(drawobj->device, + drawobj->context); + +} + +static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj) +{ + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); + + /* + * Release the refcount on the mem entry associated with the + * ib profiling buffer */ - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING) - kgsl_mem_entry_put(cmdbatch->profiling_buf_entry); + if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING) + kgsl_mem_entry_put(cmdobj->profiling_buf_entry); /* Destroy the cmdlist we created */ - _free_memobj_list(&cmdbatch->cmdlist); + memobj_list_free(&cmdobj->cmdlist); /* Destroy the memlist we created */ - _free_memobj_list(&cmdbatch->memlist); + memobj_list_free(&cmdobj->memlist); +} - /* - * If we cancelled an event, there's a good chance that the context is - * on a dispatcher queue, so schedule to get it removed. +/** + * kgsl_drawobj_destroy() - Destroy a kgsl object structure + * @obj: Pointer to the kgsl object to destroy + * + * Start the process of destroying a command batch. Cancel any pending events + * and decrement the refcount. 
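The copy-then-zero of the pending bitmap in drawobj_destroy_sync() above is what renders late callbacks harmless: any sync callback that fires afterwards funnels into drawobj_sync_expire() (shown earlier), whose opening test fails once the live bitmap is clear. A sketch of that first step:

	if (!test_and_clear_bit(event->id, &syncobj->pending))
		return;	/* already expired, or destroy() zeroed the bitmap */

The private copy taken before zeroing still tells the destroy path which events it must cancel.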
Asynchronous events can still signal after + * kgsl_drawobj_destroy has returned. */ - if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) && - cmdbatch->device->ftbl->drawctxt_sched) - cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device, - cmdbatch->context); +void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) +{ + if (!drawobj) + return; + + if (drawobj->type & SYNCOBJ_TYPE) + drawobj_destroy_sync(drawobj); + else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) + drawobj_destroy_cmd(drawobj); + else + return; - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); } -EXPORT_SYMBOL(kgsl_cmdbatch_destroy); +EXPORT_SYMBOL(kgsl_drawobj_destroy); -/* - * A callback that gets registered with kgsl_sync_fence_async_wait and is fired - * when a fence is expired - */ -static void kgsl_cmdbatch_sync_fence_func(void *priv) +static void drawobj_sync_fence_func(void *priv) { - struct kgsl_cmdbatch_sync_event *event = priv; + struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_fence_expire(event->cmdbatch, + trace_syncpoint_fence_expire(event->syncobj, event->handle ? event->handle->name : "unknown"); - kgsl_cmdbatch_sync_expire(event->device, event); + drawobj_sync_expire(event->device, event); - kgsl_cmdbatch_put(event->cmdbatch); + drawobj_put(&event->syncobj->base); } -/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint +/* drawobj_add_sync_fence() - Add a new sync fence syncpoint * @device: KGSL device - * @cmdbatch: KGSL cmdbatch to add the sync point to - * @priv: Private sructure passed by the user + * @syncobj: KGSL sync obj to add the sync point to + * @priv: Private structure passed by the user * - * Add a new fence sync syncpoint to the cmdbatch. + * Add a new fence sync syncpoint to the sync obj. */ -static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void *priv) +static int drawobj_add_sync_fence(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_fence *sync = priv; - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_drawobj_sync_event *event; unsigned int id; - kref_get(&cmdbatch->refcount); + kref_get(&drawobj->refcount); - id = cmdbatch->numsyncs++; + id = syncobj->numsyncs++; - event = &cmdbatch->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE; - event->cmdbatch = cmdbatch; + event->syncobj = syncobj; event->device = device; event->context = NULL; - set_bit(event->id, &cmdbatch->pending); + set_bit(event->id, &syncobj->pending); event->handle = kgsl_sync_fence_async_wait(sync->fd, - kgsl_cmdbatch_sync_fence_func, event); + drawobj_sync_fence_func, event); if (IS_ERR_OR_NULL(event->handle)) { int ret = PTR_ERR(event->handle); - clear_bit(event->id, &cmdbatch->pending); + clear_bit(event->id, &syncobj->pending); event->handle = NULL; - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); /* * If ret == 0 the fence was already signaled - print a trace * message so we can track that */ if (ret == 0) - trace_syncpoint_fence_expire(cmdbatch, "signaled"); + trace_syncpoint_fence_expire(syncobj, "signaled"); return ret; } - trace_syncpoint_fence(cmdbatch, event->handle->name); + trace_syncpoint_fence(syncobj, event->handle->name); return 0; } -/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch +/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj * @device: KGSL device - * @cmdbatch: KGSL cmdbatch to 
add the sync point to - * @priv: Private sructure passed by the user + * @syncobj: KGSL sync obj to add the sync point to + * @priv: Private structure passed by the user * - * Add a new sync point timestamp event to the cmdbatch. + * Add a new sync point timestamp event to the sync obj. */ -static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void *priv) +static int drawobj_add_sync_timestamp(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_timestamp *sync = priv; - struct kgsl_context *context = kgsl_context_get(cmdbatch->device, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_context *context = kgsl_context_get(device, sync->context_id); - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync_event *event; int ret = -EINVAL; unsigned int id; @@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, * create a sync point on a future timestamp. */ - if (context == cmdbatch->context) { + if (context == drawobj->context) { unsigned int queued; + kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queued); @@ -397,29 +413,29 @@ } } - kref_get(&cmdbatch->refcount); + kref_get(&drawobj->refcount); - id = cmdbatch->numsyncs++; + id = syncobj->numsyncs++; - event = &cmdbatch->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP; - event->cmdbatch = cmdbatch; + event->syncobj = syncobj; event->context = context; event->timestamp = sync->timestamp; event->device = device; - set_bit(event->id, &cmdbatch->pending); + set_bit(event->id, &syncobj->pending); ret = kgsl_add_event(device, &context->events, sync->timestamp, - kgsl_cmdbatch_sync_func, event); + drawobj_sync_func, event); if (ret) { - clear_bit(event->id, &cmdbatch->pending); - kgsl_cmdbatch_put(cmdbatch); + clear_bit(event->id, &syncobj->pending); + drawobj_put(drawobj); } else { - trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp); + trace_syncpoint_timestamp(syncobj, context, sync->timestamp); } done: @@ -430,43 +446,46 @@ done: } /** - * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch + * kgsl_drawobj_sync_add_sync() - Add a sync point to a sync + * obj * @device: Pointer to the KGSL device struct for the GPU - * @cmdbatch: Pointer to the cmdbatch + * @syncobj: Pointer to the sync obj * @sync: Pointer to the user-specified struct defining the syncpoint * - * Create a new sync point in the cmdbatch based on the user specified - * parameters + * Create a new sync point in the sync obj based on the + * user specified parameters */ -int kgsl_cmdbatch_add_sync(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, struct kgsl_cmd_syncpoint *sync) { void *priv; int ret, psize; - int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + int (*func)(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv); switch (sync->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); - func = kgsl_cmdbatch_add_sync_timestamp; + func = drawobj_add_sync_timestamp; break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: psize = sizeof(struct kgsl_cmd_syncpoint_fence); - func = 
kgsl_cmdbatch_add_sync_fence; + func = drawobj_add_sync_fence; break; default: KGSL_DRV_ERR(device, "bad syncpoint type ctxt %d type 0x%x size %zu\n", - cmdbatch->context->id, sync->type, sync->size); + drawobj->context->id, sync->type, sync->size); return -EINVAL; } if (sync->size != psize) { KGSL_DRV_ERR(device, "bad syncpoint size ctxt %d type 0x%x size %zu\n", - cmdbatch->context->id, sync->type, sync->size); + drawobj->context->id, sync->type, sync->size); return -EINVAL; } @@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device, return -EFAULT; } - ret = func(device, cmdbatch, priv); + ret = func(device, syncobj, priv); kfree(priv); return ret; } static void add_profiling_buffer(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size, + struct kgsl_drawobj_cmd *cmdobj, + uint64_t gpuaddr, uint64_t size, unsigned int id, uint64_t offset) { struct kgsl_mem_entry *entry; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING)) + if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING)) return; /* Only the first buffer entry counts - ignore the rest */ - if (cmdbatch->profiling_buf_entry != NULL) + if (cmdobj->profiling_buf_entry != NULL) return; if (id != 0) - entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv, + entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv, id); else - entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv, + entry = kgsl_sharedmem_find(drawobj->context->proc_priv, gpuaddr); if (entry != NULL) { @@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device, if (entry == NULL) { KGSL_DRV_ERR(device, "ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n", - cmdbatch->context->id, id, offset, gpuaddr, size); + drawobj->context->id, id, offset, gpuaddr, size); return; } - cmdbatch->profiling_buf_entry = entry; + cmdobj->profiling_buf_entry = entry; if (id != 0) - cmdbatch->profiling_buffer_gpuaddr = + cmdobj->profiling_buffer_gpuaddr = entry->memdesc.gpuaddr + offset; else - cmdbatch->profiling_buffer_gpuaddr = gpuaddr; + cmdobj->profiling_buffer_gpuaddr = gpuaddr; } /** - * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch - * @cmdbatch: Pointer to the cmdbatch + * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command + * batch + * @cmdobj: Pointer to the ib * @ibdesc: Pointer to the user-specified struct defining the memory or IB * - * Create a new memory entry in the cmdbatch based on the user specified - * parameters + * Create a new memory entry in the ib based on the + * user specified parameters */ -int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc) +int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc) { uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr; uint64_t size = (uint64_t) ibdesc->sizedwords << 2; struct kgsl_memobj_node *mem; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); /* sanitize the ibdesc ctrl flags */ ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER; - if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST && + if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) { - add_profiling_buffer(device, cmdbatch, + add_profiling_buffer(device, cmdobj, gpuaddr, size, 0, 0); return 0; } } - if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | 
KGSL_CMDBATCH_MARKER)) + /* Ignore if SYNC or MARKER is specified */ + if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE)) return 0; mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL); @@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, mem->offset = 0; mem->flags = 0; - if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST && - ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { + if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && + ibdesc->ctrl & KGSL_IBDESC_MEMLIST) /* add to the memlist */ - list_add_tail(&mem->node, &cmdbatch->memlist); - } else { + list_add_tail(&mem->node, &cmdobj->memlist); + else { /* set the preamble flag if directed to */ - if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE && - list_empty(&cmdbatch->cmdlist)) + if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE && + list_empty(&cmdobj->cmdlist)) mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE; /* add to the cmd list */ - list_add_tail(&mem->node, &cmdbatch->cmdlist); + list_add_tail(&mem->node, &cmdobj->cmdlist); } return 0; } +static inline int drawobj_init(struct kgsl_device *device, + struct kgsl_context *context, struct kgsl_drawobj *drawobj, + unsigned int type) +{ + /* + * Increase the reference count on the context so it doesn't disappear + * during the lifetime of this object + */ + if (!_kgsl_context_get(context)) + return -ENOENT; + + kref_init(&drawobj->refcount); + + drawobj->device = device; + drawobj->context = context; + drawobj->type = type; + + return 0; +} + /** - * kgsl_cmdbatch_create() - Create a new cmdbatch structure + * kgsl_drawobj_sync_create() - Create a new sync obj + * structure * @device: Pointer to a KGSL device struct * @context: Pointer to a KGSL context struct - * @flags: Flags for the cmdbatch * - * Allocate an new cmdbatch structure + * Allocate a new kgsl_drawobj_sync structure */ -struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags) +struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, + struct kgsl_context *context) { - struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL); - if (cmdbatch == NULL) + struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj), + GFP_KERNEL); + if (syncobj == NULL) return ERR_PTR(-ENOMEM); - /* - * Increase the reference count on the context so it doesn't disappear - * during the lifetime of this command batch - */ + if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) { + kfree(syncobj); + return ERR_PTR(-ENOENT); + } + + /* Add a timer to help debug sync deadlocks */ + setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj); + + return syncobj; +} + +/** + * kgsl_drawobj_cmd_create() - Create a new command obj + * structure + * @device: Pointer to a KGSL device struct + * @context: Pointer to a KGSL context struct + * @flags: Flags for the command obj + * @type: type of cmdobj (MARKER or CMD) + * + * Allocate a new kgsl_drawobj_cmd structure + */ +struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, + struct kgsl_context *context, unsigned int flags, + unsigned int type) +{ + struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + struct kgsl_drawobj *drawobj; + + if (cmdobj == NULL) + return ERR_PTR(-ENOMEM); - if (!_kgsl_context_get(context)) { - kfree(cmdbatch); + type &= CMDOBJ_TYPE | MARKEROBJ_TYPE; + if (type == 0) { + kfree(cmdobj); + return ERR_PTR(-EINVAL); + } + + drawobj = DRAWOBJ(cmdobj); + + if (drawobj_init(device, context, drawobj, type)) { + 
kfree(cmdobj); return ERR_PTR(-ENOENT); } - kref_init(&cmdbatch->refcount); - INIT_LIST_HEAD(&cmdbatch->cmdlist); - INIT_LIST_HEAD(&cmdbatch->memlist); - - cmdbatch->device = device; - cmdbatch->context = context; - /* sanitize our flags for cmdbatches */ - cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH - | KGSL_CMDBATCH_MARKER - | KGSL_CMDBATCH_END_OF_FRAME - | KGSL_CMDBATCH_SYNC - | KGSL_CMDBATCH_PWR_CONSTRAINT - | KGSL_CMDBATCH_MEMLIST - | KGSL_CMDBATCH_PROFILING - | KGSL_CMDBATCH_PROFILING_KTIME); + /* sanitize our flags for drawobj's */ + drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH + | KGSL_DRAWOBJ_MARKER + | KGSL_DRAWOBJ_END_OF_FRAME + | KGSL_DRAWOBJ_PWR_CONSTRAINT + | KGSL_DRAWOBJ_MEMLIST + | KGSL_DRAWOBJ_PROFILING + | KGSL_DRAWOBJ_PROFILING_KTIME); - /* Add a timer to help debug sync deadlocks */ - setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer, - (unsigned long) cmdbatch); + INIT_LIST_HEAD(&cmdobj->cmdlist); + INIT_LIST_HEAD(&cmdobj->memlist); - return cmdbatch; + return cmdobj; } #ifdef CONFIG_COMPAT static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { int i, ret = 0; struct kgsl_ibdesc_compat ibdesc32; @@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, ibdesc.sizedwords = (size_t) ibdesc32.sizedwords; ibdesc.ctrl = (unsigned int) ibdesc32.ctrl; - ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) break; @@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint_compat sync32; struct kgsl_cmd_syncpoint sync; @@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device, sync.priv = compat_ptr(sync32.priv); sync.size = (size_t) sync32.size; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) break; @@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device, } #else static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { return -EINVAL; } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { return -EINVAL; } #endif -int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) +/* Returns: + * -EINVAL: Bad data + * 0: All data fields are empty (nothing to do) + * 1: All list information is valid + */ +static int _verify_input_list(unsigned int count, void __user *ptr, + unsigned int size) +{ + /* Return early if nothing going on */ + if (count == 0 && ptr == NULL && size == 0) + return 0; + + /* Sanity check inputs */ + if (count == 0 || ptr == NULL || size == 0) + return -EINVAL; + + return 1; +} + +int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { struct kgsl_ibdesc ibdesc; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); int i, ret; + /* Ignore everything if 
this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) + return 0; + + ret = _verify_input_list(count, ptr, sizeof(ibdesc)); + if (ret <= 0) + return -EINVAL; + if (is_compat_task()) - return add_ibdesc_list_compat(device, cmdbatch, ptr, count); + return add_ibdesc_list_compat(device, cmdobj, ptr, count); for (i = 0; i < count; i++) { memset(&ibdesc, 0, sizeof(ibdesc)); @@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc))) return -EFAULT; - ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) return ret; @@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) +int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint sync; int i, ret; @@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, if (count == 0) return 0; - if (count > KGSL_MAX_SYNCPOINTS) - return -EINVAL; - - cmdbatch->synclist = kcalloc(count, - sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL); + syncobj->synclist = kcalloc(count, + sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (cmdbatch->synclist == NULL) + if (syncobj->synclist == NULL) return -ENOMEM; if (is_compat_task()) - return add_syncpoints_compat(device, cmdbatch, ptr, count); + return add_syncpoints_compat(device, syncobj, ptr, count); for (i = 0; i < count; i++) { memset(&sync, 0, sizeof(sync)); @@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, if (copy_from_user(&sync, ptr, sizeof(sync))) return -EFAULT; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; @@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, return 0; } -static int kgsl_cmdbatch_add_object(struct list_head *head, +static int drawobj_add_object(struct list_head *head, struct kgsl_command_object *obj) { struct kgsl_memobj_node *mem; @@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head, KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \ KGSL_CMDLIST_IB_PREAMBLE) -int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */ +int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; - - /* Ignore all if SYNC or MARKER is specified */ - if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)) - return 0; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, if (!(obj.flags & CMDLIST_FLAGS)) { KGSL_DRV_ERR(device, "invalid cmdobj ctxt %d flags 
%d id %d offset %lld addr %lld size %lld\n", - cmdbatch->context->id, obj.flags, obj.id, + baseobj->context->id, obj.flags, obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } - ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj); + ret = drawobj_add_object(&cmdobj->cmdlist, &obj); if (ret) return ret; @@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) { KGSL_DRV_ERR(device, "invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n", - cmdbatch->context->id, obj.flags, obj.id, - obj.offset, obj.gpuaddr, obj.size); + DRAWOBJ(cmdobj)->context->id, obj.flags, + obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } if (obj.flags & KGSL_OBJLIST_PROFILE) - add_profiling_buffer(device, cmdbatch, obj.gpuaddr, + add_profiling_buffer(device, cmdobj, obj.gpuaddr, obj.size, obj.id, obj.offset); else { - ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist, - &obj); + ret = drawobj_add_object(&cmdobj->memlist, &obj); if (ret) return ret; } @@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_syncpoint syncpoint; struct kgsl_cmd_syncpoint sync; - int i, ret = 0; - - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) - return 0; - - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + int i, ret; - if (count > KGSL_MAX_SYNCPOINTS) + /* If creating a sync and the data is not there or wrong then error */ + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) return -EINVAL; - cmdbatch->synclist = kcalloc(count, - sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL); + syncobj->synclist = kcalloc(count, + sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (cmdbatch->synclist == NULL) + if (syncobj->synclist == NULL) return -ENOMEM; for (i = 0; i < count; i++) { @@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, sync.priv = to_user_ptr(syncpoint.priv); sync.size = syncpoint.size; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; @@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, return 0; } -void kgsl_cmdbatch_exit(void) +void kgsl_drawobj_exit(void) { if (memobjs_cache != NULL) 
kmem_cache_destroy(memobjs_cache);
 }

-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
 {
 	memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
 	if (memobjs_cache == NULL) {
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..89ed944c539a
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define DRAWOBJ(obj) (&obj->base)
+#define SYNCOBJ(obj) \
+	container_of(obj, struct kgsl_drawobj_sync, base)
+#define CMDOBJ(obj) \
+	container_of(obj, struct kgsl_drawobj_cmd, base)
+
+#define CMDOBJ_TYPE BIT(0)
+#define MARKEROBJ_TYPE BIT(1)
+#define SYNCOBJ_TYPE BIT(2)
+
+/**
+ * struct kgsl_drawobj - KGSL drawobj descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @type: Object type
+ * @timestamp: Timestamp assigned to the command
+ * @flags: Flags set by the user for the draw object
+ * @refcount: kref structure to maintain the reference count
+ */
+struct kgsl_drawobj {
+	struct kgsl_device *device;
+	struct kgsl_context *context;
+	uint32_t type;
+	uint32_t timestamp;
+	unsigned long flags;
+	struct kref refcount;
+};
+
+/**
+ * struct kgsl_drawobj_cmd - KGSL command obj. This also covers markers,
+ * a special form of command that does not need its cmds to be executed.
+ * @base: Base kgsl_drawobj
+ * @priv: Internal flags
+ * @global_ts: The ringbuffer timestamp corresponding to this
+ *	command obj
+ * @fault_policy: Internal policy describing how to handle this command in case
+ *	of a fault
+ * @fault_recovery: Recovery actions actually tried for this command obj
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this command obj
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ *	was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
+ *	for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ *	buffer
+ * @submit_ticks: Variable to hold ticks at the time of
+ *	command obj submit.
+ */
+struct kgsl_drawobj_cmd {
+	struct kgsl_drawobj base;
+	unsigned long priv;
+	unsigned int global_ts;
+	unsigned long fault_policy;
+	unsigned long fault_recovery;
+	struct list_head cmdlist;
+	struct list_head memlist;
+	unsigned int marker_timestamp;
+	struct kgsl_mem_entry *profiling_buf_entry;
+	uint64_t profiling_buffer_gpuaddr;
+	unsigned int profile_index;
+	uint64_t submit_ticks;
+};
+
+/**
+ * struct kgsl_drawobj_sync - KGSL sync object
+ * @base: Base kgsl_drawobj, this needs to be the first entry
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this
+ *	sync obj
+ * @timeout_jiffies: For a sync obj, the jiffies at
+ *	which the timer will expire
+ */
+struct kgsl_drawobj_sync {
+	struct kgsl_drawobj base;
+	struct kgsl_drawobj_sync_event *synclist;
+	unsigned int numsyncs;
+	unsigned long pending;
+	struct timer_list timer;
+	unsigned long timeout_jiffies;
+};
+
+/**
+ * struct kgsl_drawobj_sync_event - KGSL sync event descriptor
+ * @id: identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @syncobj: Pointer to the syncobj that owns the sync event
+ * @context: KGSL context for whose timestamp we want to
+ *	register this event
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+	unsigned int id;
+	int type;
+	struct kgsl_drawobj_sync *syncobj;
+	struct kgsl_context *context;
+	unsigned int timestamp;
+	struct kgsl_sync_fence_waiter *handle;
+	struct kgsl_device *device;
+};
+
+#define KGSL_DRAWOBJ_FLAGS \
+	{ KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+	{ KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+	{ KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+	{ KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+	{ KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+	{ KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * enum kgsl_drawobj_cmd_priv - Internal command obj flags
+ * @CMDOBJ_SKIP - skip the entire command obj
+ * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for the
+ *	command obj
+ * @CMDOBJ_WFI - Force wait-for-idle for the submission
+ * @CMDOBJ_PROFILE - store the start / retire ticks for
+ *	the command obj in the profiling buffer
+ */
+enum kgsl_drawobj_cmd_priv {
+	CMDOBJ_SKIP = 0,
+	CMDOBJ_FORCE_PREAMBLE,
+	CMDOBJ_WFI,
+	CMDOBJ_PROFILE,
+};
+
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int flags,
+		unsigned int type);
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+		struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count);
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+		unsigned int size, unsigned int count);
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+		unsigned int size, unsigned int count);
+
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+		struct kgsl_context *context);
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+		struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+		int count);
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+		struct kgsl_drawobj_sync *syncobj,
void __user *ptr, + unsigned int size, unsigned int count); +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, + struct kgsl_cmd_syncpoint *sync); + +int kgsl_drawobj_init(void); +void kgsl_drawobj_exit(void); + +void kgsl_dump_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj); + +void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj); + +static inline bool kgsl_drawobj_events_pending( + struct kgsl_drawobj_sync *syncobj) +{ + return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS); +} + +static inline bool kgsl_drawobj_event_pending( + struct kgsl_drawobj_sync *syncobj, unsigned int bit) +{ + if (bit >= KGSL_MAX_SYNCPOINTS) + return false; + + return test_bit(bit, &syncobj->pending); +} +#endif /* __KGSL_DRAWOBJ_H */ diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 4ef9f80177d6..6438c6e65b97 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds, TP_PROTO(struct kgsl_device *device, int drawctxt_id, - struct kgsl_cmdbatch *cmdbatch, unsigned int numibs, int timestamp, int flags, int result, unsigned int type), - TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp, + TP_ARGS(device, drawctxt_id, numibs, timestamp, flags, result, type), TP_STRUCT__entry( @@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds, __entry->numibs, __entry->timestamp, __entry->flags ? __print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "None", + KGSL_DRAWOBJ_FLAGS) : "None", __entry->result, __print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES) ) @@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy, ); DECLARE_EVENT_CLASS(syncpoint_timestamp_template, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context, + TP_PROTO(struct kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(cmdbatch, context, timestamp), + TP_ARGS(syncobj, context, timestamp), TP_STRUCT__entry( - __field(unsigned int, cmdbatch_context_id) + __field(unsigned int, syncobj_context_id) __field(unsigned int, context_id) __field(unsigned int, timestamp) ), TP_fast_assign( - __entry->cmdbatch_context_id = cmdbatch->context->id; + __entry->syncobj_context_id = syncobj->base.context->id; __entry->context_id = context->id; __entry->timestamp = timestamp; ), TP_printk("ctx=%d sync ctx=%d ts=%d", - __entry->cmdbatch_context_id, __entry->context_id, + __entry->syncobj_context_id, __entry->context_id, __entry->timestamp) ); DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context, + TP_PROTO(struct kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(cmdbatch, context, timestamp) + TP_ARGS(syncobj, context, timestamp) ); DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context, + TP_PROTO(struct kgsl_drawobj_sync *syncobj, + struct kgsl_context *context, unsigned int timestamp), - TP_ARGS(cmdbatch, context, timestamp) + TP_ARGS(syncobj, context, timestamp) ); DECLARE_EVENT_CLASS(syncpoint_fence_template, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name), - TP_ARGS(cmdbatch, name), + TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name), + TP_ARGS(syncobj, name), TP_STRUCT__entry( __string(fence_name, name) - __field(unsigned int, cmdbatch_context_id) + __field(unsigned int, syncobj_context_id) ), 
	TP_fast_assign(
-		__entry->cmdbatch_context_id = cmdbatch->context->id;
+		__entry->syncobj_context_id = syncobj->base.context->id;
 		__assign_str(fence_name, name);
 	),
 	TP_printk("ctx=%d fence=%s",
-		__entry->cmdbatch_context_id, __get_str(fence_name))
+		__entry->syncobj_context_id, __get_str(fence_name))
 );

 DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
-	TP_ARGS(cmdbatch, name)
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+	TP_ARGS(syncobj, name)
 );

 DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
-	TP_ARGS(cmdbatch, name)
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+	TP_ARGS(syncobj, name)
 );

 TRACE_EVENT(kgsl_msg,
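
The new header replaces the monolithic kgsl_cmdbatch with a small type hierarchy: a base struct kgsl_drawobj embedded in either a command/marker object or a sync object, with the DRAWOBJ()/CMDOBJ()/SYNCOBJ() macros converting between them via container_of(). The following standalone sketch is hypothetical userspace code, not part of this patch; the names mirror kgsl_drawobj.h but the structs are trimmed to what the example needs. It shows how the type bits and the container_of() pattern are meant to interact, including the "ignore everything if this is a MARKER" early-out used by kgsl_drawobj_cmd_add_cmdlist() and kgsl_drawobj_cmd_add_memlist() above.

/*
 * Hypothetical userspace model of the drawobj type hierarchy -- NOT part
 * of this patch. Struct and macro names mirror kgsl_drawobj.h.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define CMDOBJ_TYPE    BIT(0)
#define MARKEROBJ_TYPE BIT(1)
#define SYNCOBJ_TYPE   BIT(2)

struct drawobj {
	uint32_t type;
	uint32_t timestamp;
};

struct drawobj_cmd {
	struct drawobj base;	/* embedded base, first member as in the header */
	unsigned int global_ts;
};

/* container_of() equivalent: recover the wrapper from the embedded base */
#define CMDOBJ(obj) \
	((struct drawobj_cmd *)((char *)(obj) - offsetof(struct drawobj_cmd, base)))

static void handle(struct drawobj *obj)
{
	/*
	 * Markers share struct kgsl_drawobj_cmd but carry no IBs, which is
	 * why the add_cmdlist/add_memlist paths return 0 early on
	 * MARKEROBJ_TYPE instead of flagging the empty input as an error.
	 */
	if (obj->type & MARKEROBJ_TYPE) {
		printf("marker ts=%u: nothing to execute\n",
		       (unsigned int)obj->timestamp);
		return;
	}

	if (obj->type & CMDOBJ_TYPE)
		printf("cmd ts=%u global_ts=%u\n",
		       (unsigned int)obj->timestamp, CMDOBJ(obj)->global_ts);
}

int main(void)
{
	struct drawobj_cmd cmd = { { CMDOBJ_TYPE, 42 }, 7 };
	struct drawobj_cmd marker = { { MARKEROBJ_TYPE, 43 }, 0 };

	handle(&cmd.base);
	handle(&marker.base);
	return 0;
}

This is also why markers get no dedicated struct of their own: they reuse struct kgsl_drawobj_cmd and are distinguished purely by the type bit, so the dispatcher can queue them on the same lists as real commands.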

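The inline helpers kgsl_drawobj_events_pending() and kgsl_drawobj_event_pending() track outstanding syncpoints as bits in syncobj->pending via bitmap_empty()/test_bit(); since that field is a single unsigned long, this presumes KGSL_MAX_SYNCPOINTS fits in one machine word. A minimal userspace model of that bookkeeping, illustrative only and not the kernel code:

/*
 * Illustrative-only model of the pending-syncpoint bitmap -- not kernel
 * code. The kernel helpers use bitmap_empty()/test_bit() on
 * syncobj->pending instead of the open-coded bit tests below.
 */
#include <stdbool.h>
#include <stdio.h>

#define KGSL_MAX_SYNCPOINTS 32	/* assumed to fit in one unsigned long */

struct syncobj {
	unsigned long pending;	/* bit n set => syncpoint n still armed */
};

static bool events_pending(const struct syncobj *s)
{
	return s->pending != 0;			/* stands in for !bitmap_empty() */
}

static bool event_pending(const struct syncobj *s, unsigned int bit)
{
	if (bit >= KGSL_MAX_SYNCPOINTS)
		return false;
	return s->pending & (1ul << bit);	/* stands in for test_bit() */
}

int main(void)
{
	struct syncobj s = { 0 };

	s.pending |= 1ul << 3;			/* syncpoint 3 armed */
	printf("%d %d\n", events_pending(&s), event_pending(&s, 3)); /* 1 1 */

	s.pending &= ~(1ul << 3);		/* syncpoint 3 signalled */
	printf("%d\n", events_pending(&s));	/* 0 */
	return 0;
}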