diff --stat (patch summary)

 drivers/gpu/msm/adreno.h            |  1 +
 drivers/gpu/msm/adreno_a5xx.c       | 39 +++++++++++++++++++++------------
 drivers/gpu/msm/adreno_ringbuffer.c | 15 +++++++++-----
 3 files changed, 36 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index f739783ebd84..816185e9aad4 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -721,6 +721,7 @@ struct adreno_gpudev { struct adreno_ringbuffer *, unsigned int *, struct kgsl_context *, uint64_t cond_addr, struct kgsl_memobj_node *); + int (*preemption_yield_enable)(unsigned int *); int (*preemption_post_ibsubmit)(struct adreno_device *, struct adreno_ringbuffer *, unsigned int *, struct kgsl_context *); diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 9d37c86aee0f..42cbb07c4b30 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -317,10 +317,6 @@ static int a5xx_preemption_token(struct adreno_device *adreno_dev, { unsigned int *cmds_orig = cmds; - /* Enable yield in RB only */ - *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1); - *cmds++ = 1; - *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4); cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr); *cmds++ = 1; @@ -411,18 +407,11 @@ static int a5xx_preemption_pre_ibsubmit( } /* - * a5xx_preemption_post_ibsubmit() - Below PM4 commands are + * a5xx_preemption_yield_enable() - Below PM4 commands are * added after every cmdbatch submission. */ -static int a5xx_preemption_post_ibsubmit( - struct adreno_device *adreno_dev, - struct adreno_ringbuffer *rb, unsigned int *cmds, - struct kgsl_context *context) +static int a5xx_preemption_yield_enable(unsigned int *cmds) { - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - unsigned int *cmds_orig = cmds; - unsigned int ctx_id = context ? 
context->id : 0; - /* * SRM -- set render mode (ex binning, direct render etc) * SRM is set by UMD usually at start of IB to tell CP the type of @@ -437,11 +426,27 @@ static int a5xx_preemption_post_ibsubmit( *cmds++ = 0; *cmds++ = 0; - cmds += a5xx_preemption_token(adreno_dev, rb, cmds, + *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1); + *cmds++ = 1; + + return 8; +} + +/* + * a5xx_preemption_post_ibsubmit() - Below PM4 commands are + * added after every cmdbatch submission. + */ +static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, unsigned int *cmds, + struct kgsl_context *context) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int ctx_id = context ? context->id : 0; + + return a5xx_preemption_token(adreno_dev, rb, cmds, device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(ctx_id, preempted)); - return cmds - cmds_orig; } static void a5xx_platform_setup(struct adreno_device *adreno_dev) @@ -4182,8 +4187,10 @@ struct adreno_gpudev adreno_a5xx_gpudev = { .regulator_disable = a5xx_regulator_disable, .pwrlevel_change_settings = a5xx_pwrlevel_change_settings, .preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit, + .preemption_yield_enable = + a5xx_preemption_yield_enable, .preemption_post_ibsubmit = - a5xx_preemption_post_ibsubmit, + a5xx_preemption_post_ibsubmit, .preemption_token = a5xx_preemption_token, .preemption_init = a5xx_preemption_init, .preemption_schedule = a5xx_preemption_schedule, diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index a397a3e83cf4..dc1fbdb64317 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -520,7 +520,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (gpudev->preemption_post_ibsubmit && adreno_is_preemption_enabled(adreno_dev)) - total_sizedwords += 13; + total_sizedwords += 5; /* * a5xx uses 64 bit memory address. 
pm4 commands that involve read/write @@ -707,8 +707,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (gpudev->preemption_post_ibsubmit && adreno_is_preemption_enabled(adreno_dev)) - ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, - rb, ringcmds, &drawctxt->base); + ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb, + ringcmds, &drawctxt->base); /* * If we have more ringbuffer commands than space reserved @@ -860,6 +860,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int *link; @@ -978,6 +979,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, dwords += 2; } + if (gpudev->preemption_yield_enable && + adreno_is_preemption_enabled(adreno_dev)) + dwords += 8; + link = kzalloc(sizeof(unsigned int) * dwords, GFP_KERNEL); if (!link) { ret = -ENOMEM; @@ -1028,6 +1033,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, } } + if (gpudev->preemption_yield_enable && + adreno_is_preemption_enabled(adreno_dev)) + cmds += gpudev->preemption_yield_enable(cmds); + if (cmdbatch_kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, adreno_dev->cmdbatch_profile_buffer.gpuaddr + |