path: root/drivers/gpu/msm/adreno_ringbuffer.c
author		Harshdeep Dhatt <hdhatt@codeaurora.org>	2016-03-09 13:51:36 -0700
committer	Jeevan Shriram <jshriram@codeaurora.org>	2016-04-13 11:03:08 -0700
commit		d8cd9dca9bd9ab27f0cd0b0fca43fc7493b4198d (patch)
tree		50b58bc033c53aab4257081cd280a422f931e242 /drivers/gpu/msm/adreno_ringbuffer.c
parent		8876c65ca7dee5aedeb3d5377d2b943fad9b899d (diff)
msm: kgsl: Correct the order of preemption packets
Current order: IB1 batch, timestamp writes, SRM=NULL, CP_YIELD_ENABLE,
CP_CONTEXT_SWITCH_YIELD

Correct order: IB1 batch, SRM=NULL, CP_YIELD_ENABLE, timestamp writes,
CP_CONTEXT_SWITCH_YIELD

Reason: if preemption is initiated after the last checkpoint but before
SET_RENDER_MODE == NULL is executed, all of the PM4s starting at the
preamble of the checkpoint will be replayed up to the SRM == NULL,
including an attempt to re-timestamp/re-retire the last batch of IBs.
If the intent is to make sure that the IB batch is retired only once,
then the SET_RENDER_MODE == NULL and CP_YIELD_ENABLE packets should be
placed immediately after the IB_PFE packets and before the timestamping
PM4 packets in the ring buffer.

CRs-Fixed: 990078
Change-Id: I04a1a44f12dd3a09c50b4fe39e14a2bd636b24de
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
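As a quick illustration of the corrected order, here is a minimal,
self-contained sketch. The emit() helper and the IB-batch/timestamp dword
counts are hypothetical stand-ins, not the kgsl packet builders; only the
8-dword yield-enable and 5-dword post-IB-submit sizes come from the diff
below.

#include <stdio.h>

/*
 * Hypothetical emit helper: prints what would be written and advances the
 * ringbuffer write pointer, mirroring the driver's "cmds += ..." style.
 */
static unsigned int *emit(unsigned int *cmds, const char *what, int dwords)
{
	printf("%-30s %2d dwords\n", what, dwords);
	return cmds + dwords;
}

/* The corrected packet ordering described above. */
static unsigned int *emit_preemptible_submit(unsigned int *cmds)
{
	cmds = emit(cmds, "IB1 batch (IB_PFE)", 3);		/* illustrative size */
	cmds = emit(cmds, "SRM = NULL + CP_YIELD_ENABLE", 8);	/* preemption_yield_enable */
	cmds = emit(cmds, "timestamp writes", 4);		/* illustrative size */
	cmds = emit(cmds, "CP_CONTEXT_SWITCH_YIELD", 5);	/* preemption_post_ibsubmit */
	return cmds;
}

int main(void)
{
	unsigned int ring[32];

	emit_preemptible_submit(ring);
	return 0;
}

With SRM = NULL and CP_YIELD_ENABLE placed directly behind the IB batch, a
preemption replay stops before reaching the timestamp writes, so the batch
cannot be re-timestamped or re-retired.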
Diffstat (limited to 'drivers/gpu/msm/adreno_ringbuffer.c')
-rw-r--r--	drivers/gpu/msm/adreno_ringbuffer.c	| 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a397a3e83cf4..dc1fbdb64317 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -520,7 +520,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 
 	if (gpudev->preemption_post_ibsubmit &&
 			adreno_is_preemption_enabled(adreno_dev))
-		total_sizedwords += 13;
+		total_sizedwords += 5;
 
 	/*
 	 * a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -707,8 +707,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 
 	if (gpudev->preemption_post_ibsubmit &&
 			adreno_is_preemption_enabled(adreno_dev))
-		ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
-			rb, ringcmds, &drawctxt->base);
+		ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb,
+			ringcmds, &drawctxt->base);
 
 	/*
 	 * If we have more ringbuffer commands than space reserved
@@ -860,6 +860,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_memobj_node *ib;
 	unsigned int numibs = 0;
 	unsigned int *link;
@@ -978,6 +979,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		dwords += 2;
 	}
 
+	if (gpudev->preemption_yield_enable &&
+			adreno_is_preemption_enabled(adreno_dev))
+		dwords += 8;
+
 	link = kzalloc(sizeof(unsigned int) * dwords, GFP_KERNEL);
 	if (!link) {
 		ret = -ENOMEM;
@@ -1028,6 +1033,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		}
 	}
 
+	if (gpudev->preemption_yield_enable &&
+			adreno_is_preemption_enabled(adreno_dev))
+		cmds += gpudev->preemption_yield_enable(cmds);
+
 	if (cmdbatch_kernel_profiling) {
 		cmds += _get_alwayson_counter(adreno_dev, cmds,
 			adreno_dev->cmdbatch_profile_buffer.gpuaddr +
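
The new preemption_yield_enable hook follows kgsl's reserve-then-emit
pattern: adreno_ringbuffer_submitcmd reserves 8 dwords up front
("dwords += 8") and the hook must write exactly that many, or later
packets would overrun the kzalloc'd command list. A minimal sketch of
that contract follows; the packet header encoding, the opcode values, and
the 6/2 dword split between the two packets are placeholders, not the
real a5xx PM4 encodings.

#include <assert.h>

/* Placeholder packet header builder -- NOT the real PM4 encoding. */
static unsigned int pm4_packet(unsigned int opcode, unsigned int count)
{
	return (opcode << 16) | count;
}

/* Hypothetical opcode values, for illustration only. */
#define CP_SET_RENDER_MODE	0x101
#define CP_YIELD_ENABLE		0x102

#define YIELD_ENABLE_SIZEDWORDS	8	/* must equal the "dwords += 8" reservation */

/* Emits SRM = NULL followed by CP_YIELD_ENABLE; returns dwords written. */
static unsigned int preemption_yield_enable(unsigned int *cmds)
{
	unsigned int *start = cmds;

	/*
	 * SET_RENDER_MODE = NULL: mark the preceding IB batch done so a
	 * preemption replay does not re-run and re-retire it.
	 */
	*cmds++ = pm4_packet(CP_SET_RENDER_MODE, 5);
	*cmds++ = 0;	/* mode = NULL */
	*cmds++ = 0;
	*cmds++ = 0;
	*cmds++ = 0;
	*cmds++ = 0;

	/* Allow the CP to yield to a pending preemption request. */
	*cmds++ = pm4_packet(CP_YIELD_ENABLE, 1);
	*cmds++ = 1;

	return (unsigned int)(cmds - start);
}

int main(void)
{
	unsigned int ring[YIELD_ENABLE_SIZEDWORDS];

	/* Emitted size must match the reservation made before kzalloc. */
	assert(preemption_yield_enable(ring) == YIELD_ENABLE_SIZEDWORDS);
	return 0;
}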