author     LuK1337 <priv.luk@gmail.com>                2020-08-20 14:49:18 +0200
committer  Davide Garberi <dade.garberi@gmail.com>     2022-07-27 18:58:37 +0200
commit     5a85f4d5e60aff1bc71ee652152cf30c203ce3c8 (patch)
tree       4fce4c8f8603db4e0784cfdc98fd3934ba69d61b /drivers/gpu/msm/adreno_ringbuffer.c
parent     41d54fc727971cd963ab3e24da64ce7f907d8ce5 (diff)
Revert "msm: kgsl: Mark the scratch buffer as privileged"
* Requires new GPU firmware

This reverts commit adec4f93e1705640e7b03d33394224ff5d835280.

Change-Id: I747c00bff92f6e793f207839a7ad0a61b2656f96
Diffstat (limited to 'drivers/gpu/msm/adreno_ringbuffer.c')
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c  |  44
1 file changed, 2 insertions(+), 42 deletions(-)
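
In short: the patch being reverted had split ringbuffer submission into a _nosync variant plus a syncing wrapper that appended a CP_WHERE_AM_I packet (an opcode only newer CP firmware understands), and had marked the scratch buffer CP-privileged. This revert folds the pair back into a single adreno_ringbuffer_submit_spin() and drops the privileged flag. For orientation, a sketch of the function as it reads after the revert, reassembled from the context lines in the first two hunks below; the middle of the body is elided in this diff, so those two statements are an assumption:

	int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
			struct adreno_submit_time *time, unsigned int timeout)
	{
		struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);

		/* Assumed elided body: submit the commands to the CP... */
		adreno_ringbuffer_submit(rb, time);
		/* ...then busy-wait (up to timeout ms) for the GPU to idle. */
		return adreno_spin_idle(adreno_dev, timeout);
	}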
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 475e5e60163d..f478a34d1129 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -138,7 +138,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
adreno_ringbuffer_wptr(adreno_dev, rb);
}
-int adreno_ringbuffer_submit_spin_nosync(struct adreno_ringbuffer *rb,
+int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
struct adreno_submit_time *time, unsigned int timeout)
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
@@ -147,38 +147,6 @@ int adreno_ringbuffer_submit_spin_nosync(struct adreno_ringbuffer *rb,
return adreno_spin_idle(adreno_dev, timeout);
}
-/*
- * adreno_ringbuffer_submit_spin() - Submit the cmds and wait until GPU is idle
- * @rb: Pointer to ringbuffer
- * @time: Pointer to adreno_submit_time
- * @timeout: timeout value in ms
- *
- * Add commands to the ringbuffer and wait until GPU goes to idle. This routine
- * inserts a WHERE_AM_I packet to trigger a shadow rptr update. So, use
- * adreno_ringbuffer_submit_spin_nosync() if the previous cmd in the RB is a
- * CSY packet because CSY followed by WHERE_AM_I is not legal.
- */
-int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
- struct adreno_submit_time *time, unsigned int timeout)
-{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *cmds;
-
- if (adreno_is_a3xx(adreno_dev))
- return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout);
-
- cmds = adreno_ringbuffer_allocspace(rb, 3);
- if (IS_ERR(cmds))
- return PTR_ERR(cmds);
-
- *cmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- SCRATCH_RPTR_GPU_ADDR(device, rb->id));
-
- return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout);
-}
-
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
unsigned int dwords)
{
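
For readers unfamiliar with the kgsl command-stream helpers, here is an annotated copy of the handshake the hunk above removes; the identifiers and dword counts are all taken from the removed lines, only the comments are editorial:

	/* Reserve 3 dwords in the RB: 1 packet header plus a 2-dword (64-bit)
	 * GPU address - the same 3 dwords dropped from total_sizedwords in
	 * the later hunks. */
	cmds = adreno_ringbuffer_allocspace(rb, 3);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	/* CP_WHERE_AM_I asks the command processor to report its current
	 * read pointer; per the commit message, this opcode is what needs
	 * the new GPU firmware. */
	*cmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2);

	/* Destination: this ringbuffer's rptr slot in the scratch buffer,
	 * i.e. the shadow rptr the driver polls. */
	cmds += cp_gpuaddr(adreno_dev, cmds,
			SCRATCH_RPTR_GPU_ADDR(device, rb->id));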
@@ -305,12 +273,11 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- unsigned int priv = KGSL_MEMDESC_RANDOM | KGSL_MEMDESC_PRIVILEGED;
int i, status;
if (!adreno_is_a3xx(adreno_dev)) {
status = kgsl_allocate_global(device, &device->scratch,
- PAGE_SIZE, 0, priv, "scratch");
+ PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch");
if (status != 0)
return status;
}
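
The net effect of this hunk, with both allocation calls side by side (the calls are taken verbatim from the diff; the comments summarize the commit message):

	/* Patch being reverted: scratch is randomized *and* CP-privileged,
	 * which protects the rptr shadow from unprivileged GPU writes but
	 * relies on the new-firmware CP_WHERE_AM_I update path. */
	unsigned int priv = KGSL_MEMDESC_RANDOM | KGSL_MEMDESC_PRIVILEGED;
	status = kgsl_allocate_global(device, &device->scratch,
			PAGE_SIZE, 0, priv, "scratch");

	/* After the revert: scratch keeps its randomized placement but is
	 * no longer privileged, so the rptr shadow can be kept up to date
	 * without emitting CP_WHERE_AM_I packets. */
	status = kgsl_allocate_global(device, &device->scratch,
			PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch");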
@@ -513,8 +480,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
total_sizedwords += 5;
- else if (!adreno_is_a3xx(adreno_dev))
- total_sizedwords += 3;
/*
* a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -705,11 +670,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
adreno_is_preemption_enabled(adreno_dev))
ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
ringcmds);
- else if (!adreno_is_a3xx(adreno_dev)) {
- *ringcmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2);
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
- SCRATCH_RPTR_GPU_ADDR(device, rb->id));
- }
/*
* If we have more ringbuffer commands than space reserved