| author | Akhil P Oommen <akhilpo@codeaurora.org> | 2020-07-16 00:49:09 +0530 |
| --- | --- | --- |
| committer | Pranav Patel <pranavp@codeaurora.org> | 2020-08-04 15:33:12 +0530 |
| commit | a6e4cb81280abe3515ad9ed442ae96fd580688d2 (patch) | |
| tree | ea6e5fcf9e595d1c662825783ec1fe2c23a89d0d /drivers/gpu/msm/adreno_ringbuffer.c | |
| parent | 4a1a4fb57a7c84060e82d8e59b7851f1ed7e107f (diff) | |
msm: kgsl: Mark the scratch buffer as privileged
Mark the scratch buffer as privileged so that it can only be accessed by
the GPU through the ringbuffer. To accomplish this, we need to:
1. Disable the shadow rptr feature.
2. Trigger RPTR update from GPU using a WHERE_AM_I packet.
3. Add support for the new ucode.
Change-Id: I9b388f55f53b69028b9bbb2306cb43fd1297c52f
Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org>
Signed-off-by: Pranav Patel <pranavp@codeaurora.org>
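
The commit message above describes replacing the always-on shadow rptr with an on-demand update: the driver queues a WHERE_AM_I packet and the GPU itself writes its current read offset into the (now GPU-privileged) scratch buffer. The following is a minimal, self-contained C sketch of that idea, not KGSL code; every name in it (struct ring, OP_WHERE_AM_I, gpu_run, etc.) is invented for illustration.

```c
#include <stdint.h>
#include <stdio.h>

#define RB_DWORDS 16u

/* Hypothetical stand-ins for the ringbuffer and the scratch slot. */
struct ring {
        uint32_t cmds[RB_DWORDS];
        uint32_t wptr;          /* advanced by the CPU as it queues commands */
        uint32_t scratch_rptr;  /* written only by the (simulated) GPU */
};

enum { OP_NOP = 1, OP_WHERE_AM_I = 2 };

/* CPU side: queue one command dword and advance the write pointer. */
static void ring_emit(struct ring *rb, uint32_t op)
{
        rb->cmds[rb->wptr % RB_DWORDS] = op;
        rb->wptr++;
}

/* "GPU" side: consume up to wptr; WHERE_AM_I publishes the read pointer. */
static void gpu_run(struct ring *rb, uint32_t *rptr)
{
        while (*rptr != rb->wptr) {
                uint32_t op = rb->cmds[*rptr % RB_DWORDS];

                (*rptr)++;
                if (op == OP_WHERE_AM_I)
                        rb->scratch_rptr = *rptr;
        }
}

int main(void)
{
        struct ring rb = { 0 };
        uint32_t gpu_rptr = 0;

        ring_emit(&rb, OP_NOP);
        ring_emit(&rb, OP_NOP);
        ring_emit(&rb, OP_WHERE_AM_I);  /* ask the GPU to publish its rptr */
        gpu_run(&rb, &gpu_rptr);

        /* The CPU learns how far the GPU has read only via the scratch slot. */
        printf("scratch rptr = %u, wptr = %u\n", rb.scratch_rptr, rb.wptr);
        return 0;
}
```

In the real driver the scratch page is privileged GPU memory, so only the CP can perform that final write; the CPU merely reads the published value instead of relying on a continuously updated, CPU-writable shadow.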
Diffstat (limited to 'drivers/gpu/msm/adreno_ringbuffer.c')
-rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.c | 46
1 file changed, 43 insertions, 3 deletions
```diff
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3a3777823013..475e5e60163d 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017,2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017,2019-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -138,7 +138,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
         adreno_ringbuffer_wptr(adreno_dev, rb);
 }
 
-int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
+int adreno_ringbuffer_submit_spin_nosync(struct adreno_ringbuffer *rb,
                 struct adreno_submit_time *time, unsigned int timeout)
 {
         struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
@@ -147,6 +147,38 @@ int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
         return adreno_spin_idle(adreno_dev, timeout);
 }
 
+/*
+ * adreno_ringbuffer_submit_spin() - Submit the cmds and wait until GPU is idle
+ * @rb: Pointer to ringbuffer
+ * @time: Pointer to adreno_submit_time
+ * @timeout: timeout value in ms
+ *
+ * Add commands to the ringbuffer and wait until GPU goes to idle. This routine
+ * inserts a WHERE_AM_I packet to trigger a shadow rptr update. So, use
+ * adreno_ringbuffer_submit_spin_nosync() if the previous cmd in the RB is a
+ * CSY packet because CSY followed by WHERE_AM_I is not legal..
+ */
+int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
+                struct adreno_submit_time *time, unsigned int timeout)
+{
+        struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+        unsigned int *cmds;
+
+        if (adreno_is_a3xx(adreno_dev))
+                return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout);
+
+        cmds = adreno_ringbuffer_allocspace(rb, 3);
+        if (IS_ERR(cmds))
+                return PTR_ERR(cmds);
+
+        *cmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2);
+        cmds += cp_gpuaddr(adreno_dev, cmds,
+                        SCRATCH_RPTR_GPU_ADDR(device, rb->id));
+
+        return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout);
+}
+
 unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
                 unsigned int dwords)
 {
@@ -273,11 +305,12 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
 {
         struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
         struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+        unsigned int priv = KGSL_MEMDESC_RANDOM | KGSL_MEMDESC_PRIVILEGED;
         int i, status;
 
         if (!adreno_is_a3xx(adreno_dev)) {
                 status = kgsl_allocate_global(device, &device->scratch,
-                        PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch");
+                        PAGE_SIZE, 0, priv, "scratch");
                 if (status != 0)
                         return status;
         }
@@ -480,6 +513,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
         if (gpudev->preemption_post_ibsubmit &&
                         adreno_is_preemption_enabled(adreno_dev))
                 total_sizedwords += 5;
+        else if (!adreno_is_a3xx(adreno_dev))
+                total_sizedwords += 3;
 
         /*
          * a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -670,6 +705,11 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
                         adreno_is_preemption_enabled(adreno_dev))
                 ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
                         ringcmds);
+        else if (!adreno_is_a3xx(adreno_dev)) {
+                *ringcmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2);
+                ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
+                                SCRATCH_RPTR_GPU_ADDR(device, rb->id));
+        }
 
         /*
          * If we have more ringbuffer commands than space reserved
```
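
The reserved-space arithmetic in the hunks above follows from the packet layout: a CP_WHERE_AM_I packet is one header dword (emitted by cp_packet() with a payload count of 2) plus two dwords for the 64-bit scratch address (emitted by cp_gpuaddr()), i.e. three dwords total. That is why adreno_ringbuffer_addcmds() reserves 3 extra dwords and adreno_ringbuffer_submit_spin() calls adreno_ringbuffer_allocspace(rb, 3). a3xx targets are exempt because, as the probe hunk shows, the scratch buffer is only allocated for non-a3xx GPUs. Below is a standalone sketch of that accounting in plain C; the helper name and simplified boolean arguments are hypothetical, not driver code.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the accounting in adreno_ringbuffer_addcmds():
 * the preemption postamble costs 5 dwords; otherwise, on non-a3xx targets, a
 * CP_WHERE_AM_I packet costs 1 header dword + 2 address dwords = 3 dwords.
 */
static unsigned int postamble_sizedwords(bool preemption_enabled, bool is_a3xx)
{
        if (preemption_enabled)
                return 5;       /* gpudev->preemption_post_ibsubmit() payload */
        if (!is_a3xx)
                return 3;       /* CP_WHERE_AM_I: header + 64-bit gpuaddr */
        return 0;               /* a3xx keeps the legacy shadow-rptr path */
}

int main(void)
{
        printf("non-a3xx, preemption off: %u extra dwords\n",
               postamble_sizedwords(false, false));
        printf("a3xx: %u extra dwords\n",
               postamble_sizedwords(false, true));
        return 0;
}
```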