author    Jordan Crouse <jcrouse@codeaurora.org>    2016-02-01 08:22:09 -0700
committer David Keitel <dkeitel@codeaurora.org>     2016-03-23 21:19:19 -0700
commit    d4a7e95db83b778529d1cdcaa124f0be903e494c (patch)
tree      5183dc9111689c92e18c82897773c773d6297837 /drivers/gpu
parent    9c8d55382c2f454be420df38a745cbbf774c2677 (diff)
msm: kgsl: Refactor MMU/IOMMU support
The current MMU code assumes a binary state - either there is an IOMMU
or there isn't. This precludes other memory models and bakes a lot of
IOMMU-specific knowledge into the generic MMU code and the rest of the
driver. Reorganize and clean up the MMU and IOMMU code:

* Add a Kconfig boolean dependent on ARM and/or MSM SMMU support.
* Make "nommu" mode an actual MMU subtype and figure out available MMU
  subtypes at probe time.
* Move IOMMU device tree parsing to the IOMMU code.
* Move the MMU subtype private structures into struct kgsl_mmu.
* Move adreno_iommu specific functions out of other generic adreno code.
* Move A4XX specific preemption code out of the ringbuffer code.

CRs-Fixed: 970264
Change-Id: Ic0dedbad1293a1d129b7c4ed1105d684ca84d97f
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
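The fourth bullet is the most far-reaching change in the diff below: the adreno and snapshot code switch to a KGSL_IOMMU_PRIV(device) accessor and kgsl_iommu.c gains _IOMMU_PRIV(), replacing the old void mmu->priv pointer that used to point at the static device_3d0_iommu singleton. The kgsl_mmu.h hunk is listed in the diffstat but not quoted in this excerpt, so the layout below is only a sketch inferred from those accessors:

    /*
     * Hypothetical sketch: kgsl_mmu.h is not quoted in this excerpt.
     * Layout inferred from "#define _IOMMU_PRIV(_mmu)
     * (&((_mmu)->priv.iommu))" in the kgsl_iommu.c hunk below.
     */
    struct kgsl_mmu {
            unsigned long features;
            struct kgsl_mmu_ops *mmu_ops;
            /* ... other generic MMU state ... */
            union {
                    struct kgsl_iommu iommu;  /* IOMMU subtype state */
            } priv;
    };

Embedding the subtype storage directly in struct kgsl_mmu, rather than pointing at a singleton that may or may not have been probed, is what lets the patch delete the various "if (iommu == NULL)" checks further down.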
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/msm/Kconfig              |   4
-rw-r--r--  drivers/gpu/msm/Makefile             |   4
-rw-r--r--  drivers/gpu/msm/adreno.c             | 208
-rw-r--r--  drivers/gpu/msm/adreno.h             |  21
-rw-r--r--  drivers/gpu/msm/adreno_a4xx.c        |  88
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c        |  28
-rw-r--r--  drivers/gpu/msm/adreno_compat.c      |   5
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c    |   2
-rw-r--r--  drivers/gpu/msm/adreno_iommu.c       |  87
-rw-r--r--  drivers/gpu/msm/adreno_iommu.h       |  50
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c  | 111
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.h  |  13
-rw-r--r--  drivers/gpu/msm/adreno_snapshot.c    |  13
-rw-r--r--  drivers/gpu/msm/kgsl.c               |  51
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c         | 359
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.c           | 337
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.h           | 131
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c     |   6
18 files changed, 764 insertions, 754 deletions
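One piece of the new probe flow sits outside the quoted hunks: kgsl.c below switches from kgsl_mmu_init(device, ksgl_mmu_type) to kgsl_mmu_probe(device, kgsl_mmu_type), and kgsl_iommu.c wires a new .probe op into kgsl_iommu_ops, but the kgsl_mmu_probe() body itself lives in the unquoted parts of kgsl_mmu.c/kgsl_mmu.h. A sketch of the presumed selection flow follows; the type field name and the nommu fallback are assumptions:

    /*
     * Hypothetical sketch: the real kgsl_mmu_probe() body is outside
     * the quoted hunks. Flow inferred from the commit message ("figure
     * out available MMU subtypes at probe time") and the .probe op
     * added to kgsl_iommu_ops below.
     */
    int kgsl_mmu_probe(struct kgsl_device *device, char *mmutype)
    {
            struct kgsl_mmu *mmu = &device->mmu;

            /* "mmutype=nommu" on the module command line forces nommu */
            if (mmutype == NULL || strcmp(mmutype, "nommu") != 0) {
                    /* kgsl_iommu_probe() returns -ENODEV when no
                     * qcom,kgsl-smmu-v1/v2 node is in the device tree */
                    if (kgsl_iommu_ops.probe(device) == 0) {
                            mmu->type = KGSL_MMU_TYPE_IOMMU; /* assumed field */
                            mmu->mmu_ops = &kgsl_iommu_ops;
                            return kgsl_mmu_init(device);
                    }
            }

            /* "nommu" is now a real subtype, not a missing ops table */
            mmu->type = KGSL_MMU_TYPE_NONE; /* assumed field */
            return kgsl_mmu_init(device);
    }

This also explains why kgsl_mmu_get_mmutype() now takes the device: the subtype becomes per-device state instead of the file-static kgsl_mmu_type variable deleted from kgsl_mmu.c.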
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 967e70f04155..399add593796 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -41,3 +41,7 @@ config QCOM_ADRENO_DEFAULT_GOVERNOR
default "msm-adreno-tz" if DEVFREQ_GOV_QCOM_ADRENO_TZ
default "simple_ondemand"
depends on QCOM_KGSL
+
+config MSM_KGSL_IOMMU
+ bool
+ default y if MSM_KGSL && (MSM_IOMMU || ARM_SMMU)
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index d6ad9829d150..e77f0086dbdc 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -9,10 +9,10 @@ msm_kgsl_core-y = \
kgsl_pwrctrl.o \
kgsl_pwrscale.o \
kgsl_mmu.o \
- kgsl_iommu.o \
kgsl_snapshot.o \
kgsl_events.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_IOMMU) += kgsl_iommu.o
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_QCOM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
@@ -35,9 +35,9 @@ msm_adreno-y += \
adreno_sysfs.o \
adreno.o \
adreno_cp_parser.o \
- adreno_iommu.o \
adreno_perfcounter.o
+msm_adreno-$(CONFIG_MSM_KGSL_IOMMU) += adreno_iommu.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
msm_adreno-$(CONFIG_COMPAT) += adreno_compat.o
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 68d5f8ec35fe..e2eaf68fd10a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -30,6 +30,7 @@
#include "kgsl_trace.h"
#include "adreno.h"
+#include "adreno_iommu.h"
#include "adreno_compat.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"
@@ -68,8 +69,6 @@ static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
static const struct kgsl_functable adreno_functable;
-static struct kgsl_iommu device_3d0_iommu;
-
static struct adreno_device device_3d0 = {
.dev = {
KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
@@ -235,164 +234,6 @@ int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset,
return 0;
}
-/*
- * adreno_iommu_cb_probe() - Adreno iommu context bank probe
- *
- * Iommu context bank probe function.
- */
-static int adreno_iommu_cb_probe(struct platform_device *pdev)
-{
- struct kgsl_iommu_context *ctx = NULL;
- struct device_node *node = pdev->dev.of_node;
- struct kgsl_iommu *iommu = &device_3d0_iommu;
- int ret = 0;
-
- /* Map context names from dt to id's */
- if (!strcmp("gfx3d_user", node->name)) {
- ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
- ctx->id = KGSL_IOMMU_CONTEXT_USER;
- ctx->cb_num = -1;
- } else if (!strcmp("gfx3d_secure", node->name)) {
- ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
- ctx->id = KGSL_IOMMU_CONTEXT_SECURE;
- ctx->cb_num = -1;
- device_3d0.dev.mmu.secured = true;
- } else {
- KGSL_CORE_ERR("dt: Unknown context label %s\n", node->name);
- return -EINVAL;
- }
-
- if (ctx->name != NULL) {
- KGSL_CORE_ERR("dt: %s appears multiple times\n", node->name);
- return -EINVAL;
- }
- ctx->name = node->name;
-
- /* this property won't be found for all context banks */
- if (of_property_read_u32(node, "qcom,gpu-offset",
- &ctx->gpu_offset))
- ctx->gpu_offset = UINT_MAX;
-
- ctx->kgsldev = &device_3d0.dev;
-
- /* arm-smmu driver we'll have the right device pointer here. */
- if (of_find_property(node, "iommus", NULL)) {
- ctx->dev = &pdev->dev;
- } else {
- /*
- * old iommu driver requires that we query the context bank
- * device rather than getting it from dt.
- */
- ctx->dev = kgsl_mmu_get_ctx(ctx->name);
- if (IS_ERR_OR_NULL(ctx->dev)) {
- ret = (ctx->dev == NULL) ? -ENODEV : PTR_ERR(ctx->dev);
- KGSL_CORE_ERR("ctx %s: kgsl_mmu_get_ctx err: %d\n",
- ctx->name, ret);
- return ret;
- }
- }
-
- kgsl_mmu_set_mmutype(KGSL_MMU_TYPE_IOMMU);
-
- return ret;
-}
-
-static struct of_device_id iommu_match_table[] = {
- { .compatible = "qcom,kgsl-smmu-v1", },
- { .compatible = "qcom,kgsl-smmu-v2", },
- { .compatible = "qcom,smmu-kgsl-cb", },
- {}
-};
-
-/**
- * adreno_iommu_pdev_probe() - Adreno iommu context bank probe
- * @pdev: Platform device
- *
- * Iommu probe function.
- */
-static int adreno_iommu_pdev_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const char *cname;
- struct property *prop;
- u32 reg_val[2];
- int i = 0;
- struct kgsl_iommu *iommu = &device_3d0_iommu;
-
- if (of_device_is_compatible(dev->of_node, "qcom,smmu-kgsl-cb"))
- return adreno_iommu_cb_probe(pdev);
- else if (of_device_is_compatible(dev->of_node, "qcom,kgsl-smmu-v1"))
- iommu->version = 1;
- else
- iommu->version = 2;
-
- if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg_val, 2)) {
- KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
- return -EINVAL;
- }
- iommu->regstart = reg_val[0];
- iommu->regsize = reg_val[1];
-
- /* Protecting the SMMU registers is mandatory */
- if (of_property_read_u32_array(pdev->dev.of_node, "qcom,protect",
- reg_val, 2)) {
- KGSL_CORE_ERR("dt: no iommu protection range specified\n");
- return -EINVAL;
- }
- iommu->protect.base = reg_val[0] / sizeof(u32);
- iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
-
- of_property_for_each_string(dev->of_node, "clock-names", prop, cname) {
- struct clk *c = devm_clk_get(dev, cname);
- if (IS_ERR(c)) {
- KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
- return -ENODEV;
- }
- if (i >= KGSL_IOMMU_MAX_CLKS) {
- KGSL_CORE_ERR("dt: too many clocks defined.\n");
- return -EINVAL;
- }
-
- iommu->clks[i] = c;
- ++i;
- }
-
- if (of_property_read_bool(pdev->dev.of_node, "qcom,retention"))
- device_3d0.dev.mmu.features |= KGSL_MMU_RETENTION;
-
- if (of_property_read_bool(pdev->dev.of_node, "qcom,global_pt"))
- device_3d0.dev.mmu.features |= KGSL_MMU_GLOBAL_PAGETABLE;
-
- if (of_property_read_bool(pdev->dev.of_node, "qcom,hyp_secure_alloc"))
- device_3d0.dev.mmu.features |= KGSL_MMU_HYP_SECURE_ALLOC;
-
- if (of_property_read_bool(pdev->dev.of_node, "qcom,force-32bit"))
- device_3d0.dev.mmu.features |= KGSL_MMU_FORCE_32BIT;
-
- if (of_property_read_u32(pdev->dev.of_node, "qcom,micro-mmu-control",
- &iommu->micro_mmu_ctrl))
- iommu->micro_mmu_ctrl = UINT_MAX;
-
- if (of_property_read_bool(pdev->dev.of_node, "qcom,coherent-htw"))
- device_3d0.dev.mmu.features |= KGSL_MMU_COHERENT_HTW;
-
- if (of_property_read_u32(pdev->dev.of_node, "qcom,secure_align_mask",
- &device_3d0.dev.mmu.secure_align_mask))
- device_3d0.dev.mmu.secure_align_mask = 0xfff;
-
- return of_platform_populate(pdev->dev.of_node, iommu_match_table,
- NULL, &pdev->dev);
-}
-
-static struct platform_driver kgsl_iommu_platform_driver = {
- .probe = adreno_iommu_pdev_probe,
- .driver = {
- .owner = THIS_MODULE,
- .name = "kgsl-iommu",
- .of_match_table = iommu_match_table,
- }
-};
-
static int _get_counter(struct adreno_device *adreno_dev,
int group, int countable, unsigned int *lo,
unsigned int *hi)
@@ -1037,10 +878,6 @@ static int adreno_probe(struct platform_device *pdev)
struct adreno_device *adreno_dev;
int status;
- /* Defer adreno probe if IOMMU is not already probed */
- if (device_3d0_iommu.regstart == 0)
- return -EPROBE_DEFER;
-
adreno_dev = adreno_get_dev(pdev);
if (adreno_dev == NULL) {
@@ -1050,7 +887,6 @@ static int adreno_probe(struct platform_device *pdev)
device = KGSL_DEVICE(adreno_dev);
device->pdev = pdev;
- device->mmu.priv = &device_3d0_iommu;
/* Get the chip ID from the DT and set up target specific parameters */
adreno_identify_gpu(adreno_dev);
@@ -1829,7 +1665,8 @@ static int adreno_getproperty(struct kgsl_device *device,
memset(&devinfo, 0, sizeof(devinfo));
devinfo.device_id = device->id+1;
devinfo.chip_id = adreno_dev->chipid;
- devinfo.mmu_enabled = kgsl_mmu_enabled();
+ devinfo.mmu_enabled =
+ MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED);
devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
devinfo.gmem_sizebytes = adreno_dev->gmem_size;
@@ -1872,9 +1709,11 @@ static int adreno_getproperty(struct kgsl_device *device,
break;
case KGSL_PROP_MMU_ENABLE:
{
- int mmu_prop = kgsl_mmu_enabled();
+ /* Report MMU only if we can handle paged memory */
+ int mmu_prop = MMU_FEATURE(&device->mmu,
+ KGSL_MMU_PAGED);
- if (sizebytes != sizeof(int)) {
+ if (sizebytes < sizeof(mmu_prop)) {
status = -EINVAL;
break;
}
@@ -2918,27 +2757,6 @@ static struct platform_driver kgsl_bus_platform_driver = {
}
};
-#if defined(CONFIG_ARM_SMMU) || defined(CONFIG_MSM_IOMMU)
-static int kgsl_iommu_driver_register(void)
-{
- return platform_driver_register(&kgsl_iommu_platform_driver);
-}
-
-static void kgsl_iommu_driver_unregister(void)
-{
- platform_driver_unregister(&kgsl_iommu_platform_driver);
-}
-#else
-static inline int kgsl_iommu_driver_register(void)
-{
- return 0;
-}
-
-static inline void kgsl_iommu_driver_unregister(void)
-{
-}
-#endif
-
static int __init kgsl_3d_init(void)
{
int ret;
@@ -2947,17 +2765,9 @@ static int __init kgsl_3d_init(void)
if (ret)
return ret;
- ret = kgsl_iommu_driver_register();
- if (ret) {
- platform_driver_unregister(&kgsl_bus_platform_driver);
- return ret;
- }
-
ret = platform_driver_register(&adreno_platform_driver);
- if (ret) {
- kgsl_iommu_driver_unregister();
+ if (ret)
platform_driver_unregister(&kgsl_bus_platform_driver);
- }
return ret;
}
@@ -2965,9 +2775,7 @@ static int __init kgsl_3d_init(void)
static void __exit kgsl_3d_exit(void)
{
platform_driver_unregister(&adreno_platform_driver);
- kgsl_iommu_driver_unregister();
platform_driver_unregister(&kgsl_bus_platform_driver);
-
}
module_init(kgsl_3d_init);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 9a38ea337b5c..904c4355b827 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -855,30 +855,12 @@ void adreno_coresight_remove(struct adreno_device *adreno_dev);
bool adreno_hw_isidle(struct adreno_device *adreno_dev);
-int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
- struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt);
-
-int adreno_iommu_init(struct adreno_device *adreno_dev);
-
-void adreno_iommu_set_pt_generate_rb_cmds(struct adreno_ringbuffer *rb,
- struct kgsl_pagetable *pt);
-
void adreno_fault_detect_start(struct adreno_device *adreno_dev);
void adreno_fault_detect_stop(struct adreno_device *adreno_dev);
void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit);
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit);
-unsigned int adreno_iommu_set_pt_ib(struct adreno_ringbuffer *rb,
- unsigned int *cmds,
- struct kgsl_pagetable *pt);
-
-unsigned int adreno_iommu_set_pt_generate_cmds(
- struct adreno_ringbuffer *rb,
- unsigned int *cmds,
- struct kgsl_pagetable *pt);
-
int adreno_sysfs_init(struct adreno_device *adreno_dev);
void adreno_sysfs_close(struct adreno_device *adreno_dev);
@@ -1407,9 +1389,6 @@ void adreno_readreg64(struct adreno_device *adreno_dev,
void adreno_writereg64(struct adreno_device *adreno_dev,
enum adreno_regs lo, enum adreno_regs hi, uint64_t val);
-unsigned int adreno_iommu_set_apriv(struct adreno_device *adreno_dev,
- unsigned int *cmds, int set);
-
static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
return adreno_dev->fast_hang_detect &&
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 351c4f9cee37..ffe1a6ba0b3e 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1798,6 +1798,90 @@ static struct adreno_snapshot_data a4xx_snapshot_data = {
.sect_sizes = &a4xx_snap_sizes,
};
+#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
+
+static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
+ struct adreno_ringbuffer *incoming_rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int *ringcmds, *start;
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ int ptname;
+ struct kgsl_pagetable *pt;
+ int pt_switch_sizedwords = 0, total_sizedwords = 20;
+ unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
+ uint i;
+
+ if (incoming_rb->preempted_midway) {
+
+ kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
+ &ptname, offsetof(
+ struct adreno_ringbuffer_pagetable_info,
+ current_rb_ptname));
+ pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
+ ptname);
+ /*
+ * always expect a valid pt, else pt refcounting is
+ * messed up or current pt tracking has a bug which
+ * could lead to eventual disaster
+ */
+ BUG_ON(!pt);
+ /* set the ringbuffer for incoming RB */
+ pt_switch_sizedwords =
+ adreno_iommu_set_pt_generate_cmds(incoming_rb,
+ &link[0], pt);
+ total_sizedwords += pt_switch_sizedwords;
+ }
+
+ /*
+ * Allocate total_sizedwords space in RB, this is the max space
+ * required.
+ */
+ ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+
+ start = ringcmds;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 0;
+
+ if (incoming_rb->preempted_midway) {
+ for (i = 0; i < pt_switch_sizedwords; i++)
+ *ringcmds++ = link[i];
+ }
+
+ *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_PREEMPT_DISABLE), 1);
+ *ringcmds++ = 0;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 1;
+
+ ringcmds += gpudev->preemption_token(adreno_dev, rb, ringcmds,
+ device->memstore.gpuaddr +
+ KGSL_MEMSTORE_RB_OFFSET(rb, preempted));
+
+ if ((uint)(ringcmds - start) > total_sizedwords) {
+ KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
+ BUG();
+ }
+
+ /*
+ * If we have commands less than the space reserved in RB
+ * adjust the wptr accordingly
+ */
+ rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
+
+ /* submit just the preempt token */
+ mb();
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+ return 0;
+}
+
/**
* a4xx_preempt_trig_state() - Schedule preemption in TRIGGERRED
* state
@@ -1886,7 +1970,7 @@ static void a4xx_preempt_trig_state(
/* Submit preempt token to make preemption happen */
if (adreno_drawctxt_switch(adreno_dev, adreno_dev->cur_rb, NULL, 0))
BUG();
- if (adreno_ringbuffer_submit_preempt_token(adreno_dev->cur_rb,
+ if (a4xx_submit_preempt_token(adreno_dev->cur_rb,
adreno_dev->next_rb))
BUG();
dispatcher->preempt_token_submit = 1;
@@ -1997,7 +2081,7 @@ static void a4xx_preempt_clear_state(
adreno_dev->next_rb, adreno_dev->next_rb->timestamp);
/* submit preempt token packet to ensure preemption */
if (switch_low_to_high < 0) {
- ret = adreno_ringbuffer_submit_preempt_token(
+ ret = a4xx_submit_preempt_token(
adreno_dev->cur_rb, adreno_dev->next_rb);
/*
* unexpected since we are submitting this when rptr = wptr,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 45179e6681be..7fec178e2908 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -146,7 +146,7 @@ static void a5xx_preemption_start(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = device->mmu.priv;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
uint64_t ttbr0;
uint32_t contextidr;
struct kgsl_pagetable *pt;
@@ -196,17 +196,33 @@ static void a5xx_preemption_save(struct adreno_device *adreno_dev,
PREEMPT_RECORD(rptr));
}
+#ifdef CONFIG_MSM_KGSL_IOMMU
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ /* Allocate mem for storing preemption smmu record */
+ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
+ KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
+}
+#else
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ return -ENODEV;
+}
+#endif
+
static int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = device->mmu.priv;
struct adreno_ringbuffer *rb;
int ret;
unsigned int i;
uint64_t addr;
/* We are dependent on IOMMU to make preemption go on the CP side */
- if (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU)
+ if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
return -ENODEV;
/* Allocate mem for storing preemption counters */
@@ -248,9 +264,7 @@ static int a5xx_preemption_init(struct adreno_device *adreno_dev)
addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
}
- /* Allocate mem for storing preemption smmu record */
- return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
+ return a5xx_preemption_iommu_init(adreno_dev);
}
/*
@@ -1843,7 +1857,7 @@ out:
static void a5xx_start(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = device->mmu.priv;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned int i;
struct adreno_ringbuffer *rb;
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index 4d78de5b9ec3..582cbfb61e78 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,7 +40,8 @@ int adreno_getproperty_compat(struct kgsl_device *device,
memset(&devinfo, 0, sizeof(devinfo));
devinfo.device_id = device->id + 1;
devinfo.chip_id = adreno_dev->chipid;
- devinfo.mmu_enabled = kgsl_mmu_enabled();
+ devinfo.mmu_enabled =
+ MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED);
devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
devinfo.gmem_sizebytes = adreno_dev->gmem_size;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 0ff2bd527431..f94fbc813d48 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -583,7 +583,7 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
/* No context - set the default pagetable and thats it. */
new_pt = device->mmu.defaultpagetable;
}
- ret = adreno_iommu_set_pt_ctx(rb, new_pt, drawctxt);
+ ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt);
if (ret) {
KGSL_DRV_ERR(device,
"Failed to set pagetable on rb %d\n", rb->id);
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index f42d94a8698c..f1168d96b5de 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -14,7 +14,6 @@
#include "kgsl_sharedmem.h"
#include "a3xx_reg.h"
#include "adreno_pm4types.h"
-#include "kgsl_mmu.h"
#define A5XX_PFP_PER_PROCESS_UCODE_VER 0x5FF064
#define A5XX_PM4_PER_PROCESS_UCODE_VER 0x5FF052
@@ -59,14 +58,12 @@ static unsigned int _wait_reg(struct adreno_device *adreno_dev,
#define KGSL_MMU(_dev) \
((struct kgsl_mmu *) (&(KGSL_DEVICE((_dev))->mmu)))
-#define KGSL_IOMMU(_dev) \
- ((struct kgsl_iommu *) ((KGSL_DEVICE((_dev))->mmu.priv)))
-
static unsigned int _iommu_lock(struct adreno_device *adreno_dev,
unsigned int *cmds)
{
unsigned int *start = cmds;
- struct kgsl_iommu *iommu = KGSL_IOMMU(adreno_dev);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
/*
* If we don't have this register, probe should have forced
@@ -102,7 +99,8 @@ static unsigned int _iommu_lock(struct adreno_device *adreno_dev,
static unsigned int _iommu_unlock(struct adreno_device *adreno_dev,
unsigned int *cmds)
{
- struct kgsl_iommu *iommu = KGSL_IOMMU(adreno_dev);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
unsigned int *start = cmds;
BUG_ON(iommu->micro_mmu_ctrl == UINT_MAX);
@@ -172,9 +170,10 @@ static unsigned int _cp_smmu_reg(struct adreno_device *adreno_dev,
enum kgsl_iommu_reg_map reg,
unsigned int num)
{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
unsigned int *start = cmds;
unsigned int offset;
- struct kgsl_iommu *iommu = KGSL_IOMMU(adreno_dev);
offset = kgsl_mmu_get_reg_ahbaddr(KGSL_MMU(adreno_dev),
KGSL_IOMMU_CONTEXT_USER, reg) >> 2;
@@ -300,7 +299,7 @@ static bool _ctx_switch_use_cpu_path(
*
* Returns the number of commands generated
*/
-unsigned int adreno_iommu_set_apriv(struct adreno_device *adreno_dev,
+static unsigned int adreno_iommu_set_apriv(struct adreno_device *adreno_dev,
unsigned int *cmds, int set)
{
unsigned int *cmds_orig = cmds;
@@ -593,10 +592,10 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
u64 ttbr0;
u32 contextidr;
unsigned int *cmds_orig = cmds;
- struct kgsl_iommu *iommu = KGSL_IOMMU(adreno_dev);
ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
@@ -632,46 +631,6 @@ unsigned int adreno_iommu_set_pt_generate_cmds(
}
/**
- * adreno_iommu_set_pt_ib() - Generate commands to switch pagetable. The
- * commands generated use an IB
- * @rb: The RB in which the commands will be executed
- * @cmds: Memory pointer where commands are generated
- * @pt: The pagetable to switch to
- */
-unsigned int adreno_iommu_set_pt_ib(struct adreno_ringbuffer *rb,
- unsigned int *cmds,
- struct kgsl_pagetable *pt)
-{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- unsigned int *cmds_orig = cmds;
- struct kgsl_iommu_pt *iommu_pt = pt->priv;
-
- /* Write the ttbr0 and contextidr values to pagetable desc memory */
- *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- ttbr0)));
- *cmds++ = lower_32_bits(iommu_pt->ttbr0);
-
- *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- contextidr)));
- *cmds++ = iommu_pt->contextidr;
-
- *cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
- *cmds++ = 0;
- cmds += cp_wait_for_me(adreno_dev, cmds);
- *cmds++ = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds, rb->pt_update_desc.gpuaddr);
- *cmds++ = rb->pt_update_desc.size / sizeof(unsigned int);
-
- return cmds - cmds_orig;
-}
-
-/**
* __add_curr_ctxt_cmds() - Add commands to set a context id in memstore
* @rb: The RB in which the commands will be added for execution
* @cmds: Pointer to memory where commands are added
@@ -874,14 +833,11 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
int adreno_iommu_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
return 0;
- if (iommu == NULL)
- return -ENODEV;
-
/*
* A nop is required in an indirect buffer when switching
* pagetables in-stream
@@ -915,7 +871,7 @@ int adreno_iommu_init(struct adreno_device *adreno_dev)
}
/**
- * adreno_mmu_set_pt_ctx() - Change the pagetable of the current RB
+ * adreno_iommu_set_pt_ctx() - Change the pagetable of the current RB
* @device: Pointer to device to which the rb belongs
* @rb: The RB pointer on which pagetable is to be changed
* @new_pt: The new pt the device will change to
@@ -963,24 +919,3 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
return result;
}
-/**
- * adreno_iommu_set_pt_generate_rb_cmds() - Generate commands to switch pt
- * in a ringbuffer descriptor
- * @rb: The RB whose descriptor is used
- * @pt: The pt to switch to
- */
-void adreno_iommu_set_pt_generate_rb_cmds(struct adreno_ringbuffer *rb,
- struct kgsl_pagetable *pt)
-{
- if (rb->pt_update_desc.hostptr)
- return;
-
- rb->pt_update_desc.hostptr = rb->pagetable_desc.hostptr +
- sizeof(struct adreno_ringbuffer_pagetable_info);
- rb->pt_update_desc.size =
- adreno_iommu_set_pt_generate_cmds(rb,
- rb->pt_update_desc.hostptr, pt) *
- sizeof(unsigned int);
- rb->pt_update_desc.gpuaddr = rb->pagetable_desc.gpuaddr +
- sizeof(struct adreno_ringbuffer_pagetable_info);
-}
diff --git a/drivers/gpu/msm/adreno_iommu.h b/drivers/gpu/msm/adreno_iommu.h
new file mode 100644
index 000000000000..8e03b09c8255
--- /dev/null
+++ b/drivers/gpu/msm/adreno_iommu.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ADRENO_IOMMU_H
+#define __ADRENO_IOMMU_H
+
+#ifdef CONFIG_MSM_KGSL_IOMMU
+int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
+ struct kgsl_pagetable *new_pt,
+ struct adreno_context *drawctxt);
+
+int adreno_iommu_init(struct adreno_device *adreno_dev);
+
+unsigned int adreno_iommu_set_pt_generate_cmds(
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds,
+ struct kgsl_pagetable *pt);
+#else
+static inline int adreno_iommu_init(struct adreno_device *adreno_dev)
+{
+ return 0;
+}
+
+static inline int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
+ struct kgsl_pagetable *new_pt,
+ struct adreno_context *drawctxt)
+{
+ return 0;
+}
+
+static inline unsigned int adreno_iommu_set_pt_generate_cmds(
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds,
+ struct kgsl_pagetable *pt)
+{
+ return 0;
+}
+
+#endif
+#endif
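A detail worth noting in the stub half of this new header: the CONFIG_MSM_KGSL_IOMMU=n inlines return 0 (success) rather than an error such as -ENODEV. Callers like adreno_drawctxt_switch() in the adreno_drawctxt.c hunk above treat a nonzero return from the pagetable switch as fatal, so on a nommu build these paths are presumably meant to degrade into harmless no-ops.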
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 48210f7256f9..74e65ba89752 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -23,6 +23,7 @@
#include "kgsl_pwrctrl.h"
#include "adreno.h"
+#include "adreno_iommu.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
@@ -31,9 +32,6 @@
#define GSL_RB_NOP_SIZEDWORDS 2
-#define ADRENO_RB_PREEMPT_TOKEN_IB_DWORDS 50
-#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
-
#define RB_HOSTPTR(_rb, _pos) \
((unsigned int *) ((_rb)->buffer_desc.hostptr + \
((_pos) * sizeof(unsigned int))))
@@ -285,8 +283,6 @@ static void _ringbuffer_setup_common(struct adreno_device *adreno_dev)
rb->wptr_preempt_end = 0xFFFFFFFF;
rb->starve_timer_state =
ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- adreno_iommu_set_pt_generate_rb_cmds(rb,
- device->mmu.defaultpagetable);
}
/* Continue setting up the current ringbuffer */
@@ -439,8 +435,6 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
kgsl_free_global(device, &rb->pagetable_desc);
kgsl_free_global(device, &rb->preemption_desc);
- memset(&rb->pt_update_desc, 0, sizeof(struct kgsl_memdesc));
-
kgsl_free_global(device, &rb->buffer_desc);
kgsl_del_event_group(&rb->events);
memset(rb, 0, sizeof(struct adreno_ringbuffer));
@@ -1293,106 +1287,3 @@ int adreno_ringbuffer_waittimestamp(struct adreno_ringbuffer *rb,
return ret;
}
-
-/**
- * adreno_ringbuffer_submit_preempt_token() - Submit a preempt token
- * @rb: Ringbuffer in which the token is submitted
- * @incoming_rb: The RB to which the GPU switches when this preemption
- * token is executed.
- *
- * Called to make sure that an outstanding preemption request is
- * granted.
- */
-int adreno_ringbuffer_submit_preempt_token(struct adreno_ringbuffer *rb,
- struct adreno_ringbuffer *incoming_rb)
-{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *ringcmds, *start;
- struct kgsl_iommu *iommu = device->mmu.priv;
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- int ptname;
- struct kgsl_pagetable *pt;
- int pt_switch_sizedwords = 0, total_sizedwords = 20;
- unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
- uint i;
- uint64_t ttbr0;
-
- if (incoming_rb->preempted_midway) {
-
- if (adreno_is_a5xx(adreno_dev)) {
- kgsl_sharedmem_readq(&rb->pagetable_desc, &ttbr0,
- offsetof(struct adreno_ringbuffer_pagetable_info
- , ttbr0));
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- offsetof(struct a5xx_cp_smmu_info, ttbr0),
- ttbr0);
- } else {
- kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
- &ptname, offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname));
- pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
- ptname);
- /*
- * always expect a valid pt, else pt refcounting is
- * messed up or current pt tracking has a bug which
- * could lead to eventual disaster
- */
- BUG_ON(!pt);
- /* set the ringbuffer for incoming RB */
- pt_switch_sizedwords =
- adreno_iommu_set_pt_generate_cmds(incoming_rb,
- &link[0], pt);
- total_sizedwords += pt_switch_sizedwords;
-
- }
- }
-
- /*
- * Allocate total_sizedwords space in RB, this is the max space
- * required.
- */
- ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
-
- if (IS_ERR(ringcmds))
- return PTR_ERR(ringcmds);
-
- start = ringcmds;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 0;
-
- if (incoming_rb->preempted_midway) {
- for (i = 0; i < pt_switch_sizedwords; i++)
- *ringcmds++ = link[i];
- }
-
- *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
- ADRENO_REG_CP_PREEMPT_DISABLE), 1);
- *ringcmds++ = 0;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 1;
-
- ringcmds += gpudev->preemption_token(adreno_dev, rb, ringcmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, preempted));
-
- if ((uint)(ringcmds - start) > total_sizedwords) {
- KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
- BUG();
- }
-
- /*
- * If we have commands less than the space reserved in RB
- * adjust the wptr accordingly
- */
- rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
-
- /* submit just the preempt token */
- mb();
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
- return 0;
-}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 59f69a76279b..5507c6d9b0f0 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -14,6 +14,7 @@
#define __ADRENO_RINGBUFFER_H
#include "kgsl_iommu.h"
+#include "adreno_iommu.h"
#include "adreno_dispatch.h"
/* Given a ringbuffer, return the adreno device that owns it */
@@ -90,8 +91,6 @@ struct adreno_ringbuffer_pagetable_info {
* preemption info written/read by CP
* @pagetable_desc: Memory to hold information about the pagetables being used
* and the commands to switch pagetable on the RB
- * @pt_update_desc: The memory descriptor containing commands that update
- * pagetable
* @dispatch_q: The dispatcher side queue for this ringbuffer
* @ts_expire_waitq: Wait queue to wait for rb timestamp to expire
* @ts_expire_waitq: Wait q to wait for rb timestamp to expire
@@ -117,7 +116,6 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct kgsl_memdesc pt_update_desc;
struct adreno_dispatcher_cmdqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
@@ -194,9 +192,6 @@ int adreno_rb_readtimestamp(struct adreno_device *adreno_dev,
void *priv, enum kgsl_timestamp_type type,
unsigned int *timestamp);
-int adreno_ringbuffer_submit_preempt_token(struct adreno_ringbuffer *rb,
- struct adreno_ringbuffer *incoming_rb);
-
static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
unsigned int rptr)
{
@@ -219,4 +214,10 @@ static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
return (val + size - sizeof(unsigned int)) % size;
}
+static inline int adreno_ringbuffer_set_pt_ctx(struct adreno_ringbuffer *rb,
+ struct kgsl_pagetable *pt, struct adreno_context *context)
+{
+ return adreno_iommu_set_pt_ctx(rb, pt, context);
+}
+
#endif /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 6acc415dac76..a8a055eeae0a 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -234,10 +234,9 @@ static inline void parse_ib(struct kgsl_device *device,
static inline bool iommu_is_setstate_addr(struct kgsl_device *device,
uint64_t gpuaddr, uint64_t size)
{
- struct kgsl_iommu *iommu = device->mmu.priv;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
- if (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU ||
- iommu == NULL)
+ if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
return false;
return kgsl_gpuaddr_in_memdesc(&iommu->setstate, gpuaddr,
@@ -794,11 +793,7 @@ static void adreno_snapshot_iommu(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_mmu *mmu = &device->mmu;
- struct kgsl_iommu *iommu = mmu->priv;
-
- if (iommu == NULL)
- return;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
snapshot, snapshot_global, &iommu->setstate);
@@ -876,7 +871,7 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot,
snapshot, snapshot_global,
&adreno_dev->pwron_fixup);
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_IOMMU)
adreno_snapshot_iommu(device, snapshot);
if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 0149d1bf49c3..e42ade36e83f 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -63,10 +63,9 @@
#define KGSL_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif
-static char *ksgl_mmu_type;
-module_param_named(mmutype, ksgl_mmu_type, charp, 0);
-MODULE_PARM_DESC(ksgl_mmu_type,
-"Type of MMU to be used for graphics. Valid values are 'iommu' or 'nommu'");
+static char *kgsl_mmu_type;
+module_param_named(mmutype, kgsl_mmu_type, charp, 0);
+MODULE_PARM_DESC(kgsl_mmu_type, "Type of MMU to be used for graphics");
/* Mutex used for the IOMMU sync quirk */
DEFINE_MUTEX(kgsl_mmu_sync);
@@ -849,8 +848,7 @@ static void kgsl_destroy_process_private(struct kref *kref)
idr_destroy(&private->syncsource_idr);
/* When using global pagetables, do not detach global pagetable */
- if (kgsl_mmu_enabled() &&
- private->pagetable->name != KGSL_MMU_GLOBAL_PT)
+ if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
kgsl_mmu_putpagetable(private->pagetable);
kfree(private);
@@ -918,17 +916,15 @@ static struct kgsl_process_private *kgsl_process_private_new(
idr_init(&private->syncsource_idr);
/* Allocate a pagetable for the new process object */
- if (kgsl_mmu_enabled()) {
- private->pagetable = kgsl_mmu_getpagetable(&device->mmu, tgid);
- if (IS_ERR(private->pagetable)) {
- int err = PTR_ERR(private->pagetable);
+ private->pagetable = kgsl_mmu_getpagetable(&device->mmu, tgid);
+ if (IS_ERR(private->pagetable)) {
+ int err = PTR_ERR(private->pagetable);
- idr_destroy(&private->mem_idr);
- idr_destroy(&private->syncsource_idr);
+ idr_destroy(&private->mem_idr);
+ idr_destroy(&private->syncsource_idr);
- kfree(private);
- private = ERR_PTR(err);
- }
+ kfree(private);
+ private = ERR_PTR(err);
}
return private;
@@ -1003,8 +999,7 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
process_release_sync_sources(private);
/* When using global pagetables, do not detach global pagetable */
- if (kgsl_mmu_enabled() &&
- private->pagetable->name != KGSL_MMU_GLOBAL_PT)
+ if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
kgsl_mmu_detach_pagetable(private->pagetable);
/* Remove the process struct from the master list */
@@ -2324,11 +2319,8 @@ static long _map_usermem_addr(struct kgsl_device *device,
struct kgsl_pagetable *pagetable, struct kgsl_mem_entry *entry,
unsigned long hostptr, size_t offset, size_t size)
{
- if (!kgsl_mmu_enabled()) {
- KGSL_DRV_ERR(device,
- "Cannot map paged memory with the MMU disabled\n");
+ if (!MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED))
return -EINVAL;
- }
/* No CPU mapped buffer could ever be secure */
if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
@@ -2510,10 +2502,10 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
entry->memdesc.flags = ((uint64_t) param->flags)
| KGSL_MEMFLAGS_FORCE_32BIT;
- if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
+ if (!kgsl_mmu_use_cpu_map(mmu))
entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
if (param->flags & KGSL_MEMFLAGS_SECURE)
@@ -2960,7 +2952,7 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
if (entry == NULL)
return ERR_PTR(-ENOMEM);
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ if (MMU_FEATURE(&dev_priv->device->mmu, KGSL_MMU_NEED_GUARD_PAGE))
entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
if (flags & KGSL_MEMFLAGS_SECURE)
@@ -3896,11 +3888,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
- status = kgsl_mmu_init(device, ksgl_mmu_type);
- if (status != 0) {
- KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
+ status = kgsl_mmu_probe(device, kgsl_mmu_type);
+ if (status != 0)
goto error_pwrctrl_close;
- }
/* Check to see if our device can perform DMA correctly */
status = dma_set_coherent_mask(&device->pdev->dev, KGSL_DMA_BIT_MASK);
@@ -3943,9 +3933,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
/* Initialize common sysfs entries */
kgsl_pwrctrl_init_sysfs(device);
- dev_info(device->dev, "Initialized %s: mmu=%s\n", device->name,
- kgsl_mmu_enabled() ? "on" : "off");
-
return 0;
error_close_mmu:
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 695b2d38e3ed..7fc28fa27d41 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -19,6 +19,7 @@
#include <linux/iommu.h>
#include <linux/msm_kgsl.h>
#include <linux/ratelimit.h>
+#include <linux/of_platform.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <stddef.h>
@@ -35,6 +36,8 @@
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
+#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
+
static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;
@@ -478,10 +481,10 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- if (mmu == NULL || mmu->priv == NULL)
+ if (mmu == NULL)
return ret;
- iommu = mmu->priv;
+ iommu = _IOMMU_PRIV(mmu);
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
device = KGSL_MMU_DEVICE(mmu);
adreno_dev = ADRENO_DEVICE(device);
@@ -608,7 +611,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
*/
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int j;
atomic_dec(&iommu->clk_enable_count);
@@ -644,7 +647,7 @@ static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
{
int j;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
if (iommu->clks[j])
@@ -663,6 +666,21 @@ static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
return iommu_pt->ttbr0;
}
+static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ u64 ttbr0)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+ u64 domain_ttbr0;
+
+ if (iommu_pt == NULL)
+ return 0;
+
+ domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);
+
+ return (domain_ttbr0 == ttbr0);
+}
+
/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
{
@@ -688,7 +706,7 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
BUG_ON(!list_empty(&pt->list));
- iommu = mmu->priv;
+ iommu = _IOMMU_PRIV(mmu);
if (KGSL_MMU_SECURE_PT == pt->name)
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -788,6 +806,7 @@ _alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
pt->pt_ops = &iommu_pt_ops;
pt->priv = iommu_pt;
+ pt->fault_addr = ~0ULL;
iommu_pt->rbtree = RB_ROOT;
if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
@@ -822,7 +841,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_pt *iommu_pt = NULL;
int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -890,7 +909,7 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
@@ -947,7 +966,7 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
@@ -1027,6 +1046,24 @@ static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
}
}
+static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
+ unsigned long name)
+{
+ struct kgsl_pagetable *pt;
+
+ if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
+ name = KGSL_MMU_GLOBAL_PT;
+ if (mmu->defaultpagetable != NULL)
+ return mmu->defaultpagetable;
+ }
+
+ pt = kgsl_get_pagetable(name);
+ if (pt == NULL)
+ pt = kgsl_mmu_createpagetableobject(mmu, name);
+
+ return pt;
+}
+
/*
* kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
* @mmu - Pointer to mmu structure
@@ -1036,14 +1073,54 @@ static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
* Return - The address of register which can be used in type0 packet
*/
static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
- enum kgsl_iommu_context_id id, enum kgsl_iommu_reg_map reg)
+ int id, unsigned int reg)
{
- unsigned int result;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[id];
- result = ctx->gpu_offset + kgsl_iommu_reg_list[reg];
- return result;
+ return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
+}
+
+static void _detach_context(struct kgsl_iommu_context *ctx)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+
+ if (ctx->default_pt == NULL)
+ return;
+
+ iommu_pt = ctx->default_pt->priv;
+
+ _detach_pt(iommu_pt, ctx);
+
+ ctx->default_pt = NULL;
+}
+
+static void kgsl_iommu_close(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+ int i;
+
+ for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
+ _detach_context(&iommu->ctx[i]);
+
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ mmu->defaultpagetable = NULL;
+
+ kgsl_mmu_putpagetable(mmu->securepagetable);
+ mmu->securepagetable = NULL;
+
+ if (iommu->regbase != NULL)
+ iounmap(iommu->regbase);
+
+ kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
+
+ if (kgsl_guard_page != NULL) {
+ __free_page(kgsl_guard_page);
+ kgsl_guard_page = NULL;
+ }
+
+ kgsl_iommu_remove_global(mmu, &iommu->setstate);
+ kgsl_sharedmem_free(&iommu->setstate);
}
static int _setstate_alloc(struct kgsl_device *device,
@@ -1066,22 +1143,20 @@ static int _setstate_alloc(struct kgsl_device *device,
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
- /*
- * intialize device mmu
- *
- * call this with the global lock held
- */
- int status = 0;
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+ struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
+ int status;
+
+ mmu->features |= KGSL_MMU_PAGED;
+ mmu->features |= KGSL_MMU_NEED_GUARD_PAGE;
if (ctx->name == NULL) {
KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
return -EINVAL;
}
- status = _setstate_alloc(KGSL_MMU_DEVICE(mmu), iommu);
+ status = _setstate_alloc(device, iommu);
if (status)
return status;
@@ -1130,29 +1205,15 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
done:
if (status)
- kgsl_sharedmem_free(&iommu->setstate);
+ kgsl_iommu_close(mmu);
return status;
}
-static void _detach_context(struct kgsl_iommu_context *ctx)
-{
- struct kgsl_iommu_pt *iommu_pt;
-
- if (ctx->default_pt == NULL)
- return;
-
- iommu_pt = ctx->default_pt->priv;
-
- _detach_pt(iommu_pt, ctx);
-
- ctx->default_pt = NULL;
-}
-
static int _setup_user_context(struct kgsl_mmu *mmu)
{
int ret = 0;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1171,6 +1232,8 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
}
iommu_pt = mmu->defaultpagetable->priv;
+ if (iommu_pt == NULL)
+ return -ENODEV;
ret = _attach_pt(iommu_pt, ctx);
if (ret)
@@ -1211,7 +1274,7 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
static int _setup_secure_context(struct kgsl_mmu *mmu)
{
int ret;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
unsigned int cb_num;
@@ -1255,7 +1318,7 @@ done:
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
int status;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
status = _setup_user_context(mmu);
@@ -1368,7 +1431,7 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
/* This function must be called with context bank attached */
static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
unsigned int sctlr_val;
@@ -1394,7 +1457,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
if (ctx->default_pt != NULL && ctx->fault) {
@@ -1415,8 +1478,8 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int i;
- struct kgsl_iommu *iommu = mmu->priv;
/*
* If the iommu supports retention, we don't need
@@ -1428,40 +1491,11 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
}
}
-static void kgsl_iommu_close(struct kgsl_mmu *mmu)
-{
- struct kgsl_iommu *iommu = mmu->priv;
- int i;
-
- for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
- _detach_context(&iommu->ctx[i]);
-
- kgsl_mmu_putpagetable(mmu->defaultpagetable);
- mmu->defaultpagetable = NULL;
-
-
- kgsl_mmu_putpagetable(mmu->securepagetable);
- mmu->securepagetable = NULL;
-
- if (iommu->regbase != NULL)
- iounmap(iommu->regbase);
-
- kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
-
- if (kgsl_guard_page != NULL) {
- __free_page(kgsl_guard_page);
- kgsl_guard_page = NULL;
- }
-
- kgsl_iommu_remove_global(mmu, &iommu->setstate);
- kgsl_sharedmem_free(&iommu->setstate);
-}
-
static u64
kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
{
u64 val;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
/*
* We cannot enable or disable the clocks in interrupt context, this
* function is called from interrupt context if there is an axi error
@@ -1490,7 +1524,7 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pt)
{
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int ret = 0;
uint64_t ttbr0, temp;
@@ -1563,7 +1597,7 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
unsigned long pf_policy)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1605,7 +1639,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
static struct kgsl_protected_registers *
kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
return &iommu->protect;
}
@@ -1957,6 +1991,176 @@ static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
return false;
}
+static const struct {
+ int id;
+ char *name;
+} kgsl_iommu_cbs[] = {
+ { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
+ { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
+};
+
+static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
+ struct kgsl_iommu *iommu, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct kgsl_iommu_context *ctx = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
+ if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
+ int id = kgsl_iommu_cbs[i].id;
+
+ ctx = &iommu->ctx[id];
+ ctx->id = id;
+ ctx->cb_num = -1;
+ ctx->name = kgsl_iommu_cbs[i].name;
+
+ break;
+ }
+ }
+
+ if (ctx == NULL) {
+ KGSL_CORE_ERR("dt: Unknown context label %s\n", node->name);
+ return -EINVAL;
+ }
+
+ if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
+ device->mmu.secured = true;
+
+ /* this property won't be found for all context banks */
+ if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
+ ctx->gpu_offset = UINT_MAX;
+
+ ctx->kgsldev = device;
+
+ /* arm-smmu driver we'll have the right device pointer here. */
+ if (of_find_property(node, "iommus", NULL)) {
+ ctx->dev = &pdev->dev;
+ } else {
+ ctx->dev = kgsl_mmu_get_ctx(ctx->name);
+
+ if (IS_ERR(ctx->dev))
+ return PTR_ERR(ctx->dev);
+ }
+
+ return 0;
+}
+
+static const struct {
+ char *feature;
+ int bit;
+} kgsl_iommu_features[] = {
+ { "qcom,retention", KGSL_MMU_RETENTION },
+ { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
+ { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
+ { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
+ { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
+};
+
+static int _kgsl_iommu_probe(struct kgsl_device *device,
+ struct device_node *node)
+{
+ const char *cname;
+ struct property *prop;
+ u32 reg_val[2];
+ int i = 0;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct device_node *child;
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ memset(iommu, 0, sizeof(*iommu));
+
+ if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
+ iommu->version = 1;
+ else
+ iommu->version = 2;
+
+ if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
+ KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
+ return -EINVAL;
+ }
+ iommu->regstart = reg_val[0];
+ iommu->regsize = reg_val[1];
+
+ /* Protecting the SMMU registers is mandatory */
+ if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
+ KGSL_CORE_ERR("dt: no iommu protection range specified\n");
+ return -EINVAL;
+ }
+ iommu->protect.base = reg_val[0] / sizeof(u32);
+ iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
+
+ of_property_for_each_string(node, "clock-names", prop, cname) {
+ struct clk *c = devm_clk_get(&pdev->dev, cname);
+
+ if (IS_ERR(c)) {
+ KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
+ return -ENODEV;
+ }
+ if (i >= KGSL_IOMMU_MAX_CLKS) {
+ KGSL_CORE_ERR("dt: too many clocks defined.\n");
+ return -EINVAL;
+ }
+
+ iommu->clks[i] = c;
+ ++i;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
+ if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
+ device->mmu.features |= kgsl_iommu_features[i].bit;
+ }
+
+ if (of_property_read_u32(node, "qcom,micro-mmu-control",
+ &iommu->micro_mmu_ctrl))
+ iommu->micro_mmu_ctrl = UINT_MAX;
+
+ if (of_property_read_u32(node, "qcom,secure_align_mask",
+ &device->mmu.secure_align_mask))
+ device->mmu.secure_align_mask = 0xfff;
+
+ /* Fill out the rest of the devices in the node */
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
+
+ for_each_child_of_node(node, child) {
+ int ret;
+
+ if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
+ continue;
+
+ ret = _kgsl_iommu_cb_probe(device, iommu, child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct {
+ char *compat;
+ int (*probe)(struct kgsl_device *device, struct device_node *node);
+} kgsl_dt_devices[] = {
+ { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
+ { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
+};
+
+static int kgsl_iommu_probe(struct kgsl_device *device)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
+ struct device_node *node;
+
+ node = of_find_compatible_node(device->pdev->dev.of_node,
+ NULL, kgsl_dt_devices[i].compat);
+
+ if (node != NULL)
+ return kgsl_dt_devices[i].probe(device, node);
+ }
+
+ return -ENODEV;
+}
+
struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
@@ -1968,12 +2172,15 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk = kgsl_iommu_disable_clk,
.mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
+ .mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
.mmu_init_pt = kgsl_iommu_init_pt,
.mmu_add_global = kgsl_iommu_add_global,
.mmu_remove_global = kgsl_iommu_remove_global,
+ .mmu_getpagetable = kgsl_iommu_getpagetable,
+ .probe = kgsl_iommu_probe,
};
static struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 1983a863c28e..e99104bbc671 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,6 @@
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/iommu.h>
#include <linux/types.h>
#include "kgsl.h"
@@ -25,8 +24,6 @@
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
-static enum kgsl_mmutype kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
-
static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
static void kgsl_destroy_pagetable(struct kref *kref)
@@ -48,7 +45,7 @@ static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}
-static struct kgsl_pagetable *
+struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
struct kgsl_pagetable *pt, *ret = NULL;
@@ -210,10 +207,6 @@ kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable)
{
unsigned long flags;
- /* NOMMU has no pagetable so return early if its NULL */
- if (!pagetable)
- return;
-
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
if (!list_empty(&pagetable->list))
@@ -230,11 +223,12 @@ kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, u64 pt_base)
struct kgsl_pagetable *pt;
int ptid = -1;
- if (!mmu->mmu_ops)
+ if (!MMU_OP_VALID(mmu, mmu_pt_equal))
return KGSL_MMU_GLOBAL_PT;
+
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kgsl_mmu_pagetable_get_ttbr0(pt) == pt_base) {
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
ptid = (int) pt->name;
break;
}
@@ -264,17 +258,18 @@ struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
EXPORT_SYMBOL(kgsl_mmu_get_pt_from_ptname);
unsigned int
-kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base,
- unsigned int addr)
+kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, u64 pt_base,
+ uint64_t addr)
{
struct kgsl_pagetable *pt;
unsigned int ret = 0;
- if (!mmu->mmu_ops)
+ if (!MMU_OP_VALID(mmu, mmu_pt_equal))
return 0;
+
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kgsl_mmu_pagetable_get_ttbr0(pt) == pt_base) {
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
ret = 1;
break;
@@ -292,44 +287,30 @@ kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base,
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
-int kgsl_mmu_init(struct kgsl_device *device, char *mmutype)
+int kgsl_mmu_init(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
- int ret = 0;
-
- if (mmutype && !strcmp(mmutype, "nommu"))
- kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
-
- switch (kgsl_mmu_type) {
- case KGSL_MMU_TYPE_IOMMU:
- mmu->mmu_ops = &kgsl_iommu_ops;
- break;
- case KGSL_MMU_TYPE_NONE:
- break;
- }
if (MMU_OP_VALID(mmu, mmu_init))
- ret = mmu->mmu_ops->mmu_init(mmu);
+ return mmu->mmu_ops->mmu_init(mmu);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_init);
int kgsl_mmu_start(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
- int ret = 0;
- if (kgsl_mmu_type != KGSL_MMU_TYPE_NONE)
- ret = mmu->mmu_ops->mmu_start(mmu);
+ if (MMU_OP_VALID(mmu, mmu_start))
+ return mmu->mmu_ops->mmu_start(mmu);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);
-static struct kgsl_pagetable *
-kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
- unsigned int name)
+struct kgsl_pagetable *
+kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name)
{
int status = 0;
struct kgsl_pagetable *pagetable = NULL;
@@ -345,7 +326,6 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
pagetable->mmu = mmu;
pagetable->name = name;
- pagetable->fault_addr = 0xFFFFFFFF;
atomic_set(&pagetable->stats.entries, 0);
atomic_long_set(&pagetable->stats.mapped, 0);
@@ -353,8 +333,10 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
if (MMU_OP_VALID(mmu, mmu_init_pt)) {
status = mmu->mmu_ops->mmu_init_pt(mmu, pagetable);
- if (status)
- goto err;
+ if (status) {
+ kfree(pagetable);
+ return ERR_PTR(status);
+ }
}
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
@@ -365,36 +347,6 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
pagetable_add_sysfs_objects(pagetable);
return pagetable;
-
-err:
- if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
- pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
-
- kfree(pagetable);
-
- return ERR_PTR(status);
-}
-
-struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
- unsigned long name)
-{
- struct kgsl_pagetable *pt;
-
- if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
- return (void *)(-1);
-
- if (!kgsl_mmu_is_perprocess(mmu) && (KGSL_MMU_SECURE_PT != name)) {
- name = KGSL_MMU_GLOBAL_PT;
- if (mmu->defaultpagetable)
- return mmu->defaultpagetable;
- }
-
- pt = kgsl_get_pagetable(name);
-
- if (pt == NULL)
- pt = kgsl_mmu_createpagetableobject(mmu, name);
-
- return pt;
}
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
@@ -403,24 +355,6 @@ void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
-static int _nommu_get_gpuaddr(struct kgsl_memdesc *memdesc)
-{
- if (memdesc->sgt->nents > 1) {
- KGSL_CORE_ERR(
- "Attempt to map non-contiguous memory with NOMMU\n");
- return -EINVAL;
- }
-
- memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
-
- if (memdesc->gpuaddr == 0) {
- KGSL_CORE_ERR("Unable to get a physical address\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
 * kgsl_mmu_find_svm_region() - Find an empty spot in the SVM region
* @pagetable: KGSL pagetable to search
@@ -463,9 +397,6 @@ int
kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
- if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
- return _nommu_get_gpuaddr(memdesc);
-
if (PT_OP_VALID(pagetable, get_gpuaddr))
return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);
@@ -487,27 +418,21 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
(KGSL_MEMDESC_MAPPED & memdesc->priv))
return -EINVAL;
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
- return 0;
-
- /* Add space for the guard page when allocating the mmu VA. */
- size = memdesc->size;
- if (kgsl_memdesc_has_guard_page(memdesc))
- size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);
+ size = kgsl_memdesc_footprint(memdesc);
if (PT_OP_VALID(pagetable, mmu_map))
ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
- if (ret == 0) {
- KGSL_STATS_ADD(size, &pagetable->stats.mapped,
- &pagetable->stats.max_mapped);
+ if (ret)
+ return ret;
- atomic_inc(&pagetable->stats.entries);
+ atomic_inc(&pagetable->stats.entries);
+ KGSL_STATS_ADD(size, &pagetable->stats.mapped,
+ &pagetable->stats.max_mapped);
- memdesc->priv |= KGSL_MEMDESC_MAPPED;
- }
+ memdesc->priv |= KGSL_MEMDESC_MAPPED;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map);
@@ -516,19 +441,17 @@ EXPORT_SYMBOL(kgsl_mmu_map);
* @pagetable: Pagetable to release the memory from
* @memdesc: Memory descriptor containing the GPU address to free
*/
-int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+void kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
- return 0;
+ return;
if (PT_OP_VALID(pagetable, put_gpuaddr))
pagetable->pt_ops->put_gpuaddr(pagetable, memdesc);
if (!kgsl_memdesc_is_global(memdesc))
memdesc->gpuaddr = 0;
-
- return 0;
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
@@ -555,33 +478,16 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
uint64_t size;
- uint64_t start_addr = 0;
- uint64_t end_addr = 0;
if (memdesc->size == 0 || memdesc->gpuaddr == 0 ||
!(KGSL_MEMDESC_MAPPED & memdesc->priv))
return -EINVAL;
- if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
- return 0;
-
- /* Add space for the guard page when freeing the mmu VA. */
- size = memdesc->size;
- if (kgsl_memdesc_has_guard_page(memdesc))
- size += kgsl_memdesc_guard_page_size(pagetable->mmu, memdesc);
-
- start_addr = memdesc->gpuaddr;
- end_addr = (memdesc->gpuaddr + size);
+ size = kgsl_memdesc_footprint(memdesc);
if (PT_OP_VALID(pagetable, mmu_unmap))
pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
- /* If buffer is unmapped 0 fault addr */
- if ((pagetable->fault_addr >= start_addr) &&
- (pagetable->fault_addr < end_addr))
- pagetable->fault_addr = 0;
-
- /* Remove the statistics */
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
@@ -592,15 +498,6 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
-void kgsl_mmu_close(struct kgsl_device *device)
-{
- struct kgsl_mmu *mmu = &device->mmu;
-
- if (MMU_OP_VALID(mmu, mmu_close))
- mmu->mmu_ops->mmu_close(mmu);
-}
-EXPORT_SYMBOL(kgsl_mmu_close);
-
void kgsl_mmu_remove_global(struct kgsl_device *device,
struct kgsl_memdesc *memdesc)
{
@@ -621,36 +518,170 @@ void kgsl_mmu_add_global(struct kgsl_device *device,
}
EXPORT_SYMBOL(kgsl_mmu_add_global);
-int kgsl_mmu_enabled(void)
+void kgsl_mmu_close(struct kgsl_device *device)
{
- if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
- return 1;
- else
- return 0;
-}
-EXPORT_SYMBOL(kgsl_mmu_enabled);
+ struct kgsl_mmu *mmu = &(device->mmu);
-enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
-{
- return kgsl_mmu_type;
+ if (MMU_OP_VALID(mmu, mmu_close))
+ mmu->mmu_ops->mmu_close(mmu);
}
-EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
+EXPORT_SYMBOL(kgsl_mmu_close);
-void kgsl_mmu_set_mmutype(enum kgsl_mmutype type)
+enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device)
{
- kgsl_mmu_type = type;
+ return device ? device->mmu.type : KGSL_MMU_TYPE_NONE;
}
-EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
+EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr)
{
- if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
- return (gpuaddr != 0);
-
if (PT_OP_VALID(pagetable, addr_in_range))
return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr);
return false;
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
+
+/*
+ * NOMMU definitions - NOMMU really just means that the MMU is kept in
+ * pass-through and the GPU accesses physical memory directly. Used in debug
+ * mode and when a real MMU isn't up and running yet.
+ */
+
+static bool nommu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
+ uint64_t gpuaddr)
+{
+	return gpuaddr != 0;
+}
+
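+/*
+ * In pass-through the GPU address is just the physical address, so the
+ * backing memory has to be physically contiguous.
+ */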
+static int nommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ if (memdesc->sgt->nents > 1) {
+ WARN_ONCE(1,
+ "Attempt to map non-contiguous memory with NOMMU\n");
+ return -EINVAL;
+ }
+
+ memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
+
+ return memdesc->gpuaddr != 0 ? 0 : -ENOMEM;
+}
+
+static struct kgsl_mmu_pt_ops nommu_pt_ops = {
+ .get_gpuaddr = nommu_get_gpuaddr,
+ .addr_in_range = nommu_gpuaddr_in_range,
+};
+
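+/* Global buffers are identity-mapped as well */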
+static void nommu_add_global(struct kgsl_mmu *mmu,
+ struct kgsl_memdesc *memdesc)
+{
+ memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
+}
+
+static void nommu_remove_global(struct kgsl_mmu *mmu,
+ struct kgsl_memdesc *memdesc)
+{
+ memdesc->gpuaddr = 0;
+}
+
+static int nommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
+{
+ if (pt == NULL)
+ return -EINVAL;
+
+ pt->pt_ops = &nommu_pt_ops;
+ return 0;
+}
+
+static struct kgsl_pagetable *nommu_getpagetable(struct kgsl_mmu *mmu,
+ unsigned long name)
+{
+ struct kgsl_pagetable *pagetable;
+
+ pagetable = kgsl_get_pagetable(KGSL_MMU_GLOBAL_PT);
+
+ if (pagetable == NULL)
+ pagetable = kgsl_mmu_createpagetableobject(mmu,
+ KGSL_MMU_GLOBAL_PT);
+
+ return pagetable;
+}
+
+static int nommu_init(struct kgsl_mmu *mmu)
+{
+ mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
+ return 0;
+}
+
+static int nommu_probe(struct kgsl_device *device)
+{
+ /* NOMMU always exists */
+ return 0;
+}
+
+static struct kgsl_mmu_ops kgsl_nommu_ops = {
+ .mmu_init = nommu_init,
+ .mmu_add_global = nommu_add_global,
+ .mmu_remove_global = nommu_remove_global,
+ .mmu_init_pt = nommu_init_pt,
+ .mmu_getpagetable = nommu_getpagetable,
+ .probe = nommu_probe,
+};
+
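+/*
+ * Known MMU subtypes in probe order. nommu_probe() always succeeds, so
+ * "nommu" serves as the fallback when no IOMMU is compiled in or detected.
+ */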
+static struct {
+ const char *name;
+ unsigned int type;
+ struct kgsl_mmu_ops *ops;
+} kgsl_mmu_subtypes[] = {
+#ifdef CONFIG_MSM_KGSL_IOMMU
+ { "iommu", KGSL_MMU_TYPE_IOMMU, &kgsl_iommu_ops },
+#endif
+ { "nommu", KGSL_MMU_TYPE_NONE, &kgsl_nommu_ops },
+};
+
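+/*
+ * kgsl_mmu_probe() - Select and initialize an MMU subtype. If a specific
+ * type was requested try only that one; otherwise walk the subtype table
+ * and take the first one that probes successfully.
+ */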
+int kgsl_mmu_probe(struct kgsl_device *device, char *mmutype)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+ int ret, i;
+
+ if (mmutype != NULL) {
+ for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
+ if (strcmp(kgsl_mmu_subtypes[i].name, mmutype))
+ continue;
+
+ ret = kgsl_mmu_subtypes[i].ops->probe(device);
+
+ if (ret == 0) {
+ mmu->type = kgsl_mmu_subtypes[i].type;
+ mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;
+
+ if (MMU_OP_VALID(mmu, mmu_init))
+ return mmu->mmu_ops->mmu_init(mmu);
+ }
+
+ return ret;
+ }
+
+ KGSL_CORE_ERR("mmu: MMU type '%s' unknown\n", mmutype);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
+ ret = kgsl_mmu_subtypes[i].ops->probe(device);
+
+ if (ret == 0) {
+ mmu->type = kgsl_mmu_subtypes[i].type;
+ mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;
+
+ if (MMU_OP_VALID(mmu, mmu_init))
+ return mmu->mmu_ops->mmu_init(mmu);
+
+ return 0;
+ }
+ }
+
+ KGSL_CORE_ERR("mmu: couldn't detect any known MMU types\n");
+ return -ENODEV;
+}
+EXPORT_SYMBOL(kgsl_mmu_probe);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 56b613612a90..90882e4ab8c7 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -41,7 +41,7 @@ struct kgsl_pagetable {
atomic_long_t max_mapped;
} stats;
const struct kgsl_mmu_pt_ops *pt_ops;
- unsigned int fault_addr;
+ uint64_t fault_addr;
void *priv;
struct kgsl_mmu *mmu;
};
@@ -49,6 +49,7 @@ struct kgsl_pagetable {
struct kgsl_mmu;
struct kgsl_mmu_ops {
+ int (*probe)(struct kgsl_device *device);
int (*mmu_init) (struct kgsl_mmu *mmu);
void (*mmu_close)(struct kgsl_mmu *mmu);
int (*mmu_start) (struct kgsl_mmu *mmu);
@@ -60,8 +61,9 @@ struct kgsl_mmu_ops {
void (*mmu_enable_clk)(struct kgsl_mmu *mmu);
void (*mmu_disable_clk)(struct kgsl_mmu *mmu);
unsigned int (*mmu_get_reg_ahbaddr)(struct kgsl_mmu *mmu,
- enum kgsl_iommu_context_id ctx_id,
- enum kgsl_iommu_reg_map reg);
+ int ctx_id, unsigned int reg);
+ bool (*mmu_pt_equal)(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt, u64 ttbr0);
int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
struct kgsl_protected_registers *(*mmu_get_prot_regs)
(struct kgsl_mmu *mmu);
@@ -70,6 +72,8 @@ struct kgsl_mmu_ops {
struct kgsl_memdesc *memdesc);
void (*mmu_remove_global)(struct kgsl_mmu *mmu,
struct kgsl_memdesc *memdesc);
+ struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
+ unsigned long name);
};
struct kgsl_mmu_pt_ops {
@@ -111,49 +115,61 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_64BIT BIT(6)
/* MMU can do coherent hardware table walks */
#define KGSL_MMU_COHERENT_HTW BIT(7)
-
+/* The MMU supports non-contiguous pages */
+#define KGSL_MMU_PAGED BIT(8)
+/* The device requires a guard page */
+#define KGSL_MMU_NEED_GUARD_PAGE BIT(9)
+
+/**
+ * struct kgsl_mmu - Master definition for KGSL MMU devices
+ * @flags: MMU device flags
+ * @type: Type of MMU that is attached
+ * @defaultpagetable: Default pagetable object for the MMU
+ * @securepagetable: Default secure pagetable object for the MMU
+ * @mmu_ops: Function pointers for the MMU sub-type
+ * @secured: True if the MMU needs to be secured
+ * @features: Static list of MMU features
+ * @secure_align_mask: Mask that secure buffers need to be aligned to
+ * @priv: Union of sub-device specific members
+ */
struct kgsl_mmu {
- uint32_t flags;
- /* current page table object being used by device mmu */
- struct kgsl_pagetable *defaultpagetable;
- /* secure global pagetable device mmu */
- struct kgsl_pagetable *securepagetable;
+ unsigned long flags;
+ enum kgsl_mmutype type;
+ struct kgsl_pagetable *defaultpagetable;
+ struct kgsl_pagetable *securepagetable;
const struct kgsl_mmu_ops *mmu_ops;
- void *priv;
bool secured;
- uint features;
+ unsigned long features;
unsigned int secure_align_mask;
+ union {
+ struct kgsl_iommu iommu;
+ } priv;
};
-extern struct kgsl_mmu_ops kgsl_iommu_ops;
+#define KGSL_IOMMU_PRIV(_device) (&((_device)->mmu.priv.iommu))
-struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *,
- unsigned long name);
+extern struct kgsl_mmu_ops kgsl_iommu_ops;
+int kgsl_mmu_probe(struct kgsl_device *device, char *name);
+int kgsl_mmu_start(struct kgsl_device *device);
struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *,
u64 ptbase);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
-int kgsl_mmu_init(struct kgsl_device *device, char *mmutype);
-int kgsl_mmu_start(struct kgsl_device *device);
-void kgsl_mmu_close(struct kgsl_device *device);
-int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
- struct kgsl_memdesc *memdesc);
+
int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
-int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
- struct kgsl_memdesc *memdesc);
+int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
-int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+void kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, u64 pt_base);
unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
- phys_addr_t pt_base, unsigned int addr);
-int kgsl_mmu_enabled(void);
-void kgsl_mmu_set_mmutype(enum kgsl_mmutype type);
-enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
+ u64 ttbr0, uint64_t addr);
+enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device);
bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, uint64_t gpuaddr);
int kgsl_mmu_get_region(struct kgsl_pagetable *pagetable,
@@ -170,6 +186,7 @@ void kgsl_mmu_remove_global(struct kgsl_device *device,
struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
int ptname);
+void kgsl_mmu_close(struct kgsl_device *device);
uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
uint64_t start, uint64_t end, uint64_t size,
@@ -183,6 +200,11 @@ void kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable);
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
uint64_t *lo, uint64_t *hi, uint64_t memflags);
+struct kgsl_pagetable *kgsl_get_pagetable(unsigned long name);
+
+struct kgsl_pagetable *
+kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name);
+
/*
* Static inline functions of MMU that simply call the SMMU specific
* function using a function pointer. These functions can be thought
@@ -207,6 +229,15 @@ static inline u64 kgsl_mmu_get_current_ttbr0(struct kgsl_mmu *mmu)
return 0;
}
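+/* Dispatch to the subtype's getpagetable op; returns NULL if none is set */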
+static inline struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
+ unsigned long name)
+{
+ if (MMU_OP_VALID(mmu, mmu_getpagetable))
+ return mmu->mmu_ops->mmu_getpagetable(mmu, name);
+
+ return NULL;
+}
+
static inline int kgsl_mmu_set_pt(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable)
{
@@ -222,6 +253,15 @@ static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
mmu->mmu_ops->mmu_stop(mmu);
}
+static inline bool kgsl_mmu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt, u64 ttbr0)
+{
+ if (MMU_OP_VALID(mmu, mmu_pt_equal))
+ return mmu->mmu_ops->mmu_pt_equal(mmu, pt, ttbr0);
+
+ return false;
+}
+
static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu)
{
if (MMU_OP_VALID(mmu, mmu_enable_clk))
@@ -244,8 +284,7 @@ static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
* Returns the ahb address of reg else 0
*/
static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
- enum kgsl_iommu_context_id ctx_id,
- enum kgsl_iommu_reg_map reg)
+ int ctx_id, unsigned int reg)
{
if (MMU_OP_VALID(mmu, mmu_get_reg_ahbaddr))
return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, ctx_id, reg);
@@ -253,32 +292,6 @@ static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
return 0;
}
-/*
- * kgsl_mmu_is_perprocess() - Runtime check for per-process
- * pagetables.
- * @mmu: the mmu
- *
- * Returns true if per-process pagetables are enabled,
- * false if not.
- */
-static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
-{
- return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
-}
-
-/*
- * kgsl_mmu_use_cpu_map() - Runtime check for matching the CPU
- * address space on the GPU.
- * @mmu: the mmu
- *
- * Returns true if supported false if not.
- */
-static inline int kgsl_mmu_use_cpu_map(struct kgsl_mmu *mmu)
-{
- return kgsl_mmu_is_perprocess(mmu) &&
- kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_NONE;
-}
-
static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
unsigned long pf_policy)
{
@@ -309,6 +322,16 @@ static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
return NULL;
}
+static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
+{
+ return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
+}
+
+static inline int kgsl_mmu_use_cpu_map(struct kgsl_mmu *mmu)
+{
+ return kgsl_mmu_is_perprocess(mmu);
+}
+
static inline int kgsl_mmu_is_secured(struct kgsl_mmu *mmu)
{
return mmu && (mmu->secured) && (mmu->securepagetable);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index bc955ae66285..893e31852ccb 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -342,7 +342,7 @@ int kgsl_allocate_user(struct kgsl_device *device,
memdesc->flags = flags;
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
ret = kgsl_sharedmem_alloc_contig(device, memdesc,
pagetable, size);
else if (flags & KGSL_MEMFLAGS_SECURE)
@@ -1098,7 +1098,7 @@ int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
/* Record statistics */
- if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
memdesc->gpuaddr = memdesc->physaddr;
KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
@@ -1169,7 +1169,7 @@ static int scm_lock_chunk(struct kgsl_memdesc *memdesc, int lock)
static int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint64_t size)
{
- struct kgsl_iommu *iommu = device->mmu.priv;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
int result = 0;
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
size_t aligned;