summaryrefslogtreecommitdiff
path: root/drivers/gpu/msm/kgsl_iommu.c
diff options
context:
space:
mode:
authorJordan Crouse <jcrouse@codeaurora.org>2016-02-01 08:22:09 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:19:19 -0700
commitd4a7e95db83b778529d1cdcaa124f0be903e494c (patch)
tree5183dc9111689c92e18c82897773c773d6297837 /drivers/gpu/msm/kgsl_iommu.c
parent9c8d55382c2f454be420df38a745cbbf774c2677 (diff)
msm: kgsl: Refactor MMU/IOMMU support
The current MMU code assumes a binary state - either there is a IOMMU or there isn't. This precludes other memory models and makes for a lot of inherent IOMMU knowledge in the generic MMU code and the rest of the driver. Reorganize and cleanup the MMU and IOMMU code: * Add a Kconfig boolean dependent on ARM and/or MSM SMMU support. * Make "nommu" mode an actual MMU subtype and figure out available MMU subtypes at probe time. * Move IOMMU device tree parsing to the IOMMU code. * Move the MMU subtype private structures into struct kgsl_mmu. * Move adreno_iommu specific functions out of other generic adreno code. * Move A4XX specific preemption code out of the ringbuffer code. CRs-Fixed: 970264 Change-Id: Ic0dedbad1293a1d129b7c4ed1105d684ca84d97f Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Diffstat (limited to 'drivers/gpu/msm/kgsl_iommu.c')
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c359
1 file changed, 283 insertions, 76 deletions
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 695b2d38e3ed..7fc28fa27d41 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -19,6 +19,7 @@
#include <linux/iommu.h>
#include <linux/msm_kgsl.h>
#include <linux/ratelimit.h>
+#include <linux/of_platform.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <stddef.h>
@@ -35,6 +36,8 @@
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
+#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
+
static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;
@@ -478,10 +481,10 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- if (mmu == NULL || mmu->priv == NULL)
+ if (mmu == NULL)
return ret;
- iommu = mmu->priv;
+ iommu = _IOMMU_PRIV(mmu);
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
device = KGSL_MMU_DEVICE(mmu);
adreno_dev = ADRENO_DEVICE(device);
@@ -608,7 +611,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
*/
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int j;
atomic_dec(&iommu->clk_enable_count);
@@ -644,7 +647,7 @@ static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
{
int j;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
if (iommu->clks[j])
@@ -663,6 +666,21 @@ static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
return iommu_pt->ttbr0;
}
+static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ u64 ttbr0)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+ u64 domain_ttbr0;
+
+ if (iommu_pt == NULL)
+ return 0;
+
+ domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);
+
+ return (domain_ttbr0 == ttbr0);
+}
+
/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
{
@@ -688,7 +706,7 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
BUG_ON(!list_empty(&pt->list));
- iommu = mmu->priv;
+ iommu = _IOMMU_PRIV(mmu);
if (KGSL_MMU_SECURE_PT == pt->name)
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -788,6 +806,7 @@ _alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
pt->pt_ops = &iommu_pt_ops;
pt->priv = iommu_pt;
+ pt->fault_addr = ~0ULL;
iommu_pt->rbtree = RB_ROOT;
if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
@@ -822,7 +841,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_pt *iommu_pt = NULL;
int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -890,7 +909,7 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
@@ -947,7 +966,7 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
@@ -1027,6 +1046,24 @@ static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
}
}
+static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
+ unsigned long name)
+{
+ struct kgsl_pagetable *pt;
+
+ if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
+ name = KGSL_MMU_GLOBAL_PT;
+ if (mmu->defaultpagetable != NULL)
+ return mmu->defaultpagetable;
+ }
+
+ pt = kgsl_get_pagetable(name);
+ if (pt == NULL)
+ pt = kgsl_mmu_createpagetableobject(mmu, name);
+
+ return pt;
+}
+
/*
* kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
* @mmu - Pointer to mmu structure
@@ -1036,14 +1073,54 @@ static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
* Return - The address of register which can be used in type0 packet
*/
static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
- enum kgsl_iommu_context_id id, enum kgsl_iommu_reg_map reg)
+ int id, unsigned int reg)
{
- unsigned int result;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[id];
- result = ctx->gpu_offset + kgsl_iommu_reg_list[reg];
- return result;
+ return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
+}
+
+static void _detach_context(struct kgsl_iommu_context *ctx)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+
+ if (ctx->default_pt == NULL)
+ return;
+
+ iommu_pt = ctx->default_pt->priv;
+
+ _detach_pt(iommu_pt, ctx);
+
+ ctx->default_pt = NULL;
+}
+
+static void kgsl_iommu_close(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+ int i;
+
+ for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
+ _detach_context(&iommu->ctx[i]);
+
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ mmu->defaultpagetable = NULL;
+
+ kgsl_mmu_putpagetable(mmu->securepagetable);
+ mmu->securepagetable = NULL;
+
+ if (iommu->regbase != NULL)
+ iounmap(iommu->regbase);
+
+ kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
+
+ if (kgsl_guard_page != NULL) {
+ __free_page(kgsl_guard_page);
+ kgsl_guard_page = NULL;
+ }
+
+ kgsl_iommu_remove_global(mmu, &iommu->setstate);
+ kgsl_sharedmem_free(&iommu->setstate);
}
static int _setstate_alloc(struct kgsl_device *device,
@@ -1066,22 +1143,20 @@ static int _setstate_alloc(struct kgsl_device *device,
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
- /*
- * intialize device mmu
- *
- * call this with the global lock held
- */
- int status = 0;
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+ struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
+ int status;
+
+ mmu->features |= KGSL_MMU_PAGED;
+ mmu->features |= KGSL_MMU_NEED_GUARD_PAGE;
if (ctx->name == NULL) {
KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
return -EINVAL;
}
- status = _setstate_alloc(KGSL_MMU_DEVICE(mmu), iommu);
+ status = _setstate_alloc(device, iommu);
if (status)
return status;
@@ -1130,29 +1205,15 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
done:
if (status)
- kgsl_sharedmem_free(&iommu->setstate);
+ kgsl_iommu_close(mmu);
return status;
}
-static void _detach_context(struct kgsl_iommu_context *ctx)
-{
- struct kgsl_iommu_pt *iommu_pt;
-
- if (ctx->default_pt == NULL)
- return;
-
- iommu_pt = ctx->default_pt->priv;
-
- _detach_pt(iommu_pt, ctx);
-
- ctx->default_pt = NULL;
-}
-
static int _setup_user_context(struct kgsl_mmu *mmu)
{
int ret = 0;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1171,6 +1232,8 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
}
iommu_pt = mmu->defaultpagetable->priv;
+ if (iommu_pt == NULL)
+ return -ENODEV;
ret = _attach_pt(iommu_pt, ctx);
if (ret)
@@ -1211,7 +1274,7 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
static int _setup_secure_context(struct kgsl_mmu *mmu)
{
int ret;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
unsigned int cb_num;
@@ -1255,7 +1318,7 @@ done:
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
int status;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
status = _setup_user_context(mmu);
@@ -1368,7 +1431,7 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
/* This function must be called with context bank attached */
static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
unsigned int sctlr_val;
@@ -1394,7 +1457,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
if (ctx->default_pt != NULL && ctx->fault) {
@@ -1415,8 +1478,8 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
int i;
- struct kgsl_iommu *iommu = mmu->priv;
/*
* If the iommu supports retention, we don't need
@@ -1428,40 +1491,11 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
}
}
-static void kgsl_iommu_close(struct kgsl_mmu *mmu)
-{
- struct kgsl_iommu *iommu = mmu->priv;
- int i;
-
- for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
- _detach_context(&iommu->ctx[i]);
-
- kgsl_mmu_putpagetable(mmu->defaultpagetable);
- mmu->defaultpagetable = NULL;
-
-
- kgsl_mmu_putpagetable(mmu->securepagetable);
- mmu->securepagetable = NULL;
-
- if (iommu->regbase != NULL)
- iounmap(iommu->regbase);
-
- kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
-
- if (kgsl_guard_page != NULL) {
- __free_page(kgsl_guard_page);
- kgsl_guard_page = NULL;
- }
-
- kgsl_iommu_remove_global(mmu, &iommu->setstate);
- kgsl_sharedmem_free(&iommu->setstate);
-}
-
static u64
kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
{
u64 val;
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
/*
* We cannot enable or disable the clocks in interrupt context, this
* function is called from interrupt context if there is an axi error
@@ -1490,7 +1524,7 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pt)
{
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int ret = 0;
uint64_t ttbr0, temp;
@@ -1563,7 +1597,7 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
unsigned long pf_policy)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1605,7 +1639,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
static struct kgsl_protected_registers *
kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
return &iommu->protect;
}
@@ -1957,6 +1991,176 @@ static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
return false;
}
+static const struct {
+ int id;
+ char *name;
+} kgsl_iommu_cbs[] = {
+ { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
+ { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
+};
+
+static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
+ struct kgsl_iommu *iommu, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct kgsl_iommu_context *ctx = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
+ if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
+ int id = kgsl_iommu_cbs[i].id;
+
+ ctx = &iommu->ctx[id];
+ ctx->id = id;
+ ctx->cb_num = -1;
+ ctx->name = kgsl_iommu_cbs[i].name;
+
+ break;
+ }
+ }
+
+ if (ctx == NULL) {
+ KGSL_CORE_ERR("dt: Unknown context label %s\n", node->name);
+ return -EINVAL;
+ }
+
+ if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
+ device->mmu.secured = true;
+
+ /* this property won't be found for all context banks */
+ if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
+ ctx->gpu_offset = UINT_MAX;
+
+ ctx->kgsldev = device;
+
+ /* With the arm-smmu driver we'll have the right device pointer here. */
+ if (of_find_property(node, "iommus", NULL)) {
+ ctx->dev = &pdev->dev;
+ } else {
+ ctx->dev = kgsl_mmu_get_ctx(ctx->name);
+
+ if (IS_ERR(ctx->dev))
+ return PTR_ERR(ctx->dev);
+ }
+
+ return 0;
+}
+
+static const struct {
+ char *feature;
+ int bit;
+} kgsl_iommu_features[] = {
+ { "qcom,retention", KGSL_MMU_RETENTION },
+ { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
+ { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
+ { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
+ { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
+};
+
+static int _kgsl_iommu_probe(struct kgsl_device *device,
+ struct device_node *node)
+{
+ const char *cname;
+ struct property *prop;
+ u32 reg_val[2];
+ int i = 0;
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct device_node *child;
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ memset(iommu, 0, sizeof(*iommu));
+
+ if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
+ iommu->version = 1;
+ else
+ iommu->version = 2;
+
+ if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
+ KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
+ return -EINVAL;
+ }
+ iommu->regstart = reg_val[0];
+ iommu->regsize = reg_val[1];
+
+ /* Protecting the SMMU registers is mandatory */
+ if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
+ KGSL_CORE_ERR("dt: no iommu protection range specified\n");
+ return -EINVAL;
+ }
+ iommu->protect.base = reg_val[0] / sizeof(u32);
+ iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
+
+ of_property_for_each_string(node, "clock-names", prop, cname) {
+ struct clk *c = devm_clk_get(&pdev->dev, cname);
+
+ if (IS_ERR(c)) {
+ KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
+ return -ENODEV;
+ }
+ if (i >= KGSL_IOMMU_MAX_CLKS) {
+ KGSL_CORE_ERR("dt: too many clocks defined.\n");
+ return -EINVAL;
+ }
+
+ iommu->clks[i] = c;
+ ++i;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
+ if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
+ device->mmu.features |= kgsl_iommu_features[i].bit;
+ }
+
+ if (of_property_read_u32(node, "qcom,micro-mmu-control",
+ &iommu->micro_mmu_ctrl))
+ iommu->micro_mmu_ctrl = UINT_MAX;
+
+ if (of_property_read_u32(node, "qcom,secure_align_mask",
+ &device->mmu.secure_align_mask))
+ device->mmu.secure_align_mask = 0xfff;
+
+ /* Fill out the rest of the devices in the node */
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
+
+ for_each_child_of_node(node, child) {
+ int ret;
+
+ if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
+ continue;
+
+ ret = _kgsl_iommu_cb_probe(device, iommu, child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct {
+ char *compat;
+ int (*probe)(struct kgsl_device *device, struct device_node *node);
+} kgsl_dt_devices[] = {
+ { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
+ { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
+};
+
+static int kgsl_iommu_probe(struct kgsl_device *device)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
+ struct device_node *node;
+
+ node = of_find_compatible_node(device->pdev->dev.of_node,
+ NULL, kgsl_dt_devices[i].compat);
+
+ if (node != NULL)
+ return kgsl_dt_devices[i].probe(device, node);
+ }
+
+ return -ENODEV;
+}
+
struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
@@ -1968,12 +2172,15 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk = kgsl_iommu_disable_clk,
.mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
+ .mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
.mmu_init_pt = kgsl_iommu_init_pt,
.mmu_add_global = kgsl_iommu_add_global,
.mmu_remove_global = kgsl_iommu_remove_global,
+ .mmu_getpagetable = kgsl_iommu_getpagetable,
+ .probe = kgsl_iommu_probe,
};
static struct kgsl_mmu_pt_ops iommu_pt_ops = {