/*
 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/dma-iommu.h>
#include <soc/qcom/secure_buffer.h>

#include "msm_drv.h"
#include "msm_mmu.h"

#ifndef SZ_4G
#define SZ_4G	(((size_t) SZ_1G) * 4)
#endif

struct msm_smmu_client {
	struct device *dev;
	struct dma_iommu_mapping *mmu_mapping;
	bool domain_attached;
};

struct msm_smmu {
	struct msm_mmu base;
	struct device *client_dev;
	struct msm_smmu_client *client;
};

struct msm_smmu_domain {
	const char *label;
	size_t va_start;
	size_t va_size;
	bool secure;
};

#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)

static int msm_smmu_fault_handler(struct iommu_domain *iommu,
		struct device *dev, unsigned long iova, int flags, void *arg)
{
	dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n",
			__func__, iova, flags, iommu);
	return 0;
}

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
		const struct msm_smmu_domain *domain);

static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int rc = 0;

	if (!client) {
		pr_err("undefined smmu client\n");
		return -EINVAL;
	}

	/* domain attach only once */
	if (client->domain_attached)
		return 0;

	rc = arm_iommu_attach_device(client->dev, client->mmu_mapping);
	if (rc) {
		dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
		return rc;
	}

	client->domain_attached = true;

	dev_dbg(client->dev, "iommu domain attached\n");

	return 0;
}

static void msm_smmu_detach(struct msm_mmu *mmu)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	if (!client) {
		pr_err("undefined smmu client\n");
		return;
	}

	if (!client->domain_attached)
		return;

	arm_iommu_detach_device(client->dev);
	client->domain_attached = false;

	dev_dbg(client->dev, "iommu domain detached\n");
}

static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, u32 flags, void *priv)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret;

	if (!client || !sgt)
		return -EINVAL;

	if (priv)
		ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
				DMA_BIDIRECTIONAL, priv);
	else
		ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
				DMA_BIDIRECTIONAL);

	return (ret != sgt->nents) ? -ENOMEM : 0;
}
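/*
 * Note: the iova argument is unused in the map/unmap paths above and below.
 * Once the device has been attached to the arm_iommu mapping, dma_map_sg()
 * (or the lazy MSM variant) allocates the IOVA from that mapping itself.
 */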
static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, void *priv)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	if (priv)
		msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
				DMA_BIDIRECTIONAL, priv);
	else
		dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
				DMA_BIDIRECTIONAL);
}

static int msm_smmu_early_splash_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, u32 flags)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;

	if (!client || !sgt)
		return -EINVAL;

	if (!client->mmu_mapping || !client->mmu_mapping->domain)
		return -EINVAL;

	domain = client->mmu_mapping->domain;

	return iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, flags);
}

static void msm_smmu_early_splash_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	size_t len = 0;
	int unmapped, i = 0;

	if (!client || !client->mmu_mapping || !client->mmu_mapping->domain)
		return;

	domain = client->mmu_mapping->domain;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		len += sg->length;

	unmapped = iommu_unmap(domain, iova, len);
	if (unmapped < len)
		DRM_ERROR("could not unmap iova@%llx\n", iova);
}

static void msm_smmu_destroy(struct msm_mmu *mmu)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct platform_device *pdev = to_platform_device(smmu->client_dev);

	if (smmu->client_dev)
		platform_device_unregister(pdev);

	kfree(smmu);
}

/* user can call this API to set the attribute of smmu */
static int msm_smmu_set_property(struct msm_mmu *mmu,
		enum iommu_attr attr, void *data)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	int ret = 0;

	if (!client)
		return -EINVAL;

	domain = client->mmu_mapping->domain;
	if (!domain)
		return -EINVAL;

	ret = iommu_domain_set_attr(domain, attr, data);
	if (ret)
		DRM_ERROR("set domain attribute failed\n");

	return ret;
}

static const struct msm_mmu_funcs funcs = {
	.attach = msm_smmu_attach,
	.detach = msm_smmu_detach,
	.map = msm_smmu_map,
	.unmap = msm_smmu_unmap,
	.destroy = msm_smmu_destroy,
	.early_splash_map = msm_smmu_early_splash_map,
	.early_splash_unmap = msm_smmu_early_splash_unmap,
	.set_property = msm_smmu_set_property,
};

static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
	[MSM_SMMU_DOMAIN_UNSECURE] = {
		.label = "mdp_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_SECURE] = {
		.label = "mdp_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
		.label = "rot_ns",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
		.label = "rot_s",
		.va_start = SZ_128K,
		.va_size = SZ_4G - SZ_128K,
		.secure = true,
	},
};

static const struct of_device_id msm_smmu_dt_match[] = {
	{ .compatible = "qcom,smmu_sde_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_unsec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
	{ .compatible = "qcom,smmu_sde_nrt_sec",
		.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
	{}
};
MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
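/*
 * Each compatible above selects one entry of msm_smmu_domains[]. Except for
 * the unsecure display domain, which reuses the parent display device
 * directly, every domain is backed by a child platform device (its own SMMU
 * context bank) created in msm_smmu_device_create() and probed by
 * msm_smmu_probe() below.
 */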
static struct device *msm_smmu_device_create(struct device *dev,
		enum msm_mmu_domain_type domain,
		struct msm_smmu *smmu)
{
	struct device_node *child;
	struct platform_device *pdev;
	int i;
	const char *compat = NULL;

	for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
		if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
			compat = msm_smmu_dt_match[i].compatible;
			break;
		}
	}

	if (!compat) {
		DRM_ERROR("unable to find matching domain for %d\n", domain);
		return ERR_PTR(-ENOENT);
	}
	DRM_INFO("found domain %d compat: %s\n", domain, compat);

	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
		int rc;

		smmu->client = devm_kzalloc(dev,
				sizeof(struct msm_smmu_client), GFP_KERNEL);
		if (!smmu->client)
			return ERR_PTR(-ENOMEM);

		smmu->client->dev = dev;

		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
				msm_smmu_dt_match[i].data);
		if (rc) {
			devm_kfree(dev, smmu->client);
			smmu->client = NULL;
			return ERR_PTR(rc);
		}

		return NULL;
	}

	child = of_find_compatible_node(dev->of_node, NULL, compat);
	if (!child) {
		DRM_ERROR("unable to find compatible node for %s\n", compat);
		return ERR_PTR(-ENODEV);
	}

	pdev = of_platform_device_create(child, NULL, dev);
	if (!pdev) {
		DRM_ERROR("unable to create smmu platform dev for domain %d\n",
				domain);
		return ERR_PTR(-ENODEV);
	}

	smmu->client = platform_get_drvdata(pdev);

	return &pdev->dev;
}

void msm_smmu_register_fault_handler(struct msm_mmu *mmu,
		iommu_fault_handler_t handler)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	if (client)
		iommu_set_fault_handler(client->mmu_mapping->domain,
				handler, client->dev);
}

struct msm_mmu *msm_smmu_new(struct device *dev,
		enum msm_mmu_domain_type domain)
{
	struct msm_smmu *smmu;
	struct device *client_dev;
	struct msm_smmu_client *client;

	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	client_dev = msm_smmu_device_create(dev, domain, smmu);
	if (IS_ERR(client_dev)) {
		kfree(smmu);
		return (void *)client_dev ? : ERR_PTR(-ENODEV);
	}

	smmu->client_dev = client_dev;
	msm_mmu_init(&smmu->base, dev, &funcs);

	client = msm_smmu_to_client(smmu);
	if (client)
		iommu_set_fault_handler(client->mmu_mapping->domain,
				msm_smmu_fault_handler, dev);

	return &smmu->base;
}

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
		const struct msm_smmu_domain *domain)
{
	int rc;

	client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
			domain->va_start, domain->va_size);
	if (IS_ERR(client->mmu_mapping)) {
		dev_err(client->dev,
			"iommu create mapping failed for domain=%s\n",
			domain->label);
		return PTR_ERR(client->mmu_mapping);
	}

	if (domain->secure) {
		/* tag secure domains with the content-protection pixel VMID */
		int secure_vmid = VMID_CP_PIXEL;

		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			dev_err(client->dev, "couldn't set secure pix vmid\n");
			goto error;
		}
	}

	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n", domain->label,
			domain->va_start, domain->va_size, domain->secure);

	return 0;

error:
	arm_iommu_release_mapping(client->mmu_mapping);
	return rc;
}
/**
 * msm_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks as each context bank has its own clks, for which voting
 * has to be done every time before using that context bank.
 */
static int msm_smmu_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct msm_smmu_client *client;
	const struct msm_smmu_domain *domain;
	int rc;

	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
		return -EINVAL;
	}

	domain = match->data;
	if (!domain) {
		dev_err(&pdev->dev, "no matching device found\n");
		return -EINVAL;
	}

	DRM_INFO("probing device %s\n", match->compatible);

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->dev = &pdev->dev;

	rc = _msm_smmu_create_mapping(client, domain);
	platform_set_drvdata(pdev, client);

	return rc;
}

static int msm_smmu_remove(struct platform_device *pdev)
{
	struct msm_smmu_client *client;

	client = platform_get_drvdata(pdev);
	if (client->domain_attached) {
		arm_iommu_detach_device(client->dev);
		client->domain_attached = false;
	}
	arm_iommu_release_mapping(client->mmu_mapping);

	return 0;
}

static struct platform_driver msm_smmu_driver = {
	.probe = msm_smmu_probe,
	.remove = msm_smmu_remove,
	.driver = {
		.name = "msmdrm_smmu",
		.of_match_table = msm_smmu_dt_match,
	},
};

int __init msm_smmu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_smmu_driver);
	if (ret)
		pr_err("mdss_smmu_register_driver() failed!\n");

	return ret;
}

void __exit msm_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&msm_smmu_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");
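/*
 * Usage sketch (illustrative only, not part of this driver): a consumer such
 * as the msm gem/kms code is expected to create one msm_mmu per domain and
 * drive it through the msm_mmu_funcs table. Error handling and the real call
 * sites are omitted; the iova and flags arguments are ignored by this backend.
 *
 *	struct msm_mmu *mmu;
 *
 *	mmu = msm_smmu_new(dev, MSM_SMMU_DOMAIN_UNSECURE);
 *	if (!IS_ERR(mmu)) {
 *		mmu->funcs->attach(mmu, NULL, 0);
 *		mmu->funcs->map(mmu, 0, sgt, 0, NULL);
 *		...
 *		mmu->funcs->unmap(mmu, 0, sgt, NULL);
 *		mmu->funcs->detach(mmu);
 *		mmu->funcs->destroy(mmu);
 *	}
 */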