summary refs log tree commit diff
diff options
context:
space:
mode:
authorLiam Mark <lmark@codeaurora.org>2015-06-05 17:10:00 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-22 11:12:47 -0700
commitd85c7f8f7cad50135463a6fd267d2d9a07ced00a (patch)
treeed46fc77ef03d607bafb39ab1c4732b9ab98eba9
parent85e70599b1508526f2ac314dbeeb76e7981a12f5 (diff)
iommu: add ftrace profiling for map and unmap
Add ftrace start and end logging for map, iommu_map_sg and unmap in order
to facilitate performance testing.

Change-Id: I9ddf241ffa6cf519f6abece7b0820640f5ce1975
Signed-off-by: Liam Mark <lmark@codeaurora.org>
-rw-r--r--drivers/iommu/iommu.c15
-rw-r--r--include/linux/iommu.h7
-rw-r--r--include/trace/events/iommu.h71
3 files changed, 89 insertions, 4 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 104baf152877..7769871b4365 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1035,9 +1035,12 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
size_t orig_size = size;
int ret = 0;
+ trace_map_start(iova, paddr, size);
if (unlikely(domain->ops->map == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->ops->pgsize_bitmap == 0UL)) {
+ trace_map_end(iova, paddr, size);
return -ENODEV;
+ }
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@@ -1050,6 +1053,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, min_pagesz);
+ trace_map_end(iova, paddr, size);
return -EINVAL;
}
@@ -1077,6 +1081,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
else
trace_map(iova, paddr, size);
+ trace_map_end(iova, paddr, size);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
@@ -1086,10 +1091,12 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
+ trace_unmap_start(iova, 0, size);
if (unlikely(domain->ops->unmap == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->ops->pgsize_bitmap == 0UL)) {
+ trace_unmap_end(iova, 0, size);
return -ENODEV;
-
+ }
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@@ -1101,6 +1108,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
if (!IS_ALIGNED(iova | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
iova, size, min_pagesz);
+ trace_unmap_end(iova, 0, size);
return -EINVAL;
}
@@ -1125,6 +1133,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
trace_unmap(iova, 0, size);
+ trace_unmap_end(iova, 0, size);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2d835b7c226d..614363f84b38 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -262,7 +262,12 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
+ size_t ret;
+
+ trace_map_sg_start(iova, nents);
+ ret = domain->ops->map_sg(domain, iova, sg, nents, prot);
+ trace_map_sg_end(iova, nents);
+ return ret;
}
extern int iommu_dma_supported(struct iommu_domain *domain, struct device *dev,
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index a8f5c32d174b..4f44a9c4a697 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -113,6 +113,20 @@ DEFINE_EVENT(iommu_map_unmap, map,
TP_ARGS(iova, paddr, size)
);
+DEFINE_EVENT(iommu_map_unmap, map_start,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size)
+);
+
+DEFINE_EVENT(iommu_map_unmap, map_end,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size)
+);
+
DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -124,6 +138,63 @@ DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
)
);
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap_start,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size),
+
+ TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+ __entry->iova, __entry->size
+ )
+);
+
+DEFINE_EVENT_PRINT(iommu_map_unmap, unmap_end,
+
+ TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+
+ TP_ARGS(iova, paddr, size),
+
+ TP_printk("IOMMU: iova=0x%016llx size=0x%x",
+ __entry->iova, __entry->size
+ )
+);
+
+DECLARE_EVENT_CLASS(iommu_map_sg,
+
+ TP_PROTO(unsigned long iova, unsigned int nents),
+
+ TP_ARGS(iova, nents),
+
+ TP_STRUCT__entry(
+ __field(u64, iova)
+ __field(int, nents)
+ ),
+
+ TP_fast_assign(
+ __entry->iova = iova;
+ __entry->nents = nents;
+ ),
+
+ TP_printk("IOMMU: iova=0x%016llx nents=%u",
+ __entry->iova, __entry->nents
+ )
+);
+
+DEFINE_EVENT(iommu_map_sg, map_sg_start,
+
+ TP_PROTO(unsigned long iova, unsigned int nents),
+
+ TP_ARGS(iova, nents)
+);
+
+DEFINE_EVENT(iommu_map_sg, map_sg_end,
+
+ TP_PROTO(unsigned long iova, unsigned int nents),
+
+ TP_ARGS(iova, nents)
+);
+
DECLARE_EVENT_CLASS(iommu_error,
TP_PROTO(struct device *dev, unsigned long iova, int flags),