summaryrefslogtreecommitdiff
path: root/drivers/hwtracing
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/hwtracing')
-rw-r--r--drivers/hwtracing/coresight/Kconfig114
-rw-r--r--drivers/hwtracing/coresight/Makefile10
-rw-r--r--drivers/hwtracing/coresight/coresight-csr.c263
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c1566
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c135
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c285
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h10
-rw-r--r--drivers/hwtracing/coresight/coresight-event.c169
-rw-r--r--drivers/hwtracing/coresight/coresight-hwevent.c322
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h38
-rw-r--r--drivers/hwtracing/coresight/coresight-qmi.h116
-rw-r--r--drivers/hwtracing/coresight/coresight-qpdi.c406
-rw-r--r--drivers/hwtracing/coresight/coresight-remote-etm.c395
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c912
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c1326
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c766
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c3787
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c15
-rw-r--r--drivers/hwtracing/coresight/coresight.c162
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c60
-rw-r--r--drivers/hwtracing/stm/Kconfig15
-rw-r--r--drivers/hwtracing/stm/Makefile2
-rw-r--r--drivers/hwtracing/stm/core.c203
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c67
-rw-r--r--drivers/hwtracing/stm/heartbeat.c126
-rw-r--r--drivers/hwtracing/stm/policy.c27
-rw-r--r--drivers/hwtracing/stm/stm.h2
27 files changed, 11099 insertions, 200 deletions
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 6c8921140f02..6e72cda433db 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -12,6 +12,23 @@ menuconfig CORESIGHT
trace source gets enabled.
if CORESIGHT
+
+config CORESIGHT_EVENT
+ tristate "CoreSight Event driver"
+ help
+ This driver provides support for registering with various events
+ and performing CoreSight actions like aborting trace on their
+ occurrence. These events can be controlled by using module
+ parameters.
+
+config CORESIGHT_CSR
+ bool "CoreSight Slave Register driver"
+ help
+ This driver provides support for CoreSight Slave Register block
+ that hosts miscellaneous configuration registers.
+ These configuration registers can be used to control various
+ CoreSight configurations.
+
config CORESIGHT_LINKS_AND_SINKS
bool "CoreSight Link and Sink drivers"
help
@@ -23,6 +40,7 @@ config CORESIGHT_LINKS_AND_SINKS
config CORESIGHT_LINK_AND_SINK_TMC
bool "Coresight generic TMC driver"
depends on CORESIGHT_LINKS_AND_SINKS
+ select CORESIGHT_CSR
help
This enables support for the Trace Memory Controller driver.
Depending on its configuration the device can act as a link (embedded
@@ -61,7 +79,6 @@ config CORESIGHT_SOURCE_ETM3X
config CORESIGHT_SOURCE_ETM4X
bool "CoreSight Embedded Trace Macrocell 4.x driver"
- depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
help
This driver provides support for the ETM4.x tracer module, tracing the
@@ -69,6 +86,22 @@ config CORESIGHT_SOURCE_ETM4X
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
+config CORESIGHT_REMOTE_ETM
+ bool "Remote processor ETM trace support"
+ depends on MSM_QMI_INTERFACE
+ help
+ Enables support for ETM trace collection on remote processor using
+ CoreSight framework. Enabling this will allow turning on ETM
+ tracing on remote processor via sysfs by configuring the required
+ CoreSight components.
+
+config CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE
+ int "default enable bits for Remote processor ETM"
+ depends on CORESIGHT_REMOTE_ETM
+ help
+ Support for enabling separated Remote processor ETM tracing. Depends
+ on whether the instance id bit is set.
+
config CORESIGHT_QCOM_REPLICATOR
bool "Qualcomm CoreSight Replicator driver"
depends on CORESIGHT_LINKS_AND_SINKS
@@ -77,4 +110,83 @@ config CORESIGHT_QCOM_REPLICATOR
programmable ATB replicator sends the ATB trace stream from the
ETB/ETF to the TPIUi and ETR.
+config CORESIGHT_STM
+ bool "CoreSight System Trace Macrocell driver"
+ select CORESIGHT_LINKS_AND_SINKS
+ help
+ This driver provides support for hardware assisted software
+ instrumentation based tracing. This is primarily useful for
+ logging useful software events or data.
+
+config CORESIGHT_HWEVENT
+ bool "CoreSight Hardware Event driver"
+ depends on CORESIGHT_STM
+ select CORESIGHT_CSR
+ help
+ This driver provides support for monitoring and tracing CoreSight
+ Hardware Event across the STM interface. It configures CoreSight
+ Hardware Event mux control registers to select hardware events
+ based on user input.
+
+config CORESIGHT_CTI
+ bool "CoreSight Cross Trigger Interface driver"
+ help
+ This driver provides support for Cross Trigger Interface that is
+ used to input or output i.e. pass cross trigger events from one
+ hardware component to another. It can also be used to pass
+ software generated events.
+
+config CORESIGHT_CTI_SAVE_DISABLE
+ bool "Turn off CTI save and restore"
+ depends on CORESIGHT_CTI
+ help
+ Turns off CoreSight CTI save and restore support for cpu CTIs. This
+ avoids voting for the clocks during probe as well as the associated
+ save and restore latency at the cost of breaking cpu CTI support on
+ targets where cpu CTIs have to be preserved across power collapse.
+
+ If unsure, say 'N' here to avoid breaking cpu CTI support.
+
+config CORESIGHT_TPDA
+ bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
+ help
+ This driver provides support for configuring aggregator. This is
+ primarily useful for pulling the data sets from one or more
+ attached monitors and pushing the resultant data out. Multiple
+ monitors are connected on different input ports of TPDA.
+
+config CORESIGHT_TPDM
+ bool "CoreSight Trace, Profiling & Diagnostics Monitor driver"
+ help
+ This driver provides support for configuring monitor. Monitors are
+ primarily responsible for data set collection and support the
+ ability to collect any permutation of data set types. Monitors are
+ also responsible for interaction with system cross triggering.
+
+config CORESIGHT_TPDM_DEFAULT_ENABLE
+ bool "Turn on TPDM tracing by default"
+ depends on CORESIGHT_TPDM
+ help
+ Turns on CoreSight TPDM tracing for different data set types by
+ default. Otherwise, tracing is disabled by default but can be
+ enabled via sysfs.
+
+ If unsure, say 'N' here to avoid potential power and performance
+ penalty.
+
+config CORESIGHT_QPDI
+ bool "CoreSight PMIC debug interface support"
+ help
+ This driver provides support for controlling the PMIC debug interface
+ feature. When enabled via sysfs it allows diagnostic access to the
+ PMIC. Similarly this debug feature can be disabled via sysfs which
+ prevents debug dongle detection.
+
+config CORESIGHT_SOURCE_DUMMY
+ bool "Dummy source support"
+ help
+ Enables support for dummy source devices. Dummy source driver can be
+ used for CoreSight sources that are owned and configured by some other
+ subsystem and use Linux drivers to configure the rest of the trace path.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 99f8e5f6256e..09433897b6a2 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -3,6 +3,9 @@
#
obj-$(CONFIG_CORESIGHT) += coresight.o
obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
+obj-$(CONFIG_CORESIGHT_EVENT) += coresight-event.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
@@ -11,3 +14,10 @@ obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
+obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
+obj-$(CONFIG_CORESIGHT_REMOTE_ETM) += coresight-remote-etm.o
+obj-$(CONFIG_CORESIGHT_QPDI) += coresight-qpdi.o
+obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
+obj-$(CONFIG_CORESIGHT_SOURCE_DUMMY) += coresight-dummy.o
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
new file mode 100644
index 000000000000..3c18d686091a
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2012-2013, 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define csr_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define csr_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define CSR_LOCK(drvdata) \
+do { \
+ mb(); /* ensure configuration takes effect before we lock it */ \
+ csr_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define CSR_UNLOCK(drvdata) \
+do { \
+ csr_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock takes effect before we configure */ \
+} while (0)
+
+#define CSR_SWDBGPWRCTRL (0x000)
+#define CSR_SWDBGPWRACK (0x004)
+#define CSR_SWSPADREG0 (0x008)
+#define CSR_SWSPADREG1 (0x00C)
+#define CSR_STMTRANSCTRL (0x010)
+#define CSR_STMAWIDCTRL (0x014)
+#define CSR_STMCHNOFST0 (0x018)
+#define CSR_STMCHNOFST1 (0x01C)
+#define CSR_STMEXTHWCTRL0 (0x020)
+#define CSR_STMEXTHWCTRL1 (0x024)
+#define CSR_STMEXTHWCTRL2 (0x028)
+#define CSR_STMEXTHWCTRL3 (0x02C)
+#define CSR_USBBAMCTRL (0x030)
+#define CSR_USBFLSHCTRL (0x034)
+#define CSR_TIMESTAMPCTRL (0x038)
+#define CSR_AOTIMEVAL0 (0x03C)
+#define CSR_AOTIMEVAL1 (0x040)
+#define CSR_QDSSTIMEVAL0 (0x044)
+#define CSR_QDSSTIMEVAL1 (0x048)
+#define CSR_QDSSTIMELOAD0 (0x04C)
+#define CSR_QDSSTIMELOAD1 (0x050)
+#define CSR_DAPMSAVAL (0x054)
+#define CSR_QDSSCLKVOTE (0x058)
+#define CSR_QDSSCLKIPI (0x05C)
+#define CSR_QDSSPWRREQIGNORE (0x060)
+#define CSR_QDSSSPARE (0x064)
+#define CSR_IPCAT (0x068)
+#define CSR_BYTECNTVAL (0x06C)
+
+#define BLKSIZE_256 0
+#define BLKSIZE_512 1
+#define BLKSIZE_1024 2
+#define BLKSIZE_2048 3
+
+struct csr_drvdata {
+ void __iomem *base;
+ phys_addr_t pbase;
+ struct device *dev;
+ struct coresight_device *csdev;
+ uint32_t blksize;
+};
+
+static struct csr_drvdata *csrdrvdata;
+
+void msm_qdss_csr_enable_bam_to_usb(void)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+ uint32_t usbbamctrl, usbflshctrl;
+
+ CSR_UNLOCK(drvdata);
+
+ usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
+ usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
+ csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+ usbflshctrl |= 0x2;
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ usbbamctrl |= 0x4;
+ csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+ CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
+
+void msm_qdss_csr_disable_bam_to_usb(void)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+ uint32_t usbbamctrl;
+
+ CSR_UNLOCK(drvdata);
+
+ usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
+ usbbamctrl &= (~0x4);
+ csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+ CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_disable_bam_to_usb);
+
+void msm_qdss_csr_disable_flush(void)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+ uint32_t usbflshctrl;
+
+ CSR_UNLOCK(drvdata);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl &= ~0x2;
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
+
+int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+ int ret = 0;
+
+ CSR_UNLOCK(drvdata);
+
+ if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL0))
+ csr_writel(drvdata, val, CSR_STMEXTHWCTRL0);
+ else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL1))
+ csr_writel(drvdata, val, CSR_STMEXTHWCTRL1);
+ else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL2))
+ csr_writel(drvdata, val, CSR_STMEXTHWCTRL2);
+ else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL3))
+ csr_writel(drvdata, val, CSR_STMEXTHWCTRL3);
+ else
+ ret = -EINVAL;
+
+ CSR_LOCK(drvdata);
+
+ return ret;
+}
+EXPORT_SYMBOL(coresight_csr_hwctrl_set);
+
+void coresight_csr_set_byte_cntr(uint32_t count)
+{
+ struct csr_drvdata *drvdata = csrdrvdata;
+
+ CSR_UNLOCK(drvdata);
+
+ csr_writel(drvdata, count, CSR_BYTECNTVAL);
+
+ /* make sure byte count value is written */
+ mb();
+
+ CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(coresight_csr_set_byte_cntr);
+
+static int csr_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata;
+ struct csr_drvdata *drvdata;
+ struct resource *res;
+ struct coresight_desc *desc;
+
+ pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr-base");
+ if (!res)
+ return -ENODEV;
+ drvdata->pbase = res->start;
+
+ drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drvdata->base)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,blk-size",
+ &drvdata->blksize);
+ if (ret)
+ drvdata->blksize = BLKSIZE_256;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->type = CORESIGHT_DEV_TYPE_NONE;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ /* Store the driver data pointer for use in exported functions */
+ csrdrvdata = drvdata;
+ dev_info(dev, "CSR initialized\n");
+ return 0;
+}
+
+static int csr_remove(struct platform_device *pdev)
+{
+ struct csr_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct of_device_id csr_match[] = {
+ {.compatible = "qcom,coresight-csr"},
+ {}
+};
+
+static struct platform_driver csr_driver = {
+ .probe = csr_probe,
+ .remove = csr_remove,
+ .driver = {
+ .name = "coresight-csr",
+ .owner = THIS_MODULE,
+ .of_match_table = csr_match,
+ },
+};
+
+static int __init csr_init(void)
+{
+ return platform_driver_register(&csr_driver);
+}
+module_init(csr_init);
+
+static void __exit csr_exit(void)
+{
+ platform_driver_unregister(&csr_driver);
+}
+module_exit(csr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CSR driver");
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
new file mode 100644
index 000000000000..80952b0f7ad1
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -0,0 +1,1566 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/cpu_pm.h>
+#include <linux/topology.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+
+#include "coresight-priv.h"
+
+#define cti_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define cti_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define CTI_LOCK(drvdata) \
+do { \
+ mb(); /* ensure configuration takes effect before we lock it */ \
+ cti_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define CTI_UNLOCK(drvdata) \
+do { \
+ cti_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock takes effect before we configure */ \
+} while (0)
+
+#define CTICONTROL (0x000)
+#define CTIINTACK (0x010)
+#define CTIAPPSET (0x014)
+#define CTIAPPCLEAR (0x018)
+#define CTIAPPPULSE (0x01C)
+#define CTIINEN(n) (0x020 + (n * 4))
+#define CTIOUTEN(n) (0x0A0 + (n * 4))
+#define CTITRIGINSTATUS (0x130)
+#define CTITRIGOUTSTATUS (0x134)
+#define CTICHINSTATUS (0x138)
+#define CTICHOUTSTATUS (0x13C)
+#define CTIGATE (0x140)
+#define ASICCTL (0x144)
+#define ITCHINACK (0xEDC)
+#define ITTRIGINACK (0xEE0)
+#define ITCHOUT (0xEE4)
+#define ITTRIGOUT (0xEE8)
+#define ITCHOUTACK (0xEEC)
+#define ITTRIGOUTACK (0xEF0)
+#define ITCHIN (0xEF4)
+#define ITTRIGIN (0xEF8)
+
+#define CTI_MAX_TRIGGERS (8)
+#define CTI_MAX_CHANNELS (4)
+#define AFFINITY_LEVEL_L2 1
+
+#define to_cti_drvdata(c) container_of(c, struct cti_drvdata, cti)
+
+struct cti_state {
+ unsigned int cticontrol;
+ unsigned int ctiappset;
+ unsigned int ctigate;
+ unsigned int ctiinen[CTI_MAX_TRIGGERS];
+ unsigned int ctiouten[CTI_MAX_TRIGGERS];
+};
+
+struct cti_pctrl {
+ struct pinctrl *pctrl;
+ int trig;
+};
+
+struct cti_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ spinlock_t spinlock;
+ struct mutex mutex;
+ struct coresight_cti cti;
+ int refcnt;
+ int cpu;
+ bool cti_save;
+ bool cti_hwclk;
+ bool l2_off;
+ struct cti_state *state;
+ struct cti_pctrl *gpio_trigin;
+ struct cti_pctrl *gpio_trigout;
+};
+
+static struct notifier_block cti_cpu_pm_notifier;
+static int registered;
+
+static LIST_HEAD(cti_list);
+static DEFINE_MUTEX(cti_lock);
+#ifdef CONFIG_CORESIGHT_CTI_SAVE_DISABLE
+static int cti_save_disable = 1;
+#else
+static int cti_save_disable;
+#endif
+
+static int cti_verify_trigger_bound(int trig)
+{
+ if (trig < 0 || trig >= CTI_MAX_TRIGGERS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cti_verify_channel_bound(int ch)
+{
+ if (ch < 0 || ch >= CTI_MAX_CHANNELS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cti_cpu_verify_access(struct cti_drvdata *drvdata)
+{
+ if (drvdata->cti_save && drvdata->l2_off)
+ return -EPERM;
+
+ return 0;
+}
+
+void coresight_cti_ctx_save(void)
+{
+ struct cti_drvdata *drvdata;
+ struct coresight_cti *cti;
+ int trig, cpuid, cpu;
+ unsigned long flag;
+
+ /*
+ * Explicitly check and return to avoid latency associated with
+ * traversing the linked list of all CTIs and checking for their
+ * respective cti_save flag.
+ */
+ if (cti_save_disable)
+ return;
+
+ cpu = raw_smp_processor_id();
+
+ list_for_each_entry(cti, &cti_list, link) {
+ drvdata = to_cti_drvdata(cti);
+ if (!drvdata->cti_save)
+ continue;
+
+ for_each_cpu(cpuid, topology_core_cpumask(cpu)) {
+ if (drvdata->cpu == cpuid)
+ goto out;
+ }
+ continue;
+out:
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ drvdata->l2_off = true;
+ drvdata->state->cticontrol = cti_readl(drvdata, CTICONTROL);
+ drvdata->state->ctiappset = cti_readl(drvdata, CTIAPPSET);
+ drvdata->state->ctigate = cti_readl(drvdata, CTIGATE);
+ for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+ drvdata->state->ctiinen[trig] =
+ cti_readl(drvdata, CTIINEN(trig));
+ drvdata->state->ctiouten[trig] =
+ cti_readl(drvdata, CTIOUTEN(trig));
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ }
+}
+EXPORT_SYMBOL(coresight_cti_ctx_save);
+
+void coresight_cti_ctx_restore(void)
+{
+ struct cti_drvdata *drvdata;
+ struct coresight_cti *cti;
+ int trig, cpuid, cpu;
+ unsigned long flag;
+
+ /*
+ * Explicitly check and return to avoid latency associated with
+ * traversing the linked list of all CTIs and checking for their
+ * respective cti_save flag.
+ */
+ if (cti_save_disable)
+ return;
+
+ cpu = raw_smp_processor_id();
+
+ list_for_each_entry(cti, &cti_list, link) {
+ drvdata = to_cti_drvdata(cti);
+ if (!drvdata->cti_save)
+ continue;
+
+ for_each_cpu(cpuid, topology_core_cpumask(cpu)) {
+ if (drvdata->cpu == cpuid)
+ goto out;
+ }
+ continue;
+out:
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ CTI_UNLOCK(drvdata);
+ cti_writel(drvdata, drvdata->state->ctiappset, CTIAPPSET);
+ cti_writel(drvdata, drvdata->state->ctigate, CTIGATE);
+ for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+ cti_writel(drvdata, drvdata->state->ctiinen[trig],
+ CTIINEN(trig));
+ cti_writel(drvdata, drvdata->state->ctiouten[trig],
+ CTIOUTEN(trig));
+ }
+ cti_writel(drvdata, drvdata->state->cticontrol, CTICONTROL);
+ CTI_LOCK(drvdata);
+ drvdata->l2_off = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ }
+}
+EXPORT_SYMBOL(coresight_cti_ctx_restore);
+
+static void cti_enable(struct cti_drvdata *drvdata)
+{
+ CTI_UNLOCK(drvdata);
+
+ cti_writel(drvdata, 0x1, CTICONTROL);
+
+ CTI_LOCK(drvdata);
+}
+
+int cti_trigin_gpio_enable(struct cti_drvdata *drvdata)
+{
+ int ret;
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pctrl_state;
+
+ if (drvdata->gpio_trigin->pctrl)
+ return 0;
+
+ pctrl = devm_pinctrl_get(drvdata->dev);
+ if (IS_ERR(pctrl)) {
+ dev_err(drvdata->dev, "pinctrl get failed\n");
+ return PTR_ERR(pctrl);
+ }
+
+ pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigin-pctrl");
+ if (IS_ERR(pctrl_state)) {
+ dev_err(drvdata->dev,
+ "pinctrl get state failed\n");
+ ret = PTR_ERR(pctrl_state);
+ goto err;
+ }
+
+ ret = pinctrl_select_state(pctrl, pctrl_state);
+ if (ret) {
+ dev_err(drvdata->dev,
+ "pinctrl enable state failed\n");
+ goto err;
+ }
+
+ drvdata->gpio_trigin->pctrl = pctrl;
+ return 0;
+err:
+ devm_pinctrl_put(pctrl);
+ return ret;
+}
+
+int cti_trigout_gpio_enable(struct cti_drvdata *drvdata)
+{
+ int ret;
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pctrl_state;
+
+ if (drvdata->gpio_trigout->pctrl)
+ return 0;
+
+ pctrl = devm_pinctrl_get(drvdata->dev);
+ if (IS_ERR(pctrl)) {
+ dev_err(drvdata->dev, "pinctrl get failed\n");
+ return PTR_ERR(pctrl);
+ }
+
+ pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigout-pctrl");
+ if (IS_ERR(pctrl_state)) {
+ dev_err(drvdata->dev,
+ "pinctrl get state failed\n");
+ ret = PTR_ERR(pctrl_state);
+ goto err;
+ }
+
+ ret = pinctrl_select_state(pctrl, pctrl_state);
+ if (ret) {
+ dev_err(drvdata->dev,
+ "pinctrl enable state failed\n");
+ goto err;
+ }
+
+ drvdata->gpio_trigout->pctrl = pctrl;
+ return 0;
+err:
+ devm_pinctrl_put(pctrl);
+ return ret;
+}
+
+void cti_trigin_gpio_disable(struct cti_drvdata *drvdata)
+{
+ if (!drvdata->gpio_trigin->pctrl)
+ return;
+
+ devm_pinctrl_put(drvdata->gpio_trigin->pctrl);
+ drvdata->gpio_trigin->pctrl = NULL;
+}
+
+void cti_trigout_gpio_disable(struct cti_drvdata *drvdata)
+{
+ if (!drvdata->gpio_trigout->pctrl)
+ return;
+
+ devm_pinctrl_put(drvdata->gpio_trigout->pctrl);
+ drvdata->gpio_trigout->pctrl = NULL;
+}
+
+static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ if (drvdata->refcnt == 0)
+ cti_enable(drvdata);
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIINEN(trig));
+ if (ctien & (0x1 << ch))
+ goto out;
+ cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
+
+ CTI_LOCK(drvdata);
+
+ drvdata->refcnt++;
+ return;
+out:
+ CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ int ret;
+ unsigned long flag;
+
+ if (IS_ERR_OR_NULL(cti))
+ return -EINVAL;
+ ret = cti_verify_trigger_bound(trig);
+ if (ret)
+ return ret;
+ ret = cti_verify_channel_bound(ch);
+ if (ret)
+ return ret;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+
+ if (drvdata->gpio_trigin->trig == trig) {
+ ret = cti_trigin_gpio_enable(drvdata);
+ if (ret)
+ goto err0;
+ }
+
+ /*
+ * refcnt can be used here since in all cases its value is modified only
+ * within the mutex lock region in addition to within the spinlock.
+ */
+ if (drvdata->refcnt == 0) {
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err1;
+ }
+
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ ret = cti_cpu_verify_access(drvdata);
+ if (ret)
+ goto err2;
+
+ __cti_map_trigin(drvdata, trig, ch);
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+ mutex_unlock(&drvdata->mutex);
+ return 0;
+err2:
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ /*
+ * We come here before refcnt is potentially modified in
+ * __cti_map_trigin so it is safe to check it against 0 without
+ * adjusting its value.
+ */
+ if (drvdata->refcnt == 0)
+ clk_disable_unprepare(drvdata->clk);
+err1:
+ cti_trigin_gpio_disable(drvdata);
+err0:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigin);
+
+static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ if (drvdata->refcnt == 0)
+ cti_enable(drvdata);
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ if (ctien & (0x1 << ch))
+ goto out;
+ cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
+
+ CTI_LOCK(drvdata);
+
+ drvdata->refcnt++;
+ return;
+out:
+ CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ int ret;
+ unsigned long flag;
+
+ if (IS_ERR_OR_NULL(cti))
+ return -EINVAL;
+ ret = cti_verify_trigger_bound(trig);
+ if (ret)
+ return ret;
+ ret = cti_verify_channel_bound(ch);
+ if (ret)
+ return ret;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+
+ if (drvdata->gpio_trigout->trig == trig) {
+ ret = cti_trigout_gpio_enable(drvdata);
+ if (ret)
+ goto err0;
+ }
+
+ /*
+ * refcnt can be used here since in all cases its value is modified only
+ * within the mutex lock region in addition to within the spinlock.
+ */
+ if (drvdata->refcnt == 0) {
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err1;
+ }
+
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ ret = cti_cpu_verify_access(drvdata);
+ if (ret)
+ goto err2;
+
+ __cti_map_trigout(drvdata, trig, ch);
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+ mutex_unlock(&drvdata->mutex);
+ return 0;
+err2:
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ /*
+ * We come here before refcnt is potentially incremented in
+ * __cti_map_trigout so it is safe to check it against 0.
+ */
+ if (drvdata->refcnt == 0)
+ clk_disable_unprepare(drvdata->clk);
+err1:
+ cti_trigout_gpio_disable(drvdata);
+err0:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigout);
+
+static void cti_disable(struct cti_drvdata *drvdata)
+{
+ CTI_UNLOCK(drvdata);
+
+ /* Clear any pending triggers and ensure gate is enabled */
+ cti_writel(drvdata, BM(0, (CTI_MAX_CHANNELS - 1)), CTIAPPCLEAR);
+ cti_writel(drvdata, BM(0, (CTI_MAX_CHANNELS - 1)), CTIGATE);
+
+ cti_writel(drvdata, 0x0, CTICONTROL);
+
+ CTI_LOCK(drvdata);
+}
+
+static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIINEN(trig));
+ if (!(ctien & (0x1 << ch)))
+ goto out;
+ cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
+
+ CTI_LOCK(drvdata);
+
+ drvdata->refcnt--;
+
+ if (drvdata->refcnt == 0)
+ cti_disable(drvdata);
+ return;
+out:
+ CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ unsigned long flag;
+
+ if (IS_ERR_OR_NULL(cti))
+ return;
+ if (cti_verify_trigger_bound(trig))
+ return;
+ if (cti_verify_channel_bound(ch))
+ return;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ if (cti_cpu_verify_access(drvdata))
+ goto err;
+ /*
+ * This is required to avoid clk_disable_unprepare call from being made
+ * when unmap is called without the corresponding map function call.
+ */
+ if (!drvdata->refcnt)
+ goto err;
+
+ __cti_unmap_trigin(drvdata, trig, ch);
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+ /*
+ * refcnt can be used here since in all cases its value is modified only
+ * within the mutex lock region in addition to within the spinlock.
+ */
+ if (drvdata->refcnt == 0)
+ clk_disable_unprepare(drvdata->clk);
+
+ if (drvdata->gpio_trigin->trig == trig)
+ cti_trigin_gpio_disable(drvdata);
+
+ mutex_unlock(&drvdata->mutex);
+ return;
+err:
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigin);
+
+static void __cti_unmap_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ if (!(ctien & (0x1 << ch)))
+ goto out;
+ cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
+
+ CTI_LOCK(drvdata);
+
+ drvdata->refcnt--;
+
+ if (drvdata->refcnt == 0)
+ cti_disable(drvdata);
+ return;
+out:
+ CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ unsigned long flag;
+
+ if (IS_ERR_OR_NULL(cti))
+ return;
+ if (cti_verify_trigger_bound(trig))
+ return;
+ if (cti_verify_channel_bound(ch))
+ return;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+
+ spin_lock_irqsave(&drvdata->spinlock, flag);
+ if (cti_cpu_verify_access(drvdata))
+ goto err;
+ /*
+ * This is required to avoid clk_disable_unprepare call from being made
+ * when unmap is called without the corresponding map function call.
+ */
+ if (!drvdata->refcnt)
+ goto err;
+
+ __cti_unmap_trigout(drvdata, trig, ch);
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+ /*
+ * refcnt can be used here since in all cases its value is modified only
+ * within the mutex lock region in addition to within the spinlock.
+ */
+ if (drvdata->refcnt == 0)
+ clk_disable_unprepare(drvdata->clk);
+
+ if (drvdata->gpio_trigout->trig == trig)
+ cti_trigout_gpio_disable(drvdata);
+
+ mutex_unlock(&drvdata->mutex);
+ return;
+err:
+ spin_unlock_irqrestore(&drvdata->spinlock, flag);
+ mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigout);
+
+/*
+ * __cti_reset - clear every trigger in/out enable register and disable the
+ * CTI block.  No-op when nothing is mapped (refcnt == 0).  Caller holds
+ * drvdata->spinlock and has verified CPU access.
+ */
+static void __cti_reset(struct cti_drvdata *drvdata)
+{
+	int trig;
+
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		cti_writel(drvdata, 0, CTIINEN(trig));
+		cti_writel(drvdata, 0, CTIOUTEN(trig));
+	}
+
+	CTI_LOCK(drvdata);
+
+	cti_disable(drvdata);
+	drvdata->refcnt = 0;
+}
+
+/*
+ * coresight_cti_reset - remove all trigger mappings on @cti and release any
+ * trigin/trigout GPIOs that were claimed by earlier map calls.
+ */
+void coresight_cti_reset(struct coresight_cti *cti)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+	int trig;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_reset(drvdata);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		if (drvdata->gpio_trigin->trig == trig)
+			cti_trigin_gpio_disable(drvdata);
+		if (drvdata->gpio_trigout->trig == trig)
+			cti_trigout_gpio_disable(drvdata);
+	}
+
+	mutex_unlock(&drvdata->mutex);
+	return;
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_reset);
+
+/*
+ * __cti_set_trig - assert channel @ch via the application set register
+ * (CTIAPPSET).  Fails with -EINVAL when the CTI has no active mappings.
+ * Caller holds drvdata->spinlock and has verified CPU access.
+ */
+static int __cti_set_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPSET);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+/* coresight_cti_set_trig - validated, locked wrapper around __cti_set_trig. */
+int coresight_cti_set_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_set_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_set_trig);
+
+/*
+ * __cti_clear_trig - de-assert channel @ch via CTIAPPCLEAR.  No-op when the
+ * CTI has no active mappings.  Caller holds drvdata->spinlock.
+ */
+static void __cti_clear_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPCLEAR);
+
+	CTI_LOCK(drvdata);
+}
+
+/* coresight_cti_clear_trig - validated, locked wrapper around __cti_clear_trig. */
+void coresight_cti_clear_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_clear_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+}
+EXPORT_SYMBOL(coresight_cti_clear_trig);
+
+/*
+ * __cti_pulse_trig - generate a single-cycle pulse on channel @ch via
+ * CTIAPPPULSE.  -EINVAL when no mappings are active.  Caller holds
+ * drvdata->spinlock.
+ */
+static int __cti_pulse_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPPULSE);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+/* coresight_cti_pulse_trig - validated, locked wrapper around __cti_pulse_trig. */
+int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_pulse_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_pulse_trig);
+
+/*
+ * __cti_ack_trig - acknowledge trigger output @trig via CTIINTACK.
+ * -EINVAL when no mappings are active.  Caller holds drvdata->spinlock.
+ */
+static int __cti_ack_trig(struct cti_drvdata *drvdata, int trig)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (0x1 << trig), CTIINTACK);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+/* coresight_cti_ack_trig - validated, locked wrapper around __cti_ack_trig. */
+int coresight_cti_ack_trig(struct coresight_cti *cti, int trig)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_trigger_bound(trig);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_ack_trig(drvdata, trig);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_ack_trig);
+
+/*
+ * __cti_enable_gate - open the cross-trigger gate for channel @ch by
+ * clearing its bit in CTIGATE (a cleared bit lets the channel propagate).
+ * -EINVAL when no mappings are active.  Caller holds drvdata->spinlock.
+ */
+static int __cti_enable_gate(struct cti_drvdata *drvdata, int ch)
+{
+	uint32_t ctigate;
+
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	ctigate = cti_readl(drvdata, CTIGATE);
+	cti_writel(drvdata, (ctigate & ~(1 << ch)), CTIGATE);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+/* coresight_cti_enable_gate - validated, locked wrapper around __cti_enable_gate. */
+int coresight_cti_enable_gate(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_enable_gate(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_enable_gate);
+
+/*
+ * __cti_disable_gate - block channel @ch at the cross-trigger gate by
+ * setting its bit in CTIGATE.  No-op when no mappings are active.
+ * Caller holds drvdata->spinlock.
+ */
+static void __cti_disable_gate(struct cti_drvdata *drvdata, int ch)
+{
+	uint32_t ctigate;
+
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	ctigate = cti_readl(drvdata, CTIGATE);
+	cti_writel(drvdata, (ctigate | (1 << ch)), CTIGATE);
+
+	CTI_LOCK(drvdata);
+}
+
+/* coresight_cti_disable_gate - validated, locked wrapper around __cti_disable_gate. */
+void coresight_cti_disable_gate(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_disable_gate(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+}
+EXPORT_SYMBOL(coresight_cti_disable_gate);
+
+/*
+ * coresight_cti_get - look up a CTI by name in the global cti_list.
+ * Returns ERR_PTR(-EINVAL) if no CTI with that name is registered.
+ * Note: no reference counting is performed; the returned pointer is only
+ * valid while the owning device remains registered.
+ */
+struct coresight_cti *coresight_cti_get(const char *name)
+{
+	struct coresight_cti *cti;
+
+	mutex_lock(&cti_lock);
+	list_for_each_entry(cti, &cti_list, link) {
+		if (!strcmp(cti->name, name)) {
+			mutex_unlock(&cti_lock);
+			return cti;
+		}
+	}
+	mutex_unlock(&cti_lock);
+
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(coresight_cti_get);
+
+/*
+ * coresight_cti_put - counterpart of coresight_cti_get.  Intentionally a
+ * no-op since get takes no reference; kept for API symmetry.
+ */
+void coresight_cti_put(struct coresight_cti *cti)
+{
+}
+EXPORT_SYMBOL(coresight_cti_put);
+
+/*
+ * cti_show_trigin - sysfs read: list every enabled (trigger-in, channel)
+ * pair as " trig ch," tokens.  Reads the hardware when the CPU-affine CTI
+ * is accessible, otherwise falls back to the software-saved state.
+ */
+static ssize_t cti_show_trigin(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch, flag;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * refcnt can be used here since in all cases its value is modified only
+	 * within the mutex lock region in addition to within the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		if (!cti_cpu_verify_access(drvdata))
+			ctien = cti_readl(drvdata, CTIINEN(trig));
+		else
+			ctien = drvdata->state->ctiinen[trig];
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trigin, S_IRUGO, cti_show_trigin, NULL);
+
+/*
+ * cti_show_trigout - sysfs read: list every enabled (trigger-out, channel)
+ * pair; mirrors cti_show_trigin but reads CTIOUTEN / saved ctiouten state.
+ */
+static ssize_t cti_show_trigout(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch, flag;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * refcnt can be used here since in all cases its value is modified only
+	 * within the mutex lock region in addition to within the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		if (!cti_cpu_verify_access(drvdata))
+			ctien = cti_readl(drvdata, CTIOUTEN(trig));
+		else
+			ctien = drvdata->state->ctiouten[trig];
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trigout, S_IRUGO, cti_show_trigout, NULL);
+
+/*
+ * cti_store_map_trigin - sysfs write: "<trig> <ch>" (hex) maps a trigger
+ * input to a channel via the exported API.
+ */
+static ssize_t cti_store_map_trigin(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigin(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigin, S_IWUSR, NULL, cti_store_map_trigin);
+
+/* cti_store_map_trigout - sysfs write: "<trig> <ch>" maps a trigger output. */
+static ssize_t cti_store_map_trigout(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigout(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigout, S_IWUSR, NULL, cti_store_map_trigout);
+
+/* cti_store_unmap_trigin - sysfs write: "<trig> <ch>" unmaps a trigger input. */
+static ssize_t cti_store_unmap_trigin(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigin(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigin, S_IWUSR, NULL, cti_store_unmap_trigin);
+
+/* cti_store_unmap_trigout - sysfs write: "<trig> <ch>" unmaps a trigger output. */
+static ssize_t cti_store_unmap_trigout(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigout(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigout, S_IWUSR, NULL, cti_store_unmap_trigout);
+
+/*
+ * cti_store_reset - sysfs write: any non-zero hex value resets the CTI
+ * (clears all mappings); zero is rejected.
+ */
+static ssize_t cti_store_reset(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (!val)
+		return -EINVAL;
+
+	coresight_cti_reset(&drvdata->cti);
+	return size;
+}
+static DEVICE_ATTR(reset, S_IWUSR, NULL, cti_store_reset);
+
+/*
+ * cti_show_trig - sysfs read: list the channels currently asserted in
+ * CTIAPPSET (or the saved ctiappset when the CTI is not accessible).
+ */
+static ssize_t cti_show_trig(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long ch, flag;
+	uint32_t ctiset;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * refcnt can be used here since in all cases its value is modified only
+	 * within the mutex lock region in addition to within the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (!cti_cpu_verify_access(drvdata))
+		ctiset = cti_readl(drvdata, CTIAPPSET);
+	else
+		ctiset = drvdata->state->ctiappset;
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+		if (ctiset & (1 << ch)) {
+			/* Ensure we do not write more than PAGE_SIZE
+			 * bytes of data including \n character and null
+			 * terminator
+			 */
+			size += scnprintf(&buf[size], PAGE_SIZE - size -
+					  1, " %#lx,", ch);
+			if (size >= PAGE_SIZE - 2) {
+				dev_err(dev, "show buffer full\n");
+				goto err;
+			}
+
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trig, S_IRUGO, cti_show_trig, NULL);
+
+/* cti_store_set_trig - sysfs write: hex channel number to assert. */
+static ssize_t cti_store_set_trig(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_set_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(set_trig, S_IWUSR, NULL, cti_store_set_trig);
+
+/* cti_store_clear_trig - sysfs write: hex channel number to de-assert. */
+static ssize_t cti_store_clear_trig(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	coresight_cti_clear_trig(&drvdata->cti, val);
+
+	return size;
+}
+static DEVICE_ATTR(clear_trig, S_IWUSR, NULL, cti_store_clear_trig);
+
+/* cti_store_pulse_trig - sysfs write: hex channel number to pulse. */
+static ssize_t cti_store_pulse_trig(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_pulse_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(pulse_trig, S_IWUSR, NULL, cti_store_pulse_trig);
+
+/* cti_store_ack_trig - sysfs write: hex trigger number to acknowledge. */
+static ssize_t cti_store_ack_trig(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_ack_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(ack_trig, S_IWUSR, NULL, cti_store_ack_trig);
+
+/*
+ * cti_show_gate - sysfs read: list channels whose CTIGATE bit is SET
+ * (i.e. gated/blocked channels, per __cti_disable_gate's convention).
+ */
+static ssize_t cti_show_gate(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long ch, flag;
+	uint32_t ctigate;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * refcnt can be used here since in all cases its value is modified only
+	 * within the mutex lock region in addition to within the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (!cti_cpu_verify_access(drvdata))
+		ctigate = cti_readl(drvdata, CTIGATE);
+	else
+		ctigate = drvdata->state->ctigate;
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+		if (ctigate & (1 << ch)) {
+			/* Ensure we do not write more than PAGE_SIZE
+			 * bytes of data including \n character and null
+			 * terminator
+			 */
+			size += scnprintf(&buf[size], PAGE_SIZE - size -
+					  1, " %#lx,", ch);
+			if (size >= PAGE_SIZE - 2) {
+				dev_err(dev, "show buffer full\n");
+				goto err;
+			}
+
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_gate, S_IRUGO, cti_show_gate, NULL);
+
+/* cti_store_enable_gate - sysfs write: hex channel to un-gate (propagate). */
+static ssize_t cti_store_enable_gate(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_enable_gate(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(enable_gate, S_IWUSR, NULL, cti_store_enable_gate);
+
+/* cti_store_disable_gate - sysfs write: hex channel to gate (block). */
+static ssize_t cti_store_disable_gate(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	coresight_cti_disable_gate(&drvdata->cti, val);
+
+	return size;
+}
+static DEVICE_ATTR(disable_gate, S_IWUSR, NULL, cti_store_disable_gate);
+
+/* sysfs attributes exposed under the registered coresight device. */
+static struct attribute *cti_attrs[] = {
+	&dev_attr_show_trigin.attr,
+	&dev_attr_show_trigout.attr,
+	&dev_attr_map_trigin.attr,
+	&dev_attr_map_trigout.attr,
+	&dev_attr_unmap_trigin.attr,
+	&dev_attr_unmap_trigout.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_show_trig.attr,
+	&dev_attr_set_trig.attr,
+	&dev_attr_clear_trig.attr,
+	&dev_attr_pulse_trig.attr,
+	&dev_attr_ack_trig.attr,
+	&dev_attr_show_gate.attr,
+	&dev_attr_enable_gate.attr,
+	&dev_attr_disable_gate.attr,
+	NULL,
+};
+
+static struct attribute_group cti_attr_grp = {
+	.attrs = cti_attrs,
+};
+
+static const struct attribute_group *cti_attr_grps[] = {
+	&cti_attr_grp,
+	NULL,
+};
+
+/*
+ * cti_cpu_pm_callback - save CTI register context before an L2-affinity
+ * cluster power collapse and restore it on exit (or failed entry).
+ */
+static int cti_cpu_pm_callback(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	unsigned long aff_level = (unsigned long) v;
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		if (aff_level == AFFINITY_LEVEL_L2)
+			coresight_cti_ctx_save();
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		if (aff_level == AFFINITY_LEVEL_L2)
+			coresight_cti_ctx_restore();
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cti_cpu_pm_notifier = {
+	.notifier_call = cti_cpu_pm_callback,
+};
+
+/*
+ * cti_probe - platform probe: map the CTI register space, parse optional
+ * GPIO trigger and context-save DT properties, add the CTI to the global
+ * lookup list and register it with the coresight framework.
+ *
+ * Fix: the error path after list_add_tail() now removes the entry from
+ * cti_list again.  drvdata is devm-allocated, so leaving it on the list on
+ * probe failure left a dangling pointer that coresight_cti_get() would
+ * later dereference.
+ */
+static int cti_probe(struct platform_device *pdev)
+{
+	int ret;
+	int trig;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct cti_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+	struct device_node *cpu_node;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	/* Store the driver data pointer for use in exported functions */
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cti-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	spin_lock_init(&drvdata->spinlock);
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	drvdata->gpio_trigin = devm_kzalloc(dev, sizeof(struct cti_pctrl),
+					    GFP_KERNEL);
+	if (!drvdata->gpio_trigin)
+		return -ENOMEM;
+
+	/* -1 means "no GPIO trigger"; the DT property is optional. */
+	drvdata->gpio_trigin->trig = -1;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,cti-gpio-trigin", &trig);
+	if (!ret)
+		drvdata->gpio_trigin->trig = trig;
+	else if (ret != -EINVAL)
+		return ret;
+
+	drvdata->gpio_trigout = devm_kzalloc(dev, sizeof(struct cti_pctrl),
+					     GFP_KERNEL);
+	if (!drvdata->gpio_trigout)
+		return -ENOMEM;
+
+	drvdata->gpio_trigout->trig = -1;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,cti-gpio-trigout", &trig);
+	if (!ret)
+		drvdata->gpio_trigout->trig = trig;
+	else if (ret != -EINVAL)
+		return ret;
+
+	drvdata->cpu = -1;
+	cpu_node = of_parse_phandle(pdev->dev.of_node, "cpu", 0);
+	if (cpu_node) {
+		drvdata->cpu = pdata ? pdata->cpu : -1;
+		if (drvdata->cpu == -1) {
+			dev_err(drvdata->dev, "CTI cpu node invalid\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!cti_save_disable)
+		drvdata->cti_save = of_property_read_bool(pdev->dev.of_node,
+							  "qcom,cti-save");
+	if (drvdata->cti_save) {
+		drvdata->state = devm_kzalloc(dev, sizeof(struct cti_state),
+					      GFP_KERNEL);
+		if (!drvdata->state)
+			return -ENOMEM;
+
+		drvdata->cti_hwclk = of_property_read_bool(pdev->dev.of_node,
+							   "qcom,cti-hwclk");
+	}
+	/* Context save without a HW-controlled clock needs the clock held. */
+	if (drvdata->cti_save && !drvdata->cti_hwclk) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&cti_lock);
+	drvdata->cti.name = ((struct coresight_platform_data *)
+			     (pdev->dev.platform_data))->name;
+	list_add_tail(&drvdata->cti.link, &cti_list);
+	mutex_unlock(&cti_lock);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = cti_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		ret = PTR_ERR(drvdata->csdev);
+		goto err;
+	}
+
+	if (drvdata->cti_save) {
+		if (!registered)
+			cpu_pm_register_notifier(&cti_cpu_pm_notifier);
+		registered++;
+	}
+
+	dev_dbg(dev, "CTI initialized\n");
+	return 0;
+err:
+	/*
+	 * Remove the entry added above; drvdata is devm-managed and will be
+	 * freed on probe failure, so it must not stay reachable via cti_list.
+	 */
+	mutex_lock(&cti_lock);
+	list_del(&drvdata->cti.link);
+	mutex_unlock(&cti_lock);
+	if (drvdata->cti_save && !drvdata->cti_hwclk)
+		clk_disable_unprepare(drvdata->clk);
+	return ret;
+}
+
+/*
+ * cti_remove - platform remove: unregister the coresight device, drop the
+ * CPU-PM notifier reference and release the held clock.
+ *
+ * Fix: the CTI is now also removed from the global cti_list.  Without this,
+ * coresight_cti_get() could return a pointer into devm-freed memory after
+ * the device was unbound.
+ */
+static int cti_remove(struct platform_device *pdev)
+{
+	struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->cti_save) {
+		registered--;
+		if (!registered)
+			cpu_pm_unregister_notifier(&cti_cpu_pm_notifier);
+	}
+	coresight_unregister(drvdata->csdev);
+	mutex_lock(&cti_lock);
+	list_del(&drvdata->cti.link);
+	mutex_unlock(&cti_lock);
+	if (drvdata->cti_save && !drvdata->cti_hwclk)
+		clk_disable_unprepare(drvdata->clk);
+	return 0;
+}
+
+/* OF match table; const as required for read-only driver data. */
+static const struct of_device_id cti_match[] = {
+	{.compatible = "arm,coresight-cti"},
+	{}
+};
+
+static struct platform_driver cti_driver = {
+	.probe          = cti_probe,
+	.remove         = cti_remove,
+	.driver         = {
+		.name   = "coresight-cti",
+		.owner	= THIS_MODULE,
+		.of_match_table = cti_match,
+	},
+};
+
+/* Module entry/exit: register/unregister the platform driver. */
+static int __init cti_init(void)
+{
+	return platform_driver_register(&cti_driver);
+}
+module_init(cti_init);
+
+static void __exit cti_exit(void)
+{
+	platform_driver_unregister(&cti_driver);
+}
+module_exit(cti_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CTI driver");
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
new file mode 100644
index 000000000000..8f07f1434079
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -0,0 +1,135 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/coresight.h>
+
+#define DUMMY_TRACE_ID_START 256
+
+/* Per-device state for the dummy trace source. */
+struct dummy_drvdata {
+	struct device		*dev;		/* backing platform device */
+	struct coresight_device	*csdev;		/* coresight registration handle */
+	int			traceid;	/* unique trace ID for this source */
+};
+
+/* Source enable hook: no hardware to program, just log. */
+static int dummy_enable(struct coresight_device *csdev)
+{
+	struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	dev_info(drvdata->dev, "Dummy source enabled\n");
+
+	return 0;
+}
+
+/* Source disable hook: counterpart of dummy_enable. */
+static void dummy_disable(struct coresight_device *csdev)
+{
+	struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	dev_info(drvdata->dev, "Dummy source disabled\n");
+}
+
+/* Report the trace ID assigned at probe time. */
+static int dummy_trace_id(struct coresight_device *csdev)
+{
+	struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->traceid;
+}
+
+static const struct coresight_ops_source dummy_source_ops = {
+	.trace_id	= dummy_trace_id,
+	.enable		= dummy_enable,
+	.disable	= dummy_disable,
+};
+
+static const struct coresight_ops dummy_cs_ops = {
+	.source_ops	= &dummy_source_ops,
+};
+
+/*
+ * dummy_probe - register a dummy trace source with the coresight framework.
+ * Each probed instance gets the next trace ID from a function-static
+ * counter starting at DUMMY_TRACE_ID_START (presumably chosen above the
+ * 7-bit hardware trace-ID space to avoid clashes -- confirm against the
+ * sink/decoder expectations).
+ */
+static int dummy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct dummy_drvdata *drvdata;
+	struct coresight_desc *desc;
+	static int traceid = DUMMY_TRACE_ID_START;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	drvdata->traceid = traceid++;
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &dummy_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_info(dev, "Dummy source initialized\n");
+
+	return 0;
+}
+
+/* dummy_remove - unregister the coresight device; devm frees the rest. */
+static int dummy_remove(struct platform_device *pdev)
+{
+	struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+/* OF match table and platform driver definition for the dummy source. */
+static const struct of_device_id dummy_match[] = {
+	{.compatible = "qcom,coresight-dummy"},
+	{}
+};
+
+static struct platform_driver dummy_driver = {
+	.probe          = dummy_probe,
+	.remove         = dummy_remove,
+	.driver         = {
+		.name   = "coresight-dummy",
+		.owner	= THIS_MODULE,
+		.of_match_table = dummy_match,
+	},
+};
+
+/*
+ * Module entry/exit.  Fix: marked static -- these symbols are only
+ * referenced via module_init()/module_exit(), and non-static definitions
+ * without prototypes pollute the global namespace (the sibling CTI driver's
+ * cti_init/cti_exit are static for the same reason).
+ */
+static int __init dummy_init(void)
+{
+	return platform_driver_register(&dummy_driver);
+}
+module_init(dummy_init);
+
+static void __exit dummy_exit(void)
+{
+	platform_driver_unregister(&dummy_driver);
+}
+module_exit(dummy_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight dummy source driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a6707642bb23..9f9dd574c8d0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,7 +24,6 @@
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
-#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/coresight.h>
#include <linux/pm_wakeup.h>
@@ -42,14 +41,18 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+static struct notifier_block etm4_cpu_notifier;
+static struct notifier_block etm4_cpu_dying_notifier;
static void etm4_os_unlock(void *info)
{
struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
+ CS_UNLOCK(drvdata->base);
/* Writing any value to ETMOSLAR unlocks the trace registers */
writel_relaxed(0x0, drvdata->base + TRCOSLAR);
isb();
+ CS_LOCK(drvdata->base);
}
static bool etm4_arch_supported(u8 arch)
@@ -66,24 +69,8 @@ static bool etm4_arch_supported(u8 arch)
static int etm4_trace_id(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- unsigned long flags;
- int trace_id = -1;
-
- if (!drvdata->enable)
- return drvdata->trcid;
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
-
- CS_UNLOCK(drvdata->base);
- trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
- trace_id &= ETM_TRACEID_MASK;
- CS_LOCK(drvdata->base);
-
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
- return trace_id;
+ return drvdata->trcid;
}
static void etm4_enable_hw(void *info)
@@ -93,8 +80,6 @@ static void etm4_enable_hw(void *info)
CS_UNLOCK(drvdata->base);
- etm4_os_unlock(drvdata);
-
/* Disable the trace unit before programming trace registers */
writel_relaxed(0, drvdata->base + TRCPRGCTLR);
@@ -2492,8 +2477,6 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
if (drvdata->nr_addr_cmp)
drvdata->vinst_ctrl |= BIT(9);
- /* no address range filtering for ViewInst */
- drvdata->viiectlr = 0x0;
/* no start-stop filtering for ViewInst */
drvdata->vissctlr = 0x0;
@@ -2527,6 +2510,9 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
drvdata->addr_val[1] = (unsigned long)_etext;
drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+
+ /* address range filtering for ViewInst */
+ drvdata->viiectlr = 0x1;
}
for (i = 0; i < drvdata->numcidc; i++) {
@@ -2543,27 +2529,125 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
drvdata->vmid_mask1 = 0x0;
/*
- * A trace ID value of 0 is invalid, so let's start at some
- * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
- * start at 0x20.
+ * Start trace id from 0x1.
*/
- drvdata->trcid = 0x20 + drvdata->cpu;
+ drvdata->trcid = 0x1 + drvdata->cpu;
+}
+
+/*
+ * etm4_set_reg_dump - allocate a per-CPU buffer covering the ETM register
+ * space and register it with the MSM memory-dump table so the registers can
+ * be captured on a crash.  The buffer is freed again if registration fails.
+ */
+static int etm4_set_reg_dump(struct etmv4_drvdata *drvdata)
+{
+	int ret;
+	void *baddr;
+	struct amba_device *adev;
+	struct resource *res;
+	struct device *dev = drvdata->dev;
+	struct msm_dump_entry dump_entry;
+	uint32_t size;
+
+	adev = to_amba_device(dev);
+	if (!adev)
+		return -EINVAL;
+
+	res = &adev->res;
+	size = resource_size(res);
+
+	baddr = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!baddr)
+		return -ENOMEM;
+
+	/* Dump table entries are keyed by physical address. */
+	drvdata->reg_data.addr = virt_to_phys(baddr);
+	drvdata->reg_data.len = size;
+	scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name),
+		  "KETM_REG%d", drvdata->cpu);
+
+	dump_entry.id = MSM_DUMP_DATA_ETM_REG + drvdata->cpu;
+	dump_entry.addr = virt_to_phys(&drvdata->reg_data);
+
+	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+				     &dump_entry);
+	if (ret)
+		devm_kfree(dev, baddr);
+
+	return ret;
+}
+
+static int etm4_late_init(struct etmv4_drvdata *drvdata)
+{
+ int ret;
+ struct coresight_desc *desc;
+ struct device *dev = drvdata->dev;
+
+ if (etm4_arch_supported(drvdata->arch) == false)
+ return -EINVAL;
+
+ etm4_init_default_data(drvdata);
+
+ ret = etm4_set_reg_dump(drvdata);
+ if (ret)
+ dev_err(dev, "ETM REG dump setup failed. ret %d\n", ret);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ desc->ops = &etm4_cs_ops;
+ desc->pdata = dev->platform_data;
+ desc->dev = dev;
+ desc->groups = coresight_etmv4_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_coresight_register;
+ }
+
+ dev_info(dev, "ETM 4.0 initialized\n");
+
+ if (boot_enable) {
+ coresight_enable(drvdata->csdev);
+ drvdata->boot_enable = true;
+ }
+
+ drvdata->init = true;
+
+ return 0;
+
+err_coresight_register:
+ devm_kfree(dev, desc);
+ return ret;
}
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ static bool clk_disable[NR_CPUS];
+ int ret;
if (!etmdrvdata[cpu])
goto out;
switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_UP_PREPARE:
+ if (!etmdrvdata[cpu]->os_unlock) {
+ ret = pm_runtime_get_sync(etmdrvdata[cpu]->dev);
+ if (ret) {
+ dev_err(etmdrvdata[cpu]->dev,
+ "ETM clk enable during hotplug failed for cpu: %d, ret: %d\n",
+ cpu, ret);
+ goto err_clk_init;
+ }
+ clk_disable[cpu] = true;
+ }
+ break;
+
case CPU_STARTING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock) {
etm4_os_unlock(etmdrvdata[cpu]);
etmdrvdata[cpu]->os_unlock = true;
+ etm4_init_arch_data(etmdrvdata[cpu]);
}
if (etmdrvdata[cpu]->enable)
@@ -2572,11 +2656,72 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
break;
case CPU_ONLINE:
+ mutex_lock(&etmdrvdata[cpu]->mutex);
+ if (!etmdrvdata[cpu]->init) {
+ ret = etm4_late_init(etmdrvdata[cpu]);
+ if (ret) {
+ dev_err(etmdrvdata[cpu]->dev,
+ "ETM init failed. Cpu: %d, ret: %d\n",
+ cpu, ret);
+ mutex_unlock(&etmdrvdata[cpu]->mutex);
+ goto err_init;
+ }
+ }
+ mutex_unlock(&etmdrvdata[cpu]->mutex);
+
+ if (clk_disable[cpu]) {
+ pm_runtime_put(etmdrvdata[cpu]->dev);
+ clk_disable[cpu] = false;
+ }
+
if (etmdrvdata[cpu]->boot_enable &&
!etmdrvdata[cpu]->sticky_enable)
coresight_enable(etmdrvdata[cpu]->csdev);
break;
+ case CPU_UP_CANCELED:
+ if (clk_disable[cpu]) {
+ pm_runtime_put(etmdrvdata[cpu]->dev);
+ clk_disable[cpu] = false;
+ }
+ break;
+ }
+out:
+ return NOTIFY_OK;
+
+err_init:
+ if (--etm4_count == 0) {
+ unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ unregister_hotcpu_notifier(&etm4_cpu_dying_notifier);
+ }
+
+ if (clk_disable[cpu]) {
+ pm_runtime_put(etmdrvdata[cpu]->dev);
+ clk_disable[cpu] = false;
+ }
+
+ devm_iounmap(etmdrvdata[cpu]->dev, etmdrvdata[cpu]->base);
+ dev_set_drvdata(etmdrvdata[cpu]->dev, NULL);
+ devm_kfree(etmdrvdata[cpu]->dev, etmdrvdata[cpu]);
+ etmdrvdata[cpu] = NULL;
+
+err_clk_init:
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block etm4_cpu_notifier = {
+ .notifier_call = etm4_cpu_callback,
+};
+
+static int etm4_cpu_dying_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (!etmdrvdata[cpu])
+ goto out;
+
+ switch (action & (~CPU_TASKS_FROZEN)) {
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
if (etmdrvdata[cpu]->enable)
@@ -2588,8 +2733,9 @@ out:
return NOTIFY_OK;
}
-static struct notifier_block etm4_cpu_notifier = {
- .notifier_call = etm4_cpu_callback,
+static struct notifier_block etm4_cpu_dying_notifier = {
+ .notifier_call = etm4_cpu_dying_callback,
+ .priority = 1,
};
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
@@ -2600,13 +2746,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct etmv4_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
struct device_node *np = adev->dev.of_node;
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -2629,58 +2770,64 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
+ mutex_init(&drvdata->mutex);
- drvdata->cpu = pdata ? pdata->cpu : 0;
+ drvdata->cpu = pdata ? pdata->cpu : -1;
+ if (drvdata->cpu == -1) {
+ dev_err(drvdata->dev, "invalid ETM cpu handle\n");
+ return -EINVAL;
+ }
get_online_cpus();
- etmdrvdata[drvdata->cpu] = drvdata;
- if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
+ if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock,
+ drvdata, 1)) {
drvdata->os_unlock = true;
- if (smp_call_function_single(drvdata->cpu,
- etm4_init_arch_data, drvdata, 1))
- dev_err(dev, "ETM arch init failed\n");
+ ret = smp_call_function_single(drvdata->cpu,
+ etm4_init_arch_data, drvdata, 1);
+ if (ret) {
+ dev_err(dev, "ETM arch init failed\n");
+ put_online_cpus();
+ pm_runtime_put(&adev->dev);
+ return ret;
+ }
+ }
- if (!etm4_count++)
+ etmdrvdata[drvdata->cpu] = drvdata;
+
+ if (!etm4_count++) {
register_hotcpu_notifier(&etm4_cpu_notifier);
+ register_hotcpu_notifier(&etm4_cpu_dying_notifier);
+ }
put_online_cpus();
- if (etm4_arch_supported(drvdata->arch) == false) {
- ret = -EINVAL;
- goto err_arch_supported;
- }
- etm4_init_default_data(drvdata);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
pm_runtime_put(&adev->dev);
- desc->type = CORESIGHT_DEV_TYPE_SOURCE;
- desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
- desc->ops = &etm4_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_etmv4_groups;
- drvdata->csdev = coresight_register(desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto err_coresight_register;
- }
-
- dev_info(dev, "%s initialized\n", (char *)id->data);
-
- if (boot_enable) {
- coresight_enable(drvdata->csdev);
- drvdata->boot_enable = true;
+ mutex_lock(&drvdata->mutex);
+ if (drvdata->os_unlock && !drvdata->init) {
+ ret = etm4_late_init(drvdata);
+ if (ret) {
+ mutex_unlock(&drvdata->mutex);
+ goto err_late_init;
+ }
}
+ mutex_unlock(&drvdata->mutex);
return 0;
-err_arch_supported:
- pm_runtime_put(&adev->dev);
-err_coresight_register:
- if (--etm4_count == 0)
+err_late_init:
+ if (--etm4_count == 0) {
unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ unregister_hotcpu_notifier(&etm4_cpu_dying_notifier);
+ }
+ etmdrvdata[drvdata->cpu] = NULL;
+ dev_set_drvdata(dev, NULL);
return ret;
}
@@ -2689,8 +2836,10 @@ static int etm4_remove(struct amba_device *adev)
struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
coresight_unregister(drvdata->csdev);
- if (--etm4_count == 0)
+ if (--etm4_count == 0) {
unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ unregister_hotcpu_notifier(&etm4_cpu_dying_notifier);
+ }
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index c34100205ca9..f920793daa80 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
#define _CORESIGHT_CORESIGHT_ETM_H
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "coresight-priv.h"
+#include <soc/qcom/memory_dump.h>
/*
* Device registers:
@@ -185,12 +187,14 @@
* @dev: The device entity associated to this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
+ * @mutex: Avoid race condition between hotplug and probe path.
* @cpu: The cpu this component is affined to.
* @arch: ETM version number.
* @enable: Is this ETM currently tracing.
* @sticky_enable: true if ETM base configuration has been done.
* @boot_enable:True if we should start tracing at boot time.
* @os_unlock: True if access to management registers is allowed.
+ * @init: True if ETM is initialized
* @nr_pe: The number of processing entity available for tracing.
* @nr_pe_cmp: The number of processing entity comparator inputs that are
* available for tracing.
@@ -280,18 +284,21 @@
* @ns_ex_level:In non-secure state, indicates whether instruction tracing is
* supported for the corresponding Exception level.
* @ext_inp: External input selection.
+ * @reg_data: MSM memory dump data
*/
struct etmv4_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
spinlock_t spinlock;
+ struct mutex mutex;
int cpu;
u8 arch;
bool enable;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
+ bool init;
u8 nr_pe;
u8 nr_pe_cmp;
u8 nr_addr_cmp;
@@ -366,6 +373,7 @@ struct etmv4_drvdata {
u8 s_ex_level;
u8 ns_ex_level;
u32 ext_inp;
+ struct msm_dump_data reg_data;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-event.c b/drivers/hwtracing/coresight/coresight-event.c
new file mode 100644
index 000000000000..0bced010d4c5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-event.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/coresight.h>
+
+#include <trace/events/exception.h>
+
+static int event_abort_enable;
+static int event_abort_set(const char *val, struct kernel_param *kp);
+module_param_call(event_abort_enable, event_abort_set, param_get_int,
+ &event_abort_enable, 0644);
+
+static int event_abort_early_panic = 1;
+static int event_abort_on_panic_set(const char *val, struct kernel_param *kp);
+module_param_call(event_abort_early_panic, event_abort_on_panic_set,
+ param_get_int, &event_abort_early_panic, 0644);
+
+static void event_abort_user_fault(void *ignore,
+ struct task_struct *task,
+ unsigned long addr,
+ unsigned int fsr)
+{
+ coresight_abort();
+ pr_debug("coresight_event: task_name: %s, addr: %lu, fsr:%u",
+ (char *)task->comm, addr, fsr);
+}
+
+static void event_abort_undef_instr(void *ignore,
+ struct pt_regs *regs,
+ void *pc)
+{
+ if (user_mode(regs)) {
+ coresight_abort();
+ pr_debug("coresight_event: pc: %p", pc);
+ }
+}
+
+static void event_abort_unhandled_abort(void *ignore,
+ struct pt_regs *regs,
+ unsigned long addr,
+ unsigned int fsr)
+{
+ if (user_mode(regs)) {
+ coresight_abort();
+ pr_debug("coresight_event: addr: %lu, fsr:%u", addr, fsr);
+ }
+}
+
+static void event_abort_kernel_panic(void *ignore, long state)
+{
+ coresight_abort();
+}
+
+static int event_abort_register(void)
+{
+ int ret;
+
+ ret = register_trace_user_fault(event_abort_user_fault, NULL);
+ if (ret)
+ goto err_usr_fault;
+ ret = register_trace_undef_instr(event_abort_undef_instr, NULL);
+ if (ret)
+ goto err_undef_instr;
+ ret = register_trace_unhandled_abort(event_abort_unhandled_abort, NULL);
+ if (ret)
+ goto err_unhandled_abort;
+
+ return 0;
+
+err_unhandled_abort:
+ unregister_trace_undef_instr(event_abort_undef_instr, NULL);
+err_undef_instr:
+ unregister_trace_user_fault(event_abort_user_fault, NULL);
+err_usr_fault:
+ return ret;
+}
+
+static void event_abort_unregister(void)
+{
+ unregister_trace_user_fault(event_abort_user_fault, NULL);
+ unregister_trace_undef_instr(event_abort_undef_instr, NULL);
+ unregister_trace_unhandled_abort(event_abort_unhandled_abort, NULL);
+}
+
+static int event_abort_set(const char *val, struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret) {
+ pr_err("coresight_event: error setting value %d\n", ret);
+ return ret;
+ }
+
+ if (event_abort_enable)
+ ret = event_abort_register();
+ else
+ event_abort_unregister();
+
+ return ret;
+}
+
+static int event_abort_on_panic_set(const char *val, struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_int(val, kp);
+ if (ret) {
+ pr_err("coresight_event: error setting val on panic %d\n", ret);
+ return ret;
+ }
+
+ if (event_abort_early_panic) {
+ unregister_trace_kernel_panic_late(event_abort_kernel_panic,
+ NULL);
+ ret = register_trace_kernel_panic(event_abort_kernel_panic,
+ NULL);
+ if (ret)
+ goto err;
+ } else {
+ unregister_trace_kernel_panic(event_abort_kernel_panic, NULL);
+ ret = register_trace_kernel_panic_late(event_abort_kernel_panic,
+ NULL);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ pr_err("coresight_event: error registering panic event %d\n", ret);
+ return ret;
+}
+
+static int __init event_init(void)
+{
+ int ret;
+
+ ret = register_trace_kernel_panic(event_abort_kernel_panic, NULL);
+ if (ret) {
+ /* We do not want to fail module init. This module can still
+ * be used to register other abort events.
+ */
+ pr_err("coresight_event: error registering on panic %d\n", ret);
+ }
+ return 0;
+}
+module_init(event_init);
+
+static void __exit event_exit(void)
+{
+ unregister_trace_kernel_panic(event_abort_kernel_panic, NULL);
+}
+module_exit(event_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Coresight Event driver to abort tracing");
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
new file mode 100644
index 000000000000..266cc69946e3
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include "coresight-priv.h"
+
+struct hwevent_mux {
+ phys_addr_t start;
+ phys_addr_t end;
+};
+
+struct hwevent_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ struct mutex mutex;
+ int nr_hclk;
+ struct clk **hclk;
+ int nr_hreg;
+ struct regulator **hreg;
+ int nr_hmux;
+ struct hwevent_mux *hmux;
+};
+
+static int hwevent_enable(struct hwevent_drvdata *drvdata)
+{
+ int ret, i, j;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < drvdata->nr_hreg; i++) {
+ ret = regulator_enable(drvdata->hreg[i]);
+ if (ret)
+ goto err0;
+ }
+
+ for (j = 0; j < drvdata->nr_hclk; j++) {
+ ret = clk_prepare_enable(drvdata->hclk[j]);
+ if (ret)
+ goto err1;
+ }
+ return 0;
+err1:
+ for (j--; j >= 0; j--)
+ clk_disable_unprepare(drvdata->hclk[j]);
+err0:
+ for (i--; i >= 0; i--)
+ regulator_disable(drvdata->hreg[i]);
+
+ clk_disable_unprepare(drvdata->clk);
+ return ret;
+}
+
+static void hwevent_disable(struct hwevent_drvdata *drvdata)
+{
+ int i;
+
+ clk_disable_unprepare(drvdata->clk);
+ for (i = 0; i < drvdata->nr_hclk; i++)
+ clk_disable_unprepare(drvdata->hclk[i]);
+ for (i = 0; i < drvdata->nr_hreg; i++)
+ regulator_disable(drvdata->hreg[i]);
+}
+
+static ssize_t hwevent_store_setreg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct hwevent_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ void *hwereg;
+ unsigned long long addr;
+ unsigned long val;
+ int ret, i;
+
+ if (sscanf(buf, "%llx %lx", &addr, &val) != 2)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ ret = hwevent_enable(drvdata);
+ if (ret) {
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+ }
+
+ for (i = 0; i < drvdata->nr_hmux; i++) {
+ if ((addr >= drvdata->hmux[i].start) &&
+ (addr < drvdata->hmux[i].end)) {
+ hwereg = devm_ioremap(dev,
+ drvdata->hmux[i].start,
+ drvdata->hmux[i].end -
+ drvdata->hmux[i].start);
+ if (!hwereg) {
+ dev_err(dev, "unable to map address 0x%llx\n",
+ addr);
+ ret = -ENOMEM;
+ goto err;
+ }
+ writel_relaxed(val, hwereg + addr -
+ drvdata->hmux[i].start);
+ /*
+ * Ensure writes to hwevent control registers
+ * are completed before unmapping the address
+ */
+ mb();
+ devm_iounmap(dev, hwereg);
+ break;
+ }
+ }
+
+ if (i == drvdata->nr_hmux) {
+ ret = coresight_csr_hwctrl_set(addr, val);
+ if (ret) {
+ dev_err(dev, "invalid mux control register address\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ hwevent_disable(drvdata);
+ mutex_unlock(&drvdata->mutex);
+ return size;
+err:
+ hwevent_disable(drvdata);
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+static DEVICE_ATTR(setreg, S_IWUSR, NULL, hwevent_store_setreg);
+
+static struct attribute *hwevent_attrs[] = {
+ &dev_attr_setreg.attr,
+ NULL,
+};
+
+static struct attribute_group hwevent_attr_grp = {
+ .attrs = hwevent_attrs,
+};
+
+static const struct attribute_group *hwevent_attr_grps[] = {
+ &hwevent_attr_grp,
+ NULL,
+};
+
+static int hwevent_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hwevent_drvdata *drvdata;
+ struct coresight_desc *desc;
+ struct coresight_platform_data *pdata;
+ struct resource *res;
+ int ret, i;
+ const char *hmux_name, *hclk_name, *hreg_name;
+
+ pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ drvdata->nr_hmux = of_property_count_strings(pdev->dev.of_node,
+ "reg-names");
+
+ if (!drvdata->nr_hmux)
+ return -ENODEV;
+
+ if (drvdata->nr_hmux > 0) {
+ drvdata->hmux = devm_kzalloc(dev, drvdata->nr_hmux *
+ sizeof(*drvdata->hmux),
+ GFP_KERNEL);
+ if (!drvdata->hmux)
+ return -ENOMEM;
+ for (i = 0; i < drvdata->nr_hmux; i++) {
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "reg-names", i,
+ &hmux_name);
+ if (ret)
+ return ret;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ hmux_name);
+ if (!res)
+ return -ENODEV;
+ drvdata->hmux[i].start = res->start;
+ drvdata->hmux[i].end = res->end;
+ }
+ } else {
+ return drvdata->nr_hmux;
+ }
+
+ mutex_init(&drvdata->mutex);
+
+ drvdata->clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->clk))
+ return PTR_ERR(drvdata->clk);
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
+ drvdata->nr_hclk = of_property_count_strings(pdev->dev.of_node,
+ "qcom,hwevent-clks");
+ drvdata->nr_hreg = of_property_count_strings(pdev->dev.of_node,
+ "qcom,hwevent-regs");
+
+ if (drvdata->nr_hclk > 0) {
+ drvdata->hclk = devm_kzalloc(dev, drvdata->nr_hclk *
+ sizeof(*drvdata->hclk),
+ GFP_KERNEL);
+ if (!drvdata->hclk)
+ return -ENOMEM;
+
+ for (i = 0; i < drvdata->nr_hclk; i++) {
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "qcom,hwevent-clks",
+ i, &hclk_name);
+ if (ret)
+ return ret;
+
+ drvdata->hclk[i] = devm_clk_get(dev, hclk_name);
+ if (IS_ERR(drvdata->hclk[i]))
+ return PTR_ERR(drvdata->hclk[i]);
+ }
+ }
+ if (drvdata->nr_hreg > 0) {
+ drvdata->hreg = devm_kzalloc(dev, drvdata->nr_hreg *
+ sizeof(*drvdata->hreg),
+ GFP_KERNEL);
+ if (!drvdata->hreg)
+ return -ENOMEM;
+
+ for (i = 0; i < drvdata->nr_hreg; i++) {
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "qcom,hwevent-regs",
+ i, &hreg_name);
+ if (ret)
+ return ret;
+
+ drvdata->hreg[i] = devm_regulator_get(dev, hreg_name);
+ if (IS_ERR(drvdata->hreg[i]))
+ return PTR_ERR(drvdata->hreg[i]);
+ }
+ }
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_NONE;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ desc->groups = hwevent_attr_grps;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "Hardware Event driver initialized\n");
+ return 0;
+}
+
+static int hwevent_remove(struct platform_device *pdev)
+{
+ struct hwevent_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static const struct of_device_id hwevent_match[] = {
+ {.compatible = "qcom,coresight-hwevent"},
+ {}
+};
+
+static struct platform_driver hwevent_driver = {
+ .probe = hwevent_probe,
+ .remove = hwevent_remove,
+ .driver = {
+ .name = "coresight-hwevent",
+ .owner = THIS_MODULE,
+ .of_match_table = hwevent_match,
+ },
+};
+
+static int __init hwevent_init(void)
+{
+ return platform_driver_register(&hwevent_driver);
+}
+module_init(hwevent_init);
+
+static void __exit hwevent_exit(void)
+{
+ platform_driver_unregister(&hwevent_driver);
+}
+module_exit(hwevent_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Hardware Event driver");
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98cc7cf..03ed2b59b7e0 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,9 @@
#define CORESIGHT_DEVTYPE 0xfcc
#define TIMEOUT_US 100
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
static inline void CS_LOCK(void __iomem *addr)
{
@@ -52,6 +54,27 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
+static inline bool coresight_authstatus_enabled(void __iomem *addr)
+{
+ int ret;
+ unsigned auth_val;
+
+ if (!addr)
+ return false;
+
+ auth_val = readl_relaxed(addr + CORESIGHT_AUTHSTATUS);
+
+ if ((0x2 == BMVAL(auth_val, 0, 1)) ||
+ (0x2 == BMVAL(auth_val, 2, 3)) ||
+ (0x2 == BMVAL(auth_val, 4, 5)) ||
+ (0x2 == BMVAL(auth_val, 6, 7)))
+ ret = false;
+ else
+ ret = true;
+
+ return ret;
+}
+
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
extern int etm_writel_cp14(u32 off, u32 val);
@@ -60,4 +83,17 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
+#ifdef CONFIG_CORESIGHT_CSR
+extern void msm_qdss_csr_enable_bam_to_usb(void);
+extern void msm_qdss_csr_disable_bam_to_usb(void);
+extern void msm_qdss_csr_disable_flush(void);
+extern int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val);
+#else
+static inline void msm_qdss_csr_enable_bam_to_usb(void) {}
+static inline void msm_qdss_csr_disable_bam_to_usb(void) {}
+static inline void msm_qdss_csr_disable_flush(void) {}
+static inline int coresight_csr_hwctrl_set(uint64_t addr,
+ uint32_t val) { return -EINVAL; }
+#endif
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-qmi.h b/drivers/hwtracing/coresight/coresight-qmi.h
new file mode 100644
index 000000000000..baf408a75fcd
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-qmi.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_QMI_H
+#define _CORESIGHT_QMI_H
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define CORESIGHT_QMI_SVC_ID (0x33)
+#define CORESIGHT_QMI_VERSION (1)
+
+#define CORESIGHT_QMI_GET_ETM_REQ_V01 (0x002B)
+#define CORESIGHT_QMI_GET_ETM_RESP_V01 (0x002B)
+#define CORESIGHT_QMI_SET_ETM_REQ_V01 (0x002C)
+#define CORESIGHT_QMI_SET_ETM_RESP_V01 (0x002C)
+
+#define CORESIGHT_QMI_GET_ETM_REQ_MAX_LEN (0)
+#define CORESIGHT_QMI_GET_ETM_RESP_MAX_LEN (14)
+#define CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN (7)
+#define CORESIGHT_QMI_SET_ETM_RESP_MAX_LEN (7)
+
+#define TIMEOUT_MS (5000)
+
+enum coresight_etm_state_enum_type_v01 {
+ /* To force a 32 bit signed enum. Do not change or use */
+ CORESIGHT_ETM_STATE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ CORESIGHT_ETM_STATE_DISABLED_V01 = 0,
+ CORESIGHT_ETM_STATE_ENABLED_V01 = 1,
+ CORESIGHT_ETM_STATE_ENUM_TYPE_MAX_ENUM_VAL_01 = INT_MAX,
+};
+
+struct coresight_get_etm_req_msg_v01 {
+ /*
+ * This element is a placeholder to prevent declaration of
+ * empty struct. Do not change.
+ */
+ char __placeholder;
+};
+
+struct coresight_get_etm_resp_msg_v01 {
+ /* Mandatory */
+ /* QMI result Code */
+ struct qmi_response_type_v01 resp;
+
+ /* Optional */
+ /* ETM output state, must be set to true if state is being passed */
+ uint8_t state_valid;
+ /* Present when result code is QMI_RESULT_SUCCESS */
+ enum coresight_etm_state_enum_type_v01 state;
+};
+
+struct coresight_set_etm_req_msg_v01 {
+ /* Mandatory */
+ /* ETM output state */
+ enum coresight_etm_state_enum_type_v01 state;
+};
+
+struct coresight_set_etm_resp_msg_v01 {
+ /* Mandatory */
+ struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info coresight_set_etm_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum coresight_etm_state_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct coresight_set_etm_req_msg_v01,
+ state),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct elem_info coresight_set_etm_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct coresight_set_etm_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-qpdi.c b/drivers/hwtracing/coresight/coresight-qpdi.c
new file mode 100644
index 000000000000..a39d59431bee
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-qpdi.c
@@ -0,0 +1,406 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/coresight.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include "coresight-priv.h"
+
+#define qpdi_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define qpdi_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define QPDI_DISABLE_CFG (0x0)
+
+static int boot_enable;
+module_param_named(
+ boot_enable, boot_enable, int, S_IRUGO
+);
+
+struct qpdi_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct mutex mutex;
+ struct regulator *reg;
+ unsigned int reg_low;
+ unsigned int reg_high;
+ unsigned int reg_lpm;
+ unsigned int reg_hpm;
+ struct regulator *reg_io;
+ unsigned int reg_low_io;
+ unsigned int reg_high_io;
+ unsigned int reg_lpm_io;
+ unsigned int reg_hpm_io;
+ int pmic_gpio_vote;
+ bool skip_ldo;
+ bool enable;
+};
+
+static int qpdi_reg_set_optimum_mode(struct regulator *reg,
+ unsigned int reg_hpm)
+{
+ if (regulator_count_voltages(reg) <= 0)
+ return 0;
+
+ return regulator_set_load(reg, reg_hpm);
+}
+
+
+static int qpdi_reg_set_voltage(struct regulator *reg, unsigned int reg_low,
+ unsigned int reg_high)
+{
+ if (regulator_count_voltages(reg) <= 0)
+ return 0;
+
+ return regulator_set_voltage(reg, reg_low, reg_high);
+}
+
+static int __qpdi_enable(struct qpdi_drvdata *drvdata)
+{
+ int ret;
+
+ if (!drvdata->reg || !drvdata->reg_io)
+ return -EINVAL;
+
+ ret = qpdi_reg_set_optimum_mode(drvdata->reg, drvdata->reg_hpm);
+ if (ret < 0)
+ return ret;
+ ret = qpdi_reg_set_voltage(drvdata->reg, drvdata->reg_low,
+ drvdata->reg_high);
+ if (ret)
+ goto err0;
+ ret = regulator_enable(drvdata->reg);
+ if (ret)
+ goto err1;
+ ret = qpdi_reg_set_optimum_mode(drvdata->reg_io, drvdata->reg_hpm_io);
+ if (ret < 0)
+ goto err2;
+ ret = qpdi_reg_set_voltage(drvdata->reg_io, drvdata->reg_low_io,
+ drvdata->reg_high_io);
+ if (ret)
+ goto err3;
+ ret = regulator_enable(drvdata->reg_io);
+ if (ret)
+ goto err4;
+ return 0;
+err4:
+ qpdi_reg_set_voltage(drvdata->reg_io, 0, drvdata->reg_high_io);
+err3:
+ qpdi_reg_set_optimum_mode(drvdata->reg_io, 0);
+err2:
+ regulator_disable(drvdata->reg);
+err1:
+ qpdi_reg_set_voltage(drvdata->reg, 0, drvdata->reg_high);
+err0:
+ qpdi_reg_set_optimum_mode(drvdata->reg, 0);
+ return ret;
+}
+
+static int qpdi_enable(struct qpdi_drvdata *drvdata)
+{
+ int ret;
+
+ mutex_lock(&drvdata->mutex);
+
+ if (drvdata->enable)
+ goto out;
+
+ if (!drvdata->skip_ldo) {
+ ret = __qpdi_enable(drvdata);
+ if (ret)
+ goto err;
+ }
+
+ qpdi_writel(drvdata, 0x2, QPDI_DISABLE_CFG);
+
+ drvdata->enable = true;
+ dev_info(drvdata->dev, "qpdi enabled\n");
+out:
+ mutex_unlock(&drvdata->mutex);
+ return 0;
+err:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+
+static void __qpdi_disable(struct qpdi_drvdata *drvdata)
+{
+ regulator_disable(drvdata->reg);
+ qpdi_reg_set_voltage(drvdata->reg, 0, drvdata->reg_high);
+ qpdi_reg_set_optimum_mode(drvdata->reg, 0);
+
+ regulator_disable(drvdata->reg_io);
+ qpdi_reg_set_voltage(drvdata->reg_io, 0, drvdata->reg_high_io);
+ qpdi_reg_set_optimum_mode(drvdata->reg_io, 0);
+}
+
+static void qpdi_disable(struct qpdi_drvdata *drvdata)
+{
+ mutex_lock(&drvdata->mutex);
+
+ if (!drvdata->enable) {
+ mutex_unlock(&drvdata->mutex);
+ return;
+ }
+
+ qpdi_writel(drvdata, 0x3, QPDI_DISABLE_CFG);
+
+ if (!drvdata->skip_ldo)
+ __qpdi_disable(drvdata);
+
+ drvdata->enable = false;
+ mutex_unlock(&drvdata->mutex);
+ dev_info(drvdata->dev, "qpdi disabled\n");
+}
+
+static ssize_t qpdi_show_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpdi_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->enable;
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t qpdi_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qpdi_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ if (val)
+ ret = qpdi_enable(drvdata);
+ else
+ qpdi_disable(drvdata);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, qpdi_show_enable,
+ qpdi_store_enable);
+
+static struct attribute *qpdi_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group qpdi_attr_grp = {
+ .attrs = qpdi_attrs,
+};
+
+static const struct attribute_group *qpdi_attr_grps[] = {
+ &qpdi_attr_grp,
+ NULL,
+};
+
+static int qpdi_parse_of_data(struct platform_device *pdev,
+ struct qpdi_drvdata *drvdata)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *reg_node = NULL;
+ struct device *dev = &pdev->dev;
+ const __be32 *prop;
+ int len, ret;
+
+ drvdata->skip_ldo = of_property_read_bool(node, "qcom,skip-ldo");
+ if (drvdata->skip_ldo)
+ return 0;
+
+ drvdata->pmic_gpio_vote = of_get_named_gpio(pdev->dev.of_node,
+ "qcom,pmic-carddetect-gpio", 0);
+ if (drvdata->pmic_gpio_vote < 0)
+ dev_info(dev, "QPDI hotplug card detection is not supported\n");
+ else {
+ ret = gpio_request(drvdata->pmic_gpio_vote, "qpdi_gpio_hp");
+ if (ret) {
+ dev_err(dev, "failed to allocate the GPIO\n");
+ return ret;
+ }
+
+ ret = gpio_direction_input(drvdata->pmic_gpio_vote);
+ if (ret) {
+ dev_err(dev, "failed to set the gpio to input\n");
+ gpio_free(drvdata->pmic_gpio_vote);
+ return ret;
+ }
+
+ drvdata->skip_ldo = 1;
+ return 0;
+ }
+
+ reg_node = of_parse_phandle(node, "vdd-supply", 0);
+ if (reg_node) {
+ drvdata->reg = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(drvdata->reg))
+ return PTR_ERR(drvdata->reg);
+
+ prop = of_get_property(node, "qcom,vdd-voltage-level", &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_err(dev, "sdc voltage levels not specified\n");
+ } else {
+ drvdata->reg_low = be32_to_cpup(&prop[0]);
+ drvdata->reg_high = be32_to_cpup(&prop[1]);
+ }
+
+ prop = of_get_property(node, "qcom,vdd-current-level", &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_err(dev, "sdc current levels not specified\n");
+ } else {
+ drvdata->reg_lpm = be32_to_cpup(&prop[0]);
+ drvdata->reg_hpm = be32_to_cpup(&prop[1]);
+ }
+ of_node_put(reg_node);
+ } else {
+ dev_err(dev, "sdc voltage supply not specified or available\n");
+ }
+
+ reg_node = of_parse_phandle(node, "vdd-io-supply", 0);
+ if (reg_node) {
+ drvdata->reg_io = devm_regulator_get(dev, "vdd-io");
+ if (IS_ERR(drvdata->reg_io))
+ return PTR_ERR(drvdata->reg_io);
+
+ prop = of_get_property(node, "qcom,vdd-io-voltage-level", &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_err(dev, "sdc io voltage levels not specified\n");
+ } else {
+ drvdata->reg_low_io = be32_to_cpup(&prop[0]);
+ drvdata->reg_high_io = be32_to_cpup(&prop[1]);
+ }
+
+ prop = of_get_property(node, "qcom,vdd-io-current-level", &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_err(dev, "sdc io current levels not specified\n");
+ } else {
+ drvdata->reg_lpm_io = be32_to_cpup(&prop[0]);
+ drvdata->reg_hpm_io = be32_to_cpup(&prop[1]);
+ }
+ of_node_put(reg_node);
+ } else {
+ dev_err(dev,
+ "sdc io voltage supply not specified or available\n");
+ }
+
+ return 0;
+}
+
+static int qpdi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata;
+ struct qpdi_drvdata *drvdata;
+ struct resource *res;
+ struct coresight_desc *desc;
+
+ pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qpdi-base");
+ if (!res)
+ return -ENODEV;
+
+ drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drvdata->base)
+ return -ENOMEM;
+
+ mutex_init(&drvdata->mutex);
+
+ ret = qpdi_parse_of_data(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_NONE;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ desc->groups = qpdi_attr_grps;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ if (boot_enable)
+ qpdi_enable(drvdata);
+
+ dev_info(dev, "CoreSight QPDI driver initialized\n");
+ return 0;
+}
+
+static int qpdi_remove(struct platform_device *pdev)
+{
+ struct qpdi_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ if (drvdata->pmic_gpio_vote > -1)
+ gpio_free(drvdata->pmic_gpio_vote);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static const struct of_device_id qpdi_match[] = {
+ {.compatible = "qcom,coresight-qpdi"},
+ {}
+};
+
+static struct platform_driver qpdi_driver = {
+ .probe = qpdi_probe,
+ .remove = qpdi_remove,
+ .driver = {
+ .name = "coresight-qpdi",
+ .owner = THIS_MODULE,
+ .of_match_table = qpdi_match,
+ },
+};
+
+static int __init qpdi_init(void)
+{
+ return platform_driver_register(&qpdi_driver);
+}
+module_init(qpdi_init);
+
+static void __exit qpdi_exit(void)
+{
+ platform_driver_unregister(&qpdi_driver);
+}
+module_exit(qpdi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight QPDI driver");
diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c
new file mode 100644
index 000000000000..cc0b25b130d7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-remote-etm.c
@@ -0,0 +1,395 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include "coresight-qmi.h"
+
+#define REMOTE_ETM_TRACE_ID_START 192
+
+#ifdef CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE
+static int boot_enable = CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE;
+#else
+static int boot_enable;
+#endif
+
+module_param_named(
+ boot_enable, boot_enable, int, S_IRUGO
+);
+
+struct remote_etm_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct mutex mutex;
+ struct workqueue_struct *wq;
+ struct qmi_handle *handle;
+ struct work_struct work_svc_arrive;
+ struct work_struct work_svc_exit;
+ struct work_struct work_rcv_msg;
+ struct notifier_block nb;
+ uint32_t inst_id;
+ struct delayed_work work_delay_enable;
+ bool enable;
+ int traceid;
+};
+
+/*
+ * remote_etm_enable - ask the remote QMI service to turn its ETM on.
+ *
+ * Sends a CORESIGHT_QMI_SET_ETM_REQ_V01 message with state ENABLED and
+ * blocks up to TIMEOUT_MS for the response.  Called through the
+ * coresight source ops when the device is enabled.
+ *
+ * Returns 0 on success, -EINVAL when no QMI connection exists, a
+ * negative errno from the QMI transport, or -EREMOTEIO when the remote
+ * service rejected the request.
+ */
+static int remote_etm_enable(struct coresight_device *csdev)
+{
+	struct remote_etm_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+	struct coresight_set_etm_req_msg_v01 req;
+	struct coresight_set_etm_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int ret;
+
+	/* Serializes against svc_arrive/svc_exit which create/destroy handle */
+	mutex_lock(&drvdata->mutex);
+
+	/*
+	 * The QMI handle may be NULL in the following scenarios:
+	 * 1. QMI service is not present
+	 * 2. QMI service is present but attempt to enable remote ETM is earlier
+	 * than service is ready to handle request
+	 * 3. Connection between QMI client and QMI service failed
+	 *
+	 * Enable CoreSight without processing further QMI commands which
+	 * provides the option to enable remote ETM by other means.
+	 */
+	if (!drvdata->handle) {
+		dev_info(drvdata->dev,
+			 "%s: QMI service unavailable\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	req.state = CORESIGHT_ETM_STATE_ENABLED_V01;
+
+	req_desc.msg_id = CORESIGHT_QMI_SET_ETM_REQ_V01;
+	req_desc.max_msg_len = CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN;
+	req_desc.ei_array = coresight_set_etm_req_msg_v01_ei;
+
+	resp_desc.msg_id = CORESIGHT_QMI_SET_ETM_RESP_V01;
+	resp_desc.max_msg_len = CORESIGHT_QMI_SET_ETM_RESP_MAX_LEN;
+	resp_desc.ei_array = coresight_set_etm_resp_msg_v01_ei;
+
+	/* Blocking request/response exchange, bounded by TIMEOUT_MS. */
+	ret = qmi_send_req_wait(drvdata->handle, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp), TIMEOUT_MS);
+
+	if (ret < 0) {
+		dev_err(drvdata->dev, "%s: QMI send req failed %d\n", __func__,
+			ret);
+		goto err;
+	}
+
+	/* Transport succeeded; now check the service-level result code. */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		dev_err(drvdata->dev, "%s: QMI request failed %d %d\n",
+			__func__, resp.resp.result, resp.resp.error);
+		ret = -EREMOTEIO;
+		goto err;
+	}
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "Remote ETM tracing enabled\n");
+	return 0;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+/*
+ * remote_etm_disable - ask the remote QMI service to turn its ETM off.
+ *
+ * Mirrors remote_etm_enable() with state DISABLED.  The coresight
+ * disable path returns void, so failures are only logged; note that
+ * drvdata->enable is left set to true if the QMI exchange fails.
+ */
+static void remote_etm_disable(struct coresight_device *csdev)
+{
+	struct remote_etm_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+	struct coresight_set_etm_req_msg_v01 req;
+	struct coresight_set_etm_resp_msg_v01 resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int ret;
+
+	mutex_lock(&drvdata->mutex);
+
+	/* No live QMI connection: nothing we can tell the remote side. */
+	if (!drvdata->handle) {
+		dev_info(drvdata->dev,
+			 "%s: QMI service unavailable\n", __func__);
+		goto err;
+	}
+
+	req.state = CORESIGHT_ETM_STATE_DISABLED_V01;
+
+	req_desc.msg_id = CORESIGHT_QMI_SET_ETM_REQ_V01;
+	req_desc.max_msg_len = CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN;
+	req_desc.ei_array = coresight_set_etm_req_msg_v01_ei;
+
+	resp_desc.msg_id = CORESIGHT_QMI_SET_ETM_RESP_V01;
+	resp_desc.max_msg_len = CORESIGHT_QMI_SET_ETM_RESP_MAX_LEN;
+	resp_desc.ei_array = coresight_set_etm_resp_msg_v01_ei;
+
+	/* Blocking request/response exchange, bounded by TIMEOUT_MS. */
+	ret = qmi_send_req_wait(drvdata->handle, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp), TIMEOUT_MS);
+	if (ret < 0) {
+		dev_err(drvdata->dev, "%s: QMI send req failed %d\n", __func__,
+			ret);
+		goto err;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		dev_err(drvdata->dev, "%s: QMI request failed %d %d\n",
+			__func__, resp.resp.result, resp.resp.error);
+		goto err;
+	}
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->mutex);
+
+	dev_info(drvdata->dev, "Remote ETM tracing disabled\n");
+	return;
+err:
+	mutex_unlock(&drvdata->mutex);
+}
+
+/* Report the ATB trace ID assigned to this remote ETM instance. */
+static int remote_etm_trace_id(struct coresight_device *csdev)
+{
+	struct remote_etm_drvdata *data;
+
+	data = dev_get_drvdata(csdev->dev.parent);
+	return data->traceid;
+}
+
+static const struct coresight_ops_source remote_etm_source_ops = {
+ .trace_id = remote_etm_trace_id,
+ .enable = remote_etm_enable,
+ .disable = remote_etm_disable,
+};
+
+static const struct coresight_ops remote_cs_ops = {
+ .source_ops = &remote_etm_source_ops,
+};
+
+/* Workqueue handler: drain one pending QMI message for this client. */
+static void remote_etm_rcv_msg(struct work_struct *work)
+{
+	struct remote_etm_drvdata *drvdata;
+
+	drvdata = container_of(work, struct remote_etm_drvdata, work_rcv_msg);
+
+	if (qmi_recv_msg(drvdata->handle) < 0)
+		dev_err(drvdata->dev, "%s: Error receiving QMI message\n",
+			__func__);
+}
+
+/*
+ * QMI notifier callback.  Runs in QMI context, so defer the actual
+ * message reception to the driver workqueue; all other events are
+ * ignored, matching the previous switch's empty default.
+ */
+static void remote_etm_notify(struct qmi_handle *handle,
+			      enum qmi_event_type event, void *notify_priv)
+{
+	struct remote_etm_drvdata *drvdata = notify_priv;
+
+	if (event == QMI_RECV_MSG)
+		queue_work(drvdata->wq, &drvdata->work_rcv_msg);
+}
+
+/* Delayed-work handler used to honour boot_enable once QMI settles. */
+static void remote_delay_enable_handler(struct work_struct *work)
+{
+	struct remote_etm_drvdata *drvdata;
+
+	drvdata = container_of(work, struct remote_etm_drvdata,
+			       work_delay_enable.work);
+	coresight_enable(drvdata->csdev);
+}
+
+/*
+ * remote_etm_svc_arrive - the coresight QMI service became available.
+ *
+ * Creates a client handle and connects it to the service for this
+ * instance.  If boot_enable has this instance's bit set (only inst_ids
+ * smaller than the bit width of int are representable in the mask),
+ * schedule a delayed enable so the service has time to settle.
+ */
+static void remote_etm_svc_arrive(struct work_struct *work)
+{
+	struct remote_etm_drvdata *drvdata = container_of(work,
+						  struct remote_etm_drvdata,
+						  work_svc_arrive);
+
+	drvdata->handle = qmi_handle_create(remote_etm_notify, drvdata);
+	if (!drvdata->handle) {
+		dev_err(drvdata->dev, "%s: QMI client handle alloc failed\n",
+			__func__);
+		return;
+	}
+
+	mutex_lock(&drvdata->mutex);
+	/* On connect failure the handle is destroyed and left NULL so the
+	 * enable/disable paths fall back to their "service unavailable"
+	 * handling.
+	 */
+	if (qmi_connect_to_service(drvdata->handle, CORESIGHT_QMI_SVC_ID,
+				   CORESIGHT_QMI_VERSION,
+				   drvdata->inst_id) < 0) {
+		dev_err(drvdata->dev,
+			"%s: Could not connect handle to service\n", __func__);
+		qmi_handle_destroy(drvdata->handle);
+		drvdata->handle = NULL;
+	}
+
+	if (drvdata->inst_id < sizeof(int)*BITS_PER_BYTE
+	    && (boot_enable & BIT(drvdata->inst_id))) {
+		if (!drvdata->enable)
+			schedule_delayed_work(&drvdata->work_delay_enable,
+					      msecs_to_jiffies(TIMEOUT_MS));
+	}
+	mutex_unlock(&drvdata->mutex);
+}
+
+/* Service went away: drop the QMI handle under the mutex. */
+static void remote_etm_svc_exit(struct work_struct *work)
+{
+	struct remote_etm_drvdata *drvdata;
+
+	drvdata = container_of(work, struct remote_etm_drvdata,
+			       work_svc_exit);
+
+	mutex_lock(&drvdata->mutex);
+	qmi_handle_destroy(drvdata->handle);
+	drvdata->handle = NULL;
+	mutex_unlock(&drvdata->mutex);
+}
+
+/*
+ * Service event notifier: queue arrival/exit handling onto the driver
+ * workqueue — the notifier callback itself must not block.
+ */
+static int remote_etm_svc_event_notify(struct notifier_block *this,
+				       unsigned long event,
+				       void *data)
+{
+	struct remote_etm_drvdata *drvdata = container_of(this,
+					struct remote_etm_drvdata, nb);
+
+	if (event == QMI_SERVER_ARRIVE)
+		queue_work(drvdata->wq, &drvdata->work_svc_arrive);
+	else if (event == QMI_SERVER_EXIT)
+		queue_work(drvdata->wq, &drvdata->work_svc_exit);
+
+	return 0;
+}
+
+/*
+ * remote_etm_probe - set up one remote (subsystem) ETM instance.
+ *
+ * Parses the per-instance QMI id from DT, creates a single-threaded
+ * workqueue for the QMI event work items, registers for service
+ * availability notifications and registers a coresight source whose
+ * enable/disable talk to the remote processor over QMI.
+ *
+ * Returns 0 on success or a negative errno; on failure the notifier
+ * registration and workqueue are unwound in reverse order.
+ */
+static int remote_etm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct remote_etm_drvdata *drvdata;
+	struct coresight_desc *desc;
+	int ret;
+	/* Each probed instance gets the next trace ID starting at 192. */
+	static int traceid = REMOTE_ETM_TRACE_ID_START;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	/* QMI instance id distinguishes the remote subsystems. */
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,inst-id",
+				   &drvdata->inst_id);
+	if (ret)
+		return ret;
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->nb.notifier_call = remote_etm_svc_event_notify;
+
+	/* NOTE(review): -ENOMEM would be the conventional errno here. */
+	drvdata->wq = create_singlethread_workqueue(dev_name(dev));
+	if (!drvdata->wq)
+		return -EFAULT;
+	INIT_WORK(&drvdata->work_svc_arrive, remote_etm_svc_arrive);
+	INIT_WORK(&drvdata->work_svc_exit, remote_etm_svc_exit);
+	INIT_WORK(&drvdata->work_rcv_msg, remote_etm_rcv_msg);
+	INIT_DELAYED_WORK(&drvdata->work_delay_enable,
+			  remote_delay_enable_handler);
+	ret = qmi_svc_event_notifier_register(CORESIGHT_QMI_SVC_ID,
+					      CORESIGHT_QMI_VERSION,
+					      drvdata->inst_id,
+					      &drvdata->nb);
+	if (ret < 0)
+		goto err0;
+
+	drvdata->traceid = traceid++;
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &remote_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		ret = PTR_ERR(drvdata->csdev);
+		goto err1;
+	}
+	dev_info(dev, "Remote ETM initialized\n");
+
+	/* boot_enable is a bit mask indexed by inst_id; see svc_arrive(). */
+	if (drvdata->inst_id >= sizeof(int)*BITS_PER_BYTE)
+		dev_err(dev, "inst_id greater than boot_enable bit mask\n");
+	else if (boot_enable & BIT(drvdata->inst_id))
+		coresight_enable(drvdata->csdev);
+
+	return 0;
+err1:
+	qmi_svc_event_notifier_unregister(CORESIGHT_QMI_SVC_ID,
+					  CORESIGHT_QMI_VERSION,
+					  drvdata->inst_id,
+					  &drvdata->nb);
+err0:
+	destroy_workqueue(drvdata->wq);
+	return ret;
+}
+
+/*
+ * remote_etm_remove - tear down a remote ETM instance.
+ *
+ * Unwinds everything probe set up.  The QMI service notifier and the
+ * workqueue were previously leaked on removal; unregister the notifier
+ * first so no new work can be queued, then flush and destroy the
+ * workqueue before dropping the coresight device.
+ */
+static int remote_etm_remove(struct platform_device *pdev)
+{
+	struct remote_etm_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	qmi_svc_event_notifier_unregister(CORESIGHT_QMI_SVC_ID,
+					  CORESIGHT_QMI_VERSION,
+					  drvdata->inst_id,
+					  &drvdata->nb);
+	destroy_workqueue(drvdata->wq);
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id remote_etm_match[] = {
+ {.compatible = "qcom,coresight-remote-etm"},
+ {}
+};
+
+static struct platform_driver remote_etm_driver = {
+ .probe = remote_etm_probe,
+ .remove = remote_etm_remove,
+ .driver = {
+ .name = "coresight-remote-etm",
+ .owner = THIS_MODULE,
+ .of_match_table = remote_etm_match,
+ },
+};
+
+/*
+ * Module init/exit.  These were previously non-static, needlessly
+ * exporting the symbols into the kernel's global namespace; nothing
+ * outside this file references them.
+ */
+static int __init remote_etm_init(void)
+{
+	return platform_driver_register(&remote_etm_driver);
+}
+module_init(remote_etm_init);
+
+static void __exit remote_etm_exit(void)
+{
+	platform_driver_unregister(&remote_etm_driver);
+}
+module_exit(remote_etm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Remote ETM driver");
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
new file mode 100644
index 000000000000..d85bcd8e121a
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -0,0 +1,912 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/of_address.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+#include <linux/coresight-stm.h>
+#include <asm/unaligned.h>
+
+#include "coresight-priv.h"
+
+#define stm_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define stm_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define STM_LOCK(drvdata) \
+do { \
+ mb(); /* ensure configuration take effect before we lock it */ \
+ stm_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define STM_UNLOCK(drvdata) \
+do { \
+ stm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock take effect before we configure */ \
+} while (0)
+
+#define STMDMASTARTR (0xC04)
+#define STMDMASTOPR (0xC08)
+#define STMDMASTATR (0xC0C)
+#define STMDMACTLR (0xC10)
+#define STMDMAIDR (0xCFC)
+#define STMHEER (0xD00)
+#define STMHETER (0xD20)
+#define STMHEMCR (0xD64)
+#define STMHEMASTR (0xDF4)
+#define STMHEFEAT1R (0xDF8)
+#define STMHEIDR (0xDFC)
+#define STMSPER (0xE00)
+#define STMSPTER (0xE20)
+#define STMSPSCR (0xE60)
+#define STMSPMSCR (0xE64)
+#define STMSPOVERRIDER (0xE68)
+#define STMSPMOVERRIDER (0xE6C)
+#define STMSPTRIGCSR (0xE70)
+#define STMTCSR (0xE80)
+#define STMTSSTIMR (0xE84)
+#define STMTSFREQR (0xE8C)
+#define STMSYNCR (0xE90)
+#define STMAUXCR (0xE94)
+#define STMSPFEAT1R (0xEA0)
+#define STMSPFEAT2R (0xEA4)
+#define STMSPFEAT3R (0xEA8)
+#define STMITTRIGGER (0xEE8)
+#define STMITATBDATA0 (0xEEC)
+#define STMITATBCTR2 (0xEF0)
+#define STMITATBID (0xEF4)
+#define STMITATBCTR0 (0xEF8)
+
+#define NR_STM_CHANNEL (32)
+#define BYTES_PER_CHANNEL (256)
+#define STM_TRACE_BUF_SIZE (4096)
+#define STM_USERSPACE_HEADER_SIZE (8)
+#define STM_USERSPACE_MAGIC1_VAL (0xf0)
+#define STM_USERSPACE_MAGIC2_VAL (0xf1)
+
+#define OST_TOKEN_STARTSIMPLE (0x10)
+#define OST_TOKEN_STARTBASE (0x30)
+#define OST_VERSION_PROP (1)
+#define OST_VERSION_MIPI1 (16)
+
+#define STM_MAKE_VERSION(ma, mi) ((ma << 8) | mi)
+#define STM_HEADER_MAGIC (0x5953)
+
+enum stm_pkt_type {
+ STM_PKT_TYPE_DATA = 0x98,
+ STM_PKT_TYPE_FLAG = 0xE8,
+ STM_PKT_TYPE_TRIG = 0xF8,
+};
+
+enum {
+ STM_OPTION_MARKED = 0x10,
+};
+
+#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \
+ (ch * BYTES_PER_CHANNEL))
+#define stm_channel_off(type, opts) (type & ~opts)
+
+#ifdef CONFIG_CORESIGHT_STM_DEFAULT_ENABLE
+static int boot_enable = 1;
+#else
+static int boot_enable;
+#endif
+
+module_param_named(
+ boot_enable, boot_enable, int, S_IRUGO
+);
+
+static int boot_nr_channel;
+
+module_param_named(
+ boot_nr_channel, boot_nr_channel, int, S_IRUGO
+);
+
+struct channel_space {
+ void __iomem *base;
+ unsigned long *bitmap;
+};
+
+struct stm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct miscdevice miscdev;
+ spinlock_t spinlock;
+ struct channel_space chs;
+ bool enable;
+ DECLARE_BITMAP(entities, OST_ENTITY_MAX);
+ bool data_barrier;
+ uint32_t ch_alloc_fail_count;
+};
+
+static struct stm_drvdata *stmdrvdata;
+
+static inline void stm_data_writeb(uint8_t val, void *addr)
+{
+ __raw_writeb_no_log(val, addr);
+ if (stmdrvdata->data_barrier)
+ /* Helps avoid large number of outstanding writes */
+ mb();
+}
+
+static inline void stm_data_writew(uint16_t val, void *addr)
+{
+ __raw_writew_no_log(val, addr);
+ if (stmdrvdata->data_barrier)
+ /* Helps avoid large number of outstanding writes */
+ mb();
+}
+
+static inline void stm_data_writel(uint32_t val, void *addr)
+{
+ __raw_writel_no_log(val, addr);
+ if (stmdrvdata->data_barrier)
+ /* Helps avoid large number of outstanding writes */
+ mb();
+}
+
+/*
+ * Return 1 if STM hardware-event tracing is currently active (STM
+ * enabled, HW-event master control bit set and at least one event
+ * enabled in STMHEER), 0 otherwise.
+ */
+static int stm_hwevent_isenable(struct stm_drvdata *drvdata)
+{
+	int enabled = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	if (drvdata->enable && BVAL(stm_readl(drvdata, STMHEMCR), 0))
+		enabled = stm_readl(drvdata, STMHEER) != 0;
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+	return enabled;
+}
+
+static void __stm_hwevent_enable(struct stm_drvdata *drvdata)
+{
+ STM_UNLOCK(drvdata);
+
+ /* Program STMHETER to ensure TRIGOUTHETE (fed to CTI) is asserted
+ for HW events.
+ */
+ stm_writel(drvdata, 0xFFFFFFFF, STMHETER);
+ stm_writel(drvdata, 0xFFFFFFFF, STMHEER);
+ stm_writel(drvdata, 0x5, STMHEMCR);
+
+ STM_LOCK(drvdata);
+}
+
+static int stm_hwevent_enable(struct stm_drvdata *drvdata)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable)
+ __stm_hwevent_enable(drvdata);
+ else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+static int stm_port_isenable(struct stm_drvdata *drvdata)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable)
+ ret = stm_readl(drvdata, STMSPER) == 0 ? 0 : 1;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+static void __stm_port_enable(struct stm_drvdata *drvdata)
+{
+ STM_UNLOCK(drvdata);
+
+ stm_writel(drvdata, 0x10, STMSPTRIGCSR);
+ stm_writel(drvdata, 0xFFFFFFFF, STMSPER);
+
+ STM_LOCK(drvdata);
+}
+
+static int stm_port_enable(struct stm_drvdata *drvdata)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable)
+ __stm_port_enable(drvdata);
+ else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+static void __stm_enable(struct stm_drvdata *drvdata)
+{
+ __stm_hwevent_enable(drvdata);
+ __stm_port_enable(drvdata);
+
+ STM_UNLOCK(drvdata);
+
+ stm_writel(drvdata, 0xFFF, STMSYNCR);
+ /* SYNCEN is read-only and HWTEN is not implemented */
+ stm_writel(drvdata, 0x100003, STMTCSR);
+
+ STM_LOCK(drvdata);
+}
+
+static int stm_enable(struct coresight_device *csdev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+ unsigned long flags;
+
+ ret = pm_runtime_get_sync(drvdata->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __stm_enable(drvdata);
+ drvdata->enable = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "STM tracing enabled\n");
+ return 0;
+}
+
+static void __stm_hwevent_disable(struct stm_drvdata *drvdata)
+{
+ STM_UNLOCK(drvdata);
+
+ stm_writel(drvdata, 0x0, STMHEMCR);
+ stm_writel(drvdata, 0x0, STMHEER);
+ stm_writel(drvdata, 0x0, STMHETER);
+
+ STM_LOCK(drvdata);
+}
+
+static void stm_hwevent_disable(struct stm_drvdata *drvdata)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable)
+ __stm_hwevent_disable(drvdata);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
+static void __stm_port_disable(struct stm_drvdata *drvdata)
+{
+ STM_UNLOCK(drvdata);
+
+ stm_writel(drvdata, 0x0, STMSPER);
+ stm_writel(drvdata, 0x0, STMSPTRIGCSR);
+
+ STM_LOCK(drvdata);
+}
+
+static void stm_port_disable(struct stm_drvdata *drvdata)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable)
+ __stm_port_disable(drvdata);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
+static void __stm_disable(struct stm_drvdata *drvdata)
+{
+ STM_UNLOCK(drvdata);
+
+ stm_writel(drvdata, 0x100000, STMTCSR);
+
+ STM_LOCK(drvdata);
+
+ __stm_hwevent_disable(drvdata);
+ __stm_port_disable(drvdata);
+}
+
+static void stm_disable(struct coresight_device *csdev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __stm_disable(drvdata);
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /* Wait for 100ms so that pending data has been written to HW */
+ msleep(100);
+
+ pm_runtime_put(drvdata->dev);
+
+ dev_info(drvdata->dev, "STM tracing disabled\n");
+}
+
+static int stm_trace_id(struct coresight_device *csdev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ int trace_id = -1;
+
+ if (pm_runtime_get_sync(drvdata->dev) < 0)
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ trace_id = BMVAL(stm_readl(drvdata, STMTCSR), 16, 22);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+out:
+ return trace_id;
+}
+
+static const struct coresight_ops_source stm_source_ops = {
+ .trace_id = stm_trace_id,
+ .enable = stm_enable,
+ .disable = stm_disable,
+};
+
+static const struct coresight_ops stm_cs_ops = {
+ .source_ops = &stm_source_ops,
+};
+
+/*
+ * stm_channel_alloc - claim a free stimulus channel for the current CPU.
+ *
+ * Channels are statically partitioned between present CPUs; each CPU
+ * allocates only from its own slice to minimise contention.  Returns
+ * the channel number, or NR_STM_CHANNEL if the slice is exhausted.
+ *
+ * Two fixes versus the previous version:
+ *  - find_next_zero_bit() + set_bit() was not atomic, so two contexts
+ *    on the same CPU (e.g. task vs. interrupt) could claim the same
+ *    channel; retry with test_and_set_bit() instead.
+ *  - ch == off + num_ch_per_cpu belongs to the next CPU's slice; the
+ *    old '>' comparison was off by one and allowed it.
+ */
+static uint32_t stm_channel_alloc(void)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+	uint32_t off, ch, num_ch_per_cpu;
+	int cpu;
+
+	num_ch_per_cpu = NR_STM_CHANNEL/num_present_cpus();
+
+	cpu = get_cpu();
+	off = num_ch_per_cpu * cpu;
+
+	do {
+		ch = find_next_zero_bit(drvdata->chs.bitmap,
+					NR_STM_CHANNEL, off);
+		if (ch >= (off + num_ch_per_cpu)) {
+			put_cpu();
+			return NR_STM_CHANNEL;
+		}
+	} while (test_and_set_bit(ch, drvdata->chs.bitmap));
+
+	put_cpu();
+
+	return ch;
+}
+
+/* Release a stimulus channel claimed via stm_channel_alloc(). */
+static void stm_channel_free(uint32_t ch)
+{
+	clear_bit(ch, stmdrvdata->chs.bitmap);
+}
+
+/*
+ * stm_send - write a payload to an STM stimulus port address.
+ *
+ * Writes are issued at the widest naturally-aligned access available:
+ * a lead-in byte and/or halfword to reach 32-bit alignment, 32-bit
+ * words in the middle, then a trailing halfword/byte.  Presumably the
+ * stimulus port derives packet size from the access width, hence the
+ * alignment dance — TODO confirm against the STM TRM.
+ *
+ * Returns the number of bytes written, which always equals @size.
+ */
+static int stm_send(void *addr, const void *data, uint32_t size)
+{
+	uint32_t len = size;
+
+	/* Lead-in: get the source pointer to 2-byte, then 4-byte alignment. */
+	if (((unsigned long)data & 0x1) && (size >= 1)) {
+		stm_data_writeb(*(uint8_t *)data, addr);
+		data++;
+		size--;
+	}
+	if (((unsigned long)data & 0x2) && (size >= 2)) {
+		stm_data_writew(*(uint16_t *)data, addr);
+		data += 2;
+		size -= 2;
+	}
+
+	/* now we are 32bit aligned */
+	while (size >= 4) {
+		stm_data_writel(*(uint32_t *)data, addr);
+		data += 4;
+		size -= 4;
+	}
+
+	/* Trailing remainder: at most one halfword and one byte. */
+	if (size >= 2) {
+		stm_data_writew(*(uint16_t *)data, addr);
+		data += 2;
+		size -= 2;
+	}
+	if (size >= 1) {
+		stm_data_writeb(*(uint8_t *)data, addr);
+		data++;
+		size--;
+	}
+
+	return len;
+}
+
+static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
+ uint8_t entity_id, uint8_t proto_id)
+{
+ void *addr;
+ uint32_t header;
+ char *hdr;
+
+ hdr = (char *)&header;
+
+ hdr[0] = OST_TOKEN_STARTSIMPLE;
+ hdr[1] = OST_VERSION_MIPI1;
+ hdr[2] = entity_id;
+ hdr[3] = proto_id;
+
+ /* header is expected to be D32M type */
+ options |= STM_OPTION_MARKED;
+ options &= ~STM_OPTION_TIMESTAMPED;
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+ return stm_send(addr, &header, sizeof(header));
+}
+
+static int stm_trace_data_header(void *addr)
+{
+ char hdr[16];
+ int len = 0;
+
+ *(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 1);
+ *(uint16_t *)(hdr + 2) = STM_HEADER_MAGIC;
+ *(uint32_t *)(hdr + 4) = raw_smp_processor_id();
+ *(uint64_t *)(hdr + 8) = sched_clock();
+
+ len += stm_send(addr, hdr, sizeof(hdr));
+ len += stm_send(addr, current->comm, TASK_COMM_LEN);
+
+ return len;
+}
+
+static int stm_trace_data(unsigned long ch_addr, uint32_t options,
+ const void *data, uint32_t size)
+{
+ void *addr;
+ int len = 0;
+
+ options &= ~STM_OPTION_TIMESTAMPED;
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+ /* send the data header */
+ len += stm_trace_data_header(addr);
+ /* send the actual data */
+ len += stm_send(addr, data, size);
+
+ return len;
+}
+
+static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t options)
+{
+ void *addr;
+ uint32_t tail = 0x0;
+
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
+
+ return stm_send(addr, &tail, sizeof(tail));
+}
+
+static inline int __stm_trace(uint32_t options, uint8_t entity_id,
+ uint8_t proto_id, const void *data, uint32_t size)
+{
+ struct stm_drvdata *drvdata = stmdrvdata;
+ int len = 0;
+ uint32_t ch;
+ unsigned long ch_addr;
+
+ /* allocate channel and get the channel address */
+ ch = stm_channel_alloc();
+ if (ch >= NR_STM_CHANNEL) {
+ drvdata->ch_alloc_fail_count++;
+ dev_err_ratelimited(drvdata->dev,
+ "Channel allocation failed %d",
+ drvdata->ch_alloc_fail_count);
+ return 0;
+ }
+ ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
+
+ /* send the ost header */
+ len += stm_trace_ost_header(ch_addr, options, entity_id,
+ proto_id);
+
+ /* send the payload data */
+ len += stm_trace_data(ch_addr, options, data, size);
+
+ /* send the ost tail */
+ len += stm_trace_ost_tail(ch_addr, options);
+
+ /* we are done, free the channel */
+ stm_channel_free(ch);
+
+ return len;
+}
+
+/**
+ * stm_trace - send binary or string data over STM as an OST packet
+ * @options: tracing options - guaranteed, timestamped, etc
+ * @entity_id: entity representing the trace data
+ * @proto_id: protocol id to distinguish between different binary formats
+ * @data: pointer to binary or string data buffer
+ * @size: size of data to send
+ *
+ * Packetizes the data as the payload to an OST packet and sends it over
+ * STM.  Silently drops the data (returning 0) when the STM is not
+ * enabled, the entity is filtered out, or the size is out of range.
+ *
+ * CONTEXT:
+ * Can be called from any context.
+ *
+ * RETURNS:
+ * number of bytes transferred over STM
+ */
+int stm_trace(uint32_t options, uint8_t entity_id, uint8_t proto_id,
+	      const void *data, uint32_t size)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+
+	if (!drvdata || !drvdata->enable)
+		return 0;
+	if (!test_bit(entity_id, drvdata->entities))
+		return 0;
+	/* we don't support sizes more than 24bits (0 to 23) */
+	if (!size || size >= 0x1000000)
+		return 0;
+
+	return __stm_trace(options, entity_id, proto_id, data, size);
+}
+EXPORT_SYMBOL(stm_trace);
+
+static ssize_t stm_write(struct file *file, const char __user *data,
+ size_t size, loff_t *ppos)
+{
+ struct stm_drvdata *drvdata = container_of(file->private_data,
+ struct stm_drvdata, miscdev);
+ char *buf;
+ uint8_t entity_id, proto_id;
+ uint32_t options;
+
+ if (!drvdata->enable || !size)
+ return -EINVAL;
+
+ if (size > STM_TRACE_BUF_SIZE)
+ size = STM_TRACE_BUF_SIZE;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, data, size)) {
+ kfree(buf);
+ dev_dbg(drvdata->dev, "%s: copy_from_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ if (size >= STM_USERSPACE_HEADER_SIZE &&
+ buf[0] == STM_USERSPACE_MAGIC1_VAL &&
+ buf[1] == STM_USERSPACE_MAGIC2_VAL) {
+
+ entity_id = buf[2];
+ proto_id = buf[3];
+ options = *(uint32_t *)(buf + 4);
+
+ if (!test_bit(entity_id, drvdata->entities) ||
+ !(size - STM_USERSPACE_HEADER_SIZE)) {
+ kfree(buf);
+ return size;
+ }
+
+ __stm_trace(options, entity_id, proto_id,
+ buf + STM_USERSPACE_HEADER_SIZE,
+ size - STM_USERSPACE_HEADER_SIZE);
+ } else {
+ if (!test_bit(OST_ENTITY_DEV_NODE, drvdata->entities)) {
+ kfree(buf);
+ return size;
+ }
+
+ __stm_trace(STM_OPTION_TIMESTAMPED, OST_ENTITY_DEV_NODE, 0,
+ buf, size);
+ }
+
+ kfree(buf);
+
+ return size;
+}
+
+static const struct file_operations stm_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .write = stm_write,
+ .llseek = no_llseek,
+};
+
+static ssize_t stm_show_hwevent_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = stm_hwevent_isenable(drvdata);
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t stm_store_hwevent_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ if (val)
+ ret = stm_hwevent_enable(drvdata);
+ else
+ stm_hwevent_disable(drvdata);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(hwevent_enable, S_IRUGO | S_IWUSR, stm_show_hwevent_enable,
+ stm_store_hwevent_enable);
+
+static ssize_t stm_show_port_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = stm_port_isenable(drvdata);
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t stm_store_port_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ if (val)
+ ret = stm_port_enable(drvdata);
+ else
+ stm_port_disable(drvdata);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(port_enable, S_IRUGO | S_IWUSR, stm_show_port_enable,
+ stm_store_port_enable);
+
+/*
+ * sysfs 'entities' show: print the entity filter bitmap.
+ *
+ * The "%*pb\n" format already terminates the line; the previous code
+ * appended a second "\n", producing a stray blank line in the sysfs
+ * output (and an unnecessary -EINVAL path).
+ */
+static ssize_t stm_show_entities(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%*pb\n",
+			 OST_ENTITY_MAX, drvdata->entities);
+}
+
+static ssize_t stm_store_entities(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val1, val2;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ if (val1 >= OST_ENTITY_MAX)
+ return -EINVAL;
+
+ if (val2)
+ __set_bit(val1, drvdata->entities);
+ else
+ __clear_bit(val1, drvdata->entities);
+
+ return size;
+}
+static DEVICE_ATTR(entities, S_IRUGO | S_IWUSR, stm_show_entities,
+ stm_store_entities);
+
+static struct attribute *stm_attrs[] = {
+ &dev_attr_hwevent_enable.attr,
+ &dev_attr_port_enable.attr,
+ &dev_attr_entities.attr,
+ NULL,
+};
+
+static struct attribute_group stm_attr_grp = {
+ .attrs = stm_attrs,
+};
+
+static const struct attribute_group *stm_attr_grps[] = {
+ &stm_attr_grp,
+ NULL,
+};
+
+/*
+ * stm_probe - set up an STM instance discovered on the AMBA bus.
+ *
+ * Maps the APB register space and the stimulus-port space (second DT
+ * "reg" entry), allocates the channel bitmap, registers the coresight
+ * source plus its misc /dev node, and optionally enables tracing at
+ * boot.
+ *
+ * Fixes relative to the previous version:
+ *  - devm_ioremap_resource() returns an ERR_PTR(), never NULL, so the
+ *    old '!drvdata->base' check could never detect a mapping failure;
+ *  - the channel bitmap size divided the channel count by sizeof(long)
+ *    (bytes) instead of bits-per-long, under-allocating the bitmap;
+ *    use BITS_TO_LONGS() for the byte count.
+ */
+static int stm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	int ret;
+	struct device *dev = &adev->dev;
+	struct coresight_platform_data *pdata;
+	struct stm_drvdata *drvdata;
+	struct resource res;
+	size_t res_size, bitmap_size;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	adev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &adev->dev;
+	dev_set_drvdata(dev, drvdata);
+
+	/* Validity for the resource is already checked by the AMBA core */
+	drvdata->base = devm_ioremap_resource(dev, &adev->res);
+	if (IS_ERR(drvdata->base))
+		return PTR_ERR(drvdata->base);
+
+	/* Second DT register range: the stimulus port (channel) space. */
+	ret = of_address_to_resource(adev->dev.of_node, 1, &res);
+	if (ret)
+		return -ENODEV;
+
+	if (boot_nr_channel) {
+		res_size = min((resource_size_t)(boot_nr_channel *
+				  BYTES_PER_CHANNEL), resource_size(&res));
+		bitmap_size = BITS_TO_LONGS(boot_nr_channel) * sizeof(long);
+	} else {
+		res_size = min((resource_size_t)(NR_STM_CHANNEL *
+				 BYTES_PER_CHANNEL), resource_size(&res));
+		bitmap_size = BITS_TO_LONGS(NR_STM_CHANNEL) * sizeof(long);
+	}
+	drvdata->chs.base = devm_ioremap(dev, res.start, res_size);
+	if (!drvdata->chs.base)
+		return -ENOMEM;
+
+	drvdata->chs.bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
+	if (!drvdata->chs.bitmap)
+		return -ENOMEM;
+
+	spin_lock_init(&drvdata->spinlock);
+
+	ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	/* Without debug authentication nothing can be traced: bail out. */
+	if (!coresight_authstatus_enabled(drvdata->base))
+		return -EPERM;
+
+	pm_runtime_put(&adev->dev);
+
+	/* All entities are traced by default; sysfs can trim the set. */
+	bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
+
+	drvdata->data_barrier = of_property_read_bool(adev->dev.of_node,
+						      "qcom,data-barrier");
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
+	desc->ops = &stm_cs_ops;
+	desc->pdata = adev->dev.platform_data;
+	desc->dev = &adev->dev;
+	desc->groups = stm_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	drvdata->miscdev.name = ((struct coresight_platform_data *)
+				 (adev->dev.platform_data))->name;
+	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+	drvdata->miscdev.fops = &stm_fops;
+	ret = misc_register(&drvdata->miscdev);
+	if (ret)
+		goto err;
+
+	dev_info(drvdata->dev, "STM initialized\n");
+
+	if (boot_enable)
+		coresight_enable(drvdata->csdev);
+
+	/* Store the driver data pointer for use in exported functions */
+	stmdrvdata = drvdata;
+	return 0;
+err:
+	coresight_unregister(drvdata->csdev);
+	return ret;
+}
+
+static int stm_remove(struct amba_device *adev)
+{
+ struct stm_drvdata *drvdata = amba_get_drvdata(adev);
+
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct amba_id stm_ids[] = {
+ {
+ .id = 0x0003b962,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver stm_driver = {
+ .drv = {
+ .name = "coresight-stm",
+ .owner = THIS_MODULE,
+ },
+ .probe = stm_probe,
+ .remove = stm_remove,
+ .id_table = stm_ids,
+};
+
+static int __init stm_init(void)
+{
+ return amba_driver_register(&stm_driver);
+}
+module_init(stm_init);
+
+static void __exit stm_exit(void)
+{
+ amba_driver_unregister(&stm_driver);
+}
+module_exit(stm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight System Trace Macrocell driver");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index a57c7ec1661f..691c7bb3afac 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,8 +25,15 @@
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
#include <linux/amba/bus.h>
+#include <asm/cacheflush.h>
+#include <linux/msm-sps.h>
+#include <linux/usb_bam.h>
+#include <linux/usb/usb_qdss.h>
+#include <soc/qcom/memory_dump.h>
#include "coresight-priv.h"
@@ -79,6 +86,19 @@
#define TMC_STS_TRIGGERED_BIT 2
#define TMC_FFCR_FLUSHMAN_BIT 6
+#define TMC_ETR_SG_ENT_TO_BLK(phys_pte) (((phys_addr_t)phys_pte >> 4) \
+ << PAGE_SHIFT)
+#define TMC_ETR_SG_ENT(phys_pte) (((phys_pte >> PAGE_SHIFT) << 4) | 0x2)
+#define TMC_ETR_SG_NXT_TBL(phys_pte) (((phys_pte >> PAGE_SHIFT) << 4) | 0x3)
+#define TMC_ETR_SG_LST_ENT(phys_pte) (((phys_pte >> PAGE_SHIFT) << 4) | 0x1)
+
+#define TMC_ETR_BAM_PIPE_INDEX 0
+#define TMC_ETR_BAM_NR_PIPES 2
+
+#define TMC_ETFETB_DUMP_MAGIC_V2 (0x42445953)
+#define TMC_REG_DUMP_MAGIC_V2 (0x42445953)
+#define TMC_REG_DUMP_VER (1)
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -98,6 +118,41 @@ enum tmc_mem_intf_width {
TMC_MEM_INTF_WIDTH_256BITS = 0x5,
};
+enum tmc_etr_mem_type {
+ TMC_ETR_MEM_TYPE_CONTIG,
+ TMC_ETR_MEM_TYPE_SG,
+};
+
+static const char * const str_tmc_etr_mem_type[] = {
+ [TMC_ETR_MEM_TYPE_CONTIG] = "contig",
+ [TMC_ETR_MEM_TYPE_SG] = "sg",
+};
+
+enum tmc_etr_out_mode {
+ TMC_ETR_OUT_MODE_NONE,
+ TMC_ETR_OUT_MODE_MEM,
+ TMC_ETR_OUT_MODE_USB,
+};
+
+static const char * const str_tmc_etr_out_mode[] = {
+ [TMC_ETR_OUT_MODE_NONE] = "none",
+ [TMC_ETR_OUT_MODE_MEM] = "mem",
+ [TMC_ETR_OUT_MODE_USB] = "usb",
+};
+
+struct tmc_etr_bam_data {
+ struct sps_bam_props props;
+ unsigned long handle;
+ struct sps_pipe *pipe;
+ struct sps_connect connect;
+ uint32_t src_pipe_idx;
+ unsigned long dest;
+ uint32_t dest_pipe_idx;
+ struct sps_mem_buffer desc_fifo;
+ struct sps_mem_buffer data_fifo;
+ bool enable;
+};
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -113,6 +168,8 @@ enum tmc_mem_intf_width {
* @enable: this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @trigger_cntr: amount of words to store after a trigger.
+ * @reg_data: MSM memory dump data to store TMC registers.
+ * @buf_data: MSM memory dump data to store ETF/ETB buffer.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -122,15 +179,36 @@ struct tmc_drvdata {
spinlock_t spinlock;
int read_count;
bool reading;
+ bool aborting;
char *buf;
dma_addr_t paddr;
void __iomem *vaddr;
u32 size;
+ struct mutex mem_lock;
+ u32 mem_size;
bool enable;
+ bool sticky_enable;
enum tmc_config_type config_type;
u32 trigger_cntr;
+ enum tmc_etr_mem_type mem_type;
+ enum tmc_etr_mem_type memtype;
+ u32 delta_bottom;
+ int sg_blk_num;
+ enum tmc_etr_out_mode out_mode;
+ struct usb_qdss_ch *usbch;
+ struct tmc_etr_bam_data *bamdata;
+ bool enable_to_bam;
+ struct msm_dump_data reg_data;
+ struct msm_dump_data buf_data;
+ struct coresight_cti *cti_flush;
+ struct coresight_cti *cti_reset;
+ char *reg_buf;
+ bool force_reg_dump;
+ bool dump_reg;
};
+static void __tmc_reg_dump(struct tmc_drvdata *drvdata);
+
static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
{
/* Ensure formatter, unformatter and hardware fifo are empty */
@@ -191,12 +269,279 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static void tmc_etr_sg_tbl_free(uint32_t *vaddr, uint32_t size, uint32_t ents)
+{
+ uint32_t i = 0, pte_n = 0, last_pte;
+ uint32_t *virt_st_tbl, *virt_pte;
+ void *virt_blk;
+ phys_addr_t phys_pte;
+ int total_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ virt_st_tbl = vaddr;
+
+ while (i < total_ents) {
+ last_pte = ((i + ents_per_blk) > total_ents) ?
+ total_ents : (i + ents_per_blk);
+ while (i < last_pte) {
+ virt_pte = virt_st_tbl + pte_n;
+
+ /* Do not go beyond number of entries allocated */
+ if (i == ents) {
+ free_page((unsigned long)virt_st_tbl);
+ return;
+ }
+
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+ virt_blk = phys_to_virt(phys_pte);
+
+ if ((last_pte - i) > 1) {
+ free_page((unsigned long)virt_blk);
+ pte_n++;
+ } else if (last_pte == total_ents) {
+ free_page((unsigned long)virt_blk);
+ free_page((unsigned long)virt_st_tbl);
+ } else {
+ free_page((unsigned long)virt_st_tbl);
+ virt_st_tbl = (uint32_t *)virt_blk;
+ pte_n = 0;
+ break;
+ }
+ i++;
+ }
+ }
+}
+
+static void tmc_etr_sg_tbl_flush(uint32_t *vaddr, uint32_t size)
+{
+ uint32_t i = 0, pte_n = 0, last_pte;
+ uint32_t *virt_st_tbl, *virt_pte;
+ void *virt_blk;
+ phys_addr_t phys_pte;
+ int total_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ virt_st_tbl = vaddr;
+ dmac_flush_range((void *)virt_st_tbl, (void *)virt_st_tbl + PAGE_SIZE);
+
+ while (i < total_ents) {
+ last_pte = ((i + ents_per_blk) > total_ents) ?
+ total_ents : (i + ents_per_blk);
+ while (i < last_pte) {
+ virt_pte = virt_st_tbl + pte_n;
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+ virt_blk = phys_to_virt(phys_pte);
+
+ dmac_flush_range(virt_blk, virt_blk + PAGE_SIZE);
+
+ if ((last_pte - i) > 1) {
+ pte_n++;
+ } else if (last_pte != total_ents) {
+ virt_st_tbl = (uint32_t *)virt_blk;
+ pte_n = 0;
+ break;
+ }
+ i++;
+ }
+ }
+}
+
+/*
+ * Scatter gather table layout in memory:
+ * 1. Table contains 32-bit entries
+ * 2. Each entry in the table points to 4K block of memory
+ * 3. Last entry in the table points to next table
+ * 4. (*) Based on mem_size requested, if there is no need for next level of
+ * table, last entry in the table points directly to 4K block of memory.
+ *
+ * sg_tbl_num=0
+ * |---------------|<-- drvdata->vaddr
+ * | blk_num=0 |
+ * |---------------|
+ * | blk_num=1 |
+ * |---------------|
+ * | blk_num=2 |
+ * |---------------| sg_tbl_num=1
+ * |(*)Nxt Tbl Addr|------>|---------------|
+ * |---------------| | blk_num=3 |
+ * |---------------|
+ * | blk_num=4 |
+ * |---------------|
+ * | blk_num=5 |
+ * |---------------| sg_tbl_num=2
+ * |(*)Nxt Tbl Addr|------>|---------------|
+ * |---------------| | blk_num=6 |
+ * |---------------|
+ * | blk_num=7 |
+ * |---------------|
+ * | blk_num=8 |
+ * |---------------|
+ * | |End of
+ * |---------------|-----
+ * Table
+ * For simplicity above diagram assumes following:
+ * a. mem_size = 36KB --> total_ents = 9
+ * b. ents_per_blk = 4
+ */
+
+static int tmc_etr_sg_tbl_alloc(struct tmc_drvdata *drvdata)
+{
+ int ret;
+ uint32_t i = 0, last_pte;
+ uint32_t *virt_pgdir, *virt_st_tbl;
+ void *virt_pte;
+ int total_ents = DIV_ROUND_UP(drvdata->size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ virt_pgdir = (uint32_t *)get_zeroed_page(GFP_KERNEL);
+ if (!virt_pgdir)
+ return -ENOMEM;
+
+ virt_st_tbl = virt_pgdir;
+
+ while (i < total_ents) {
+ last_pte = ((i + ents_per_blk) > total_ents) ?
+ total_ents : (i + ents_per_blk);
+ while (i < last_pte) {
+ virt_pte = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!virt_pte) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if ((last_pte - i) > 1) {
+ *virt_st_tbl =
+ TMC_ETR_SG_ENT(virt_to_phys(virt_pte));
+ virt_st_tbl++;
+ } else if (last_pte == total_ents) {
+ *virt_st_tbl =
+ TMC_ETR_SG_LST_ENT(virt_to_phys(virt_pte));
+ } else {
+ *virt_st_tbl =
+ TMC_ETR_SG_NXT_TBL(virt_to_phys(virt_pte));
+ virt_st_tbl = (uint32_t *)virt_pte;
+ break;
+ }
+ i++;
+ }
+ }
+
+ drvdata->vaddr = virt_pgdir;
+ drvdata->paddr = virt_to_phys(virt_pgdir);
+
+ /* Flush the dcache before proceeding */
+ tmc_etr_sg_tbl_flush((uint32_t *)drvdata->vaddr, drvdata->size);
+
+ dev_dbg(drvdata->dev, "%s: table starts at %#lx, total entries %d\n",
+ __func__, (unsigned long)drvdata->paddr, total_ents);
+
+ return 0;
+err:
+ tmc_etr_sg_tbl_free(virt_pgdir, drvdata->size, i);
+ return ret;
+}
+
+static void tmc_etr_sg_mem_reset(uint32_t *vaddr, uint32_t size)
+{
+ uint32_t i = 0, pte_n = 0, last_pte;
+ uint32_t *virt_st_tbl, *virt_pte;
+ void *virt_blk;
+ phys_addr_t phys_pte;
+ int total_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ virt_st_tbl = vaddr;
+
+ while (i < total_ents) {
+ last_pte = ((i + ents_per_blk) > total_ents) ?
+ total_ents : (i + ents_per_blk);
+ while (i < last_pte) {
+ virt_pte = virt_st_tbl + pte_n;
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+ virt_blk = phys_to_virt(phys_pte);
+
+ if ((last_pte - i) > 1) {
+ memset(virt_blk, 0, PAGE_SIZE);
+ pte_n++;
+ } else if (last_pte == total_ents) {
+ memset(virt_blk, 0, PAGE_SIZE);
+ } else {
+ virt_st_tbl = (uint32_t *)virt_blk;
+ pte_n = 0;
+ break;
+ }
+ i++;
+ }
+ }
+
+ /* Flush the dcache before proceeding */
+ tmc_etr_sg_tbl_flush(vaddr, size);
+}
+
+static int tmc_etr_alloc_mem(struct tmc_drvdata *drvdata)
+{
+ int ret;
+
+ if (!drvdata->vaddr) {
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG) {
+ drvdata->vaddr = dma_zalloc_coherent(drvdata->dev,
+ drvdata->size,
+ &drvdata->paddr,
+ GFP_KERNEL);
+ if (!drvdata->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ } else {
+ ret = tmc_etr_sg_tbl_alloc(drvdata);
+ if (ret)
+ goto err;
+ }
+ }
+ /*
+ * Need to reinitialize buf for each tmc enable session since it is
+ * getting modified during tmc etr dump.
+ */
+ drvdata->buf = drvdata->vaddr;
+ return 0;
+err:
+ dev_err(drvdata->dev, "etr ddr memory allocation failed\n");
+ return ret;
+}
+
+static void tmc_etr_free_mem(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->vaddr) {
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
+ dma_free_coherent(drvdata->dev, drvdata->size,
+ drvdata->vaddr, drvdata->paddr);
+ else
+ tmc_etr_sg_tbl_free((uint32_t *)drvdata->vaddr,
+ drvdata->size,
+ DIV_ROUND_UP(drvdata->size, PAGE_SIZE));
+
+ drvdata->vaddr = 0;
+ drvdata->paddr = 0;
+ }
+}
+
+static void tmc_etr_mem_reset(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->vaddr) {
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
+ memset(drvdata->vaddr, 0, drvdata->size);
+ else
+ tmc_etr_sg_mem_reset((uint32_t *)drvdata->vaddr,
+ drvdata->size);
+ }
+}
+
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl;
/* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
+ tmc_etr_mem_reset(drvdata);
CS_UNLOCK(drvdata->base);
@@ -206,7 +551,10 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
axictl |= TMC_AXICTL_WR_BURST_LEN;
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
+ axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+ else
+ axictl |= TMC_AXICTL_SCT_GAT_MODE;
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
axictl = (axictl &
~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
@@ -214,7 +562,8 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
- writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+ writel_relaxed(((u64)drvdata->paddr >> 32) & 0xFF,
+ drvdata->base + TMC_DBAHI);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
@@ -238,23 +587,244 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static void tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata)
+{
+ struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+ get_qdss_bam_connection_info(&bamdata->dest,
+ &bamdata->dest_pipe_idx,
+ &bamdata->src_pipe_idx,
+ &bamdata->desc_fifo,
+ &bamdata->data_fifo,
+ NULL);
+}
+
+static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
+{
+ struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+ uint32_t axictl;
+
+ if (drvdata->enable_to_bam)
+ return;
+
+ /* Configure and enable required CSR registers */
+ msm_qdss_csr_enable_bam_to_usb();
+
+ /* Configure and enable ETR for usb bam output */
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(bamdata->data_fifo.size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
+ axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+ axictl |= (0xF << 8);
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl &= ~(0x1 << 7);
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl = (axictl & ~0x3) | 0x2;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+ writel_relaxed((uint32_t)bamdata->data_fifo.phys_base,
+ drvdata->base + TMC_DBALO);
+ writel_relaxed((((uint64_t)bamdata->data_fifo.phys_base) >> 32) & 0xFF,
+ drvdata->base + TMC_DBAHI);
+ /* Set FOnFlIn for periodic flush */
+ writel_relaxed(0x133, drvdata->base + TMC_FFCR);
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+
+ drvdata->enable_to_bam = true;
+}
+
+static int tmc_etr_bam_enable(struct tmc_drvdata *drvdata)
+{
+ struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+ int ret;
+
+ if (bamdata->enable)
+ return 0;
+
+ /* Reset bam to start with */
+ ret = sps_device_reset(bamdata->handle);
+ if (ret)
+ goto err0;
+
+ /* Now configure and enable bam */
+
+ bamdata->pipe = sps_alloc_endpoint();
+ if (!bamdata->pipe)
+ return -ENOMEM;
+
+ ret = sps_get_config(bamdata->pipe, &bamdata->connect);
+ if (ret)
+ goto err1;
+
+ bamdata->connect.mode = SPS_MODE_SRC;
+ bamdata->connect.source = bamdata->handle;
+ bamdata->connect.event_thresh = 0x4;
+ bamdata->connect.src_pipe_index = TMC_ETR_BAM_PIPE_INDEX;
+ bamdata->connect.options = SPS_O_AUTO_ENABLE;
+
+ bamdata->connect.destination = bamdata->dest;
+ bamdata->connect.dest_pipe_index = bamdata->dest_pipe_idx;
+ bamdata->connect.desc = bamdata->desc_fifo;
+ bamdata->connect.data = bamdata->data_fifo;
+
+ ret = sps_connect(bamdata->pipe, &bamdata->connect);
+ if (ret)
+ goto err1;
+
+ bamdata->enable = true;
+ return 0;
+err1:
+ sps_free_endpoint(bamdata->pipe);
+err0:
+ return ret;
+}
+
+static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
+{
+ int count;
+
+ /* Ensure no flush is in progress */
+ for (count = TIMEOUT_US;
+ BVAL(readl_relaxed(drvdata->base + TMC_FFSR), 0) != 0
+ && count > 0; count--)
+ udelay(1);
+ WARN(count == 0, "timeout while waiting for TMC flush, TMC_FFSR: %#x\n",
+ readl_relaxed(drvdata->base + TMC_FFSR));
+}
+
+static void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata)
+{
+	if (!drvdata->enable_to_bam)
+		return;
+
+	/* Ensure periodic flush is disabled in CSR block */
+	msm_qdss_csr_disable_flush();
+
+	CS_UNLOCK(drvdata->base);
+
+	tmc_wait_for_flush(drvdata);
+	tmc_disable_hw(drvdata);
+
+	/* Re-lock the coresight access register space; must be passed the
+	 * memory-mapped base (matches CS_UNLOCK above), not the drvdata
+	 * struct pointer.
+	 */
+	CS_LOCK(drvdata->base);
+
+	/* Disable CSR configuration */
+	msm_qdss_csr_disable_bam_to_usb();
+	drvdata->enable_to_bam = false;
+}
+
+static void tmc_etr_bam_disable(struct tmc_drvdata *drvdata)
+{
+ struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+ if (!bamdata->enable)
+ return;
+
+ sps_disconnect(bamdata->pipe);
+ sps_free_endpoint(bamdata->pipe);
+ bamdata->enable = false;
+}
+
+static void usb_notifier(void *priv, unsigned int event,
+ struct qdss_request *d_req, struct usb_qdss_ch *ch)
+{
+ struct tmc_drvdata *drvdata = priv;
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&drvdata->mem_lock);
+ if (event == USB_QDSS_CONNECT) {
+ tmc_etr_fill_usb_bam_data(drvdata);
+ ret = tmc_etr_bam_enable(drvdata);
+ if (ret)
+ dev_err(drvdata->dev, "ETR BAM enable failed\n");
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __tmc_etr_enable_to_bam(drvdata);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ } else if (event == USB_QDSS_DISCONNECT) {
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __tmc_etr_disable_to_bam(drvdata);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ tmc_etr_bam_disable(drvdata);
+ }
+ mutex_unlock(&drvdata->mem_lock);
+}
+
static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
+ int ret;
unsigned long flags;
pm_runtime_get_sync(drvdata->dev);
+ mutex_lock(&drvdata->mem_lock);
+
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
pm_runtime_put(drvdata->dev);
return -EBUSY;
}
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
+ drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ /*
+ * ETR DDR memory is not allocated until user enables
+ * tmc at least once. If user specifies different ETR
+ * DDR size than the default size or switches between
+ * contiguous or scatter-gather memory type after
+ * enabling tmc; the new selection will be honored from
+ * next tmc enable session.
+ */
+ if (drvdata->size != drvdata->mem_size ||
+ drvdata->memtype != drvdata->mem_type) {
+ tmc_etr_free_mem(drvdata);
+ drvdata->size = drvdata->mem_size;
+ drvdata->memtype = drvdata->mem_type;
+ }
+ ret = tmc_etr_alloc_mem(drvdata);
+ if (ret) {
+ pm_runtime_put(drvdata->dev);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+ }
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
+ drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ drvdata->usbch = usb_qdss_open("qdss", drvdata,
+ usb_notifier);
+ if (IS_ERR_OR_NULL(drvdata->usbch)) {
+ dev_err(drvdata->dev, "usb_qdss_open failed\n");
+ ret = PTR_ERR(drvdata->usbch);
+ pm_runtime_put(drvdata->dev);
+ mutex_unlock(&drvdata->mem_lock);
+ if (!ret)
+ ret = -ENODEV;
+
+ return ret;
+ }
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETB ||
+ mode == TMC_MODE_CIRCULAR_BUFFER) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ }
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
tmc_etb_enable_hw(drvdata);
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_enable_hw(drvdata);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_enable_hw(drvdata);
} else {
if (mode == TMC_MODE_CIRCULAR_BUFFER)
tmc_etb_enable_hw(drvdata);
@@ -262,7 +832,19 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
tmc_etf_enable_hw(drvdata);
}
drvdata->enable = true;
+ if (drvdata->force_reg_dump) {
+ drvdata->dump_reg = true;
+ __tmc_reg_dump(drvdata);
+ drvdata->dump_reg = false;
+ }
+
+ /*
+ * sticky_enable prevents users from reading tmc dev node before
+ * enabling tmc at least once.
+ */
+ drvdata->sticky_enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC enabled\n");
return 0;
@@ -306,11 +888,15 @@ static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
for (i = 0; i < memwords; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
+ goto out;
memcpy(bufp, &read_data, 4);
bufp += 4;
}
}
+
+out:
+ if (drvdata->aborting)
+ drvdata->buf_data.magic = TMC_ETFETB_DUMP_MAGIC_V2;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -319,11 +905,65 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
tmc_flush_and_stop(drvdata);
tmc_etb_dump_hw(drvdata);
+ __tmc_reg_dump(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
}
+static void tmc_etr_sg_rwp_pos(struct tmc_drvdata *drvdata, uint32_t rwp)
+{
+ uint32_t i = 0, pte_n = 0, last_pte;
+ uint32_t *virt_st_tbl, *virt_pte;
+ void *virt_blk;
+ bool found = false;
+ phys_addr_t phys_pte;
+ int total_ents = DIV_ROUND_UP(drvdata->size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ virt_st_tbl = drvdata->vaddr;
+
+ while (i < total_ents) {
+ last_pte = ((i + ents_per_blk) > total_ents) ?
+ total_ents : (i + ents_per_blk);
+ while (i < last_pte) {
+ virt_pte = virt_st_tbl + pte_n;
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+
+ /*
+ * When the trace buffer is full; RWP could be on any
+ * 4K block from scatter gather table. Compute below -
+ * 1. Block number where RWP is currently residing
+ * 2. RWP position in that 4K block
+ * 3. Delta offset from current RWP position to end of
+ * block.
+ */
+ if (phys_pte <= rwp && rwp < (phys_pte + PAGE_SIZE)) {
+ virt_blk = phys_to_virt(phys_pte);
+ drvdata->sg_blk_num = i;
+ drvdata->buf = virt_blk + rwp - phys_pte;
+ drvdata->delta_bottom =
+ phys_pte + PAGE_SIZE - rwp;
+ found = true;
+ break;
+ }
+
+ if ((last_pte - i) > 1) {
+ pte_n++;
+ } else if (i < (total_ents - 1)) {
+ virt_blk = phys_to_virt(phys_pte);
+ virt_st_tbl = (uint32_t *)virt_blk;
+ pte_n = 0;
+ break;
+ }
+
+ i++;
+ }
+ if (found)
+ break;
+ }
+}
+
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
u32 rwp, val;
@@ -331,11 +971,25 @@ static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
rwp = readl_relaxed(drvdata->base + TMC_RWP);
val = readl_relaxed(drvdata->base + TMC_STS);
- /* How much memory do we still have */
- if (val & BIT(0))
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- else
- drvdata->buf = drvdata->vaddr;
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG) {
+ /* How much memory do we still have */
+ if (val & BIT(0))
+ drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
+ else
+ drvdata->buf = drvdata->vaddr;
+ } else {
+ /*
+ * Reset these variables before computing since we
+ * rely on their values during tmc read
+ */
+ drvdata->sg_blk_num = 0;
+ drvdata->delta_bottom = 0;
+
+ if (val & BIT(0))
+ tmc_etr_sg_rwp_pos(drvdata, rwp);
+ else
+ drvdata->buf = drvdata->vaddr;
+ }
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -344,6 +998,7 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
tmc_flush_and_stop(drvdata);
tmc_etr_dump_hw(drvdata);
+ __tmc_reg_dump(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
@@ -363,6 +1018,7 @@ static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
unsigned long flags;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading)
goto out;
@@ -370,7 +1026,10 @@ static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
tmc_etb_disable_hw(drvdata);
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_disable_hw(drvdata);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+ __tmc_etr_disable_to_bam(drvdata);
+ else
+ tmc_etr_disable_hw(drvdata);
} else {
if (mode == TMC_MODE_CIRCULAR_BUFFER)
tmc_etb_disable_hw(drvdata);
@@ -381,8 +1040,22 @@ out:
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR
+ && drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ tmc_etr_bam_disable(drvdata);
+ usb_qdss_close(drvdata->usbch);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR
+ && drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETB
+ || mode == TMC_MODE_CIRCULAR_BUFFER) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+ }
+ pm_runtime_put(drvdata->dev);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC disabled\n");
}
@@ -401,9 +1074,46 @@ static void tmc_disable_link(struct coresight_device *csdev, int inport,
tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
}
+static void tmc_abort(struct coresight_device *csdev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ enum tmc_mode mode;
+
+ drvdata->aborting = true;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading)
+ goto out0;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_disable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_disable_hw(drvdata);
+ else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+ __tmc_etr_disable_to_bam(drvdata);
+ } else {
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode == TMC_MODE_CIRCULAR_BUFFER)
+ tmc_etb_disable_hw(drvdata);
+ else
+ goto out1;
+ }
+out0:
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC aborted\n");
+ return;
+out1:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
static const struct coresight_ops_sink tmc_sink_ops = {
.enable = tmc_enable_sink,
.disable = tmc_disable_sink,
+ .abort = tmc_abort,
};
static const struct coresight_ops_link tmc_link_ops = {
@@ -430,7 +1140,20 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
unsigned long flags;
enum tmc_mode mode;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->sticky_enable) {
+ dev_err(drvdata->dev, "enable tmc once before reading\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
+ drvdata->vaddr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
if (!drvdata->enable)
goto out;
@@ -450,11 +1173,13 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
out:
drvdata->reading = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC read start\n");
return 0;
err:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return ret;
}
@@ -493,8 +1218,10 @@ static int tmc_open(struct inode *inode, struct file *file)
goto out;
ret = tmc_read_prepare(drvdata);
- if (ret)
+ if (ret) {
+ drvdata->read_count--;
return ret;
+ }
out:
nonseekable_open(inode, file);
@@ -502,27 +1229,160 @@ out:
return 0;
}
+/*
+ * TMC read logic when scatter gather feature is enabled:
+ *
+ * sg_tbl_num=0
+ * |---------------|<-- drvdata->vaddr
+ * | blk_num=0 |
+ * | blk_num_rel=5 |
+ * |---------------|
+ * | blk_num=1 |
+ * | blk_num_rel=6 |
+ * |---------------|
+ * | blk_num=2 |
+ * | blk_num_rel=7 |
+ * |---------------| sg_tbl_num=1
+ * | Next Table |------>|---------------|
+ * | Addr | | blk_num=3 |
+ * |---------------| | blk_num_rel=8 |
+ * |---------------|
+ * 4k Block Addr | blk_num=4 |
+ * |--------------| blk_num_rel=0 |
+ * | |---------------|
+ * | | blk_num=5 |
+ * | | blk_num_rel=1 |
+ * | |---------------| sg_tbl_num=2
+ * |---------------| | Next Table |------>|---------------|
+ * | | | Addr | | blk_num=6 |
+ * | | |---------------| | blk_num_rel=2 |
+ * | read_off | |---------------|
+ * | | | blk_num=7 |
+ * | | ppos | blk_num_rel=3 |
+ * |---------------|----- |---------------|
+ * | | | blk_num=8 |
+ * | delta_up | | blk_num_rel=4 |
+ * | | RWP/drvdata->buf |---------------|
+ * |---------------|----------------- | |
+ * | | | | |End of
+ * | | | |---------------|-----
+ * | | drvdata->delta_bottom Table
+ * | | |
+ * |_______________| _|_
+ * 4K Block
+ *
+ * For simplicity above diagram assumes following:
+ * a. mem_size = 36KB --> total_ents = 9
+ * b. ents_per_blk = 4
+ * c. RWP is on 5th block (blk_num = 5); so we have to start reading from RWP
+ * position
+ */
+
+static void tmc_etr_sg_compute_read(struct tmc_drvdata *drvdata, loff_t *ppos,
+ char **bufpp, size_t *len)
+{
+ uint32_t i = 0, blk_num_rel = 0, read_len = 0;
+ uint32_t blk_num, sg_tbl_num, blk_num_loc, read_off;
+ uint32_t *virt_pte, *virt_st_tbl;
+ void *virt_blk;
+ phys_addr_t phys_pte = 0;
+ int total_ents = DIV_ROUND_UP(drvdata->size, PAGE_SIZE);
+ int ents_per_blk = PAGE_SIZE/sizeof(uint32_t);
+
+ /*
+ * Find relative block number from ppos and reading offset
+ * within block and find actual block number based on relative
+ * block number
+ */
+ if (drvdata->buf == drvdata->vaddr) {
+ blk_num = *ppos / PAGE_SIZE;
+ read_off = *ppos % PAGE_SIZE;
+ } else {
+ if (*ppos < drvdata->delta_bottom) {
+ read_off = PAGE_SIZE - drvdata->delta_bottom;
+ } else {
+ blk_num_rel = (*ppos / PAGE_SIZE) + 1;
+ read_off = (*ppos - drvdata->delta_bottom) % PAGE_SIZE;
+ }
+
+ blk_num = (drvdata->sg_blk_num + blk_num_rel) % total_ents;
+ }
+
+ virt_st_tbl = (uint32_t *)drvdata->vaddr;
+
+ /* Compute table index and block entry index within that table */
+ if (blk_num && (blk_num == (total_ents - 1)) &&
+ !(blk_num % (ents_per_blk - 1))) {
+ sg_tbl_num = blk_num / ents_per_blk;
+ blk_num_loc = ents_per_blk - 1;
+ } else {
+ sg_tbl_num = blk_num / (ents_per_blk - 1);
+ blk_num_loc = blk_num % (ents_per_blk - 1);
+ }
+
+ for (i = 0; i < sg_tbl_num; i++) {
+ virt_pte = virt_st_tbl + (ents_per_blk - 1);
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+ virt_st_tbl = (uint32_t *)phys_to_virt(phys_pte);
+ }
+
+ virt_pte = virt_st_tbl + blk_num_loc;
+ phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+ virt_blk = phys_to_virt(phys_pte);
+
+ *bufpp = virt_blk + read_off;
+
+ if (*len > (PAGE_SIZE - read_off))
+ *len = PAGE_SIZE - read_off;
+
+ /*
+ * When buffer is wrapped around and trying to read last relative
+ * block (i.e. delta_up), compute len differently
+ */
+ if (blk_num_rel && (blk_num == drvdata->sg_blk_num)) {
+ read_len = PAGE_SIZE - drvdata->delta_bottom - read_off;
+ if (*len > read_len)
+ *len = read_len;
+ }
+
+ dev_dbg_ratelimited(drvdata->dev,
+ "%s: read at %p, phys %pa len %zu blk %d, rel blk %d RWP blk %d\n",
+ __func__, *bufpp, &phys_pte, *len, blk_num, blk_num_rel,
+ drvdata->sg_blk_num);
+}
+
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ char *bufp;
+
+ mutex_lock(&drvdata->mem_lock);
+
+ bufp = drvdata->buf + *ppos;
if (*ppos + len > drvdata->size)
len = drvdata->size - *ppos;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (bufp == (char *)(drvdata->vaddr + drvdata->size))
- bufp = drvdata->vaddr;
- else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
- bufp -= drvdata->size;
- if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
- len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
+ if (drvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG) {
+ if (bufp == (char *)(drvdata->vaddr + drvdata->size))
+ bufp = drvdata->vaddr;
+ else if (bufp >
+ (char *)(drvdata->vaddr + drvdata->size))
+ bufp -= drvdata->size;
+ if ((bufp + len) >
+ (char *)(drvdata->vaddr + drvdata->size))
+ len = (char *)(drvdata->vaddr + drvdata->size)
+ - bufp;
+ } else
+ tmc_etr_sg_compute_read(drvdata, ppos, &bufp, &len);
}
if (copy_to_user(data, bufp, len)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
@@ -530,6 +1390,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
__func__, len, (int)(drvdata->size - *ppos));
+
+ mutex_unlock(&drvdata->mem_lock);
return len;
}
@@ -552,6 +1414,47 @@ out:
return 0;
}
+static int tmc_etr_bam_init(struct amba_device *adev,
+ struct tmc_drvdata *drvdata)
+{
+ int ret;
+ struct device *dev = &adev->dev;
+ struct resource res;
+ struct tmc_etr_bam_data *bamdata;
+
+ bamdata = devm_kzalloc(dev, sizeof(*bamdata), GFP_KERNEL);
+ if (!bamdata)
+ return -ENOMEM;
+ drvdata->bamdata = bamdata;
+
+ ret = of_address_to_resource(adev->dev.of_node, 1, &res);
+ if (ret)
+ return -ENODEV;
+
+ bamdata->props.phys_addr = res.start;
+ bamdata->props.virt_addr = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!bamdata->props.virt_addr)
+ return -ENOMEM;
+ bamdata->props.virt_size = resource_size(&res);
+
+ bamdata->props.event_threshold = 0x4; /* Pipe event threshold */
+ bamdata->props.summing_threshold = 0x10; /* BAM event threshold */
+ bamdata->props.irq = 0;
+ bamdata->props.num_pipes = TMC_ETR_BAM_NR_PIPES;
+
+ return sps_register_bam_device(&bamdata->props, &bamdata->handle);
+}
+
+static void tmc_etr_bam_exit(struct tmc_drvdata *drvdata)
+{
+ struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+ if (!bamdata->handle)
+ return;
+ sps_deregister_bam_device(bamdata->handle);
+}
+
static const struct file_operations tmc_fops = {
.owner = THIS_MODULE,
.open = tmc_open,
@@ -634,6 +1537,173 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
+/*
+ * sysfs "mem_size": requested ETR buffer size in bytes.  Written value is
+ * parsed as hexadecimal (kstrtoul base 16) and only takes effect the next
+ * time the ETR memory is (re)allocated; drvdata->size tracks the size in
+ * actual use.
+ */
+static ssize_t mem_size_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val = drvdata->mem_size;
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t mem_size_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	/* mem_lock serializes against readers/alloc paths using mem_size */
+	mutex_lock(&drvdata->mem_lock);
+	if (kstrtoul(buf, 16, &val)) {
+		mutex_unlock(&drvdata->mem_lock);
+		return -EINVAL;
+	}
+
+	drvdata->mem_size = val;
+	mutex_unlock(&drvdata->mem_lock);
+	return size;
+}
+static DEVICE_ATTR_RW(mem_size);
+
+/*
+ * sysfs "mem_type": requested ETR backing memory type ("contig" vs
+ * scatter-gather, per str_tmc_etr_mem_type[]).  drvdata->memtype is the
+ * type in actual use; mem_type is the request applied on next allocation.
+ */
+static ssize_t mem_type_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_tmc_etr_mem_type[drvdata->mem_type]);
+}
+
+static ssize_t mem_type_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[10] = "";
+
+	/* strlen guard keeps the unbounded %s scan below within str[] */
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (!strcmp(str, str_tmc_etr_mem_type[TMC_ETR_MEM_TYPE_CONTIG])) {
+		drvdata->mem_type = TMC_ETR_MEM_TYPE_CONTIG;
+	} else if (!strcmp(str, str_tmc_etr_mem_type[TMC_ETR_MEM_TYPE_SG])) {
+		drvdata->mem_type = TMC_ETR_MEM_TYPE_SG;
+	} else {
+		mutex_unlock(&drvdata->mem_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->mem_lock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(mem_type);
+
+/*
+ * sysfs "out_mode": selects the ETR output path, "mem" (system memory
+ * sink) or "usb" (BAM pipe to the USB QDSS channel).  Switching while the
+ * ETR is enabled tears down one path and brings up the other under the
+ * spinlock; CTI flush/reset triggers are only mapped in mem mode.
+ */
+static ssize_t out_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 str_tmc_etr_out_mode[drvdata->out_mode]);
+}
+
+static ssize_t out_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[10] = "";
+	unsigned long flags;
+	int ret;
+
+	/* strlen guard keeps the unbounded %s scan below within str[] */
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			/* Not tracing: just record the mode for next enable */
+			drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		/* Live switch: USB/BAM path off, memory sink on */
+		__tmc_etr_disable_to_bam(drvdata);
+		tmc_etr_enable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+		coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
+		tmc_etr_bam_disable(drvdata);
+		usb_qdss_close(drvdata->usbch);
+	} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		if (drvdata->reading) {
+			ret = -EBUSY;
+			goto err1;
+		}
+		tmc_etr_disable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+
+		/*
+		 * NOTE(review): if usb_qdss_open() fails here, out_mode has
+		 * already been switched to USB and the CTIs unmapped, leaving
+		 * the state inconsistent with the error return — confirm
+		 * whether callers recover by writing "mem" back.
+		 */
+		drvdata->usbch = usb_qdss_open("qdss", drvdata,
+					      usb_notifier);
+		if (IS_ERR(drvdata->usbch)) {
+			dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = PTR_ERR(drvdata->usbch);
+			goto err0;
+		}
+	}
+out:
+	mutex_unlock(&drvdata->mem_lock);
+	return size;
+err1:
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+err0:
+	mutex_unlock(&drvdata->mem_lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(out_mode);
+
+/* sysfs "available_out_modes": space-separated list of all out_mode names */
+static ssize_t available_out_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(str_tmc_etr_out_mode); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+				str_tmc_etr_out_mode[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+static DEVICE_ATTR_RO(available_out_modes);
+
static struct attribute *coresight_etb_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_status.attr,
@@ -642,6 +1712,10 @@ static struct attribute *coresight_etb_attrs[] = {
ATTRIBUTE_GROUPS(coresight_etb);
static struct attribute *coresight_etr_attrs[] = {
+ &dev_attr_available_out_modes.attr,
+ &dev_attr_mem_size.attr,
+ &dev_attr_mem_type.attr,
+ &dev_attr_out_mode.attr,
&dev_attr_trigger_cntr.attr,
&dev_attr_status.attr,
NULL,
@@ -655,6 +1729,129 @@ static struct attribute *coresight_etf_attrs[] = {
};
ATTRIBUTE_GROUPS(coresight_etf);
+/*
+ * tmc_etf_set_buf_dump - register the ETF/ETB trace buffer with the MSM
+ * memory-dump table so post-crash tools can recover its contents.
+ *
+ * The function-local static 'count' gives each probed ETF instance a
+ * unique dump name/id (probe is serialized, so no locking is needed).
+ */
+static int tmc_etf_set_buf_dump(struct tmc_drvdata *drvdata)
+{
+	int ret;
+	struct msm_dump_entry dump_entry;
+	static int count;
+
+	drvdata->buf_data.addr = virt_to_phys(drvdata->buf);
+	drvdata->buf_data.len = drvdata->size;
+	scnprintf(drvdata->buf_data.name, sizeof(drvdata->buf_data.name),
+		  "KTMC_ETF%d", count);
+
+	dump_entry.id = MSM_DUMP_DATA_TMC_ETF + count;
+	dump_entry.addr = virt_to_phys(&drvdata->buf_data);
+
+	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+				     &dump_entry);
+	if (ret)
+		return ret;
+
+	/* Only advance the instance counter after successful registration */
+	count++;
+
+	return 0;
+}
+
+/*
+ * __tmc_reg_dump - snapshot TMC management registers into reg_buf for the
+ * crash-dump parser.  Each register is stored at word index
+ * (register offset / 4), so the buffer mirrors the register map (e.g.
+ * TMC_RSZ at 0x004 -> reg_buf[1], CORESIGHT_CLAIMSET at 0xFA0 ->
+ * reg_buf[1000]).  The magic value is written last so a parser that sees
+ * TMC_REG_DUMP_MAGIC_V2 can trust the rest of the snapshot is complete.
+ *
+ * Dumps only when aborting or when dump_reg was explicitly requested.
+ */
+static void __tmc_reg_dump(struct tmc_drvdata *drvdata)
+{
+	uint32_t *reg_buf;
+
+	if (!drvdata->reg_buf)
+		return;
+	else if (!drvdata->aborting && !drvdata->dump_reg)
+		return;
+
+	drvdata->reg_data.version = TMC_REG_DUMP_VER;
+
+	reg_buf = (uint32_t *)drvdata->reg_buf;
+
+	reg_buf[1] = readl_relaxed(drvdata->base + TMC_RSZ);
+	reg_buf[3] = readl_relaxed(drvdata->base + TMC_STS);
+	reg_buf[5] = readl_relaxed(drvdata->base + TMC_RRP);
+	reg_buf[6] = readl_relaxed(drvdata->base + TMC_RWP);
+	reg_buf[7] = readl_relaxed(drvdata->base + TMC_TRG);
+	reg_buf[8] = readl_relaxed(drvdata->base + TMC_CTL);
+	reg_buf[10] = readl_relaxed(drvdata->base + TMC_MODE);
+	reg_buf[11] = readl_relaxed(drvdata->base + TMC_LBUFLEVEL);
+	reg_buf[12] = readl_relaxed(drvdata->base + TMC_CBUFLEVEL);
+	reg_buf[13] = readl_relaxed(drvdata->base + TMC_BUFWM);
+	/* ETR-only registers (wide pointers, AXI control, data base addr) */
+	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+		reg_buf[14] = readl_relaxed(drvdata->base + TMC_RRPHI);
+		reg_buf[15] = readl_relaxed(drvdata->base + TMC_RWPHI);
+		reg_buf[68] = readl_relaxed(drvdata->base + TMC_AXICTL);
+		reg_buf[70] = readl_relaxed(drvdata->base + TMC_DBALO);
+		reg_buf[71] = readl_relaxed(drvdata->base + TMC_DBAHI);
+	}
+	reg_buf[192] = readl_relaxed(drvdata->base + TMC_FFSR);
+	reg_buf[193] = readl_relaxed(drvdata->base + TMC_FFCR);
+	reg_buf[194] = readl_relaxed(drvdata->base + TMC_PSCR);
+	/* Standard CoreSight management/ID registers at the top of the map */
+	reg_buf[1000] = readl_relaxed(drvdata->base + CORESIGHT_CLAIMSET);
+	reg_buf[1001] = readl_relaxed(drvdata->base + CORESIGHT_CLAIMCLR);
+	reg_buf[1005] = readl_relaxed(drvdata->base + CORESIGHT_LSR);
+	reg_buf[1006] = readl_relaxed(drvdata->base + CORESIGHT_AUTHSTATUS);
+	reg_buf[1010] = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+	reg_buf[1011] = readl_relaxed(drvdata->base + CORESIGHT_DEVTYPE);
+	reg_buf[1012] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR4);
+	reg_buf[1013] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR5);
+	reg_buf[1014] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR6);
+	reg_buf[1015] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR7);
+	reg_buf[1016] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
+	reg_buf[1017] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR1);
+	reg_buf[1018] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR2);
+	reg_buf[1019] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR3);
+	reg_buf[1020] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR0);
+	reg_buf[1021] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR1);
+	reg_buf[1022] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR2);
+	reg_buf[1023] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR3);
+
+	drvdata->reg_data.magic = TMC_REG_DUMP_MAGIC_V2;
+}
+
+/*
+ * tmc_set_reg_dump - allocate a shadow buffer sized to the TMC's full
+ * register space and register it with the MSM memory-dump table; the
+ * buffer is filled by __tmc_reg_dump() on abort/request.
+ *
+ * The static 'count' gives each TMC instance a unique dump name/id
+ * (probe is serialized, so no locking is needed).
+ */
+static int tmc_set_reg_dump(struct tmc_drvdata *drvdata)
+{
+	int ret;
+	struct amba_device *adev;
+	struct resource *res;
+	struct device *dev = drvdata->dev;
+	struct msm_dump_entry dump_entry;
+	uint32_t size;
+	static int count;
+
+	adev = to_amba_device(dev);
+	if (!adev)
+		return -EINVAL;
+
+	/* Size the shadow buffer from the device's MMIO window */
+	res = &adev->res;
+	size = resource_size(res);
+
+	drvdata->reg_buf = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!drvdata->reg_buf)
+		return -ENOMEM;
+
+	drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
+	drvdata->reg_data.len = size;
+	scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name),
+		  "KTMC_REG%d", count);
+
+	dump_entry.id = MSM_DUMP_DATA_TMC_REG + count;
+	dump_entry.addr = virt_to_phys(&drvdata->reg_data);
+
+	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+				     &dump_entry);
+	/*
+	 * Don't free the buffer in case of error since it can
+	 * still be used to dump registers as part of abort to
+	 * aid post crash parsing.
+	 */
+	if (ret)
+		return ret;
+
+	count++;
+
+	return 0;
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -666,13 +1863,15 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res = &adev->res;
struct coresight_desc *desc;
struct device_node *np = adev->dev.of_node;
+ struct coresight_cti_data *ctidata;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
+ if (!np)
+ return -ENODEV;
+
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -689,43 +1888,80 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
+ mutex_init(&drvdata->mem_lock);
+
+ drvdata->force_reg_dump = of_property_read_bool(np,
+ "qcom,force-reg-dump");
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
if (np)
ret = of_property_read_u32(np,
"arm,buffer-size",
&drvdata->size);
if (ret)
drvdata->size = SZ_1M;
+
+ drvdata->mem_size = drvdata->size;
+
+ if (of_property_read_bool(np, "arm,sg-enable"))
+ drvdata->memtype = TMC_ETR_MEM_TYPE_SG;
+ else
+ drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
+ drvdata->mem_type = drvdata->memtype;
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
pm_runtime_put(&adev->dev);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
- if (!drvdata->vaddr)
- return -ENOMEM;
-
- memset(drvdata->vaddr, 0, drvdata->size);
- drvdata->buf = drvdata->vaddr;
+ ret = tmc_etr_bam_init(adev, drvdata);
+ if (ret)
+ return ret;
} else {
drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
if (!drvdata->buf)
return -ENOMEM;
+
+ ret = tmc_etf_set_buf_dump(drvdata);
+ if (ret)
+ dev_err(dev, "TMC ETF-ETB dump setup failed. ret: %d\n",
+ ret);
}
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto err_devm_kzalloc;
+ ret = tmc_set_reg_dump(drvdata);
+ if (ret)
+ dev_err(dev, "TMC REG dump setup failed. ret: %d\n", ret);
+
+ pdata->default_sink = of_property_read_bool(np, "arm,default-sink");
+
+ ctidata = of_get_coresight_cti_data(dev, adev->dev.of_node);
+ if (IS_ERR(ctidata)) {
+ dev_err(dev, "invalid cti data\n");
+ } else if (ctidata && ctidata->nr_ctis == 2) {
+ drvdata->cti_flush = coresight_cti_get(
+ ctidata->names[0]);
+ if (IS_ERR(drvdata->cti_flush))
+ dev_err(dev, "failed to get flush cti\n");
+
+ drvdata->cti_reset = coresight_cti_get(
+ ctidata->names[1]);
+ if (IS_ERR(drvdata->cti_reset))
+ dev_err(dev, "failed to get reset cti\n");
}
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
desc->pdata = pdata;
desc->dev = dev;
desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
@@ -746,10 +1982,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
}
drvdata->csdev = coresight_register(desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto err_devm_kzalloc;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
drvdata->miscdev.name = pdata->name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
@@ -763,10 +1997,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
err_misc_register:
coresight_unregister(drvdata->csdev);
-err_devm_kzalloc:
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
return ret;
}
@@ -777,8 +2007,8 @@ static int tmc_remove(struct amba_device *adev)
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(drvdata->dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
+ tmc_etr_free_mem(drvdata);
+ tmc_etr_bam_exit(drvdata);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
new file mode 100644
index 000000000000..c43d8596a203
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -0,0 +1,766 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define tpda_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define tpda_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define TPDA_LOCK(drvdata) \
+do { \
+ mb(); /* ensure configuration take effect before we lock it */ \
+ tpda_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define TPDA_UNLOCK(drvdata) \
+do { \
+ tpda_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock take effect before we configure */ \
+} while (0)
+
+#define TPDA_CR (0x000)
+#define TPDA_Pn_CR(n) (0x004 + (n * 4))
+#define TPDA_FPID_CR (0x084)
+#define TPDA_FREQREQ_VAL (0x088)
+#define TPDA_SYNCR (0x08C)
+#define TPDA_FLUSH_CR (0x090)
+#define TPDA_FLUSH_SR (0x094)
+#define TPDA_FLUSH_ERR (0x098)
+
+#define TPDA_MAX_INPORTS 32
+
+struct tpda_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ struct mutex lock;
+ bool enable;
+ uint32_t atid;
+ uint32_t bc_esize[TPDA_MAX_INPORTS];
+ uint32_t tc_esize[TPDA_MAX_INPORTS];
+ uint32_t dsb_esize[TPDA_MAX_INPORTS];
+ uint32_t cmb_esize[TPDA_MAX_INPORTS];
+ bool trig_async;
+ bool trig_flag_ts;
+ bool trig_freq;
+ bool freq_ts;
+ uint32_t freq_req_val;
+ bool freq_req;
+};
+
+/*
+ * __tpda_enable_pre_port - one-time global TPDA_CR setup done before the
+ * first input port is enabled: program the ATID (trace master id) and the
+ * trigger/frequency packet options from drvdata.
+ * Caller holds drvdata->lock and has unlocked the CoreSight registers.
+ */
+static void __tpda_enable_pre_port(struct tpda_drvdata *drvdata)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_CR);
+	/* Set the master id */
+	val = val & ~(0x7F << 13);
+	val = val & ~(0x7F << 6);
+	val |= (drvdata->atid << 6);
+	if (drvdata->trig_async)
+		val = val | BIT(5);
+	else
+		val = val & ~BIT(5);
+	if (drvdata->trig_flag_ts)
+		val = val | BIT(4);
+	else
+		val = val & ~BIT(4);
+	if (drvdata->trig_freq)
+		val = val | BIT(3);
+	else
+		val = val & ~BIT(3);
+	if (drvdata->freq_ts)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+
+	/* Force ASYNC-VERSION-FREQTS sequence */
+	val = val | BIT(21);
+
+	tpda_writel(drvdata, val, TPDA_CR);
+
+	/*
+	 * If FLRIE bit is set, set the master and channel
+	 * id as zero
+	 */
+	if (BVAL(tpda_readl(drvdata, TPDA_CR), 4))
+		tpda_writel(drvdata, 0x0, TPDA_FPID_CR);
+}
+
+/*
+ * __tpda_enable_port - configure one input port's TPDA_Pn_CR: element
+ * sizes for the BC (bit 4), TC (bit 5), DSB (bit 8) and CMB (bits 7:6)
+ * datasets from the DT-parsed tables, plus the hold time, then set the
+ * port-enable bit (bit 0) in a second write so the configuration lands
+ * before the port goes live.
+ */
+static void __tpda_enable_port(struct tpda_drvdata *drvdata, int port)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_Pn_CR(port));
+	if (drvdata->bc_esize[port] == 32)
+		val = val & ~BIT(4);
+	else if (drvdata->bc_esize[port] == 64)
+		val = val | BIT(4);
+
+	if (drvdata->tc_esize[port] == 32)
+		val = val & ~BIT(5);
+	else if (drvdata->tc_esize[port] == 64)
+		val = val | BIT(5);
+
+	if (drvdata->dsb_esize[port] == 32)
+		val = val & ~BIT(8);
+	else if (drvdata->dsb_esize[port] == 64)
+		val = val | BIT(8);
+
+	/* CMB element size is a 2-bit field: 0=8, 1=32, 2=64 */
+	val = val & ~(0x3 << 6);
+	if (drvdata->cmb_esize[port] == 8)
+		val &= ~(0x3 << 6);
+	else if (drvdata->cmb_esize[port] == 32)
+		val |= (0x1 << 6);
+	else if (drvdata->cmb_esize[port] == 64)
+		val |= (0x2 << 6);
+
+	/* Set the hold time */
+	val = val & ~(0x7 << 1);
+	val |= (0x5 << 1);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+	/* Enable the port */
+	val = val | BIT(0);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+}
+
+/*
+ * __tpda_enable_post_port - global setup done after the first port is
+ * enabled: program the sync counter, the optional frequency-request value,
+ * and the freq-request enable bit (TPDA_CR bit 1).
+ */
+static void __tpda_enable_post_port(struct tpda_drvdata *drvdata)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_SYNCR);
+	/* Clear the mode */
+	val = val & ~BIT(12);
+	/* Program the counter value */
+	val = val | 0xFFF;
+	tpda_writel(drvdata, val, TPDA_SYNCR);
+
+	if (drvdata->freq_req_val)
+		tpda_writel(drvdata, drvdata->freq_req_val, TPDA_FREQREQ_VAL);
+
+	val = tpda_readl(drvdata, TPDA_CR);
+	if (drvdata->freq_req)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpda_writel(drvdata, val, TPDA_CR);
+}
+
+/*
+ * __tpda_enable - enable one input port, bracketed by the global pre/post
+ * configuration when this is the first enable (drvdata->enable still
+ * false).  Caller holds drvdata->lock.
+ */
+static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
+{
+	TPDA_UNLOCK(drvdata);
+
+	if (!drvdata->enable)
+		__tpda_enable_pre_port(drvdata);
+
+	__tpda_enable_port(drvdata, port);
+
+	if (!drvdata->enable)
+		__tpda_enable_post_port(drvdata);
+
+	TPDA_LOCK(drvdata);
+}
+
+/*
+ * tpda_enable - coresight link-ops enable callback: turn on the core
+ * clock and enable the given input port.  outport is unused (TPDA is a
+ * many-to-one aggregator).  Returns 0 or the clk error.
+ */
+static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->lock);
+	__tpda_enable(drvdata, inport);
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->lock);
+
+	dev_info(drvdata->dev, "TPDA inport %d enabled\n", inport);
+	return 0;
+}
+
+/* Clear the port-enable bit (bit 0) of one input port's control register */
+static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
+{
+	uint32_t val;
+
+	TPDA_UNLOCK(drvdata);
+
+	val = tpda_readl(drvdata, TPDA_Pn_CR(port));
+	val = val & ~BIT(0);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+
+	TPDA_LOCK(drvdata);
+}
+
+/*
+ * tpda_disable - coresight link-ops disable callback.
+ *
+ * NOTE(review): drvdata->enable is cleared unconditionally even if other
+ * inports are still enabled, so a subsequent enable of another port will
+ * re-run the global pre/post configuration — confirm this is intended.
+ * The clk enable/disable calls are refcounted, so they stay balanced.
+ */
+static void tpda_disable(struct coresight_device *csdev, int inport,
+			 int outport)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->lock);
+	__tpda_disable(drvdata, inport);
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->lock);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "TPDA inport %d disabled\n", inport);
+}
+
+static const struct coresight_ops_link tpda_link_ops = {
+	.enable		= tpda_enable,
+	.disable	= tpda_disable,
+};
+
+static const struct coresight_ops tpda_cs_ops = {
+	.link_ops	= &tpda_link_ops,
+};
+
+/* sysfs "trig_async_enable": TPDA_CR bit 5 option (applied on next enable);
+ * input parsed as hex, nonzero = enable */
+static ssize_t tpda_show_trig_async_enable(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->trig_async);
+}
+
+static ssize_t tpda_store_trig_async_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_async = true;
+	else
+		drvdata->trig_async = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_async_enable, S_IRUGO | S_IWUSR,
+		   tpda_show_trig_async_enable,
+		   tpda_store_trig_async_enable);
+
+/* sysfs "trig_flag_ts_enable": TPDA_CR bit 4 (FLRIE) option, applied on
+ * next enable; input parsed as hex, nonzero = enable */
+static ssize_t tpda_show_trig_flag_ts_enable(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->trig_flag_ts);
+}
+
+static ssize_t tpda_store_trig_flag_ts_enable(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf,
+					      size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_flag_ts = true;
+	else
+		drvdata->trig_flag_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_flag_ts_enable, S_IRUGO | S_IWUSR,
+		   tpda_show_trig_flag_ts_enable,
+		   tpda_store_trig_flag_ts_enable);
+
+/* sysfs "trig_freq_enable": TPDA_CR bit 3 option, applied on next enable;
+ * input parsed as hex, nonzero = enable */
+static ssize_t tpda_show_trig_freq_enable(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->trig_freq);
+}
+
+static ssize_t tpda_store_trig_freq_enable(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf,
+					   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_freq = true;
+	else
+		drvdata->trig_freq = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_freq_enable, S_IRUGO | S_IWUSR,
+		   tpda_show_trig_freq_enable,
+		   tpda_store_trig_freq_enable);
+
+/* sysfs "freq_ts_enable": TPDA_CR bit 2 option (defaults to true, see
+ * tpda_init_default_data); input parsed as hex, nonzero = enable */
+static ssize_t tpda_show_freq_ts_enable(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)drvdata->freq_ts);
+}
+
+static ssize_t tpda_store_freq_ts_enable(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->freq_ts = true;
+	else
+		drvdata->freq_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_ts_enable, S_IRUGO | S_IWUSR, tpda_show_freq_ts_enable,
+		   tpda_store_freq_ts_enable);
+
+/* sysfs "freq_req_val": hex value written to TPDA_FREQREQ_VAL at enable
+ * time when nonzero (see __tpda_enable_post_port) */
+static ssize_t tpda_show_freq_req_val(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val = drvdata->freq_req_val;
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpda_store_freq_req_val(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->freq_req_val = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_req_val, S_IRUGO | S_IWUSR, tpda_show_freq_req_val,
+		   tpda_store_freq_req_val);
+
+/* sysfs "freq_req": TPDA_CR bit 1 option, applied on next enable; input
+ * parsed as hex, nonzero = enable */
+static ssize_t tpda_show_freq_req(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->freq_req);
+}
+
+static ssize_t tpda_store_freq_req(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->freq_req = true;
+	else
+		drvdata->freq_req = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_req, S_IRUGO | S_IWUSR, tpda_show_freq_req,
+		   tpda_store_freq_req);
+
+/*
+ * sysfs "global_flush_req": reads back TPDA_CR; writing a nonzero hex
+ * value sets TPDA_CR bit 0 to request a global flush.  Only permitted
+ * while the TPDA is enabled (-EPERM otherwise) since the register is
+ * accessed live.
+ */
+static ssize_t tpda_show_global_flush_req(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_CR);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpda_store_global_flush_req(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf,
+					   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		val = tpda_readl(drvdata, TPDA_CR);
+		val = val | BIT(0);
+		tpda_writel(drvdata, val, TPDA_CR);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(global_flush_req, S_IRUGO | S_IWUSR,
+		   tpda_show_global_flush_req, tpda_store_global_flush_req);
+
+/*
+ * sysfs "port_flush_req": reads back TPDA_FLUSH_CR; writing a nonzero hex
+ * port mask writes it to TPDA_FLUSH_CR to flush those input ports.  Only
+ * permitted while the TPDA is enabled (-EPERM otherwise).
+ */
+static ssize_t tpda_show_port_flush_req(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_FLUSH_CR);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpda_store_port_flush_req(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		tpda_writel(drvdata, val, TPDA_FLUSH_CR);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(port_flush_req, S_IRUGO | S_IWUSR, tpda_show_port_flush_req,
+		   tpda_store_port_flush_req);
+
+/* sysfs attribute table exposed under the TPDA coresight device */
+static struct attribute *tpda_attrs[] = {
+	&dev_attr_trig_async_enable.attr,
+	&dev_attr_trig_flag_ts_enable.attr,
+	&dev_attr_trig_freq_enable.attr,
+	&dev_attr_freq_ts_enable.attr,
+	&dev_attr_freq_req_val.attr,
+	&dev_attr_freq_req.attr,
+	&dev_attr_global_flush_req.attr,
+	&dev_attr_port_flush_req.attr,
+	NULL,
+};
+
+static struct attribute_group tpda_attr_grp = {
+	.attrs = tpda_attrs,
+};
+
+static const struct attribute_group *tpda_attr_grps[] = {
+	&tpda_attr_grp,
+	NULL,
+};
+
+/*
+ * tpda_parse_of_data - read the mandatory ATID plus the optional per-port
+ * element-size tables from the device-tree node.  Each qcom,*-elem-size
+ * property is a flat list of <port esize> pairs, hence the even-length
+ * requirement.
+ *
+ * NOTE(review): the "len > 63" bound caps the list at 62 cells (31 pairs),
+ * one short of TPDA_MAX_INPORTS (32) ports — confirm whether this limit is
+ * intentional.
+ *
+ * Returns 0 on success, -EINVAL on any malformed property.
+ */
+static int tpda_parse_of_data(struct tpda_drvdata *drvdata)
+{
+	int len, port, i, ret;
+	const __be32 *prop;
+	struct device_node *node = drvdata->dev->of_node;
+
+	ret = of_property_read_u32(node, "qcom,tpda-atid", &drvdata->atid);
+	if (ret) {
+		dev_err(drvdata->dev, "TPDA ATID is not specified\n");
+		return -EINVAL;
+	}
+
+	prop = of_get_property(node, "qcom,bc-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset BC width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		/* i advances by two per iteration: port cell then size cell */
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for BC\n");
+				return -EINVAL;
+			}
+			drvdata->bc_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,tc-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset TC width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for TC\n");
+				return -EINVAL;
+			}
+			drvdata->tc_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,dsb-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset DSB width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for DSB\n");
+				return -EINVAL;
+			}
+			drvdata->dsb_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,cmb-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset CMB width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for CMB\n");
+				return -EINVAL;
+			}
+			drvdata->cmb_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+	return 0;
+}
+
+/* Default configuration: frequency timestamps on (TPDA_CR bit 2) */
+static void tpda_init_default_data(struct tpda_drvdata *drvdata)
+{
+	drvdata->freq_ts = true;
+}
+
+/*
+ * tpda_probe - platform probe: map "tpda-base" registers, parse DT
+ * configuration, check the hardware is accessible (authstatus) with the
+ * core clock briefly enabled, then register as a coresight MERG link.
+ *
+ * The clock is enabled only for the authstatus check and released before
+ * registration; tpda_enable() turns it back on for actual use.
+ * Returns 0 on success or a negative errno (-EPERM if the device is not
+ * accessible per its authentication status).
+ */
+static int tpda_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct tpda_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpda-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	mutex_init(&drvdata->lock);
+
+	ret = tpda_parse_of_data(drvdata);
+	if (ret)
+		return ret;
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	/* Needs the clock on; err path below balances the enable above */
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto err;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	tpda_init_default_data(drvdata);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_LINK;
+	desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+	desc->ops = &tpda_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = tpda_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_dbg(drvdata->dev, "TPDA initialized\n");
+	return 0;
+err:
+	clk_disable_unprepare(drvdata->clk);
+	return -EPERM;
+}
+
+/* Unregister from coresight; all other resources are devm-managed */
+static int tpda_remove(struct platform_device *pdev)
+{
+	struct tpda_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+/* Device-tree match table and platform-driver boilerplate */
+static struct of_device_id tpda_match[] = {
+	{.compatible = "qcom,coresight-tpda"},
+	{}
+};
+
+static struct platform_driver tpda_driver = {
+	.probe          = tpda_probe,
+	.remove         = tpda_remove,
+	.driver         = {
+		.name   = "coresight-tpda",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpda_match,
+	},
+};
+
+static int __init tpda_init(void)
+{
+	return platform_driver_register(&tpda_driver);
+}
+module_init(tpda_init);
+
+static void __exit tpda_exit(void)
+{
+	platform_driver_unregister(&tpda_driver);
+}
+module_exit(tpda_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
new file mode 100644
index 000000000000..596a36ed7dba
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -0,0 +1,3787 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define tpdm_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define tpdm_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define TPDM_LOCK(drvdata) \
+do { \
+ mb(); /* ensure configuration take effect before we lock it */ \
+ tpdm_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define TPDM_UNLOCK(drvdata) \
+do { \
+ tpdm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock take effect before we configure */ \
+} while (0)
+
+/* GPR Registers */
+#define TPDM_GPR_CR(n) (0x0 + (n * 4))
+
+/* BC Subunit Registers */
+#define TPDM_BC_CR (0x280)
+#define TPDM_BC_SATROLL (0x284)
+#define TPDM_BC_CNTENSET (0x288)
+#define TPDM_BC_CNTENCLR (0x28C)
+#define TPDM_BC_INTENSET (0x290)
+#define TPDM_BC_INTENCLR (0x294)
+#define TPDM_BC_TRIG_LO(n) (0x298 + (n * 4))
+#define TPDM_BC_TRIG_HI(n) (0x318 + (n * 4))
+#define TPDM_BC_GANG (0x398)
+#define TPDM_BC_OVERFLOW(n) (0x39C + (n * 4))
+#define TPDM_BC_OVSR (0x3C0)
+#define TPDM_BC_SELR (0x3C4)
+#define TPDM_BC_CNTR_LO (0x3C8)
+#define TPDM_BC_CNTR_HI (0x3CC)
+#define TPDM_BC_SHADOW_LO(n) (0x3D0 + (n * 4))
+#define TPDM_BC_SHADOW_HI(n) (0x450 + (n * 4))
+#define TPDM_BC_SWINC (0x4D0)
+#define TPDM_BC_MSR(n) (0x4F0 + (n * 4))
+
+/* TC Subunit Registers */
+#define TPDM_TC_CR (0x500)
+#define TPDM_TC_CNTENSET (0x504)
+#define TPDM_TC_CNTENCLR (0x508)
+#define TPDM_TC_INTENSET (0x50C)
+#define TPDM_TC_INTENCLR (0x510)
+#define TPDM_TC_TRIG_SEL(n) (0x514 + (n * 4))
+#define TPDM_TC_TRIG_LO(n) (0x534 + (n * 4))
+#define TPDM_TC_TRIG_HI(n) (0x554 + (n * 4))
+#define TPDM_TC_OVSR_GP (0x580)
+#define TPDM_TC_OVSR_IMPL (0x584)
+#define TPDM_TC_SELR (0x588)
+#define TPDM_TC_CNTR_LO (0x58C)
+#define TPDM_TC_CNTR_HI (0x590)
+#define TPDM_TC_SHADOW_LO(n) (0x594 + (n * 4))
+#define TPDM_TC_SHADOW_HI(n) (0x644 + (n * 4))
+#define TPDM_TC_SWINC (0x700)
+#define TPDM_TC_MSR(n) (0x768 + (n * 4))
+
+/* DSB Subunit Registers */
+#define TPDM_DSB_CR (0x780)
+#define TPDM_DSB_TIER (0x784)
+#define TPDM_DSB_TPR(n) (0x788 + (n * 4))
+#define TPDM_DSB_TPMR(n) (0x7A8 + (n * 4))
+#define TPDM_DSB_XPR(n) (0x7C8 + (n * 4))
+#define TPDM_DSB_XPMR(n) (0x7E8 + (n * 4))
+#define TPDM_DSB_EDCR(n) (0x808 + (n * 4))
+#define TPDM_DSB_EDCMR(n) (0x848 + (n * 4))
+#define TPDM_DSB_CA_SELECT(n) (0x86c + (n * 4))
+#define TPDM_DSB_MSR(n) (0x980 + (n * 4))
+
+/* CMB Subunit Registers */
+#define TPDM_CMB_CR (0xA00)
+#define TPDM_CMB_TIER (0xA04)
+#define TPDM_CMB_TPR(n) (0xA08 + (n * 4))
+#define TPDM_CMB_TPMR(n) (0xA10 + (n * 4))
+#define TPDM_CMB_XPR(n) (0xA18 + (n * 4))
+#define TPDM_CMB_XPMR(n) (0xA20 + (n * 4))
+#define TPDM_CMB_MSR(n) (0xA80 + (n * 4))
+
+/* TPDM Specific Registers */
+#define TPDM_ITATBCNTRL (0xEF0)
+#define TPDM_CLK_CTRL (0x220)
+
+#define TPDM_DATASETS 32
+#define TPDM_BC_MAX_COUNTERS 32
+#define TPDM_BC_MAX_OVERFLOW 6
+#define TPDM_BC_MAX_MSR 4
+#define TPDM_TC_MAX_COUNTERS 44
+#define TPDM_TC_MAX_TRIG 8
+#define TPDM_TC_MAX_MSR 6
+#define TPDM_DSB_MAX_PATT 8
+#define TPDM_DSB_MAX_SELECT 8
+#define TPDM_DSB_MAX_MSR 32
+#define TPDM_DSB_MAX_EDCR 16
+#define TPDM_DSB_MAX_LINES 256
+#define TPDM_CMB_PATT_CMP 2
+#define TPDM_CMB_MAX_MSR 128
+
+/* DSB programming modes */
+#define TPDM_DSB_MODE_CYCACC(val) BMVAL(val, 0, 2)
+#define TPDM_DSB_MODE_PERF BIT(3)
+#define TPDM_DSB_MODE_HPBYTESEL(val) BMVAL(val, 4, 8)
+#define TPDM_MODE_ALL (0xFFFFFFF)
+
+#define NUM_OF_BITS 32
+#define TPDM_GPR_REGS_MAX 160
+
+#define TPDM_TRACE_ID_START 128
+
+#define TPDM_REVISION_A 0
+#define TPDM_REVISION_B 1
+
+/*
+ * Dataset types a TPDM instance may implement.  Values are used as bit
+ * indices into tpdm_drvdata::datasets (present in HW) and ::enable_ds
+ * (selected by the user via sysfs).
+ */
+enum tpdm_dataset {
+	TPDM_DS_IMPLDEF,
+	TPDM_DS_DSB,
+	TPDM_DS_CMB,
+	TPDM_DS_TC,
+	TPDM_DS_BC,
+	TPDM_DS_GPR,
+};
+
+/* Counter capture/retrieval paths: ATB (trace bus) or APB (register reads) */
+enum tpdm_mode {
+	TPDM_MODE_ATB,
+	TPDM_MODE_APB,
+};
+
+/* How widely a feature (e.g. counter triggers) is supported on this TPDM */
+enum tpdm_support_type {
+	TPDM_SUPPORT_TYPE_FULL,
+	TPDM_SUPPORT_TYPE_PARTIAL,
+	TPDM_SUPPORT_TYPE_NO,
+};
+
+/* CMB capture modes (selects BIT(1) of TPDM_CMB_CR, see __tpdm_enable_cmb) */
+enum tpdm_cmb_mode {
+	TPDM_CMB_MODE_CONTINUOUS,
+	TPDM_CMB_MODE_TRACE_ON_CHANGE,
+};
+
+/* Indices of the two halves of the CMB pattern comparator register pairs */
+enum tpdm_cmb_patt_bits {
+	TPDM_CMB_LSB,
+	TPDM_CMB_MSB,
+};
+
+/* Optionally enable tracing at boot, controlled by Kconfig and module param */
+#ifdef CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE
+static int boot_enable = 1;
+#else
+static int boot_enable;
+#endif
+
+module_param_named(
+	boot_enable, boot_enable, int, S_IRUGO
+);
+
+/*
+ * Cached GPR writes.  Only entries flagged in gpr_dirty (set by the
+ * gp_regs sysfs store) are programmed into hardware on enable.
+ */
+struct gpr_dataset {
+	DECLARE_BITMAP(gpr_dirty, TPDM_GPR_REGS_MAX);
+	uint32_t gp_regs[TPDM_GPR_REGS_MAX];
+};
+
+/* BC subunit configuration, applied to HW by __tpdm_enable_bc() */
+struct bc_dataset {
+	enum tpdm_mode capture_mode;
+	enum tpdm_mode retrieval_mode;
+	uint32_t sat_mode;
+	uint32_t enable_counters;
+	uint32_t clear_counters;
+	uint32_t enable_irq;
+	uint32_t clear_irq;
+	uint32_t trig_val_lo[TPDM_BC_MAX_COUNTERS];
+	uint32_t trig_val_hi[TPDM_BC_MAX_COUNTERS];
+	uint32_t enable_ganging;
+	uint32_t overflow_val[TPDM_BC_MAX_OVERFLOW];
+	uint32_t msr[TPDM_BC_MAX_MSR];
+};
+
+/* TC subunit configuration, applied to HW by __tpdm_enable_tc() */
+struct tc_dataset {
+	enum tpdm_mode capture_mode;
+	enum tpdm_mode retrieval_mode;
+	bool sat_mode;
+	uint32_t enable_counters;
+	uint32_t clear_counters;
+	uint32_t enable_irq;
+	uint32_t clear_irq;
+	uint32_t trig_sel[TPDM_TC_MAX_TRIG];
+	uint32_t trig_val_lo[TPDM_TC_MAX_TRIG];
+	uint32_t trig_val_hi[TPDM_TC_MAX_TRIG];
+	uint32_t msr[TPDM_TC_MAX_MSR];
+};
+
+/* DSB subunit configuration, applied to HW by __tpdm_enable_dsb() */
+struct dsb_dataset {
+	uint32_t mode;		/* decoded via TPDM_DSB_MODE_* macros */
+	uint32_t edge_ctrl[TPDM_DSB_MAX_EDCR];
+	uint32_t edge_ctrl_mask[TPDM_DSB_MAX_EDCR / 2];
+	uint32_t patt_val[TPDM_DSB_MAX_PATT];
+	uint32_t patt_mask[TPDM_DSB_MAX_PATT];
+	bool patt_ts;		/* timestamp on pattern match */
+	bool patt_type;
+	uint32_t trig_patt_val[TPDM_DSB_MAX_PATT];
+	uint32_t trig_patt_mask[TPDM_DSB_MAX_PATT];
+	bool trig_ts;		/* timestamp on trigger */
+	uint32_t select_val[TPDM_DSB_MAX_SELECT];
+	uint32_t msr[TPDM_DSB_MAX_MSR];
+};
+
+/* CMB subunit configuration, applied to HW by __tpdm_enable_cmb() */
+struct cmb_dataset {
+	enum tpdm_cmb_mode mode;
+	uint32_t patt_val[TPDM_CMB_PATT_CMP];
+	uint32_t patt_mask[TPDM_CMB_PATT_CMP];
+	bool patt_ts;
+	uint32_t trig_patt_val[TPDM_CMB_PATT_CMP];
+	uint32_t trig_patt_mask[TPDM_CMB_PATT_CMP];
+	bool trig_ts;
+	uint32_t msr[TPDM_CMB_MAX_MSR];
+};
+
+/* Per-device state for one TPDM instance */
+struct tpdm_drvdata {
+	void __iomem *base;	/* mapped register base */
+	struct device *dev;
+	struct coresight_device *csdev;
+	struct clk *clk;
+	struct mutex lock;	/* guards cached config and enable state */
+	bool enable;		/* true while tracing is active */
+	bool clk_enable;	/* write TPDM_CLK_CTRL on enable/disable */
+	DECLARE_BITMAP(datasets, TPDM_DATASETS);	/* present in HW */
+	DECLARE_BITMAP(enable_ds, TPDM_DATASETS);	/* user-selected */
+	enum tpdm_support_type tc_trig_type;
+	enum tpdm_support_type bc_trig_type;
+	enum tpdm_support_type bc_gang_type;
+	uint32_t bc_counters_avail;
+	uint32_t tc_counters_avail;
+	struct gpr_dataset *gpr;
+	struct bc_dataset *bc;
+	struct tc_dataset *tc;
+	struct dsb_dataset *dsb;
+	struct cmb_dataset *cmb;
+	int traceid;
+	uint32_t version;	/* TPDM_REVISION_A / TPDM_REVISION_B */
+	bool msr_support;	/* MSR registers present */
+	bool msr_fix_req;	/* DSB MSRs must be written after enable */
+};
+
+/*
+ * Program only the GPR registers the user has explicitly written via the
+ * gp_regs sysfs attribute (tracked in gpr_dirty).  Uses the bitmap
+ * iterator instead of an open-coded scan-and-skip loop.
+ */
+static void __tpdm_enable_gpr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	for_each_set_bit(i, drvdata->gpr->gpr_dirty, TPDM_GPR_REGS_MAX)
+		tpdm_writel(drvdata, drvdata->gpr->gp_regs[i], TPDM_GPR_CR(i));
+}
+
+/* Program the cached BC MSR values; no-op if the TPDM lacks MSR support. */
+static void __tpdm_config_bc_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_BC_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->bc->msr[i], TPDM_BC_MSR(i));
+}
+
+/* Program the cached TC MSR values; no-op if the TPDM lacks MSR support. */
+static void __tpdm_config_tc_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_TC_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->tc->msr[i], TPDM_TC_MSR(i));
+}
+
+/* Program the cached DSB MSR values; no-op if the TPDM lacks MSR support. */
+static void __tpdm_config_dsb_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_DSB_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->dsb->msr[i], TPDM_DSB_MSR(i));
+}
+
+/* Program the cached CMB MSR values; no-op if the TPDM lacks MSR support. */
+static void __tpdm_config_cmb_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_CMB_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->cmb->msr[i], TPDM_CMB_MSR(i));
+}
+
+/*
+ * Program the BC subunit from the cached bc_dataset and set its enable
+ * bit.  Caller holds drvdata->lock and has already done TPDM_UNLOCK.
+ */
+static void __tpdm_enable_bc(struct tpdm_drvdata *drvdata)
+{
+	int i;
+	uint32_t val;
+
+	/* Saturation/rollover config; 0 clears any previous setting */
+	if (drvdata->bc->sat_mode)
+		tpdm_writel(drvdata, drvdata->bc->sat_mode,
+			    TPDM_BC_SATROLL);
+	else
+		tpdm_writel(drvdata, 0x0, TPDM_BC_SATROLL);
+
+	/* Clear all counter enables before setting the requested mask */
+	if (drvdata->bc->enable_counters) {
+		tpdm_writel(drvdata, 0xFFFFFFFF, TPDM_BC_CNTENCLR);
+		tpdm_writel(drvdata, drvdata->bc->enable_counters,
+			    TPDM_BC_CNTENSET);
+	}
+	if (drvdata->bc->clear_counters)
+		tpdm_writel(drvdata, drvdata->bc->clear_counters,
+			    TPDM_BC_CNTENCLR);
+
+	/* Same clear-then-set pattern for the interrupt enables */
+	if (drvdata->bc->enable_irq) {
+		tpdm_writel(drvdata, 0xFFFFFFFF, TPDM_BC_INTENCLR);
+		tpdm_writel(drvdata, drvdata->bc->enable_irq,
+			    TPDM_BC_INTENSET);
+	}
+	if (drvdata->bc->clear_irq)
+		tpdm_writel(drvdata, drvdata->bc->clear_irq,
+			    TPDM_BC_INTENCLR);
+
+	/* Trigger thresholds: every counter, counter 0 only, or none */
+	if (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_FULL) {
+		for (i = 0; i < drvdata->bc_counters_avail; i++) {
+			tpdm_writel(drvdata, drvdata->bc->trig_val_lo[i],
+				    TPDM_BC_TRIG_LO(i));
+			tpdm_writel(drvdata, drvdata->bc->trig_val_hi[i],
+				    TPDM_BC_TRIG_HI(i));
+		}
+	} else if (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL) {
+		tpdm_writel(drvdata, drvdata->bc->trig_val_lo[0],
+			    TPDM_BC_TRIG_LO(0));
+		tpdm_writel(drvdata, drvdata->bc->trig_val_hi[0],
+			    TPDM_BC_TRIG_HI(0));
+	}
+
+	if (drvdata->bc->enable_ganging)
+		tpdm_writel(drvdata, drvdata->bc->enable_ganging, TPDM_BC_GANG);
+
+	for (i = 0; i < TPDM_BC_MAX_OVERFLOW; i++)
+		tpdm_writel(drvdata, drvdata->bc->overflow_val[i],
+			    TPDM_BC_OVERFLOW(i));
+
+	__tpdm_config_bc_msr(drvdata);
+
+	/* BIT(2) selects APB retrieval, clear for ATB */
+	val = tpdm_readl(drvdata, TPDM_BC_CR);
+	if (drvdata->bc->retrieval_mode == TPDM_MODE_APB)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+	tpdm_writel(drvdata, val, TPDM_BC_CR);
+
+	/* Enable last, once the subunit is fully configured */
+	val = tpdm_readl(drvdata, TPDM_BC_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_BC_CR);
+}
+
+/*
+ * Program the TC subunit from the cached tc_dataset and set its enable
+ * bit.  Caller holds drvdata->lock and has already done TPDM_UNLOCK.
+ */
+static void __tpdm_enable_tc(struct tpdm_drvdata *drvdata)
+{
+	int i;
+	uint32_t val;
+
+	/* Clear all counter enables before setting the requested mask */
+	if (drvdata->tc->enable_counters) {
+		tpdm_writel(drvdata, 0xF, TPDM_TC_CNTENCLR);
+		tpdm_writel(drvdata, drvdata->tc->enable_counters,
+			    TPDM_TC_CNTENSET);
+	}
+	if (drvdata->tc->clear_counters)
+		tpdm_writel(drvdata, drvdata->tc->clear_counters,
+			    TPDM_TC_CNTENCLR);
+
+	/* Same clear-then-set pattern for the interrupt enables */
+	if (drvdata->tc->enable_irq) {
+		tpdm_writel(drvdata, 0xF, TPDM_TC_INTENCLR);
+		tpdm_writel(drvdata, drvdata->tc->enable_irq,
+			    TPDM_TC_INTENSET);
+	}
+	if (drvdata->tc->clear_irq)
+		tpdm_writel(drvdata, drvdata->tc->clear_irq,
+			    TPDM_TC_INTENCLR);
+
+	/* Trigger select/thresholds: every slot, slot 0 only, or none */
+	if (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_FULL) {
+		for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+			tpdm_writel(drvdata, drvdata->tc->trig_sel[i],
+				    TPDM_TC_TRIG_SEL(i));
+			tpdm_writel(drvdata, drvdata->tc->trig_val_lo[i],
+				    TPDM_TC_TRIG_LO(i));
+			tpdm_writel(drvdata, drvdata->tc->trig_val_hi[i],
+				    TPDM_TC_TRIG_HI(i));
+		}
+	} else if (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL) {
+		tpdm_writel(drvdata, drvdata->tc->trig_sel[0],
+			    TPDM_TC_TRIG_SEL(0));
+		tpdm_writel(drvdata, drvdata->tc->trig_val_lo[0],
+			    TPDM_TC_TRIG_LO(0));
+		tpdm_writel(drvdata, drvdata->tc->trig_val_hi[0],
+			    TPDM_TC_TRIG_HI(0));
+	}
+
+	__tpdm_config_tc_msr(drvdata);
+
+	/* BIT(4): saturation mode; BIT(2): APB retrieval (else ATB) */
+	val = tpdm_readl(drvdata, TPDM_TC_CR);
+	if (drvdata->tc->sat_mode)
+		val = val | BIT(4);
+	else
+		val = val & ~BIT(4);
+	if (drvdata->tc->retrieval_mode == TPDM_MODE_APB)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+	tpdm_writel(drvdata, val, TPDM_TC_CR);
+
+	/* Enable last, once the subunit is fully configured */
+	val = tpdm_readl(drvdata, TPDM_TC_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_TC_CR);
+}
+
+/*
+ * Program the DSB subunit from the cached dsb_dataset and set its enable
+ * bit.  Caller holds drvdata->lock and has already done TPDM_UNLOCK.
+ * Note the msr_fix_req quirk: on affected parts the MSRs must be written
+ * only after the subunit is enabled.
+ */
+static void __tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t val, mode, i;
+
+	/* Edge detection control and masks */
+	for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
+		tpdm_writel(drvdata, drvdata->dsb->edge_ctrl[i],
+			    TPDM_DSB_EDCR(i));
+	for (i = 0; i < TPDM_DSB_MAX_EDCR / 2; i++)
+		tpdm_writel(drvdata, drvdata->dsb->edge_ctrl_mask[i],
+			    TPDM_DSB_EDCMR(i));
+
+	/* Pattern match value/mask pairs */
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		tpdm_writel(drvdata, drvdata->dsb->patt_val[i],
+			    TPDM_DSB_TPR(i));
+		tpdm_writel(drvdata, drvdata->dsb->patt_mask[i],
+			    TPDM_DSB_TPMR(i));
+	}
+
+	/* Trigger pattern value/mask pairs */
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		tpdm_writel(drvdata, drvdata->dsb->trig_patt_val[i],
+			    TPDM_DSB_XPR(i));
+		tpdm_writel(drvdata, drvdata->dsb->trig_patt_mask[i],
+			    TPDM_DSB_XPMR(i));
+	}
+
+	for (i = 0; i < TPDM_DSB_MAX_SELECT; i++)
+		tpdm_writel(drvdata, drvdata->dsb->select_val[i],
+			    TPDM_DSB_CA_SELECT(i));
+
+	/* TIER: BIT(0) pattern timestamp, BIT(2) pattern type (only
+	 * meaningful with patt_ts), BIT(1) trigger timestamp.
+	 */
+	val = tpdm_readl(drvdata, TPDM_DSB_TIER);
+	if (drvdata->dsb->patt_ts == true) {
+		val = val | BIT(0);
+		if (drvdata->dsb->patt_type == true)
+			val = val | BIT(2);
+		else
+			val = val & ~BIT(2);
+	} else {
+		val = val & ~BIT(0);
+	}
+	if (drvdata->dsb->trig_ts == true)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_DSB_TIER);
+
+	/* Normal parts take MSR config before enable ... */
+	if (!drvdata->msr_fix_req)
+		__tpdm_config_dsb_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_DSB_CR);
+	/* Set the cycle accurate mode */
+	mode = TPDM_DSB_MODE_CYCACC(drvdata->dsb->mode);
+	val = val & ~(0x7 << 9);
+	val = val | (mode << 9);
+	/* Set the byte lane for high-performance mode */
+	mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
+	val = val & ~(0x1F << 2);
+	val = val | (mode << 2);
+	/* Set the performance mode */
+	if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_DSB_CR);
+
+	val = tpdm_readl(drvdata, TPDM_DSB_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_DSB_CR);
+
+	/* ... while parts with the MSR quirk take it after enable */
+	if (drvdata->msr_fix_req)
+		__tpdm_config_dsb_msr(drvdata);
+}
+
+/*
+ * Program the CMB subunit from the cached cmb_dataset and set its enable
+ * bit.  Caller holds drvdata->lock and has already done TPDM_UNLOCK.
+ */
+static void __tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t val;
+
+	/* Pattern comparator value/mask, LSB then MSB halves */
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_TPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_TPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_MSB));
+
+	/* Trigger pattern comparator value/mask, LSB then MSB halves */
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_XPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_XPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_MSB));
+
+	/* TIER: BIT(0) pattern timestamp, BIT(1) trigger timestamp */
+	val = tpdm_readl(drvdata, TPDM_CMB_TIER);
+	if (drvdata->cmb->patt_ts == true)
+		val = val | BIT(0);
+	else
+		val = val & ~BIT(0);
+	if (drvdata->cmb->trig_ts == true)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_CMB_TIER);
+
+	__tpdm_config_cmb_msr(drvdata);
+
+	/* BIT(1) selects trace-on-change vs continuous capture */
+	val = tpdm_readl(drvdata, TPDM_CMB_CR);
+	/* Set the flow control bit */
+	val = val & ~BIT(2);
+	if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
+		val = val & ~BIT(1);
+	else
+		val = val | BIT(1);
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+	/* Set the enable bit (val still holds the value just written) */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+}
+
+/*
+ * Program and enable every user-selected dataset.  Coresight registers
+ * are unlocked for the duration; caller holds drvdata->lock and has the
+ * clock enabled.
+ */
+static void __tpdm_enable(struct tpdm_drvdata *drvdata)
+{
+	TPDM_UNLOCK(drvdata);
+
+	/* Some TPDMs gate their own clock through TPDM_CLK_CTRL */
+	if (drvdata->clk_enable)
+		tpdm_writel(drvdata, 0x1, TPDM_CLK_CTRL);
+
+	if (test_bit(TPDM_DS_GPR, drvdata->enable_ds))
+		__tpdm_enable_gpr(drvdata);
+
+	if (test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		__tpdm_enable_bc(drvdata);
+
+	if (test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		__tpdm_enable_tc(drvdata);
+
+	if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
+		__tpdm_enable_dsb(drvdata);
+
+	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+		__tpdm_enable_cmb(drvdata);
+
+	TPDM_LOCK(drvdata);
+}
+
+/*
+ * Coresight source enable callback.  The clock must be running before
+ * any register access, so it is turned on first; drvdata->lock
+ * serializes against the sysfs configuration stores.
+ */
+static int tpdm_enable(struct coresight_device *csdev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->lock);
+	__tpdm_enable(drvdata);
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->lock);
+
+	dev_info(drvdata->dev, "TPDM tracing enabled\n");
+	return 0;
+}
+
+/* Clear the enable bit (BIT(0)) of the BC control register. */
+static void __tpdm_disable_bc(struct tpdm_drvdata *drvdata)
+{
+	uint32_t cr = tpdm_readl(drvdata, TPDM_BC_CR);
+
+	tpdm_writel(drvdata, cr & ~BIT(0), TPDM_BC_CR);
+}
+
+/* Clear the enable bit (BIT(0)) of the TC control register. */
+static void __tpdm_disable_tc(struct tpdm_drvdata *drvdata)
+{
+	uint32_t cr = tpdm_readl(drvdata, TPDM_TC_CR);
+
+	tpdm_writel(drvdata, cr & ~BIT(0), TPDM_TC_CR);
+}
+
+/* Clear the enable bit (BIT(0)) of the DSB control register. */
+static void __tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t cr = tpdm_readl(drvdata, TPDM_DSB_CR);
+
+	tpdm_writel(drvdata, cr & ~BIT(0), TPDM_DSB_CR);
+}
+
+/* Clear the enable bit (BIT(0)) of the CMB control register. */
+static void __tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t cr = tpdm_readl(drvdata, TPDM_CMB_CR);
+
+	tpdm_writel(drvdata, cr & ~BIT(0), TPDM_CMB_CR);
+}
+
+/*
+ * Disable every enabled dataset and, where applicable, the TPDM's own
+ * clock gate.  Caller holds drvdata->lock.
+ */
+static void __tpdm_disable(struct tpdm_drvdata *drvdata)
+{
+	TPDM_UNLOCK(drvdata);
+
+	if (test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		__tpdm_disable_bc(drvdata);
+
+	if (test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		__tpdm_disable_tc(drvdata);
+
+	if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
+		__tpdm_disable_dsb(drvdata);
+
+	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+		__tpdm_disable_cmb(drvdata);
+
+	if (drvdata->clk_enable)
+		tpdm_writel(drvdata, 0x0, TPDM_CLK_CTRL);
+
+	TPDM_LOCK(drvdata);
+}
+
+/*
+ * Coresight source disable callback.  Mirrors tpdm_enable(): registers
+ * are touched under the lock first, then the clock is released.
+ */
+static void tpdm_disable(struct coresight_device *csdev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->lock);
+	__tpdm_disable(drvdata);
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->lock);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "TPDM tracing disabled\n");
+}
+
+/* Report the ATB trace ID assigned to this TPDM at probe time. */
+static int tpdm_trace_id(struct coresight_device *csdev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->traceid;
+}
+
+/* TPDM registers with the coresight core as a trace source */
+static const struct coresight_ops_source tpdm_source_ops = {
+	.trace_id = tpdm_trace_id,
+	.enable = tpdm_enable,
+	.disable = tpdm_disable,
+};
+
+static const struct coresight_ops tpdm_cs_ops = {
+	.source_ops = &tpdm_source_ops,
+};
+
+/*
+ * sysfs: list the dataset types this TPDM implements, as fixed-width
+ * columns in the order IMPLDEF DSB CMB TC BC GPR.
+ */
+static ssize_t tpdm_show_available_datasets(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	static const struct {
+		int bit;
+		const char *name;
+	} ds_names[] = {
+		{ TPDM_DS_IMPLDEF, "IMPLDEF" },
+		{ TPDM_DS_DSB, "DSB" },
+		{ TPDM_DS_CMB, "CMB" },
+		{ TPDM_DS_TC, "TC" },
+		{ TPDM_DS_BC, "BC" },
+		{ TPDM_DS_GPR, "GPR" },
+	};
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ds_names); i++)
+		if (test_bit(ds_names[i].bit, drvdata->datasets))
+			len += scnprintf(buf + len, PAGE_SIZE - len, "%-8s",
+					 ds_names[i].name);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+static DEVICE_ATTR(available_datasets, S_IRUGO, tpdm_show_available_datasets,
+		   NULL);
+
+/* sysfs: show the user-selected dataset bitmap in hex. */
+static ssize_t tpdm_show_enable_datasets(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size;
+
+	size = scnprintf(buf, PAGE_SIZE, "%*pb\n", TPDM_DATASETS,
+			 drvdata->enable_ds);
+
+	/*
+	 * NOTE(review): the format above already ends with '\n', so this
+	 * appends a second newline - confirm that is intended.
+	 */
+	if (PAGE_SIZE - size < 2)
+		size = -EINVAL;
+	else
+		size += scnprintf(buf + size, 2, "\n");
+	return size;
+}
+
+/*
+ * sysfs: select datasets to enable (hex bitmask).  Rejected while tracing
+ * is active; bits for datasets not present in hardware are ignored.
+ */
+static ssize_t tpdm_store_enable_datasets(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int i;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	/* Only datasets actually implemented by this TPDM may be enabled */
+	for (i = 0; i < TPDM_DATASETS; i++) {
+		if (test_bit(i, drvdata->datasets) && (val & BIT(i)))
+			__set_bit(i, drvdata->enable_ds);
+		else
+			__clear_bit(i, drvdata->enable_ds);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(enable_datasets, S_IRUGO | S_IWUSR,
+		   tpdm_show_enable_datasets, tpdm_store_enable_datasets);
+
+/* sysfs: dump the GPR values the user has staged (dirty entries only). */
+static ssize_t tpdm_show_gp_regs(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_GPR, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_GPR_REGS_MAX; i++) {
+		if (!test_bit(i, drvdata->gpr->gpr_dirty))
+			continue;
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->gpr->gp_regs[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * sysfs: stage one GPR write as "<index> <value>" (hex).  The value is
+ * programmed into hardware on the next enable (see __tpdm_enable_gpr).
+ */
+static ssize_t tpdm_store_gp_regs(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_GPR, drvdata->datasets) ||
+	    index >= TPDM_GPR_REGS_MAX)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->gpr->gp_regs[index] = val;
+	__set_bit(index, drvdata->gpr->gpr_dirty);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(gp_regs, S_IRUGO | S_IWUSR, tpdm_show_gp_regs,
+		   tpdm_store_gp_regs);
+
+/* sysfs: show the current BC capture mode ("ATB" or "APB"). */
+static ssize_t tpdm_show_bc_capture_mode(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->bc->capture_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+/*
+ * sysfs: switch the BC capture mode while tracing is live (note the
+ * !drvdata->enable -> -EPERM check: this store requires tracing to be
+ * ON).  "APB" is only valid when retrieval_mode is already APB and also
+ * sets BIT(3) of TPDM_BC_CR in hardware.
+ */
+static ssize_t tpdm_store_bc_capture_mode(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+	uint32_t val;
+
+	/* size >= 20 rejected up front, so the %s scan cannot overflow str */
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->bc->capture_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB") &&
+		   drvdata->bc->retrieval_mode == TPDM_MODE_APB) {
+
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_BC_CR);
+		val = val | BIT(3);
+		tpdm_writel(drvdata, val, TPDM_BC_CR);
+		TPDM_LOCK(drvdata);
+
+		drvdata->bc->capture_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_capture_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_capture_mode, tpdm_store_bc_capture_mode);
+
+/* sysfs: show the current BC retrieval mode ("ATB" or "APB"). */
+static ssize_t tpdm_show_bc_retrieval_mode(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->bc->retrieval_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+/*
+ * sysfs: set the BC retrieval mode.  Unlike capture_mode this is a cached
+ * setting applied on the next enable, so it is rejected while tracing is
+ * active.
+ */
+static ssize_t tpdm_store_bc_retrieval_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	/* size >= 20 rejected up front, so the %s scan cannot overflow str */
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->bc->retrieval_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB")) {
+		drvdata->bc->retrieval_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_retrieval_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_retrieval_mode, tpdm_store_bc_retrieval_mode);
+
+/*
+ * sysfs (write-only): writing a non-zero hex value pulses BIT(1) of
+ * TPDM_BC_CR to reset the BC counters.  Only valid while the BC dataset
+ * is selected and tracing is active.
+ */
+static ssize_t tpdm_store_bc_reset_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_BC_CR);
+		val = val | BIT(1);
+		tpdm_writel(drvdata, val, TPDM_BC_CR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/*
+ * Fix: the attribute has no show() method, so granting read permission
+ * (S_IRUGO) created an attribute whose reads always fail.  Make it
+ * write-only.
+ */
+static DEVICE_ATTR(bc_reset_counters, S_IWUSR, NULL,
+		   tpdm_store_bc_reset_counters);
+
+/* sysfs: show the cached BC saturation/rollover setting (hex). */
+static ssize_t tpdm_show_bc_sat_mode(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->sat_mode);
+}
+
+/* sysfs: cache the BC saturation mode; applied on next enable. */
+static ssize_t tpdm_store_bc_sat_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->sat_mode = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_sat_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_sat_mode, tpdm_store_bc_sat_mode);
+
+/* sysfs: show the cached BC counter-enable mask (hex). */
+static ssize_t tpdm_show_bc_enable_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->enable_counters);
+}
+
+/* sysfs: cache the BC counter-enable mask; applied on next enable. */
+static ssize_t tpdm_store_bc_enable_counters(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->enable_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_enable_counters, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_enable_counters, tpdm_store_bc_enable_counters);
+
+/* sysfs: show the cached BC counter-clear mask (hex). */
+static ssize_t tpdm_show_bc_clear_counters(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->clear_counters);
+}
+
+/* sysfs: cache the BC counter-clear mask; applied on next enable. */
+static ssize_t tpdm_store_bc_clear_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->clear_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_clear_counters, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_clear_counters, tpdm_store_bc_clear_counters);
+
+/* sysfs: show the cached BC interrupt-enable mask (hex). */
+static ssize_t tpdm_show_bc_enable_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->enable_irq);
+}
+
+/* sysfs: cache the BC interrupt-enable mask; applied on next enable. */
+static ssize_t tpdm_store_bc_enable_irq(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->enable_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_enable_irq, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_enable_irq, tpdm_store_bc_enable_irq);
+
+/* sysfs: show the cached BC interrupt-clear mask (hex). */
+static ssize_t tpdm_show_bc_clear_irq(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->clear_irq);
+}
+
+/* sysfs: cache the BC interrupt-clear mask; applied on next enable. */
+static ssize_t tpdm_store_bc_clear_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->clear_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_clear_irq, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_clear_irq, tpdm_store_bc_clear_irq);
+
+/* sysfs: dump all cached BC low trigger thresholds. */
+static ssize_t tpdm_show_bc_trig_val_lo(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_COUNTERS; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->trig_val_lo[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * sysfs: set one BC low trigger threshold as "<index> <value>" (hex).
+ * The index must be within the counters this TPDM implements, and with
+ * PARTIAL trigger support only index 0 is writable.
+ */
+static ssize_t tpdm_store_bc_trig_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= drvdata->bc_counters_avail ||
+	    drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->trig_val_lo[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_trig_val_lo, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_trig_val_lo, tpdm_store_bc_trig_val_lo);
+
+/* sysfs: dump all cached BC high trigger thresholds. */
+static ssize_t tpdm_show_bc_trig_val_hi(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_COUNTERS; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->trig_val_hi[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * sysfs: set one BC high trigger threshold as "<index> <value>" (hex).
+ * Same validity rules as bc_trig_val_lo.
+ */
+static ssize_t tpdm_store_bc_trig_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= drvdata->bc_counters_avail ||
+	    drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->trig_val_hi[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_trig_val_hi, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_trig_val_hi, tpdm_store_bc_trig_val_hi);
+
+static ssize_t tpdm_show_bc_enable_ganging(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+ return -EPERM;
+
+ return scnprintf(buf, PAGE_SIZE, "%lx\n",
+ (unsigned long)drvdata->bc->enable_ganging);
+}
+
+static ssize_t tpdm_store_bc_enable_ganging(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+ return -EPERM;
+
+ mutex_lock(&drvdata->lock);
+ drvdata->bc->enable_ganging = val;
+ mutex_unlock(&drvdata->lock);
+ return size;
+}
+static DEVICE_ATTR(bc_enable_ganging, S_IRUGO | S_IWUSR,
+ tpdm_show_bc_enable_ganging, tpdm_store_bc_enable_ganging);
+
+/* Show the cached BC overflow values, one "Index/Value" pair per line. */
+static ssize_t tpdm_show_bc_overflow_val(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_OVERFLOW; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->overflow_val[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Cache one BC overflow value. Input is "<index> <value>" in hex;
+ * the index must be below TPDM_BC_MAX_OVERFLOW.
+ */
+static ssize_t tpdm_store_bc_overflow_val(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= TPDM_BC_MAX_OVERFLOW)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->overflow_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_overflow_val, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_overflow_val, tpdm_store_bc_overflow_val);
+
+/*
+ * Read the live BC overflow status register. Requires the BC dataset to
+ * be enabled and the TPDM to be running; the coresight lock is dropped
+ * only for the duration of the register access.
+ */
+static ssize_t tpdm_show_bc_ovsr(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_OVSR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Write the BC overflow status register (W1C-style: a zero value is a
+ * no-op and the hardware is not touched).
+ */
+static ssize_t tpdm_store_bc_ovsr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_BC_OVSR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_ovsr, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_ovsr, tpdm_store_bc_ovsr);
+
+/* Read back the live BC counter-select register (TPDM_BC_SELR). */
+static ssize_t tpdm_show_bc_counter_sel(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Select a BC counter by index; bounded by the number of counters the
+ * hardware reports available.
+ */
+static ssize_t tpdm_store_bc_counter_sel(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable || val >= drvdata->bc_counters_avail) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_BC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_counter_sel, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_counter_sel, tpdm_store_bc_counter_sel);
+
+/*
+ * Read the live BC counter low word (TPDM_BC_CNTR_LO). Only valid while
+ * the BC dataset is enabled and the TPDM is running.
+ */
+static ssize_t tpdm_show_bc_count_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_CNTR_LO);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Program the low word of the BC counter currently selected via
+ * TPDM_BC_SELR. Rejected while the selected counter is enabled.
+ */
+static ssize_t tpdm_store_bc_count_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_BC_SELR);
+
+		/* Check if selected counter is disabled */
+		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
+			/*
+			 * Fix: re-lock the TPDM before bailing out; the
+			 * original early return left the coresight lock
+			 * register in the unlocked state.
+			 */
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_BC_CNTR_LO);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_count_val_lo, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_count_val_lo, tpdm_store_bc_count_val_lo);
+
+/*
+ * Read the live BC counter high word (TPDM_BC_CNTR_HI). Only valid while
+ * the BC dataset is enabled and the TPDM is running.
+ */
+static ssize_t tpdm_show_bc_count_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_CNTR_HI);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Program the high word of the BC counter currently selected via
+ * TPDM_BC_SELR. Rejected while the selected counter is enabled.
+ */
+static ssize_t tpdm_store_bc_count_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_BC_SELR);
+
+		/* Check if selected counter is disabled */
+		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
+			/*
+			 * Fix: re-lock the TPDM before bailing out; the
+			 * original early return left the coresight lock
+			 * register in the unlocked state.
+			 */
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_BC_CNTR_HI);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_count_val_hi, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_count_val_hi, tpdm_store_bc_count_val_hi);
+
+/*
+ * Dump the BC shadow low registers for every available counter, one
+ * "Index/Value" pair per line. Requires the TPDM to be running.
+ */
+static ssize_t tpdm_show_bc_shadow_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < drvdata->bc_counters_avail; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_BC_SHADOW_LO(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/* Fix: drop S_IWUSR — the attribute is read-only (store is NULL). */
+static DEVICE_ATTR(bc_shadow_val_lo, S_IRUGO,
+		   tpdm_show_bc_shadow_val_lo, NULL);
+
+/*
+ * Dump the BC shadow high registers for every available counter.
+ * Same constraints as the low-word variant above.
+ */
+static ssize_t tpdm_show_bc_shadow_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < drvdata->bc_counters_avail; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_BC_SHADOW_HI(i)));
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/* Fix: drop S_IWUSR — the attribute is read-only (store is NULL). */
+static DEVICE_ATTR(bc_shadow_val_hi, S_IRUGO,
+		   tpdm_show_bc_shadow_val_hi, NULL);
+
+/* Read back the BC software-increment register (TPDM_BC_SWINC). */
+static ssize_t tpdm_show_bc_sw_inc(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_SWINC);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Pulse the BC software-increment register: a non-zero hex mask is
+ * written straight to TPDM_BC_SWINC; zero is a no-op.
+ */
+static ssize_t tpdm_store_bc_sw_inc(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_BC_SWINC);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_sw_inc, S_IRUGO | S_IWUSR,
+		   tpdm_show_bc_sw_inc, tpdm_store_bc_sw_inc);
+
+/*
+ * Cache one BC MSR value. Input is "<num> <value>" (decimal index,
+ * hex value); only valid when the hardware advertises MSR support.
+ */
+static ssize_t tpdm_store_bc_msr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_BC_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_msr, S_IWUSR, NULL, tpdm_store_bc_msr);
+
+/* Show the TC capture mode as the string "ATB" or "APB". */
+static ssize_t tpdm_show_tc_capture_mode(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->tc->capture_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+/*
+ * Set the TC capture mode ("ATB" or "APB"). Switching to APB is only
+ * allowed when the retrieval mode is already APB, and it additionally
+ * sets BIT(3) of TPDM_TC_CR on the hardware. Requires the TPDM to be
+ * running (unlike the retrieval-mode store below, which requires it to
+ * be stopped).
+ */
+static ssize_t tpdm_store_tc_capture_mode(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+	uint32_t val;
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->tc->capture_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB") &&
+		   drvdata->tc->retrieval_mode == TPDM_MODE_APB) {
+
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_TC_CR);
+		val = val | BIT(3);
+		tpdm_writel(drvdata, val, TPDM_TC_CR);
+		TPDM_LOCK(drvdata);
+
+		drvdata->tc->capture_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_capture_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_capture_mode, tpdm_store_tc_capture_mode);
+
+/* Show the TC retrieval mode as the string "ATB" or "APB". */
+static ssize_t tpdm_show_tc_retrieval_mode(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->tc->retrieval_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+/*
+ * Set the TC retrieval mode ("ATB" or "APB"). Only the cached value is
+ * changed, and only while the TPDM is disabled.
+ */
+static ssize_t tpdm_store_tc_retrieval_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->tc->retrieval_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB")) {
+		drvdata->tc->retrieval_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_retrieval_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_retrieval_mode, tpdm_store_tc_retrieval_mode);
+
+/*
+ * Write-only trigger: a non-zero hex value pulses BIT(1) in TPDM_TC_CR
+ * to reset the TC counters. Requires the TPDM to be running.
+ */
+static ssize_t tpdm_store_tc_reset_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_TC_CR);
+		val = val | BIT(1);
+		tpdm_writel(drvdata, val, TPDM_TC_CR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/* Fix: drop S_IRUGO — the attribute has no show() handler. */
+static DEVICE_ATTR(tc_reset_counters, S_IWUSR, NULL,
+		   tpdm_store_tc_reset_counters);
+
+/* Show the cached TC saturation-mode flag (0 or 1). */
+static ssize_t tpdm_show_tc_sat_mode(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->tc->sat_mode);
+}
+
+/* Cache the TC saturation-mode flag: any non-zero hex input enables it. */
+static ssize_t tpdm_store_tc_sat_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->tc->sat_mode = true;
+	else
+		drvdata->tc->sat_mode = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_sat_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_sat_mode, tpdm_store_tc_sat_mode);
+
+/* Show the cached TC counter-enable mask (hex). */
+static ssize_t tpdm_show_tc_enable_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->enable_counters);
+}
+
+/*
+ * Cache a new TC counter-enable mask. The mask may not set bits above
+ * the number of counters the hardware provides.
+ */
+static ssize_t tpdm_store_tc_enable_counters(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+	if (val >> drvdata->tc_counters_avail)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->enable_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_enable_counters, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_enable_counters, tpdm_store_tc_enable_counters);
+
+/* Show the cached TC counter-clear mask (hex). */
+static ssize_t tpdm_show_tc_clear_counters(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->clear_counters);
+}
+
+/*
+ * Cache a new TC counter-clear mask, bounded by the available counter
+ * count just like the enable mask above.
+ */
+static ssize_t tpdm_store_tc_clear_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+	if (val >> drvdata->tc_counters_avail)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->clear_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_clear_counters, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_clear_counters, tpdm_store_tc_clear_counters);
+
+/* Show the cached TC IRQ-enable mask (hex). */
+static ssize_t tpdm_show_tc_enable_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->enable_irq);
+}
+
+/* Cache a new TC IRQ-enable mask (hex input, unvalidated). */
+static ssize_t tpdm_store_tc_enable_irq(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->enable_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_enable_irq, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_enable_irq, tpdm_store_tc_enable_irq);
+
+/* Show the cached TC IRQ-clear mask (hex). */
+static ssize_t tpdm_show_tc_clear_irq(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->clear_irq);
+}
+
+/* Cache a new TC IRQ-clear mask (hex input, unvalidated). */
+static ssize_t tpdm_store_tc_clear_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->clear_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_clear_irq, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_clear_irq, tpdm_store_tc_clear_irq);
+
+/* Show the cached TC trigger selects, one "Index/Value" pair per line. */
+static ssize_t tpdm_show_tc_trig_sel(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_sel[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Cache one TC trigger select. Input is "<index> <value>" in hex;
+ * rejected when trigger support is absent or partial (partial allows
+ * index 0 only), or the index is out of range.
+ */
+static ssize_t tpdm_store_tc_trig_sel(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_sel[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_sel, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_trig_sel, tpdm_store_tc_trig_sel);
+
+/* Show the cached TC low trigger values. */
+static ssize_t tpdm_show_tc_trig_val_lo(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_val_lo[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Cache one TC low trigger value; same input format and validation as
+ * tc_trig_sel above.
+ */
+static ssize_t tpdm_store_tc_trig_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_val_lo[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_val_lo, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_trig_val_lo, tpdm_store_tc_trig_val_lo);
+
+/* Show the cached TC high trigger values. */
+static ssize_t tpdm_show_tc_trig_val_hi(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_val_hi[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Cache one TC high trigger value; same input format and validation as
+ * tc_trig_sel above.
+ */
+static ssize_t tpdm_store_tc_trig_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_val_hi[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_val_hi, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_trig_val_hi, tpdm_store_tc_trig_val_hi);
+
+/*
+ * Read the live TC group overflow status register (TPDM_TC_OVSR_GP).
+ * NOTE(review): this show tests drvdata->datasets while its store and
+ * the sibling tc_ovsr_impl handlers test drvdata->enable_ds — confirm
+ * which bitmap is intended here.
+ */
+static ssize_t tpdm_show_tc_ovsr_gp(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_OVSR_GP);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Write the TC group overflow status register (a zero value is a no-op
+ * and the hardware is not touched).
+ */
+static ssize_t tpdm_store_tc_ovsr_gp(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_OVSR_GP);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_ovsr_gp, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_ovsr_gp, tpdm_store_tc_ovsr_gp);
+
+/* Read the live implementation-defined TC overflow status register. */
+static ssize_t tpdm_show_tc_ovsr_impl(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_OVSR_IMPL);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Write the implementation-defined TC overflow status register (zero is
+ * a no-op).
+ */
+static ssize_t tpdm_store_tc_ovsr_impl(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_OVSR_IMPL);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_ovsr_impl, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_ovsr_impl, tpdm_store_tc_ovsr_impl);
+
+/* Read back the live TC counter-select register (TPDM_TC_SELR). */
+static ssize_t tpdm_show_tc_counter_sel(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Write TPDM_TC_SELR verbatim.
+ * NOTE(review): unlike bc_counter_sel, no range check is applied to the
+ * value — presumably TC_SELR encodes more than a plain index (the count
+ * stores extract bits [12:11]); confirm against the TPDM spec.
+ */
+static ssize_t tpdm_store_tc_counter_sel(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_TC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_counter_sel, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_counter_sel, tpdm_store_tc_counter_sel);
+
+/*
+ * Read the live TC counter low word (TPDM_TC_CNTR_LO). Only valid while
+ * the TC dataset is enabled and the TPDM is running.
+ */
+static ssize_t tpdm_show_tc_count_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_CNTR_LO);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Program the low word of the TC counter selected by TPDM_TC_SELR bits
+ * [12:11]. Rejected while the selected counter is enabled.
+ */
+static ssize_t tpdm_store_tc_count_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_TC_SELR);
+		select = (select >> 11) & 0x3;
+
+		/* Check if selected counter is disabled */
+		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
+			/*
+			 * Fix: re-lock the TPDM before bailing out; the
+			 * original early return left the coresight lock
+			 * register in the unlocked state.
+			 */
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_TC_CNTR_LO);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_count_val_lo, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_count_val_lo, tpdm_store_tc_count_val_lo);
+
+/*
+ * Read the live TC counter high word (TPDM_TC_CNTR_HI). Only valid while
+ * the TC dataset is enabled and the TPDM is running.
+ */
+static ssize_t tpdm_show_tc_count_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_CNTR_HI);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Program the high word of the TC counter selected by TPDM_TC_SELR bits
+ * [12:11]. Rejected while the selected counter is enabled.
+ */
+static ssize_t tpdm_store_tc_count_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_TC_SELR);
+		select = (select >> 11) & 0x3;
+
+		/* Check if selected counter is disabled */
+		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
+			/*
+			 * Fix: re-lock the TPDM before bailing out; the
+			 * original early return left the coresight lock
+			 * register in the unlocked state.
+			 */
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_TC_CNTR_HI);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_count_val_hi, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_count_val_hi, tpdm_store_tc_count_val_hi);
+
+/*
+ * Dump the TC shadow low registers for all counters, one "Index/Value"
+ * pair per line. Requires the TPDM to be running.
+ */
+static ssize_t tpdm_show_tc_shadow_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < TPDM_TC_MAX_COUNTERS; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_TC_SHADOW_LO(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/* Fix: drop S_IWUSR — the attribute is read-only (store is NULL). */
+static DEVICE_ATTR(tc_shadow_val_lo, S_IRUGO,
+		   tpdm_show_tc_shadow_val_lo, NULL);
+
+/*
+ * Dump the TC shadow high registers for all counters. Same constraints
+ * as the low-word variant above.
+ */
+static ssize_t tpdm_show_tc_shadow_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < TPDM_TC_MAX_COUNTERS; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_TC_SHADOW_HI(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+/* Fix: drop S_IWUSR — the attribute is read-only (store is NULL). */
+static DEVICE_ATTR(tc_shadow_val_hi, S_IRUGO,
+		   tpdm_show_tc_shadow_val_hi, NULL);
+
+/* Read back the TC software-increment register (TPDM_TC_SWINC). */
+static ssize_t tpdm_show_tc_sw_inc(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_SWINC);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+/*
+ * Pulse the TC software-increment register: a non-zero hex mask is
+ * written straight to TPDM_TC_SWINC; zero is a no-op.
+ */
+static ssize_t tpdm_store_tc_sw_inc(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_SWINC);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_sw_inc, S_IRUGO | S_IWUSR,
+		   tpdm_show_tc_sw_inc, tpdm_store_tc_sw_inc);
+
+/*
+ * Cache one TC MSR value. Input is "<num> <value>" (decimal index,
+ * hex value); only valid when the hardware advertises MSR support.
+ */
+static ssize_t tpdm_store_tc_msr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_TC_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_msr, S_IWUSR, NULL, tpdm_store_tc_msr);
+
+/* Show the cached DSB mode bits (hex). */
+static ssize_t tpdm_show_dsb_mode(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->dsb->mode);
+}
+
+/*
+ * Store the DSB mode bits; masked with TPDM_MODE_ALL so only valid mode
+ * bits are retained. Takes effect via the driver's enable path, not
+ * written to hardware here.
+ */
+static ssize_t tpdm_store_dsb_mode(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->mode = val & TPDM_MODE_ALL;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_mode, tpdm_store_dsb_mode);
+
+/* Dump the cached DSB edge-control registers, one line per EDCR index. */
+static ssize_t tpdm_show_dsb_edge_ctrl(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_EDCR; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->edge_ctrl[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Set the 2-bit edge-control value for a range of DSB input lines.
+ * Input: "<start> <end> <edge_ctrl>" (hex), edge_ctrl in 0..2.
+ * Each line occupies two bits of an edge_ctrl register, so a register
+ * holds NUM_OF_BITS/2 lines.
+ */
+static ssize_t tpdm_store_dsb_edge_ctrl(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end, edge_ctrl;
+	uint32_t val;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx %lx", &start, &end, &edge_ctrl) != 3)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES) ||
+	    edge_ctrl > 0x2)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / (NUM_OF_BITS / 2);
+		bit = i % (NUM_OF_BITS / 2);
+		bit = bit * 2;
+
+		val = drvdata->dsb->edge_ctrl[reg];
+		/*
+		 * Fix: clear the line's 2-bit field before OR-ing in the
+		 * new value. The old code only OR-ed, so a previously
+		 * stored value could never be lowered (e.g. writing 1
+		 * over 2 yielded 3).
+		 */
+		val &= ~(0x3 << bit);
+		val |= (edge_ctrl << bit);
+		drvdata->dsb->edge_ctrl[reg] = val;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_edge_ctrl, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_edge_ctrl, tpdm_store_dsb_edge_ctrl);
+
+/*
+ * Dump the cached DSB edge-control mask registers. The mask uses one
+ * bit per line (vs. two for edge_ctrl), hence half as many registers.
+ */
+static ssize_t tpdm_show_dsb_edge_ctrl_mask(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_EDCR / 2; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->edge_ctrl_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Set or clear the edge-control mask bit for a range of DSB lines.
+ * Input: "<start> <end> <val>" (hex); non-zero val sets the bits,
+ * zero clears them.
+ */
+static ssize_t tpdm_store_dsb_edge_ctrl_mask(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end, val;
+	uint32_t set;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx %lx", &start, &end, &val) != 3)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / NUM_OF_BITS;
+		bit = (i % NUM_OF_BITS);
+
+		set = drvdata->dsb->edge_ctrl_mask[reg];
+		if (val)
+			set = set | BIT(bit);
+		else
+			set = set & ~BIT(bit);
+		drvdata->dsb->edge_ctrl_mask[reg] = set;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_edge_ctrl_mask, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_edge_ctrl_mask, tpdm_store_dsb_edge_ctrl_mask);
+
+/* Dump all cached DSB pattern-match values, one line per index. */
+static ssize_t tpdm_show_dsb_patt_val(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->patt_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/* Store one DSB pattern value. Input: "<index> <hexval>". */
+static ssize_t tpdm_store_dsb_patt_val(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->patt_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_val, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_patt_val, tpdm_store_dsb_patt_val);
+
+/* Dump all cached DSB pattern-match masks, one line per index. */
+static ssize_t tpdm_show_dsb_patt_mask(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->patt_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/* Store one DSB pattern mask. Input: "<index> <hexval>". */
+static ssize_t tpdm_store_dsb_patt_mask(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->patt_mask[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_mask, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_patt_mask, tpdm_store_dsb_patt_mask);
+
+/* Show the DSB pattern-timestamp flag (0 or 1). */
+static ssize_t tpdm_show_dsb_patt_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->dsb->patt_ts);
+}
+
+/* Enable/disable pattern-match timestamping (any non-zero enables). */
+static ssize_t tpdm_store_dsb_patt_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->patt_ts = true;
+	else
+		drvdata->dsb->patt_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_ts, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_patt_ts, tpdm_store_dsb_patt_ts);
+
+/* Show the DSB pattern-type flag (0 or 1). */
+static ssize_t tpdm_show_dsb_patt_type(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->dsb->patt_type);
+}
+
+/* Set the DSB pattern-type flag (any non-zero sets it). */
+static ssize_t tpdm_store_dsb_patt_type(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->patt_type = true;
+	else
+		drvdata->dsb->patt_type = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_type, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_patt_type, tpdm_store_dsb_patt_type);
+
+/* Dump all cached DSB trigger-pattern values, one line per index. */
+static ssize_t tpdm_show_dsb_trig_patt_val(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->trig_patt_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/* Store one DSB trigger-pattern value. Input: "<index> <hexval>". */
+static ssize_t tpdm_store_dsb_trig_patt_val(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->trig_patt_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_patt_val, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_trig_patt_val, tpdm_store_dsb_trig_patt_val);
+
+/* Dump all cached DSB trigger-pattern masks, one line per index. */
+static ssize_t tpdm_show_dsb_trig_patt_mask(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->trig_patt_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/* Store one DSB trigger-pattern mask. Input: "<index> <hexval>". */
+static ssize_t tpdm_store_dsb_trig_patt_mask(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->trig_patt_mask[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_patt_mask, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_trig_patt_mask, tpdm_store_dsb_trig_patt_mask);
+
+/* Show the DSB trigger-timestamp flag (0 or 1). */
+static ssize_t tpdm_show_dsb_trig_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->dsb->trig_ts);
+}
+
+/* Enable/disable DSB trigger timestamping (any non-zero enables). */
+static ssize_t tpdm_store_dsb_trig_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->trig_ts = true;
+	else
+		drvdata->dsb->trig_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_ts, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_trig_ts, tpdm_store_dsb_trig_ts);
+
+/* Dump the cached DSB line-select registers, one line per index. */
+static ssize_t tpdm_show_dsb_select_val(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_SELECT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->select_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+/*
+ * Select a range of DSB lines. Input: "<start> <end>" (hex).
+ * NOTE(review): this interface only sets bits — there is no way to
+ * deselect a line once selected; presumably deselect is done by
+ * resetting the whole configuration. Confirm intent.
+ */
+static ssize_t tpdm_store_dsb_select_val(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end;
+	uint32_t val;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx", &start, &end) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / NUM_OF_BITS;
+		bit = (i % NUM_OF_BITS);
+
+		val = drvdata->dsb->select_val[reg];
+		val = val | BIT(bit);
+		drvdata->dsb->select_val[reg] = val;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_select_val, S_IRUGO | S_IWUSR,
+		   tpdm_show_dsb_select_val, tpdm_store_dsb_select_val);
+
+/*
+ * Cache a DSB MSR value ("<index> <hexval>"). Only valid on hardware
+ * with MSR support.
+ */
+static ssize_t tpdm_store_dsb_msr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_DSB_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_msr, S_IWUSR, NULL, tpdm_store_dsb_msr);
+
+/* List the CMB modes accepted by the cmb_mode attribute. */
+static ssize_t tpdm_show_cmb_available_modes(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", "continuous trace_on_change");
+}
+static DEVICE_ATTR(cmb_available_modes, S_IRUGO, tpdm_show_cmb_available_modes,
+		   NULL);
+
+/* Show the current CMB mode as its textual name. */
+static ssize_t tpdm_show_cmb_mode(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS ?
+			 "continuous" : "trace_on_change");
+}
+
+/*
+ * Set the CMB mode by name: "continuous" or "trace_on_change".
+ * Any other string is rejected with -EINVAL.
+ */
+static ssize_t tpdm_store_cmb_mode(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	if (strlen(buf) >= 20)
+		return -EINVAL;
+	/*
+	 * Hardening: bound the conversion with a field width so sscanf
+	 * can never overrun str even if the length guard above changes.
+	 */
+	if (sscanf(buf, "%19s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!strcmp(str, "continuous")) {
+		drvdata->cmb->mode = TPDM_CMB_MODE_CONTINUOUS;
+	} else if (!strcmp(str, "trace_on_change")) {
+		drvdata->cmb->mode = TPDM_CMB_MODE_TRACE_ON_CHANGE;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_mode, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_mode, tpdm_store_cmb_mode);
+
+/* Show the cached CMB pattern value, low word. */
+static ssize_t tpdm_show_cmb_patt_val_lsb(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_val[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB pattern value, low word (hex). */
+static ssize_t tpdm_store_cmb_patt_val_lsb(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_val[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_val_lsb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_patt_val_lsb,
+		   tpdm_store_cmb_patt_val_lsb);
+
+/* Show the cached CMB pattern mask, low word. */
+static ssize_t tpdm_show_cmb_patt_mask_lsb(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_mask[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB pattern mask, low word (hex). */
+static ssize_t tpdm_store_cmb_patt_mask_lsb(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_mask[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_mask_lsb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_patt_mask_lsb, tpdm_store_cmb_patt_mask_lsb);
+
+/* Show the cached CMB pattern value, high word. */
+static ssize_t tpdm_show_cmb_patt_val_msb(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_val[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB pattern value, high word (hex). */
+static ssize_t tpdm_store_cmb_patt_val_msb(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_val[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_val_msb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_patt_val_msb,
+		   tpdm_store_cmb_patt_val_msb);
+
+/* Show the cached CMB pattern mask, high word. */
+static ssize_t tpdm_show_cmb_patt_mask_msb(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_mask[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB pattern mask, high word (hex). */
+static ssize_t tpdm_store_cmb_patt_mask_msb(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_mask[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_mask_msb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_patt_mask_msb, tpdm_store_cmb_patt_mask_msb);
+
+/* Show the CMB pattern-timestamp flag (0 or 1). */
+static ssize_t tpdm_show_cmb_patt_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->cmb->patt_ts);
+}
+
+/* Enable/disable CMB pattern-match timestamping (any non-zero enables). */
+static ssize_t tpdm_store_cmb_patt_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->cmb->patt_ts = true;
+	else
+		drvdata->cmb->patt_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_ts, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_patt_ts, tpdm_store_cmb_patt_ts);
+
+/* Show the cached CMB trigger-pattern value, low word. */
+static ssize_t tpdm_show_cmb_trig_patt_val_lsb(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_val[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB trigger-pattern value, low word (hex). */
+static ssize_t tpdm_store_cmb_trig_patt_val_lsb(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_val[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_val_lsb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_trig_patt_val_lsb,
+		   tpdm_store_cmb_trig_patt_val_lsb);
+
+/* Show the cached CMB trigger-pattern mask, low word. */
+static ssize_t tpdm_show_cmb_trig_patt_mask_lsb(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB trigger-pattern mask, low word (hex). */
+static ssize_t tpdm_store_cmb_trig_patt_mask_lsb(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_mask_lsb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_trig_patt_mask_lsb,
+		   tpdm_store_cmb_trig_patt_mask_lsb);
+
+/* Show the cached CMB trigger-pattern value, high word. */
+static ssize_t tpdm_show_cmb_trig_patt_val_msb(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_val[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB trigger-pattern value, high word (hex). */
+static ssize_t tpdm_store_cmb_trig_patt_val_msb(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_val[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_val_msb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_trig_patt_val_msb,
+		   tpdm_store_cmb_trig_patt_val_msb);
+
+/* Show the cached CMB trigger-pattern mask, high word. */
+static ssize_t tpdm_show_cmb_trig_patt_mask_msb(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Store the CMB trigger-pattern mask, high word (hex). */
+static ssize_t tpdm_store_cmb_trig_patt_mask_msb(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_mask_msb, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_trig_patt_mask_msb,
+		   tpdm_store_cmb_trig_patt_mask_msb);
+
+/* Show the CMB trigger-timestamp flag (0 or 1). */
+static ssize_t tpdm_show_cmb_trig_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned)drvdata->cmb->trig_ts);
+}
+
+/* Enable/disable CMB trigger timestamping (any non-zero enables). */
+static ssize_t tpdm_store_cmb_trig_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->cmb->trig_ts = true;
+	else
+		drvdata->cmb->trig_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_ts, S_IRUGO | S_IWUSR,
+		   tpdm_show_cmb_trig_ts, tpdm_store_cmb_trig_ts);
+
+/*
+ * Cache a CMB MSR value ("<index> <hexval>"). Only valid on hardware
+ * with MSR support.
+ */
+static ssize_t tpdm_store_cmb_msr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_CMB_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_msr, S_IWUSR, NULL, tpdm_store_cmb_msr);
+
+/* Basic-counter (BC) dataset attributes. */
+static struct attribute *tpdm_bc_attrs[] = {
+	&dev_attr_bc_capture_mode.attr,
+	&dev_attr_bc_retrieval_mode.attr,
+	&dev_attr_bc_reset_counters.attr,
+	&dev_attr_bc_sat_mode.attr,
+	&dev_attr_bc_enable_counters.attr,
+	&dev_attr_bc_clear_counters.attr,
+	&dev_attr_bc_enable_irq.attr,
+	&dev_attr_bc_clear_irq.attr,
+	&dev_attr_bc_trig_val_lo.attr,
+	&dev_attr_bc_trig_val_hi.attr,
+	&dev_attr_bc_enable_ganging.attr,
+	&dev_attr_bc_overflow_val.attr,
+	&dev_attr_bc_ovsr.attr,
+	&dev_attr_bc_counter_sel.attr,
+	&dev_attr_bc_count_val_lo.attr,
+	&dev_attr_bc_count_val_hi.attr,
+	&dev_attr_bc_shadow_val_lo.attr,
+	&dev_attr_bc_shadow_val_hi.attr,
+	&dev_attr_bc_sw_inc.attr,
+	&dev_attr_bc_msr.attr,
+	NULL,
+};
+
+/* Tenure-counter (TC) dataset attributes. */
+static struct attribute *tpdm_tc_attrs[] = {
+	&dev_attr_tc_capture_mode.attr,
+	&dev_attr_tc_retrieval_mode.attr,
+	&dev_attr_tc_reset_counters.attr,
+	&dev_attr_tc_sat_mode.attr,
+	&dev_attr_tc_enable_counters.attr,
+	&dev_attr_tc_clear_counters.attr,
+	&dev_attr_tc_enable_irq.attr,
+	&dev_attr_tc_clear_irq.attr,
+	&dev_attr_tc_trig_sel.attr,
+	&dev_attr_tc_trig_val_lo.attr,
+	&dev_attr_tc_trig_val_hi.attr,
+	&dev_attr_tc_ovsr_gp.attr,
+	&dev_attr_tc_ovsr_impl.attr,
+	&dev_attr_tc_counter_sel.attr,
+	&dev_attr_tc_count_val_lo.attr,
+	&dev_attr_tc_count_val_hi.attr,
+	&dev_attr_tc_shadow_val_lo.attr,
+	&dev_attr_tc_shadow_val_hi.attr,
+	&dev_attr_tc_sw_inc.attr,
+	&dev_attr_tc_msr.attr,
+	NULL,
+};
+
+/* Discrete single bit (DSB) dataset attributes. */
+static struct attribute *tpdm_dsb_attrs[] = {
+	&dev_attr_dsb_mode.attr,
+	&dev_attr_dsb_edge_ctrl.attr,
+	&dev_attr_dsb_edge_ctrl_mask.attr,
+	&dev_attr_dsb_patt_val.attr,
+	&dev_attr_dsb_patt_mask.attr,
+	&dev_attr_dsb_patt_ts.attr,
+	&dev_attr_dsb_patt_type.attr,
+	&dev_attr_dsb_trig_patt_val.attr,
+	&dev_attr_dsb_trig_patt_mask.attr,
+	&dev_attr_dsb_trig_ts.attr,
+	&dev_attr_dsb_select_val.attr,
+	&dev_attr_dsb_msr.attr,
+	NULL,
+};
+
+/* Continuous multi bit (CMB) dataset attributes. */
+static struct attribute *tpdm_cmb_attrs[] = {
+	&dev_attr_cmb_available_modes.attr,
+	&dev_attr_cmb_mode.attr,
+	&dev_attr_cmb_patt_val_lsb.attr,
+	&dev_attr_cmb_patt_mask_lsb.attr,
+	&dev_attr_cmb_patt_val_msb.attr,
+	&dev_attr_cmb_patt_mask_msb.attr,
+	&dev_attr_cmb_patt_ts.attr,
+	&dev_attr_cmb_trig_patt_val_lsb.attr,
+	&dev_attr_cmb_trig_patt_mask_lsb.attr,
+	&dev_attr_cmb_trig_patt_val_msb.attr,
+	&dev_attr_cmb_trig_patt_mask_msb.attr,
+	&dev_attr_cmb_trig_ts.attr,
+	&dev_attr_cmb_msr.attr,
+	NULL,
+};
+
+static struct attribute_group tpdm_bc_attr_grp = {
+	.attrs = tpdm_bc_attrs,
+};
+
+static struct attribute_group tpdm_tc_attr_grp = {
+	.attrs = tpdm_tc_attrs,
+};
+
+static struct attribute_group tpdm_dsb_attr_grp = {
+	.attrs = tpdm_dsb_attrs,
+};
+
+static struct attribute_group tpdm_cmb_attr_grp = {
+	.attrs = tpdm_cmb_attrs,
+};
+
+/* Dataset-independent attributes. */
+static struct attribute *tpdm_attrs[] = {
+	&dev_attr_available_datasets.attr,
+	&dev_attr_enable_datasets.attr,
+	&dev_attr_gp_regs.attr,
+	NULL,
+};
+
+static struct attribute_group tpdm_attr_grp = {
+	.attrs = tpdm_attrs,
+};
+/* All groups handed to coresight_register() via the device descriptor. */
+static const struct attribute_group *tpdm_attr_grps[] = {
+	&tpdm_attr_grp,
+	&tpdm_bc_attr_grp,
+	&tpdm_tc_attr_grp,
+	&tpdm_dsb_attr_grp,
+	&tpdm_cmb_attr_grp,
+	NULL,
+};
+
+/*
+ * Allocate per-dataset shadow state for every dataset the hardware
+ * reports as implemented. devm-managed, so nothing to free on failure.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int tpdm_datasets_alloc(struct tpdm_drvdata *drvdata)
+{
+	if (test_bit(TPDM_DS_GPR, drvdata->datasets)) {
+		drvdata->gpr = devm_kzalloc(drvdata->dev, sizeof(*drvdata->gpr),
+					    GFP_KERNEL);
+		if (!drvdata->gpr)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_BC, drvdata->datasets)) {
+		drvdata->bc = devm_kzalloc(drvdata->dev, sizeof(*drvdata->bc),
+					   GFP_KERNEL);
+		if (!drvdata->bc)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_TC, drvdata->datasets)) {
+		drvdata->tc = devm_kzalloc(drvdata->dev, sizeof(*drvdata->tc),
+					   GFP_KERNEL);
+		if (!drvdata->tc)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_DSB, drvdata->datasets)) {
+		drvdata->dsb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->dsb),
+					    GFP_KERNEL);
+		if (!drvdata->dsb)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets)) {
+		drvdata->cmb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->cmb),
+					    GFP_KERNEL);
+		if (!drvdata->cmb)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/* Seed sensible defaults for the datasets that were just allocated. */
+static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
+{
+	if (test_bit(TPDM_DS_BC, drvdata->datasets))
+		drvdata->bc->retrieval_mode = TPDM_MODE_ATB;
+
+	if (test_bit(TPDM_DS_TC, drvdata->datasets))
+		drvdata->tc->retrieval_mode = TPDM_MODE_ATB;
+
+	if (test_bit(TPDM_DS_DSB, drvdata->datasets))
+		drvdata->dsb->trig_ts = true;
+
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets))
+		drvdata->cmb->trig_ts = true;
+}
+
+/*
+ * Probe a TPDM: map its registers, discover which datasets the hardware
+ * implements (PERIPHIDR0 bits), allocate per-dataset shadow state, and
+ * register the device as a CoreSight source.
+ */
+static int tpdm_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	uint32_t pidr, devid;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct tpdm_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+	static int traceid = TPDM_TRACE_ID_START; /* shared across probes */
+	uint32_t version;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpdm-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	drvdata->clk_enable = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,clk-enable");
+
+	drvdata->msr_fix_req = of_property_read_bool(pdev->dev.of_node,
+						     "qcom,msr-fix-req");
+
+	mutex_init(&drvdata->lock);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	/* Clock must stay on while the ID/config registers are read. */
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	/* MSRs only exist on TPDM revisions with a non-zero version. */
+	version = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR2);
+	drvdata->version = BMVAL(version, 4, 7);
+
+	if (drvdata->version)
+		drvdata->msr_support = true;
+
+	/* Each PERIPHIDR0 bit flags one implemented dataset type. */
+	pidr = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR0);
+	for (i = 0; i < TPDM_DATASETS; i++) {
+		if (pidr & BIT(i)) {
+			__set_bit(i, drvdata->datasets);
+			__set_bit(i, drvdata->enable_ds);
+		}
+	}
+
+	ret = tpdm_datasets_alloc(drvdata);
+	if (ret) {
+		/* Fix: don't leak the enabled clock on allocation failure. */
+		clk_disable_unprepare(drvdata->clk);
+		return ret;
+	}
+
+	tpdm_init_default_data(drvdata);
+
+	devid = tpdm_readl(drvdata, CORESIGHT_DEVID);
+	drvdata->tc_trig_type = BMVAL(devid, 27, 28);
+	drvdata->bc_trig_type = BMVAL(devid, 25, 26);
+	drvdata->bc_gang_type = BMVAL(devid, 23, 24);
+	drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1;
+	drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	drvdata->traceid = traceid++;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &tpdm_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = tpdm_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_dbg(drvdata->dev, "TPDM initialized\n");
+
+	if (boot_enable)
+		coresight_enable(drvdata->csdev);
+
+	return 0;
+}
+
+/* Unregister from the CoreSight core; everything else is devm-managed. */
+static int tpdm_remove(struct platform_device *pdev)
+{
+	struct tpdm_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+/*
+ * DT match table. Made const (of_match_table expects a const table) and
+ * exported via MODULE_DEVICE_TABLE so modprobe can autoload the driver
+ * from the DT compatible string.
+ */
+static const struct of_device_id tpdm_match[] = {
+	{.compatible = "qcom,coresight-tpdm"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, tpdm_match);
+
+/* Platform driver glue; matched against DT via tpdm_match. */
+static struct platform_driver tpdm_driver = {
+	.probe          = tpdm_probe,
+	.remove         = tpdm_remove,
+	.driver         = {
+		.name   = "coresight-tpdm",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpdm_match,
+	},
+};
+
+/*
+ * Replace the hand-rolled init/exit pair with module_platform_driver(),
+ * which expands to exactly the same register/unregister boilerplate.
+ */
+module_platform_driver(tpdm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7214efd10db5..3fd080b94069 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -103,9 +103,19 @@ static void tpiu_disable(struct coresight_device *csdev)
dev_info(drvdata->dev, "TPIU disabled\n");
}
+static void tpiu_abort(struct coresight_device *csdev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ tpiu_disable_hw(drvdata);
+
+ dev_info(drvdata->dev, "TPIU aborted\n");
+}
+
static const struct coresight_ops_sink tpiu_sink_ops = {
.enable = tpiu_enable,
.disable = tpiu_disable,
+ .abort = tpiu_abort,
};
static const struct coresight_ops tpiu_cs_ops = {
@@ -152,6 +162,9 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
pm_runtime_put(&adev->dev);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 93738dfbf631..9b0cc7baca73 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
+static struct coresight_device *curr_sink;
static int coresight_id_match(struct device *dev, void *data)
{
@@ -308,6 +309,9 @@ static int coresight_build_paths(struct coresight_device *csdev,
int i, ret = -EINVAL;
struct coresight_connection *conn;
+ if (!csdev)
+ return ret;
+
list_add(&csdev->path_link, path);
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -382,7 +386,127 @@ out:
}
EXPORT_SYMBOL_GPL(coresight_disable);
-static ssize_t enable_sink_show(struct device *dev,
+void coresight_abort(void)
+{
+ if (!mutex_trylock(&coresight_mutex)) {
+ pr_err_ratelimited("coresight: abort could not be processed\n");
+ return;
+ }
+ if (!curr_sink)
+ goto out;
+
+ if (curr_sink->enable && sink_ops(curr_sink)->abort) {
+ sink_ops(curr_sink)->abort(curr_sink);
+ curr_sink->enable = false;
+ }
+
+out:
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_abort);
+
+static int coresight_disable_all_source(struct device *dev, void *data)
+{
+ struct coresight_device *csdev;
+ LIST_HEAD(path);
+
+ csdev = to_coresight_device(dev);
+
+ /*
+ * No need to care about components that are not sources or not enabled
+ */
+ if (!csdev->enable || csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
+ return 0;
+
+ coresight_disable_source(csdev);
+
+ return 0;
+}
+
+static int coresight_toggle_source_path(struct device *dev, void *data)
+{
+ struct coresight_device *csdev;
+ bool *enable = data;
+ int ret;
+ LIST_HEAD(path);
+
+ csdev = to_coresight_device(dev);
+
+ /*
+ * No need to care about components that are not sources or not enabled
+ */
+ if (!csdev->enable || csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
+ return 0;
+
+ if (*enable) {
+ ret = coresight_build_paths(csdev, &path, true);
+ if (ret) {
+ dev_err(&csdev->dev, "building path(s) failed\n");
+ return ret;
+ }
+ } else {
+ if (coresight_build_paths(csdev, &path, false))
+ dev_err(&csdev->dev, "releasing path(s) failed\n");
+ }
+
+ return 0;
+}
+
+static int coresight_switch_sink(struct coresight_device *csdev)
+{
+ int ret;
+ LIST_HEAD(slist);
+ bool enable = false;
+
+ mutex_lock(&coresight_mutex);
+
+ /* If curr_sink is same as new requested sink then do nothing. */
+ if (curr_sink == csdev)
+ goto out;
+
+ /*
+ * If curr_sink is NULL then sink is getting set for the first time.
+ * No source should be enabled at this time.
+ */
+ if (!curr_sink) {
+ csdev->activated = true;
+ goto out;
+ }
+
+ /* curr_sink is different from csdev */
+ bus_for_each_dev(&coresight_bustype, NULL,
+ &enable, coresight_toggle_source_path);
+
+ csdev->activated = true;
+ curr_sink->activated = false;
+
+ enable = true;
+ ret = bus_for_each_dev(&coresight_bustype, NULL, &enable,
+ coresight_toggle_source_path);
+ if (ret)
+ goto err;
+out:
+ curr_sink = csdev;
+ mutex_unlock(&coresight_mutex);
+ return 0;
+
+err:
+ /* Disable sources */
+ bus_for_each_dev(&coresight_bustype, NULL,
+ &enable, coresight_disable_all_source);
+
+ enable = false;
+ bus_for_each_dev(&coresight_bustype, NULL,
+ &enable, coresight_toggle_source_path);
+
+ csdev->activated = false;
+ curr_sink->activated = true;
+
+ mutex_unlock(&coresight_mutex);
+ return ret;
+}
+
+static ssize_t curr_sink_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -390,7 +514,7 @@ static ssize_t enable_sink_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
}
-static ssize_t enable_sink_store(struct device *dev,
+static ssize_t curr_sink_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
@@ -403,16 +527,13 @@ static ssize_t enable_sink_store(struct device *dev,
return ret;
if (val)
- csdev->activated = true;
- else
- csdev->activated = false;
+ coresight_switch_sink(csdev);
return size;
-
}
-static DEVICE_ATTR_RW(enable_sink);
+static DEVICE_ATTR_RW(curr_sink);
-static ssize_t enable_source_show(struct device *dev,
+static ssize_t enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -420,7 +541,7 @@ static ssize_t enable_source_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
}
-static ssize_t enable_source_store(struct device *dev,
+static ssize_t enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
@@ -442,16 +563,16 @@ static ssize_t enable_source_store(struct device *dev,
return size;
}
-static DEVICE_ATTR_RW(enable_source);
+static DEVICE_ATTR_RW(enable);
static struct attribute *coresight_sink_attrs[] = {
- &dev_attr_enable_sink.attr,
+ &dev_attr_curr_sink.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_sink);
static struct attribute *coresight_source_attrs[] = {
- &dev_attr_enable_source.attr,
+ &dev_attr_enable.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_source);
@@ -696,6 +817,21 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
coresight_fixup_device_conns(csdev);
coresight_fixup_orphan_conns(csdev);
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if (desc->pdata->default_sink) {
+ if (curr_sink) {
+ dev_warn(&csdev->dev,
+ "overwritting curr sink %s",
+ dev_name(&curr_sink->dev));
+ curr_sink->activated = false;
+ }
+
+ curr_sink = csdev;
+ curr_sink->activated = true;
+ }
+ }
+
mutex_unlock(&coresight_mutex);
return csdev;
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b0973617826f..061ddadd1122 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
#include <linux/cpumask.h>
#include <asm/smp_plat.h>
@@ -118,8 +119,9 @@ struct coresight_platform_data *of_get_coresight_platform_data(
if (!pdata)
return ERR_PTR(-ENOMEM);
- /* Use device name as sysfs handle */
- pdata->name = dev_name(dev);
+ ret = of_property_read_string(node, "coresight-name", &pdata->name);
+ if (ret)
+ return ERR_PTR(ret);
/* Get the number of input and output port for this component */
of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
@@ -169,15 +171,19 @@ struct coresight_platform_data *of_get_coresight_platform_data(
if (!rdev)
continue;
- pdata->child_names[i] = dev_name(rdev);
+ ret = of_property_read_string(rparent, "coresight-name",
+ &pdata->child_names[i]);
+ if (ret)
+ pdata->child_names[i] = dev_name(rdev);
+
pdata->child_ports[i] = rendpoint.id;
i++;
} while (ep);
}
- /* Affinity defaults to CPU0 */
- pdata->cpu = 0;
+ /* Affinity defaults to -1 (invalid) */
+ pdata->cpu = -1;
dn = of_parse_phandle(node, "cpu", 0);
for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
if (dn == of_get_cpu_node(cpu, NULL)) {
@@ -189,3 +195,45 @@ struct coresight_platform_data *of_get_coresight_platform_data(
return pdata;
}
EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ int i, ret;
+ uint32_t ctis_len;
+ struct device_node *child_node;
+ struct coresight_cti_data *ctidata;
+
+ ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+ if (!ctidata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_get_property(node, "coresight-ctis", &ctis_len))
+ ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ctidata->nr_ctis) {
+ ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+ sizeof(*ctidata->names),
+ GFP_KERNEL);
+ if (!ctidata->names)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ctidata->nr_ctis; i++) {
+ child_node = of_parse_phandle(node, "coresight-ctis",
+ i);
+ if (!child_node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_property_read_string(child_node,
+ "coresight-name",
+ &ctidata->names[i]);
+ of_node_put(child_node);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+ return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index e7a348807f0c..847a39b35307 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -9,6 +9,8 @@ config STM
Say Y here to enable System Trace Module device support.
+if STM
+
config STM_DUMMY
tristate "Dummy STM driver"
help
@@ -25,3 +27,16 @@ config STM_SOURCE_CONSOLE
If you want to send kernel console messages over STM devices,
say Y.
+
+config STM_SOURCE_HEARTBEAT
+ tristate "Heartbeat over STM devices"
+ help
+ This is a kernel space trace source that sends periodic
+ heartbeat messages to trace hosts over STM devices. It is
+ also useful for testing stm class drivers and the stm class
+ framework itself.
+
+ If you want to send heartbeat messages over STM devices,
+ say Y.
+
+endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index f9312c38dd7a..a9ce3d487e57 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -5,5 +5,7 @@ stm_core-y := core.o policy.o
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o
stm_console-y := console.o
+stm_heartbeat-y := heartbeat.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index b6445d9e5453..02095410cb33 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev,
static DEVICE_ATTR_RO(channels);
+static ssize_t hw_override_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct stm_device *stm = to_stm_device(dev);
+ int ret;
+
+ ret = sprintf(buf, "%u\n", stm->data->hw_override);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(hw_override);
+
static struct attribute *stm_attrs[] = {
&dev_attr_masters.attr,
&dev_attr_channels.attr,
+ &dev_attr_hw_override.attr,
NULL,
};
@@ -113,6 +128,7 @@ struct stm_device *stm_find_device(const char *buf)
stm = to_stm_device(dev);
if (!try_module_get(stm->owner)) {
+ /* matches class_find_device() above */
put_device(dev);
return NULL;
}
@@ -125,7 +141,7 @@ struct stm_device *stm_find_device(const char *buf)
* @stm: stm device, previously acquired by stm_find_device()
*
* This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
*/
void stm_put_device(struct stm_device *stm)
{
@@ -185,6 +201,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
return;
@@ -199,6 +218,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
@@ -288,6 +310,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
}
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
/* output is already assigned -- shouldn't happen */
if (WARN_ON_ONCE(output->nr_chans))
goto unlock;
@@ -304,6 +327,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
ret = 0;
unlock:
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
return ret;
@@ -312,11 +336,18 @@ unlock:
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
+static void stm_output_init(struct stm_output *output)
+{
+ spin_lock_init(&output->lock);
+}
+
static int major_match(struct device *dev, const void *data)
{
unsigned int major = *(unsigned int *)data;
@@ -339,6 +370,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
if (!stmf)
return -ENOMEM;
+ stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
if (!try_module_get(stmf->stm->owner))
@@ -349,6 +381,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ /* matches class_find_device() above */
+ put_device(dev);
kfree(stmf);
return err;
@@ -357,9 +391,19 @@ err_free:
static int stm_char_release(struct inode *inode, struct file *file)
{
struct stm_file *stmf = file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, stmf->output.master,
+ stmf->output.channel);
+
+ stm_output_free(stm, &stmf->output);
- stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
+ /*
+ * matches the stm_char_open()'s
+ * class_find_device() + try_module_get()
+ */
+ stm_put_device(stm);
kfree(stmf);
return 0;
@@ -380,8 +424,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
return ret;
}
-static void stm_write(struct stm_data *data, unsigned int master,
- unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+ unsigned int channel, const char *buf, size_t count)
{
unsigned int flags = STP_PACKET_TIMESTAMPED;
const unsigned char *p = buf, nil = 0;
@@ -393,9 +437,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
sz, p);
flags = 0;
+
+ if (sz < 0)
+ break;
}
data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return pos;
}
static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +455,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
char *kbuf;
int err;
+ if (count + 1 > PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
/*
* if no m/c have been assigned to this writer up to this
* point, use "default" policy entry
@@ -430,8 +482,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
- stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
- count);
+ count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+ kbuf, count);
kfree(kbuf);
@@ -509,16 +561,12 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (ret)
goto err_free;
- ret = 0;
-
if (stm->data->link)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
- if (ret) {
+ if (ret)
stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
- }
err_free:
kfree(id);
@@ -633,17 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->dev.parent = parent;
stm->dev.release = stm_device_release;
- err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
- if (err)
- goto err_device;
-
- err = device_add(&stm->dev);
- if (err)
- goto err_device;
-
+ mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
+ /* initialize the object before it is accessible via sysfs */
spin_lock_init(&stm->mc_lock);
mutex_init(&stm->policy_mutex);
stm->sw_nmasters = nmasters;
@@ -651,9 +693,20 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->data = stm_data;
stm_data->stm = stm;
+ err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
+ if (err)
+ goto err_device;
+
+ err = device_add(&stm->dev);
+ if (err)
+ goto err_device;
+
return 0;
err_device:
+ unregister_chrdev(stm->major, stm_data->name);
+
+ /* matches device_initialize() above */
put_device(&stm->dev);
err_free:
kfree(stm);
@@ -662,20 +715,28 @@ err_free:
}
EXPORT_SYMBOL_GPL(stm_register_device);
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm);
void stm_unregister_device(struct stm_data *stm_data)
{
struct stm_device *stm = stm_data->stm;
struct stm_source_device *src, *iter;
- int i;
+ int i, ret;
- spin_lock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
- __stm_source_link_drop(src, stm);
+ ret = __stm_source_link_drop(src, stm);
+ /*
+ * src <-> stm link must not change under the same
+ * stm::link_mutex, so complain loudly if it has;
+ * also in this situation ret!=0 means this src is
+ * not connected to this stm and it should be otherwise
+ * safe to proceed with the tear-down of stm.
+ */
+ WARN_ON_ONCE(ret);
}
- spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
synchronize_srcu(&stm_source_srcu);
@@ -694,6 +755,17 @@ void stm_unregister_device(struct stm_data *stm_data)
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ * stm::link_mutex
+ * stm::link_lock
+ * src::link_lock
+ */
+
/**
* stm_source_link_add() - connect an stm_source device to an stm device
* @src: stm_source device
@@ -710,6 +782,7 @@ static int stm_source_link_add(struct stm_source_device *src,
char *id;
int err;
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -719,6 +792,7 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
id = kstrdup(src->data->name, GFP_KERNEL);
if (id) {
@@ -753,9 +827,9 @@ static int stm_source_link_add(struct stm_source_device *src,
fail_free_output:
stm_output_free(stm, &src->output);
- stm_put_device(stm);
fail_detach:
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -764,6 +838,7 @@ fail_detach:
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
return err;
}
@@ -776,28 +851,55 @@ fail_detach:
* If @stm is @src::link, disconnect them from one another and put the
* reference on the @stm device.
*
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
*/
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm)
{
struct stm_device *link;
+ int ret = 0;
+
+ lockdep_assert_held(&stm->link_mutex);
+ /* for stm::link_list modification, we hold both mutex and spinlock */
+ spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
- if (WARN_ON_ONCE(link != stm)) {
- spin_unlock(&src->link_lock);
- return;
+
+ /*
+ * The linked device may have changed since we last looked, because
+ * we weren't holding the src::link_lock back then; if this is the
+ * case, tell the caller to retry.
+ */
+ if (link != stm) {
+ ret = -EAGAIN;
+ goto unlock;
}
stm_output_free(link, &src->output);
- /* caller must hold stm::link_lock */
list_del_init(&src->link_entry);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
+unlock:
spin_unlock(&src->link_lock);
+ spin_unlock(&stm->link_lock);
+
+ /*
+ * Call the unlink callbacks for both source and stm, when we know
+ * that we have actually performed the unlinking.
+ */
+ if (!ret) {
+ if (src->data->unlink)
+ src->data->unlink(src->data);
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, src->output.master,
+ src->output.channel);
+ }
+
+ return ret;
}
/**
@@ -813,21 +915,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
static void stm_source_link_drop(struct stm_source_device *src)
{
struct stm_device *stm;
- int idx;
+ int idx, ret;
+retry:
idx = srcu_read_lock(&stm_source_srcu);
+ /*
+ * The stm device will be valid for the duration of this
+ * read section, but the link may change before we grab
+ * the src::link_lock in __stm_source_link_drop().
+ */
stm = srcu_dereference(src->link, &stm_source_srcu);
+ ret = 0;
if (stm) {
- if (src->data->unlink)
- src->data->unlink(src->data);
-
- spin_lock(&stm->link_lock);
- __stm_source_link_drop(src, stm);
- spin_unlock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
+ ret = __stm_source_link_drop(src, stm);
+ mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock(&stm_source_srcu, idx);
+
+ /* if it did change, retry */
+ if (ret == -EAGAIN)
+ goto retry;
}
static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +972,10 @@ static ssize_t stm_source_link_store(struct device *dev,
return -EINVAL;
err = stm_source_link_add(src, link);
- if (err)
+ if (err) {
+ /* matches the stm_find_device() above */
stm_put_device(link);
+ }
return err ? : count;
}
@@ -925,6 +1037,7 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ stm_output_init(&src->output);
spin_lock_init(&src->link_lock);
INIT_LIST_HEAD(&src->link_entry);
src->data = data;
@@ -973,9 +1086,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
- stm_write(stm->data, src->output.master,
- src->output.channel + chan,
- buf, count);
+ count = stm_write(stm->data, src->output.master,
+ src->output.channel + chan,
+ buf, count);
else
count = -ENODEV;
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 3709bef0b21f..a86612d989f9 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -40,22 +40,71 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
return size;
}
-static struct stm_data dummy_stm = {
- .name = "dummy_stm",
- .sw_start = 0x0000,
- .sw_end = 0xffff,
- .sw_nchannels = 0xffff,
- .packet = dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0400);
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+ unsigned int channel)
+{
+ if (fail_mode && (channel & fail_mode))
+ return -EINVAL;
+
+ return 0;
+}
static int dummy_stm_init(void)
{
- return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+ int i, ret = -ENOMEM;
+
+ if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < nr_dummies; i++) {
+ dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+ if (!dummy_stm[i].name)
+ goto fail_unregister;
+
+ dummy_stm[i].sw_start = 0x0000;
+ dummy_stm[i].sw_end = 0xffff;
+ dummy_stm[i].sw_nchannels = 0xffff;
+ dummy_stm[i].packet = dummy_stm_packet;
+ dummy_stm[i].link = dummy_stm_link;
+
+ ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+ if (ret)
+ goto fail_free;
+ }
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_unregister_device(&dummy_stm[i]);
+fail_free:
+ kfree(dummy_stm[i].name);
+ }
+
+ return ret;
+
}
static void dummy_stm_exit(void)
{
- stm_unregister_device(&dummy_stm);
+ int i;
+
+ for (i = 0; i < nr_dummies; i++) {
+ stm_unregister_device(&dummy_stm[i]);
+ kfree(dummy_stm[i].name);
+ }
}
module_init(dummy_stm_init);
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644
index 000000000000..3da7b673aab2
--- /dev/null
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -0,0 +1,126 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send repetitive messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX 32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0400);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+ struct stm_source_data data;
+ struct hrtimer hrtimer;
+ unsigned int active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+ struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+ hrtimer);
+
+ stm_source_write(&heartbeat->data, 0, str, sizeof str);
+ if (heartbeat->active)
+ hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+ return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 1;
+ hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+ HRTIMER_MODE_ABS);
+
+ return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 0;
+ hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+ int i, ret = -ENOMEM;
+
+ if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < nr_devs; i++) {
+ stm_heartbeat[i].data.name =
+ kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+ if (!stm_heartbeat[i].data.name)
+ goto fail_unregister;
+
+ stm_heartbeat[i].data.nr_chans = 1;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
+ stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
+ hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ stm_heartbeat[i].hrtimer.function =
+ stm_heartbeat_hrtimer_handler;
+
+ ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+ if (ret)
+ goto fail_free;
+ }
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+ kfree(stm_heartbeat[i].data.name);
+ }
+
+ return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_devs; i++) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+ kfree(stm_heartbeat[i].data.name);
+ }
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 11ab6d01adf6..1c061cb9bff0 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
{
struct stm_device *stm = policy->stm;
+ /*
+ * stp_policy_release() will not call here if the policy is already
+ * unbound; other users should not either, as no link exists between
+ * this policy and anything else in that case
+ */
if (WARN_ON_ONCE(!policy->stm))
return;
- mutex_lock(&stm->policy_mutex);
- stm->policy = NULL;
- mutex_unlock(&stm->policy_mutex);
+ lockdep_assert_held(&stm->policy_mutex);
+ stm->policy = NULL;
policy->stm = NULL;
stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
static void stp_policy_release(struct config_item *item)
{
struct stp_policy *policy = to_stp_policy(item);
+ struct stm_device *stm = policy->stm;
+ /* a policy *can* be unbound and still exist in configfs tree */
+ if (!stm)
+ return;
+
+ mutex_lock(&stm->policy_mutex);
stp_policy_unbind(policy);
+ mutex_unlock(&stm->policy_mutex);
+
kfree(policy);
}
@@ -320,16 +332,17 @@ stp_policies_make(struct config_group *group, const char *name)
/*
* node must look like <device_name>.<policy_name>, where
- * <device_name> is the name of an existing stm device and
- * <policy_name> is an arbitrary string
+ * <device_name> is the name of an existing stm device; may
+ * contain dots;
+ * <policy_name> is an arbitrary string; may not contain dots
*/
- p = strchr(devname, '.');
+ p = strrchr(devname, '.');
if (!p) {
kfree(devname);
return ERR_PTR(-EINVAL);
}
- *p++ = '\0';
+ *p = '\0';
stm = stm_find_device(devname);
kfree(devname);
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 95ece0292c99..4e8c6926260f 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -45,6 +45,7 @@ struct stm_device {
int major;
unsigned int sw_nmasters;
struct stm_data *data;
+ struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
/* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
container_of((_d), struct stm_device, dev)
struct stm_output {
+ spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;