Diffstat (limited to 'drivers'): 115 files changed, 4643 insertions, 868 deletions
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 176b59f5bc47..bd70f8db469b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -117,6 +117,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) EXPORT_SYMBOL_GPL(platform_get_irq); /** + * platform_irq_count - Count the number of IRQs a platform device uses + * @dev: platform device + * + * Return: Number of IRQs a platform device uses or EPROBE_DEFER + */ +int platform_irq_count(struct platform_device *dev) +{ + int ret, nr = 0; + + while ((ret = platform_get_irq(dev, nr)) >= 0) + nr++; + + if (ret == -EPROBE_DEFER) + return ret; + + return nr; +} +EXPORT_SYMBOL_GPL(platform_irq_count); + +/** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 7cb64e012f1f..a1721a3b80cc 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -1780,6 +1780,15 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, if (!mask_info) return -EIO; + mutex_lock(&driver->diag_maskclear_mutex); + if (driver->mask_clear) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag:%s: count = %zu\n", __func__, count); + mutex_unlock(&driver->diag_maskclear_mutex); + return -EIO; + } + mutex_unlock(&driver->diag_maskclear_mutex); + mutex_lock(&mask_info->lock); mask = (struct diag_msg_mask_t *)(mask_info->ptr); for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index eb715cc8501c..ca54b24ec604 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -215,6 +215,12 @@ static void usb_connect_work_fn(struct work_struct *work) */ static void usb_disconnect(struct diag_usb_info *ch) { + if (!ch) + return; + + if (!atomic_read(&ch->connected) && driver->usb_connected) + diag_clear_masks(NULL); + if (ch && ch->ops && ch->ops->close) ch->ops->close(ch->ctxt, DIAG_USB_MODE); } diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index dccaa6a0d9c4..2aef98f4fe04 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -467,6 +467,8 @@ struct diagchar_dev { struct class *diagchar_class; struct device *diag_dev; int ref_count; + int mask_clear; + struct mutex diag_maskclear_mutex; struct mutex diagchar_mutex; struct mutex diag_file_mutex; wait_queue_head_t wait_q; @@ -625,6 +627,7 @@ void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc); void diag_cmd_remove_reg_by_pid(int pid); void diag_cmd_remove_reg_by_proc(int proc); int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry); +void diag_clear_masks(struct diag_md_session_t *info); void diag_record_stats(int type, int flag); diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index a39e4929d999..9ed43cdc3845 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -389,6 +389,27 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask) return ret; } +void diag_clear_masks(struct diag_md_session_t *info) +{ + int ret; + char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0}; + char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0}; + char cmd_disable_event_mask[] = { 0x60, 0}; + + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: %s: masks clear request upon %s\n", __func__, + ((info) ? 
"ODL exit" : "USB Disconnection")); + + ret = diag_process_apps_masks(cmd_disable_log_mask, + sizeof(cmd_disable_log_mask), info); + ret = diag_process_apps_masks(cmd_disable_msg_mask, + sizeof(cmd_disable_msg_mask), info); + ret = diag_process_apps_masks(cmd_disable_event_mask, + sizeof(cmd_disable_event_mask), info); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag:%s: masks cleared successfully\n", __func__); +} + static void diag_close_logging_process(const int pid) { int i; @@ -400,6 +421,12 @@ static void diag_close_logging_process(const int pid) if (!session_info) return; + diag_clear_masks(session_info); + + mutex_lock(&driver->diag_maskclear_mutex); + driver->mask_clear = 1; + mutex_unlock(&driver->diag_maskclear_mutex); + session_peripheral_mask = session_info->peripheral_mask; diag_md_session_close(session_info); for (i = 0; i < NUM_MD_SESSIONS; i++) @@ -475,9 +502,14 @@ static int diag_remove_client_entry(struct file *file) } static int diagchar_close(struct inode *inode, struct file *file) { + int ret; DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n", current->comm); - return diag_remove_client_entry(file); + ret = diag_remove_client_entry(file); + mutex_lock(&driver->diag_maskclear_mutex); + driver->mask_clear = 0; + mutex_unlock(&driver->diag_maskclear_mutex); + return ret; } void diag_record_stats(int type, int flag) @@ -3358,6 +3390,7 @@ static int __init diagchar_init(void) non_hdlc_data.len = 0; mutex_init(&driver->hdlc_disable_mutex); mutex_init(&driver->diagchar_mutex); + mutex_init(&driver->diag_maskclear_mutex); mutex_init(&driver->diag_file_mutex); mutex_init(&driver->delayed_rsp_mutex); mutex_init(&apps_data_mutex); diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index 8205e5b05d85..0111b02634c8 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -1232,8 +1232,6 @@ static int diagfwd_mux_open(int id, int mode) static int diagfwd_mux_close(int id, int mode) { - uint8_t i; - switch (mode) { case DIAG_USB_MODE: driver->usb_connected = 0; @@ -1248,15 +1246,16 @@ static int diagfwd_mux_close(int id, int mode) driver->md_session_mode == DIAG_MD_NONE) || (driver->md_session_mode == DIAG_MD_PERIPHERAL)) { /* - * In this case the channel must not be closed. This case - * indicates that the USB is removed but there is a client - * running in background with Memory Device mode + * This case indicates that the USB is removed + * but there is a client running in background + * with Memory Device mode. */ } else { - for (i = 0; i < NUM_PERIPHERALS; i++) { - diagfwd_close(i, TYPE_DATA); - diagfwd_close(i, TYPE_CMD); - } + /* + * With clearing of masks on ODL exit and + * USB disconnection, closing of the channel is + * not needed.This enables read and drop of stale packets. + */ /* Re enable HDLC encoding */ pr_debug("diag: In %s, re-enabling HDLC encoding\n", __func__); diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig index d94678e9e4fc..650a23ea1ead 100644 --- a/drivers/clk/msm/Kconfig +++ b/drivers/clk/msm/Kconfig @@ -2,6 +2,7 @@ config COMMON_CLK_MSM tristate "Support for MSM clock controllers" depends on OF depends on ARCH_QCOM + select RATIONAL help This support clock controller used by MSM devices which support global, mmss and gpu clock controller. 
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c index e469c61cc3db..71c5541d0c0d 100644 --- a/drivers/clk/msm/clock-gcc-cobalt.c +++ b/drivers/clk/msm/clock-gcc-cobalt.c @@ -2354,11 +2354,13 @@ static struct mux_clk gcc_debug_mux = { &gpu_gcc_debug_clk.c, &gfx_gcc_debug_clk.c, &debug_mmss_clk.c, + &debug_cpu_clk.c, ), MUX_SRC_LIST( { &gpu_gcc_debug_clk.c, 0x013d }, { &gfx_gcc_debug_clk.c, 0x013d }, { &debug_mmss_clk.c, 0x0022 }, + { &debug_cpu_clk.c, 0x00c0 }, { &snoc_clk.c, 0x0000 }, { &cnoc_clk.c, 0x000e }, { &bimc_clk.c, 0x00a9 }, @@ -2853,6 +2855,7 @@ static struct clk_lookup msm_clocks_measure_cobalt[] = { CLK_LIST(gpu_gcc_debug_clk), CLK_LIST(gfx_gcc_debug_clk), CLK_LIST(debug_mmss_clk), + CLK_LIST(debug_cpu_clk), CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"), }; @@ -2889,6 +2892,9 @@ static int msm_clock_debug_cobalt_probe(struct platform_device *pdev) debug_mmss_clk.dev = &pdev->dev; debug_mmss_clk.clk_id = "debug_mmss_clk"; + debug_cpu_clk.dev = &pdev->dev; + debug_cpu_clk.clk_id = "debug_cpu_clk"; + ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_measure_cobalt, ARRAY_SIZE(msm_clocks_measure_cobalt)); diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index 9d605503520f..8ae6a4e994f0 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -48,6 +48,7 @@ enum clk_osm_bases { OSM_BASE, PLL_BASE, + EFUSE_BASE, NUM_BASES, }; @@ -76,6 +77,7 @@ enum clk_osm_trace_packet_id { #define MEM_ACC_SEQ_CONST(n) (n) #define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40)) #define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n))) +#define SEQ_REG1_MSMCOBALT_V2 0x1048 #define OSM_TABLE_SIZE 40 #define MAX_CLUSTER_CNT 2 @@ -115,7 +117,7 @@ enum clk_osm_trace_packet_id { #define PLL_TEST_CTL_HI 0x1C #define PLL_STATUS 0x2C #define PLL_LOCK_DET_MASK BIT(16) -#define PLL_WAIT_LOCK_TIME_US 5 +#define PLL_WAIT_LOCK_TIME_US 10 #define PLL_WAIT_LOCK_TIME_NS (PLL_WAIT_LOCK_TIME_US * 1000) #define PLL_MIN_LVAL 43 @@ -164,7 +166,8 @@ enum clk_osm_trace_packet_id { #define DCVS_DROOP_EN_MASK BIT(5) #define LMH_PS_EN_MASK BIT(6) #define IGNORE_PLL_LOCK_MASK BIT(15) -#define SAFE_FREQ_WAIT_NS 1000 +#define SAFE_FREQ_WAIT_NS 5000 +#define DEXT_DECREMENT_WAIT_NS 1000 #define DCVS_BOOST_TIMER_REG0 0x1084 #define DCVS_BOOST_TIMER_REG1 0x1088 #define DCVS_BOOST_TIMER_REG2 0x108C @@ -173,7 +176,8 @@ enum clk_osm_trace_packet_id { #define PS_BOOST_TIMER_REG2 0x109C #define BOOST_PROG_SYNC_DELAY_REG 0x10A0 #define DROOP_CTRL_REG 0x10A4 -#define DROOP_PROG_SYNC_DELAY_REG 0x10B8 +#define DROOP_RELEASE_TIMER_CTRL 0x10A8 +#define DROOP_PROG_SYNC_DELAY_REG 0x10BC #define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC #define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0 #define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4 @@ -208,7 +212,13 @@ enum clk_osm_trace_packet_id { #define PLL_DD_D0_USER_CTL_LO 0x17916208 #define PLL_DD_D1_USER_CTL_LO 0x17816208 +#define PWRCL_EFUSE_SHIFT 0 +#define PWRCL_EFUSE_MASK 0 +#define PERFCL_EFUSE_SHIFT 29 +#define PERFCL_EFUSE_MASK 0x7 + static void __iomem *virt_base; +static void __iomem *debug_base; #define lmh_lite_clk_src_source_val 1 @@ -335,6 +345,9 @@ struct clk_osm { bool trace_en; }; +static bool msmcobalt_v1; +static bool msmcobalt_v2; + static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val, u32 offset, u32 mask) { @@ -525,14 +538,45 @@ static struct clk_osm perfcl_clk = { }, }; +static struct clk_ops clk_ops_cpu_dbg_mux; + +static struct mux_clk cpu_debug_mux = { + .offset = 0x0, + .mask 
= 0x3, + .shift = 8, + .ops = &mux_reg_ops, + MUX_SRC_LIST( + { &pwrcl_clk.c, 0x00 }, + { &perfcl_clk.c, 0x01 }, + ), + .base = &debug_base, + .c = { + .dbg_name = "cpu_debug_mux", + .ops = &clk_ops_cpu_dbg_mux, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(cpu_debug_mux.c), + }, +}; + static struct clk_lookup cpu_clocks_osm[] = { CLK_LIST(pwrcl_clk), CLK_LIST(perfcl_clk), CLK_LIST(sys_apcsaux_clk_gcc), CLK_LIST(xo_ao), CLK_LIST(osm_clk_src), + CLK_LIST(cpu_debug_mux), }; +static unsigned long cpu_dbg_mux_get_rate(struct clk *clk) +{ + /* Account for the divider between the clock and the debug mux */ + if (!strcmp(clk->parent->dbg_name, "pwrcl_clk")) + return clk->rate/4; + else if (!strcmp(clk->parent->dbg_name, "perfcl_clk")) + return clk->rate/8; + return clk->rate; +} + static void clk_osm_print_osm_table(struct clk_osm *c) { int i; @@ -901,6 +945,22 @@ static int clk_osm_resources_init(struct platform_device *pdev) } } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "debug"); + if (!res) { + dev_err(&pdev->dev, "Failed to get debug mux base\n"); + return -EINVAL; + } + + debug_base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!debug_base) { + dev_err(&pdev->dev, "Unable to map in debug mux base\n"); + return -ENOMEM; + } + + clk_ops_cpu_dbg_mux = clk_ops_gen_mux; + clk_ops_cpu_dbg_mux.get_rate = cpu_dbg_mux_get_rate; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_common"); if (!res) { dev_err(&pdev->dev, "Failed to get apcs common base\n"); @@ -913,6 +973,35 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } + /* efuse speed bin fuses are optional */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "pwrcl_efuse"); + if (res) { + pbase = (unsigned long)res->start; + vbase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!vbase) { + dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n"); + return -ENOMEM; + } + pwrcl_clk.pbases[EFUSE_BASE] = pbase; + pwrcl_clk.vbases[EFUSE_BASE] = vbase; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "perfcl_efuse"); + if (res) { + pbase = (unsigned long)res->start; + vbase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!vbase) { + dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n"); + return -ENOMEM; + } + perfcl_clk.pbases[EFUSE_BASE] = pbase; + perfcl_clk.vbases[EFUSE_BASE] = vbase; + } + vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl"); if (IS_ERR(vdd_pwrcl)) { rc = PTR_ERR(vdd_pwrcl); @@ -1582,6 +1671,9 @@ static void clk_osm_setup_osm_was(struct clk_osm *c) u32 cc_hyst; u32 val; + if (msmcobalt_v2) + return; + val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG); val |= IGNORE_PLL_LOCK_MASK; cc_hyst = clk_osm_read_reg(c, SPM_CC_HYSTERESIS); @@ -1673,19 +1765,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c) if (c->boost_fsm_en) { val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG); clk_osm_write_reg(c, val | CC_BOOST_EN_MASK, PDN_FSM_CTRL_REG); + val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG0); val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); - val |= BVAL(31, 16, clk_osm_count_ns(c, - SAFE_FREQ_WAIT_NS)); + val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS)); clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG0); val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG1); val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); - val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS)); + val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); 
clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG1); val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG2); - val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); + val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS)); clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG2); } @@ -1696,12 +1788,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c) PDN_FSM_CTRL_REG); val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG0); + val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS)); clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG0); val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG1); val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); + val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG1); + + val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG2); + val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS)); + clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG2); + } /* PS FSM */ @@ -1709,13 +1808,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c) val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG); clk_osm_write_reg(c, val | PS_BOOST_EN_MASK, PDN_FSM_CTRL_REG); - val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0) | - BVAL(31, 16, clk_osm_count_ns(c, 1000)); + val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0); + val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); + val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS)); clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG0); - val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1) | - clk_osm_count_ns(c, 1000); + val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1); + val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); + val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS)); clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG1); + + val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG2); + val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS)); + clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG2); } /* PLL signal timing control */ @@ -1728,15 +1833,15 @@ static void clk_osm_setup_fsms(struct clk_osm *c) val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG); clk_osm_write_reg(c, val | WFX_DROOP_EN_MASK, PDN_FSM_CTRL_REG); - val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) | - BVAL(31, 16, clk_osm_count_ns(c, 1000)); + val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG); + val |= BVAL(31, 16, clk_osm_count_ns(c, 500)); clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG); val = clk_osm_read_reg(c, - DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) | - BVAL(31, 16, clk_osm_count_ns(c, 1000)); + DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); + val |= BVAL(31, 16, clk_osm_count_ns(c, 500)); clk_osm_write_reg(c, val, - DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); + DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); } /* PC/RET FSM */ @@ -1745,9 +1850,15 @@ static void clk_osm_setup_fsms(struct clk_osm *c) clk_osm_write_reg(c, val | PC_RET_EXIT_DROOP_EN_MASK, PDN_FSM_CTRL_REG); - val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) | - BVAL(15, 0, clk_osm_count_ns(c, 5000)); + val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG); + val |= BVAL(15, 0, clk_osm_count_ns(c, 500)); clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG); + + val = clk_osm_read_reg(c, + DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); + val |= BVAL(15, 0, clk_osm_count_ns(c, 500)); + clk_osm_write_reg(c, val, + DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); } /* DCVS droop FSM - only if RCGwRC is not used for di/dt control */ @@ -1758,14 +1869,14 @@ static void 
clk_osm_setup_fsms(struct clk_osm *c) } if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) { - val = clk_osm_read_reg(c, - DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) | - BVAL(15, 0, clk_osm_count_ns(c, 1000)); - clk_osm_write_reg(c, val, - DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG); - val = clk_osm_read_reg(c, DROOP_CTRL_REG) | - BVAL(22, 16, 0x2); + clk_osm_write_reg(c, clk_osm_count_ns(c, 250), + DROOP_RELEASE_TIMER_CTRL); + clk_osm_write_reg(c, clk_osm_count_ns(c, 500), + DCVS_DROOP_TIMER_CTRL); + val = clk_osm_read_reg(c, DROOP_CTRL_REG); + val |= BIT(31) | BVAL(22, 16, 0x2) | + BVAL(6, 0, 0x8); clk_osm_write_reg(c, val, DROOP_CTRL_REG); } } @@ -1803,6 +1914,9 @@ static void clk_osm_do_additional_setup(struct clk_osm *c, clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(84)); clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(85)); + /* ITM to OSM handoff */ + clk_osm_setup_itm_to_osm_handoff(); + pr_debug("seq_size: %lu, seqbr_size: %lu\n", ARRAY_SIZE(seq_instr), ARRAY_SIZE(seq_br_instr)); clk_osm_setup_sequencer(&pwrcl_clk); @@ -1835,18 +1949,22 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c) /* Ensure writes complete before returning */ mb(); } else { - scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1), - c->apm_threshold_vc); + if (msmcobalt_v1) { + scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1), + c->apm_threshold_vc); + scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73), + 0x3b | c->apm_threshold_vc << 6); + } else if (msmcobalt_v2) { + clk_osm_write_reg(c, c->apm_threshold_vc, + SEQ_REG1_MSMCOBALT_V2); + } scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(72), c->apm_crossover_vc); - /* SEQ_REG(8) = address of SEQ_REG(1) init by TZ */ clk_osm_write_reg(c, c->apm_threshold_vc, SEQ_REG(15)); scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31), c->apm_threshold_vc != 0 ? 
c->apm_threshold_vc - 1 : 0xff); - scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73), - 0x3b | c->apm_threshold_vc << 6); scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76), 0x39 | c->apm_threshold_vc << 6); } @@ -2441,13 +2559,23 @@ static unsigned long osm_clk_init_rate = 200000000; static int cpu_clock_osm_driver_probe(struct platform_device *pdev) { - char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0"; - char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0"; int rc, cpu; + int speedbin = 0, pvs_ver = 0; + u32 pte_efuse; + char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0"; + char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0"; struct cpu_cycle_counter_cb cb = { .get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter, }; + if (of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-osm-msmcobalt-v1")) { + msmcobalt_v1 = true; + } else if (of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-osm-msmcobalt-v2")) { + msmcobalt_v2 = true; + } + rc = clk_osm_resources_init(pdev); if (rc) { if (rc != -EPROBE_DEFER) @@ -2462,6 +2590,24 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) return rc; } + if ((pwrcl_clk.secure_init || perfcl_clk.secure_init) && + msmcobalt_v2) { + pr_err("unsupported configuration for msmcobalt v2\n"); + return -EINVAL; + } + + if (pwrcl_clk.vbases[EFUSE_BASE]) { + /* Multiple speed-bins are supported */ + pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]); + speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) & + PWRCL_EFUSE_MASK); + snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr), + "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver); + } + + dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n", + speedbin, pvs_ver); + rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr); if (rc) { @@ -2470,6 +2616,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) return rc; } + if (perfcl_clk.vbases[EFUSE_BASE]) { + /* Multiple speed-bins are supported */ + pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]); + speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) & + PERFCL_EFUSE_MASK); + snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr), + "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver); + } + + dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n", + speedbin, pvs_ver); + rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr); if (rc) { dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n", @@ -2509,10 +2667,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) clk_osm_print_osm_table(&pwrcl_clk); clk_osm_print_osm_table(&perfcl_clk); - /* Program the minimum PLL frequency */ - clk_osm_write_reg(&pwrcl_clk, PLL_MIN_LVAL, SEQ_REG(27)); - clk_osm_write_reg(&perfcl_clk, PLL_MIN_LVAL, SEQ_REG(27)); - rc = clk_osm_setup_hw_table(&pwrcl_clk); if (rc) { dev_err(&pdev->dev, "failed to setup power cluster hardware table\n"); @@ -2531,8 +2685,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) goto exit; } - clk_osm_setup_itm_to_osm_handoff(); - /* LLM Freq Policy Tuning */ rc = clk_osm_set_llm_freq_policy(pdev); if (rc < 0) { @@ -2666,7 +2818,8 @@ exit: } static struct of_device_id match_table[] = { - { .compatible = "qcom,cpu-clock-osm" }, + { .compatible = "qcom,cpu-clock-osm-msmcobalt-v1" }, + { .compatible = "qcom,cpu-clock-osm-msmcobalt-v2" }, {} }; diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c index e0f75b0022fc..b997e79e3d73 100644 --- a/drivers/devfreq/governor_bw_hwmon.c +++ 
b/drivers/devfreq/governor_bw_hwmon.c @@ -67,8 +67,6 @@ struct hwmon_node { unsigned long hyst_en; unsigned long above_low_power; unsigned long prev_req; - unsigned long up_wake_mbps; - unsigned long down_wake_mbps; unsigned int wake; unsigned int down_cnt; ktime_t prev_ts; @@ -191,7 +189,7 @@ static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms) return mbps; } -static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon) +static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon) { struct devfreq *df; struct hwmon_node *node; @@ -220,9 +218,9 @@ static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon) * bandwidth usage and do the bandwidth calculation based on just * this micro sample. */ - if (mbps > node->up_wake_mbps) { + if (mbps > node->hw->up_wake_mbps) { wake = UP_WAKE; - } else if (mbps < node->down_wake_mbps) { + } else if (mbps < node->hw->down_wake_mbps) { if (node->down_cnt) node->down_cnt--; if (node->down_cnt <= 0) @@ -241,6 +239,50 @@ static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon) return wake; } +static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon) +{ + struct devfreq *df; + struct hwmon_node *node; + unsigned long bytes, mbps; + int wake = 0; + + df = hwmon->df; + node = df->data; + + /* + * If this read is in response to an IRQ, the HW monitor should + * return the measurement in the micro sample that triggered the IRQ. + * Otherwise, it should return the maximum measured value in any + * micro sample since the last time we called get_bytes_and_clear() + */ + bytes = hwmon->get_bytes_and_clear(hwmon); + mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC); + node->max_mbps = mbps; + + if (mbps > node->hw->up_wake_mbps) + wake = UP_WAKE; + else if (mbps < node->hw->down_wake_mbps) + wake = DOWN_WAKE; + + node->wake = wake; + node->sampled = true; + + trace_bw_hwmon_meas(dev_name(df->dev.parent), + mbps, + node->sample_ms * USEC_PER_MSEC, + wake); + + return 1; +} + +static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon) +{ + if (hwmon->set_hw_events) + return __bw_hwmon_hw_sample_end(hwmon); + else + return __bw_hwmon_sw_sample_end(hwmon); +} + int bw_hwmon_sample_end(struct bw_hwmon *hwmon) { unsigned long flags; @@ -275,12 +317,14 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node, struct bw_hwmon *hw = node->hw; unsigned int new_bw, io_percent; ktime_t ts; - unsigned int ms; + unsigned int ms = 0; spin_lock_irqsave(&irq_lock, flags); - ts = ktime_get(); - ms = ktime_to_ms(ktime_sub(ts, node->prev_ts)); + if (!hw->set_hw_events) { + ts = ktime_get(); + ms = ktime_to_ms(ktime_sub(ts, node->prev_ts)); + } if (!node->sampled || ms >= node->sample_ms) __bw_hwmon_sample_end(node->hw); node->sampled = false; @@ -388,9 +432,10 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node, /* Stretch the short sample window size, if the traffic is too low */ if (meas_mbps < MIN_MBPS) { - node->up_wake_mbps = (max(MIN_MBPS, req_mbps) + hw->up_wake_mbps = (max(MIN_MBPS, req_mbps) * (100 + node->up_thres)) / 100; - node->down_wake_mbps = 0; + hw->down_wake_mbps = 0; + hw->undo_over_req_mbps = 0; thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2), node->sample_ms); } else { @@ -401,13 +446,22 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node, * reduce the vote based on the measured mbps being less than * the previous measurement that caused the "over request". 
*/ - node->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100; - node->down_wake_mbps = (meas_mbps * node->down_thres) / 100; + hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100; + hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100; + if (node->wake == UP_WAKE) + hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone); + else + hw->undo_over_req_mbps = 0; thres = mbps_to_bytes(meas_mbps, node->sample_ms); } - node->down_cnt = node->down_count; - node->bytes = hw->set_thres(hw, thres); + if (hw->set_hw_events) { + hw->down_cnt = node->down_count; + hw->set_hw_events(hw, node->sample_ms); + } else { + node->down_cnt = node->down_count; + node->bytes = hw->set_thres(hw, thres); + } node->wake = 0; node->prev_req = req_mbps; @@ -432,8 +486,8 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node, trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent), new_bw, *freq, - node->up_wake_mbps, - node->down_wake_mbps); + hw->up_wake_mbps, + hw->down_wake_mbps); return req_mbps; } @@ -503,6 +557,9 @@ static int start_monitor(struct devfreq *df, bool init) node->resume_freq = 0; node->resume_ab = 0; mbps = (df->previous_freq * node->io_percent) / 100; + hw->up_wake_mbps = mbps; + hw->down_wake_mbps = MIN_MBPS; + hw->undo_over_req_mbps = 0; ret = hw->start_hwmon(hw, mbps); } else { ret = hw->resume_hwmon(hw); diff --git a/drivers/devfreq/governor_bw_hwmon.h b/drivers/devfreq/governor_bw_hwmon.h index 59bf48ac5af7..7578399cfb88 100644 --- a/drivers/devfreq/governor_bw_hwmon.h +++ b/drivers/devfreq/governor_bw_hwmon.h @@ -48,6 +48,8 @@ struct bw_hwmon { int (*suspend_hwmon)(struct bw_hwmon *hw); int (*resume_hwmon)(struct bw_hwmon *hw); unsigned long (*set_thres)(struct bw_hwmon *hw, unsigned long bytes); + unsigned long (*set_hw_events)(struct bw_hwmon *hw, + unsigned int sample_ms); unsigned long (*get_bytes_and_clear)(struct bw_hwmon *hw); int (*set_throttle_adj)(struct bw_hwmon *hw, uint adj); u32 (*get_throttle_adj)(struct bw_hwmon *hw); @@ -55,6 +57,11 @@ struct bw_hwmon { struct device_node *of_node; struct devfreq_governor *gov; + unsigned long up_wake_mbps; + unsigned long undo_over_req_mbps; + unsigned long down_wake_mbps; + unsigned int down_cnt; + struct devfreq *df; }; diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 18fdd400ac7a..362493118670 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1193,7 +1193,8 @@ static int adreno_init(struct kgsl_device *device) if (!adreno_is_a3xx(adreno_dev)) { int r = kgsl_allocate_global(device, - &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, 0, 0); + &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, + 0, 0, "alwayson"); adreno_dev->cmdbatch_profile_index = 0; @@ -1279,7 +1280,7 @@ static void _setup_throttling_counters(struct adreno_device *adreno_dev) static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev) { - int i; + int i, adj; uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS]; struct adreno_busy_data *busy = &adreno_dev->busy_data; @@ -1300,8 +1301,14 @@ static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev) adreno_dev->gpmu_throttle_counters[i], &busy->throttle_cycles[i]); } - i = th[CRC_MORE50PCT] - th[IDLE_10PCT]; - return th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (i < 0 ? 0 : i) * 3; + adj = th[CRC_MORE50PCT] - th[IDLE_10PCT]; + adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 
0 : adj) * 3; + + trace_kgsl_clock_throttling( + th[IDLE_10PCT], th[CRC_50PCT], + th[CRC_MORE50PCT], th[CRC_LESS50PCT], + adj); + return adj; } static void _update_threshold_count(struct adreno_device *adreno_dev, diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 2accbe5c5764..97e71464c2df 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -174,7 +174,7 @@ static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &adreno_dev->pwron_fixup, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index b15d23cfbe0a..7a691667e59f 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -1360,7 +1360,7 @@ static int _a4xx_pwron_fixup(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &adreno_dev->pwron_fixup, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 3252bfb764f2..583de85678fc 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -244,7 +244,8 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, + 0, "crit_pkts"); if (ret) return ret; @@ -258,19 +259,19 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev) ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf1, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf1"); if (ret) return ret; ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf2, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf2"); if (ret) return ret; ret = kgsl_allocate_global(&adreno_dev->dev, &crit_pkts_refbuf3, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "crit_pkts_refbuf3"); if (ret) return ret; @@ -2366,7 +2367,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile, } ret = kgsl_allocate_global(device, ucode, fw->size - 4, - KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode"); if (ret) goto done; diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index c1463b824c67..ffd7acbbe1f9 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -490,7 +490,8 @@ static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev, int ret; ret = kgsl_allocate_global(device, &rb->preemption_desc, - A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED); + A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED, + "preemption_desc"); if (ret) return ret; @@ -525,7 +526,8 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) /* Allocate mem for storing preemption smmu record */ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE, - KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED); + KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED, + "smmu_info"); } #else static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) @@ -555,7 +557,8 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) /* Allocate mem for storing preemption counters */ ret = kgsl_allocate_global(device, &preempt->counters, 
adreno_dev->num_ringbuffers * - A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0); + A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, + "preemption_counters"); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c index 04d82844a5e9..aeffeab2f6dc 100644 --- a/drivers/gpu/msm/adreno_a5xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c @@ -1033,11 +1033,11 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev) /* The script buffers needs 2 extra qwords on the end */ if (kgsl_allocate_global(device, &capturescript, script_size + 16, KGSL_MEMFLAGS_GPUREADONLY, - KGSL_MEMDESC_PRIVILEGED)) + KGSL_MEMDESC_PRIVILEGED, "capturescript")) return; if (kgsl_allocate_global(device, ®isters, data_size, 0, - KGSL_MEMDESC_PRIVILEGED)) { + KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) { kgsl_free_global(KGSL_DEVICE(adreno_dev), &capturescript); return; } diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c index c4fab8a5528a..d8af520b2fe6 100644 --- a/drivers/gpu/msm/adreno_profile.c +++ b/drivers/gpu/msm/adreno_profile.c @@ -1071,7 +1071,8 @@ void adreno_profile_init(struct adreno_device *adreno_dev) /* allocate shared_buffer, which includes pre_ib and post_ib */ profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS; ret = kgsl_allocate_global(device, &profile->shared_buffer, - profile->shared_size * sizeof(unsigned int), 0, 0); + profile->shared_size * sizeof(unsigned int), + 0, 0, "profile"); if (ret) { profile->shared_size = 0; diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 5ffb0b2513f3..07ef09034d7c 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -250,12 +250,12 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev, * switch pagetable */ ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->pagetable_desc, - PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED); + PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc"); if (ret) return ret; - return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc, - KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0); + KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, + 0, "ringbuffer"); } int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 691f687cd839..f9eb080d903b 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4474,13 +4474,13 @@ int kgsl_device_platform_probe(struct kgsl_device *device) goto error_close_mmu; status = kgsl_allocate_global(device, &device->memstore, - KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG); + KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore"); if (status != 0) goto error_close_mmu; status = kgsl_allocate_global(device, &device->scratch, - PAGE_SIZE, 0, 0); + PAGE_SIZE, 0, 0, "scratch"); if (status != 0) goto error_free_memstore; diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c index df9eb9ebd779..2f293e4da398 100644 --- a/drivers/gpu/msm/kgsl_debugfs.c +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -280,6 +280,29 @@ static const struct file_operations process_sparse_mem_fops = { .release = process_mem_release, }; +static int globals_print(struct seq_file *s, void *unused) +{ + kgsl_print_global_pt_entries(s); + return 0; +} + +static int globals_open(struct inode *inode, struct file *file) +{ + return single_open(file, globals_print, NULL); +} + +static int globals_release(struct inode *inode, struct 
file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations global_fops = { + .open = globals_open, + .read = seq_read, + .llseek = seq_lseek, + .release = globals_release, +}; + /** * kgsl_process_init_debugfs() - Initialize debugfs for a process * @private: Pointer to process private structure created for the process @@ -336,6 +359,9 @@ void kgsl_core_debugfs_init(void) kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL); + debugfs_create_file("globals", 0444, kgsl_debugfs_dir, NULL, + &global_fops); + debug_dir = debugfs_create_dir("debug", kgsl_debugfs_dir); debugfs_create_file("strict_memory", 0644, debug_dir, NULL, diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 166bb68e64a1..71b6086423d6 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -38,6 +38,10 @@ #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu)) +#define ADDR_IN_GLOBAL(_a) \ + (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \ + ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE))) + static struct kgsl_mmu_pt_ops iommu_pt_ops; static bool need_iommu_sync; @@ -92,19 +96,41 @@ static struct kmem_cache *addr_entry_cache; #define GLOBAL_PT_ENTRIES 32 -static struct kgsl_memdesc *global_pt_entries[GLOBAL_PT_ENTRIES]; +struct global_pt_entry { + struct kgsl_memdesc *memdesc; + char name[32]; +}; + +static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES]; static struct kgsl_memdesc *kgsl_global_secure_pt_entry; static int global_pt_count; uint64_t global_pt_alloc; static struct kgsl_memdesc gpu_qdss_desc; +void kgsl_print_global_pt_entries(struct seq_file *s) +{ + int i; + + for (i = 0; i < global_pt_count; i++) { + struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc; + + if (memdesc == NULL) + continue; + + seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n", + memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1, + memdesc->size, global_pt_entries[i].name); + } +} + static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable) { unsigned int i; for (i = 0; i < global_pt_count; i++) { - if (global_pt_entries[i] != NULL) - kgsl_mmu_unmap(pagetable, global_pt_entries[i]); + if (global_pt_entries[i].memdesc != NULL) + kgsl_mmu_unmap(pagetable, + global_pt_entries[i].memdesc); } } @@ -113,8 +139,9 @@ static void kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable) unsigned int i; for (i = 0; i < global_pt_count; i++) { - if (global_pt_entries[i] != NULL) { - int ret = kgsl_mmu_map(pagetable, global_pt_entries[i]); + if (global_pt_entries[i].memdesc != NULL) { + int ret = kgsl_mmu_map(pagetable, + global_pt_entries[i].memdesc); BUG_ON(ret); } @@ -152,17 +179,17 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu, return; for (i = 0; i < global_pt_count; i++) { - if (global_pt_entries[i] == memdesc) { + if (global_pt_entries[i].memdesc == memdesc) { memdesc->gpuaddr = 0; memdesc->priv &= ~KGSL_MEMDESC_GLOBAL; - global_pt_entries[i] = NULL; + global_pt_entries[i].memdesc = NULL; return; } } } static void kgsl_iommu_add_global(struct kgsl_mmu *mmu, - struct kgsl_memdesc *memdesc) + struct kgsl_memdesc *memdesc, const char *name) { if (memdesc->gpuaddr != 0) return; @@ -174,7 +201,10 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu, memdesc->priv |= KGSL_MEMDESC_GLOBAL; global_pt_alloc += memdesc->size; - global_pt_entries[global_pt_count++] = memdesc; + global_pt_entries[global_pt_count].memdesc = memdesc; + strlcpy(global_pt_entries[global_pt_count].name, name, + 
sizeof(global_pt_entries[global_pt_count].name)); + global_pt_count++; } void kgsl_add_global_secure_entry(struct kgsl_device *device, @@ -220,7 +250,7 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device) return; } - kgsl_mmu_add_global(device, &gpu_qdss_desc); + kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss"); } static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu) @@ -493,8 +523,62 @@ struct _mem_entry { unsigned int priv; int pending_free; pid_t pid; + char name[32]; }; +static void _get_global_entries(uint64_t faultaddr, + struct _mem_entry *prev, + struct _mem_entry *next) +{ + int i; + uint64_t prevaddr = 0; + struct global_pt_entry *p = NULL; + + uint64_t nextaddr = (uint64_t) -1; + struct global_pt_entry *n = NULL; + + for (i = 0; i < global_pt_count; i++) { + uint64_t addr; + + if (global_pt_entries[i].memdesc == NULL) + continue; + + addr = global_pt_entries[i].memdesc->gpuaddr; + if ((addr < faultaddr) && (addr > prevaddr)) { + prevaddr = addr; + p = &global_pt_entries[i]; + } + + if ((addr > faultaddr) && (addr < nextaddr)) { + nextaddr = addr; + n = &global_pt_entries[i]; + } + } + + if (p != NULL) { + prev->gpuaddr = p->memdesc->gpuaddr; + prev->size = p->memdesc->size; + prev->flags = p->memdesc->flags; + prev->priv = p->memdesc->priv; + prev->pid = 0; + strlcpy(prev->name, p->name, sizeof(prev->name)); + } + + if (n != NULL) { + next->gpuaddr = n->memdesc->gpuaddr; + next->size = n->memdesc->size; + next->flags = n->memdesc->flags; + next->priv = n->memdesc->priv; + next->pid = 0; + strlcpy(next->name, n->name, sizeof(next->name)); + } +} + +void __kgsl_get_memory_usage(struct _mem_entry *entry) +{ + kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags); +} + static void _get_entries(struct kgsl_process_private *private, uint64_t faultaddr, struct _mem_entry *prev, struct _mem_entry *next) @@ -529,6 +613,7 @@ static void _get_entries(struct kgsl_process_private *private, prev->priv = p->memdesc.priv; prev->pending_free = p->pending_free; prev->pid = private->pid; + __kgsl_get_memory_usage(prev); } if (n != NULL) { @@ -538,6 +623,7 @@ static void _get_entries(struct kgsl_process_private *private, next->priv = n->memdesc.priv; next->pending_free = n->pending_free; next->pid = private->pid; + __kgsl_get_memory_usage(next); } } @@ -553,7 +639,9 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr, /* Set the maximum possible size as an initial value */ nextentry->gpuaddr = (uint64_t) -1; - if (context) { + if (ADDR_IN_GLOBAL(faultaddr)) { + _get_global_entries(faultaddr, preventry, nextentry); + } else if (context) { private = context->proc_priv; spin_lock(&private->mem_lock); _get_entries(private, faultaddr, preventry, nextentry); @@ -563,18 +651,13 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr, static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry) { - char name[32]; - memset(name, 0, sizeof(name)); - - kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags); - KGSL_LOG_DUMP(device, "[%016llX - %016llX] %s %s (pid = %d) (%s)\n", entry->gpuaddr, entry->gpuaddr + entry->size, entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "", entry->pending_free ? 
"(pending free)" : "", - entry->pid, name); + entry->pid, entry->name); } static void _check_if_freed(struct kgsl_iommu_context *ctx, @@ -1395,7 +1478,7 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu) } } - kgsl_iommu_add_global(mmu, &iommu->setstate); + kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate"); kgsl_setup_qdss_desc(device); done: @@ -2220,10 +2303,6 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable, return addr; } -#define ADDR_IN_GLOBAL(_a) \ - (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \ - ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE))) - static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr, uint64_t size) { diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c index 10f6b8049d36..4371c9a1b87e 100644 --- a/drivers/gpu/msm/kgsl_mmu.c +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -543,12 +543,12 @@ void kgsl_mmu_remove_global(struct kgsl_device *device, EXPORT_SYMBOL(kgsl_mmu_remove_global); void kgsl_mmu_add_global(struct kgsl_device *device, - struct kgsl_memdesc *memdesc) + struct kgsl_memdesc *memdesc, const char *name) { struct kgsl_mmu *mmu = &device->mmu; if (MMU_OP_VALID(mmu, mmu_add_global)) - mmu->mmu_ops->mmu_add_global(mmu, memdesc); + mmu->mmu_ops->mmu_add_global(mmu, memdesc, name); } EXPORT_SYMBOL(kgsl_mmu_add_global); @@ -620,7 +620,7 @@ static struct kgsl_mmu_pt_ops nommu_pt_ops = { }; static void nommu_add_global(struct kgsl_mmu *mmu, - struct kgsl_memdesc *memdesc) + struct kgsl_memdesc *memdesc, const char *name) { memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl); } diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h index 53645cc1741c..acbc0e784cf2 100644 --- a/drivers/gpu/msm/kgsl_mmu.h +++ b/drivers/gpu/msm/kgsl_mmu.h @@ -75,7 +75,7 @@ struct kgsl_mmu_ops { (struct kgsl_mmu *mmu); int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *); void (*mmu_add_global)(struct kgsl_mmu *mmu, - struct kgsl_memdesc *memdesc); + struct kgsl_memdesc *memdesc, const char *name); void (*mmu_remove_global)(struct kgsl_mmu *mmu, struct kgsl_memdesc *memdesc); struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu, @@ -174,6 +174,7 @@ struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *, void kgsl_add_global_secure_entry(struct kgsl_device *device, struct kgsl_memdesc *memdesc); +void kgsl_print_global_pt_entries(struct seq_file *s); void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable); int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable, @@ -198,7 +199,7 @@ int kgsl_mmu_find_region(struct kgsl_pagetable *pagetable, uint64_t *gpuaddr, uint64_t size, unsigned int align); void kgsl_mmu_add_global(struct kgsl_device *device, - struct kgsl_memdesc *memdesc); + struct kgsl_memdesc *memdesc, const char *name); void kgsl_mmu_remove_global(struct kgsl_device *device, struct kgsl_memdesc *memdesc); diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h index 565ae4c39fdd..9e6817c76df8 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.h +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -279,7 +279,7 @@ kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc) */ static inline int kgsl_allocate_global(struct kgsl_device *device, struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags, - unsigned int priv) + unsigned int priv, const char *name) { int ret; @@ -297,7 +297,7 @@ static inline int kgsl_allocate_global(struct kgsl_device *device, } if (ret == 0) - kgsl_mmu_add_global(device, memdesc); + 
kgsl_mmu_add_global(device, memdesc, name); return ret; } diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 1b51eb591036..4ef9f80177d6 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -1192,6 +1192,41 @@ TRACE_EVENT(sparse_unbind, ); +TRACE_EVENT(kgsl_clock_throttling, + TP_PROTO( + int idle_10pct, + int crc_50pct, + int crc_more50pct, + int crc_less50pct, + int adj + ), + TP_ARGS( + idle_10pct, + crc_50pct, + crc_more50pct, + crc_less50pct, + adj + ), + TP_STRUCT__entry( + __field(int, idle_10pct) + __field(int, crc_50pct) + __field(int, crc_more50pct) + __field(int, crc_less50pct) + __field(int, adj) + ), + TP_fast_assign( + __entry->idle_10pct = idle_10pct; + __entry->crc_50pct = crc_50pct; + __entry->crc_more50pct = crc_more50pct; + __entry->crc_less50pct = crc_less50pct; + __entry->adj = adj; + ), + TP_printk("idle_10=%d crc_50=%d crc_more50=%d crc_less50=%d adj=%d", + __entry->idle_10pct, __entry->crc_50pct, __entry->crc_more50pct, + __entry->crc_less50pct, __entry->adj + ) +); + #endif /* _KGSL_TRACE_H */ /* This part must be outside protection */ diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index bc5126a08ecb..b02abfc58aea 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -1195,4 +1195,14 @@ config TOUCHSCREEN_FT5X06 To compile this driver as a module, choose M here: the module will be called ft5x06_ts. +config FT_SECURE_TOUCH + bool "Secure Touch support for Focaltech Touchscreen" + depends on TOUCHSCREEN_FT5X06 + help + Say Y here + -Focaltech touch driver is connected + -To enable secure touch for Focaltech touch driver + + If unsure, say N. + endif diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c index b02e8ecf8608..499ba692d53c 100644 --- a/drivers/input/touchscreen/ft5x06_ts.c +++ b/drivers/input/touchscreen/ft5x06_ts.c @@ -47,6 +47,14 @@ #define FT_SUSPEND_LEVEL 1 #endif +#if defined(CONFIG_FT_SECURE_TOUCH) +#include <linux/completion.h> +#include <linux/atomic.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> +static irqreturn_t ft5x06_ts_interrupt(int irq, void *data); +#endif + #define FT_DRIVER_VERSION 0x02 #define FT_META_REGS 3 @@ -148,9 +156,6 @@ #define FT_FW_FILE_MAJ_VER_FT6X36(x) ((x)->data[0x10a]) #define FT_FW_FILE_VENDOR_ID_FT6X36(x) ((x)->data[0x108]) -#define FT_SYSFS_TS_INFO "ts_info" - - /** * Application data verification will be run before upgrade flow. 
* Firmware image stores some flags with negative and positive value @@ -202,6 +207,8 @@ #define PINCTRL_STATE_SUSPEND "pmx_ts_suspend" #define PINCTRL_STATE_RELEASE "pmx_ts_release" +static irqreturn_t ft5x06_ts_interrupt(int irq, void *data); + enum { FT_BLOADER_VERSION_LZ4 = 0, FT_BLOADER_VERSION_Z7 = 1, @@ -215,8 +222,17 @@ enum { FT_FT5336_FAMILY_ID_0x14 = 0x14, }; -#define FT_STORE_TS_INFO(buf, id, name, max_tch, group_id, fw_vkey_support, \ - fw_name, fw_maj, fw_min, fw_sub_min) \ +#define FT_STORE_TS_INFO(buf, id, fw_maj, fw_min, fw_sub_min) \ + snprintf(buf, FT_INFO_MAX_LEN, \ + "vendor name = Focaltech\n" \ + "model = 0x%x\n" \ + "fw_version = %d.%d.%d\n", \ + id, fw_maj, fw_min, fw_sub_min) +#define FT_TS_INFO_SYSFS_DIR_NAME "ts_info" +static char *ts_info_buff; + +#define FT_STORE_TS_DBG_INFO(buf, id, name, max_tch, group_id, \ + fw_vkey_support, fw_name, fw_maj, fw_min, fw_sub_min) \ snprintf(buf, FT_INFO_MAX_LEN, \ "controller\t= focaltech\n" \ "model\t\t= 0x%x\n" \ @@ -240,6 +256,7 @@ struct ft5x06_ts_data { struct ft5x06_gesture_platform_data *gesture_pdata; struct regulator *vdd; struct regulator *vcc_i2c; + struct mutex ft_clk_io_ctrl_mutex; char fw_name[FT_FW_NAME_MAX_LEN]; bool loading_fw; u8 family_id; @@ -251,8 +268,9 @@ struct ft5x06_ts_data { u32 tch_data_len; u8 fw_ver[3]; u8 fw_vendor_id; - struct kobject ts_info_kobj; + struct kobject *ts_info_kobj; #if defined(CONFIG_FB) + struct work_struct fb_notify_work; struct notifier_block fb_notif; #elif defined(CONFIG_HAS_EARLYSUSPEND) struct early_suspend early_suspend; @@ -261,11 +279,255 @@ struct ft5x06_ts_data { struct pinctrl_state *pinctrl_state_active; struct pinctrl_state *pinctrl_state_suspend; struct pinctrl_state *pinctrl_state_release; +#if defined(CONFIG_FT_SECURE_TOUCH) + atomic_t st_enabled; + atomic_t st_pending_irqs; + struct completion st_powerdown; + struct completion st_irq_processed; + bool st_initialized; + struct clk *core_clk; + struct clk *iface_clk; +#endif }; static int ft5x06_ts_start(struct device *dev); static int ft5x06_ts_stop(struct device *dev); +#if defined(CONFIG_FT_SECURE_TOUCH) +static void ft5x06_secure_touch_init(struct ft5x06_ts_data *data) +{ + data->st_initialized = 0; + + init_completion(&data->st_powerdown); + init_completion(&data->st_irq_processed); + + /* Get clocks */ + data->core_clk = devm_clk_get(&data->client->dev, "core_clk"); + if (IS_ERR(data->core_clk)) { + data->core_clk = NULL; + dev_warn(&data->client->dev, + "%s: core_clk is not defined\n", __func__); + } + + data->iface_clk = devm_clk_get(&data->client->dev, "iface_clk"); + if (IS_ERR(data->iface_clk)) { + data->iface_clk = NULL; + dev_warn(&data->client->dev, + "%s: iface_clk is not defined", __func__); + } + data->st_initialized = 1; +} + +static void ft5x06_secure_touch_notify(struct ft5x06_ts_data *data) +{ + sysfs_notify(&data->input_dev->dev.kobj, NULL, "secure_touch"); +} + +static irqreturn_t ft5x06_filter_interrupt(struct ft5x06_ts_data *data) +{ + if (atomic_read(&data->st_enabled)) { + if (atomic_cmpxchg(&data->st_pending_irqs, 0, 1) == 0) { + reinit_completion(&data->st_irq_processed); + ft5x06_secure_touch_notify(data); + wait_for_completion_interruptible( + &data->st_irq_processed); + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* + * 'blocking' variable will have value 'true' when we want to prevent the driver + * from accessing the xPU/SMMU protected HW resources while the session is + * active. 
+ */ +static void ft5x06_secure_touch_stop(struct ft5x06_ts_data *data, bool blocking) +{ + if (atomic_read(&data->st_enabled)) { + atomic_set(&data->st_pending_irqs, -1); + ft5x06_secure_touch_notify(data); + if (blocking) + wait_for_completion_interruptible( + &data->st_powerdown); + } +} + +static int ft5x06_clk_prepare_enable(struct ft5x06_ts_data *data) +{ + int ret; + + ret = clk_prepare_enable(data->iface_clk); + if (ret) { + dev_err(&data->client->dev, + "error on clk_prepare_enable(iface_clk):%d\n", ret); + return ret; + } + + ret = clk_prepare_enable(data->core_clk); + if (ret) { + clk_disable_unprepare(data->iface_clk); + dev_err(&data->client->dev, + "error clk_prepare_enable(core_clk):%d\n", ret); + } + return ret; +} + +static void ft5x06_clk_disable_unprepare(struct ft5x06_ts_data *data) +{ + clk_disable_unprepare(data->core_clk); + clk_disable_unprepare(data->iface_clk); +} + +static int ft5x06_bus_get(struct ft5x06_ts_data *data) +{ + int retval; + + mutex_lock(&data->ft_clk_io_ctrl_mutex); + retval = pm_runtime_get_sync(data->client->adapter->dev.parent); + if (retval >= 0 && data->core_clk != NULL && data->iface_clk != NULL) { + retval = ft5x06_clk_prepare_enable(data); + if (retval) + pm_runtime_put_sync(data->client->adapter->dev.parent); + } + mutex_unlock(&data->ft_clk_io_ctrl_mutex); + return retval; +} + +static void ft5x06_bus_put(struct ft5x06_ts_data *data) +{ + mutex_lock(&data->ft_clk_io_ctrl_mutex); + if (data->core_clk != NULL && data->iface_clk != NULL) + ft5x06_clk_disable_unprepare(data); + pm_runtime_put_sync(data->client->adapter->dev.parent); + mutex_unlock(&data->ft_clk_io_ctrl_mutex); +} + +static ssize_t ft5x06_secure_touch_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ft5x06_ts_data *data = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d", atomic_read(&data->st_enabled)); +} + +/* + * Accept only "0" and "1" valid values. + * "0" will reset the st_enabled flag, then wake up the reading process and + * the interrupt handler. + * The bus driver is notified via pm_runtime that it is not required to stay + * awake anymore. + * It will also make sure the queue of events is emptied in the controller, + * in case a touch happened in between the secure touch being disabled and + * the local ISR being ungated. + * "1" will set the st_enabled flag and clear the st_pending_irqs flag. + * The bus driver is requested via pm_runtime to stay awake. 
+ */ +static ssize_t ft5x06_secure_touch_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ft5x06_ts_data *data = dev_get_drvdata(dev); + unsigned long value; + int err = 0; + + if (count > 2) + return -EINVAL; + err = kstrtoul(buf, 10, &value); + if (err != 0) + return err; + + if (!data->st_initialized) + return -EIO; + + err = count; + switch (value) { + case 0: + if (atomic_read(&data->st_enabled) == 0) + break; + ft5x06_bus_put(data); + atomic_set(&data->st_enabled, 0); + ft5x06_secure_touch_notify(data); + complete(&data->st_irq_processed); + ft5x06_ts_interrupt(data->client->irq, data); + complete(&data->st_powerdown); + break; + + case 1: + if (atomic_read(&data->st_enabled)) { + err = -EBUSY; + break; + } + synchronize_irq(data->client->irq); + if (ft5x06_bus_get(data) < 0) { + dev_err(&data->client->dev, "ft5x06_bus_get failed\n"); + err = -EIO; + break; + } + reinit_completion(&data->st_powerdown); + reinit_completion(&data->st_irq_processed); + atomic_set(&data->st_enabled, 1); + atomic_set(&data->st_pending_irqs, 0); + break; + + default: + dev_err(&data->client->dev, "unsupported value: %lu\n", value); + err = -EINVAL; + break; + } + return err; +} + +/* + * This function returns whether there are pending interrupts, or + * other error conditions that need to be signaled to the userspace library, + * according to the following logic: + * - st_enabled is 0 if secure touch is not enabled, returning -EBADF + * - st_pending_irqs is -1 to signal that secure touch is being stopped, + * returning -EINVAL + * - st_pending_irqs is 1 to signal that there is a pending irq, returning + * the value "1" to the sysfs read operation + * - st_pending_irqs is 0 (only remaining case left) if the pending interrupt + * has been processed, so the interrupt handler can be allowed to continue. 
+ */ +static ssize_t ft5x06_secure_touch_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ft5x06_ts_data *data = dev_get_drvdata(dev); + int val = 0; + + if (atomic_read(&data->st_enabled) == 0) + return -EBADF; + if (atomic_cmpxchg(&data->st_pending_irqs, -1, 0) == -1) + return -EINVAL; + if (atomic_cmpxchg(&data->st_pending_irqs, 1, 0) == 1) + val = 1; + else + complete(&data->st_irq_processed); + return scnprintf(buf, PAGE_SIZE, "%u", val); +} +#else +static void ft5x06_secure_touch_init(struct ft5x06_ts_data *data) +{ +} +static irqreturn_t ft5x06_filter_interrupt(struct ft5x06_ts_data *data) +{ + return IRQ_NONE; +} +static void ft5x06_secure_touch_stop(struct ft5x06_ts_data *data, bool blocking) +{ +} +#endif + +static struct device_attribute attrs[] = { +#if defined(CONFIG_FT_SECURE_TOUCH) + __ATTR(secure_touch_enable, (S_IRUGO | S_IWUSR | S_IWGRP), + ft5x06_secure_touch_enable_show, + ft5x06_secure_touch_enable_store), + __ATTR(secure_touch, S_IRUGO, + ft5x06_secure_touch_show, NULL), +#endif +}; + static inline bool ft5x06_gesture_support_enabled(void) { return config_enabled(CONFIG_TOUCHSCREEN_FT5X06_GESTURE); @@ -592,6 +854,9 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } + if (ft5x06_filter_interrupt(data) == IRQ_HANDLED) + return IRQ_HANDLED; + ip_dev = data->input_dev; buf = data->tch_data; @@ -619,6 +884,10 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id) } for (i = 0; i < data->pdata->num_max_touches; i++) { + /* + * Getting the finger ID of the touch event incase of + * multiple touch events + */ id = (buf[FT_TOUCH_ID_POS + FT_ONE_TCH_LEN * i]) >> 4; if (id >= FT_MAX_ID) break; @@ -1056,6 +1325,8 @@ static int ft5x06_ts_suspend(struct device *dev) return 0; } + ft5x06_secure_touch_stop(data, true); + if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support && device_may_wakeup(dev) && data->gesture_pdata->gesture_enable_to_set) { @@ -1082,6 +1353,8 @@ static int ft5x06_ts_resume(struct device *dev) return 0; } + ft5x06_secure_touch_stop(data, true); + if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support && device_may_wakeup(dev) && !(data->gesture_pdata->in_pocket) && @@ -1137,6 +1410,13 @@ static int ft5x06_ts_resume(struct device *dev) #endif #if defined(CONFIG_FB) +static void fb_notify_resume_work(struct work_struct *work) +{ + struct ft5x06_ts_data *ft5x06_data = + container_of(work, struct ft5x06_ts_data, fb_notify_work); + ft5x06_ts_resume(&ft5x06_data->client->dev); +} + static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { @@ -1145,13 +1425,27 @@ static int fb_notifier_callback(struct notifier_block *self, struct ft5x06_ts_data *ft5x06_data = container_of(self, struct ft5x06_ts_data, fb_notif); - if (evdata && evdata->data && event == FB_EVENT_BLANK && - ft5x06_data && ft5x06_data->client) { + if (evdata && evdata->data && ft5x06_data && ft5x06_data->client) { blank = evdata->data; - if (*blank == FB_BLANK_UNBLANK) - ft5x06_ts_resume(&ft5x06_data->client->dev); - else if (*blank == FB_BLANK_POWERDOWN) - ft5x06_ts_suspend(&ft5x06_data->client->dev); + if (ft5x06_data->pdata->resume_in_workqueue) { + if (event == FB_EARLY_EVENT_BLANK && + *blank == FB_BLANK_UNBLANK) + schedule_work(&ft5x06_data->fb_notify_work); + else if (event == FB_EVENT_BLANK && + *blank == FB_BLANK_POWERDOWN) { + flush_work(&ft5x06_data->fb_notify_work); + ft5x06_ts_suspend(&ft5x06_data->client->dev); + } + } else { + if (event == 
FB_EVENT_BLANK) { + if (*blank == FB_BLANK_UNBLANK) + ft5x06_ts_resume( + &ft5x06_data->client->dev); + else if (*blank == FB_BLANK_POWERDOWN) + ft5x06_ts_suspend( + &ft5x06_data->client->dev); + } + } } return 0; @@ -1163,6 +1457,13 @@ static void ft5x06_ts_early_suspend(struct early_suspend *handler) struct ft5x06_ts_data, early_suspend); + /* + * During early suspend/late resume, the driver doesn't access xPU/SMMU + * protected HW resources. So, there is no compelling need to block, + * but notifying the userspace that a power event has occurred is + * enough. Hence 'blocking' variable can be set to false. + */ + ft5x06_secure_touch_stop(data, false); ft5x06_ts_suspend(&data->client->dev); } @@ -1172,6 +1473,7 @@ static void ft5x06_ts_late_resume(struct early_suspend *handler) struct ft5x06_ts_data, early_suspend); + ft5x06_secure_touch_stop(data, false); ft5x06_ts_resume(&data->client->dev); } #endif @@ -1488,11 +1790,13 @@ static int ft5x06_fw_upgrade(struct device *dev, bool force) ft5x06_update_fw_ver(data); - FT_STORE_TS_INFO(data->ts_info, data->family_id, data->pdata->name, + FT_STORE_TS_DBG_INFO(data->ts_info, data->family_id, data->pdata->name, data->pdata->num_max_touches, data->pdata->group_id, data->pdata->fw_vkey_support ? "yes" : "no", data->pdata->fw_name, data->fw_ver[0], data->fw_ver[1], data->fw_ver[2]); + FT_STORE_TS_INFO(ts_info_buff, data->family_id, data->fw_ver[0], + data->fw_ver[1], data->fw_ver[2]); rel_fw: release_firmware(fw); return rc; @@ -1595,22 +1899,13 @@ static ssize_t ft5x06_fw_name_store(struct device *dev, static DEVICE_ATTR(fw_name, 0664, ft5x06_fw_name_show, ft5x06_fw_name_store); - -static ssize_t ft5x06_ts_info_show(struct kobject *kobj, +static ssize_t ts_info_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct ft5x06_ts_data *data = container_of(kobj, - struct ft5x06_ts_data, ts_info_kobj); - return snprintf(buf, FT_INFO_MAX_LEN, - "vendor name = Focaltech\n" - "model = 0x%x\n" - "fw_verion = %d.%d.%d\n", - data->family_id, data->fw_ver[0], - data->fw_ver[1], data->fw_ver[2]); + strlcpy(buf, ts_info_buff, FT_INFO_MAX_LEN); + return strnlen(buf, FT_INFO_MAX_LEN); } - -static struct kobj_attribute ts_info_attribute = __ATTR(ts_info, - 0664, ft5x06_ts_info_show, NULL); +static struct kobj_attribute ts_info_attr = __ATTR_RO(ts_info); static bool ft5x06_debug_addr_is_valid(int addr) { @@ -1918,6 +2213,9 @@ static int ft5x06_parse_dt(struct device *dev, pdata->gesture_support = of_property_read_bool(np, "focaltech,gesture-support"); + pdata->resume_in_workqueue = of_property_read_bool(np, + "focaltech,resume-in-workqueue"); + rc = of_property_read_u32(np, "focaltech,family-id", &temp_val); if (!rc) pdata->family_id = temp_val; @@ -1959,7 +2257,8 @@ static int ft5x06_ts_probe(struct i2c_client *client, struct dentry *temp; u8 reg_value; u8 reg_addr; - int err, len; + int err, len, retval, attr_count; + if (client->dev.of_node) { pdata = devm_kzalloc(&client->dev, sizeof(struct ft5x06_ts_platform_data), GFP_KERNEL); @@ -2246,29 +2545,52 @@ static int ft5x06_ts_probe(struct i2c_client *client, dev_dbg(&client->dev, "touch threshold = %d\n", reg_value * 4); /*creation touch panel info kobj*/ - data->ts_info_kobj = *(kobject_create_and_add(FT_SYSFS_TS_INFO, - kernel_kobj)); - if (!&(data->ts_info_kobj)) { - dev_err(&client->dev, "kob creation failed .\n"); + data->ts_info_kobj = kobject_create_and_add(FT_TS_INFO_SYSFS_DIR_NAME, + kernel_kobj); + if (!data->ts_info_kobj) { + dev_err(&client->dev, "kobject creation failed.\n"); } 
else { - err = sysfs_create_file(&(data->ts_info_kobj), - &ts_info_attribute.attr); + err = sysfs_create_file(data->ts_info_kobj, &ts_info_attr.attr); if (err) { - kobject_put(&(data->ts_info_kobj)); - dev_err(&client->dev, "sysfs create fail .\n"); + kobject_put(data->ts_info_kobj); + dev_err(&client->dev, "sysfs creation failed.\n"); + } else { + ts_info_buff = devm_kzalloc(&client->dev, + FT_INFO_MAX_LEN, GFP_KERNEL); + if (!ts_info_buff) + goto free_debug_dir; + } + } + + /*Initialize secure touch */ + ft5x06_secure_touch_init(data); + ft5x06_secure_touch_stop(data, true); + mutex_init(&(data->ft_clk_io_ctrl_mutex)); + + /* Creation of secure touch sysfs files */ + for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { + retval = sysfs_create_file(&data->input_dev->dev.kobj, + &attrs[attr_count].attr); + if (retval < 0) { + dev_err(&client->dev, + "%s: Failed to create sysfs attributes\n", + __func__); + goto free_secure_touch_sysfs; } } ft5x06_update_fw_ver(data); ft5x06_update_fw_vendor_id(data); - FT_STORE_TS_INFO(data->ts_info, data->family_id, data->pdata->name, + FT_STORE_TS_DBG_INFO(data->ts_info, data->family_id, data->pdata->name, data->pdata->num_max_touches, data->pdata->group_id, data->pdata->fw_vkey_support ? "yes" : "no", data->pdata->fw_name, data->fw_ver[0], data->fw_ver[1], data->fw_ver[2]); - + FT_STORE_TS_INFO(ts_info_buff, data->family_id, data->fw_ver[0], + data->fw_ver[1], data->fw_ver[2]); #if defined(CONFIG_FB) + INIT_WORK(&data->fb_notify_work, fb_notify_resume_work); data->fb_notif.notifier_call = fb_notifier_callback; err = fb_register_client(&data->fb_notif); @@ -2285,6 +2607,11 @@ static int ft5x06_ts_probe(struct i2c_client *client, #endif return 0; +free_secure_touch_sysfs: + for (attr_count--; attr_count >= 0; attr_count--) { + sysfs_remove_file(&data->input_dev->dev.kobj, + &attrs[attr_count].attr); + } free_debug_dir: debugfs_remove_recursive(data->dir); free_force_update_fw_sys: @@ -2345,7 +2672,7 @@ unreg_inputdev: static int ft5x06_ts_remove(struct i2c_client *client) { struct ft5x06_ts_data *data = i2c_get_clientdata(client); - int retval; + int retval, attr_count; if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support) { device_init_wakeup(&client->dev, 0); @@ -2388,6 +2715,11 @@ static int ft5x06_ts_remove(struct i2c_client *client) } } + for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { + sysfs_remove_file(&data->input_dev->dev.kobj, + &attrs[attr_count].attr); + } + if (data->pdata->power_on) data->pdata->power_on(false); else @@ -2399,7 +2731,7 @@ static int ft5x06_ts_remove(struct i2c_client *client) ft5x06_power_init(data, false); input_unregister_device(data->input_dev); - kobject_put(&(data->ts_info_kobj)); + kobject_put(data->ts_info_kobj); return 0; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 190d294197a7..0608e08b4427 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -96,7 +96,7 @@ static void gic_do_wait_for_rwp(void __iomem *base) { u32 count = 1000000; /* 1s! 
*/ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -182,7 +182,7 @@ static int gic_peek_irq(struct irq_data *d, u32 offset) else base = gic_data.dist_base; - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); + return !!(readl_relaxed_no_log(base + offset + (gic_irq(d) / 32) * 4) & mask); } static void gic_poke_irq(struct irq_data *d, u32 offset) diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 572e3c637c7b..39a0845a886f 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -951,7 +951,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type, spin_unlock_irqrestore(&vfe_dev->common_data-> common_dev_data_lock, flags); } else { - if (frame_src == VFE_PIX_0) { + if (frame_src <= VFE_RAW_2) { msm_isp_check_for_output_error(vfe_dev, ts, &event_data.u.sof_info); } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index fdee3cabd097..a4eb80f31984 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -370,9 +370,6 @@ void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev, struct msm_vfe_fetch_engine_info *fetch_engine_info) { struct msm_isp_event_data fe_rd_done_event; - if (!fetch_engine_info->is_busy) - return; - memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data)); fe_rd_done_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id; diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c index 2af70b2ee1bf..3b38882c4c45 100644 --- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c +++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c @@ -1390,7 +1390,7 @@ int msm_jpegdma_hw_get_max_downscale(struct msm_jpegdma_device *dma) */ int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma) { - int i; + int i, j; int ret; unsigned int cnt; const void *property; @@ -1401,32 +1401,37 @@ int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma) dev_dbg(dma->dev, "Missing qos settings\n"); return 0; } + cnt /= 4; + if (cnt % 2) + return -EINVAL; + + dma->qos_regs_num = cnt / 2; - dma->qos_regs = kzalloc((sizeof(*dma->qos_regs) * cnt), GFP_KERNEL); + dma->qos_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) * + dma->qos_regs_num), GFP_KERNEL); if (!dma->qos_regs) return -ENOMEM; - for (i = 0; i < cnt; i = i + 2) { + for (i = 0, j = 0; i < cnt; i += 2, j++) { ret = of_property_read_u32_index(dma->dev->of_node, "qcom,qos-reg-settings", i, - &dma->qos_regs[i].reg); + &dma->qos_regs[j].reg); if (ret < 0) { - dev_err(dma->dev, "can not read qos reg %d\n", i); + dev_err(dma->dev, "can not read qos reg %d\n", j); goto error; } ret = of_property_read_u32_index(dma->dev->of_node, "qcom,qos-reg-settings", i + 1, - &dma->qos_regs[i].val); + &dma->qos_regs[j].val); if (ret < 0) { - dev_err(dma->dev, "can not read qos setting %d\n", i); + dev_err(dma->dev, "can not read qos setting %d\n", j); goto error; } - dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", i, - dma->qos_regs[i].reg, dma->qos_regs[i].val); + dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", 
j, + dma->qos_regs[j].reg, dma->qos_regs[j].val); } - dma->qos_regs_num = cnt; return 0; error: @@ -1452,7 +1457,7 @@ void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma) */ int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma) { - int i; + int i, j; int ret; unsigned int cnt; const void *property; @@ -1463,33 +1468,38 @@ int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma) dev_dbg(dma->dev, "Missing vbif settings\n"); return 0; } + cnt /= 4; + if (cnt % 2) + return -EINVAL; - dma->vbif_regs = kzalloc((sizeof(*dma->vbif_regs) * cnt), GFP_KERNEL); + dma->vbif_regs_num = cnt / 2; + + dma->vbif_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) * + dma->vbif_regs_num), GFP_KERNEL); if (!dma->vbif_regs) return -ENOMEM; - for (i = 0; i < cnt; i = i + 2) { + for (i = 0, j = 0; i < cnt; i += 2, j++) { ret = of_property_read_u32_index(dma->dev->of_node, "qcom,vbif-reg-settings", i, - &dma->vbif_regs[i].reg); + &dma->vbif_regs[j].reg); if (ret < 0) { - dev_err(dma->dev, "can not read vbif reg %d\n", i); + dev_err(dma->dev, "can not read vbif reg %d\n", j); goto error; } ret = of_property_read_u32_index(dma->dev->of_node, "qcom,vbif-reg-settings", i + 1, - &dma->vbif_regs[i].val); + &dma->vbif_regs[j].val); if (ret < 0) { - dev_err(dma->dev, "can not read vbif setting %d\n", i); + dev_err(dma->dev, "can not read vbif setting %d\n", j); goto error; } - dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", i, - dma->vbif_regs[i].reg, dma->vbif_regs[i].val); + dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", j, + dma->vbif_regs[j].reg, dma->vbif_regs[j].val); } - dma->vbif_regs_num = cnt; return 0; error: @@ -1515,7 +1525,7 @@ void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma) */ int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma) { - int i; + int i, j; int ret; unsigned int cnt; const void *property; @@ -1526,35 +1536,39 @@ int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma) dev_dbg(dma->dev, "Missing prefetch settings\n"); return 0; } + cnt /= 4; + if (cnt % 2) + return -EINVAL; + + dma->prefetch_regs_num = cnt / 2; - dma->prefetch_regs = kcalloc(cnt, sizeof(*dma->prefetch_regs), - GFP_KERNEL); + dma->prefetch_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) * + dma->prefetch_regs_num), GFP_KERNEL); if (!dma->prefetch_regs) return -ENOMEM; - for (i = 0; i < cnt; i = i + 2) { + for (i = 0, j = 0; i < cnt; i += 2, j++) { ret = of_property_read_u32_index(dma->dev->of_node, "qcom,prefetch-reg-settings", i, - &dma->prefetch_regs[i].reg); + &dma->prefetch_regs[j].reg); if (ret < 0) { - dev_err(dma->dev, "can not read prefetch reg %d\n", i); + dev_err(dma->dev, "can not read prefetch reg %d\n", j); goto error; } ret = of_property_read_u32_index(dma->dev->of_node, "qcom,prefetch-reg-settings", i + 1, - &dma->prefetch_regs[i].val); + &dma->prefetch_regs[j].val); if (ret < 0) { dev_err(dma->dev, "can not read prefetch setting %d\n", - i); + j); goto error; } - dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", i, - dma->prefetch_regs[i].reg, dma->prefetch_regs[i].val); + dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", j, + dma->prefetch_regs[j].reg, dma->prefetch_regs[j].val); } - dma->prefetch_regs_num = cnt; return 0; error: diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h index c2b928d2b5d3..198d130b24fc 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h +++ 
b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h @@ -101,4 +101,61 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_3ph = { {0x38, 0xFE}, {0x81c, 0x6}, }; + +struct csiphy_settings_t csiphy_combo_mode_v5_0 = { + { + {0x818, 0x1}, + {0x81c, 0x2}, + {0x004, 0x08}, + {0x704, 0x08}, + {0x204, 0x08}, + {0x404, 0x08}, + {0x604, 0x08}, + {0x02c, 0x1}, + {0x22c, 0x1}, + {0x42c, 0x1}, + {0x62c, 0x1}, + {0x72c, 0x1}, + {0x034, 0x0f}, + {0x234, 0x0f}, + {0x434, 0x0f}, + {0x634, 0x0f}, + {0x734, 0x0f}, + {0x01c, 0x0a}, + {0x21c, 0x0a}, + {0x41c, 0x0a}, + {0x61c, 0x0a}, + {0x71c, 0x0a}, + {0x014, 0x60}, + {0x214, 0x60}, + {0x414, 0x60}, + {0x614, 0x60}, + {0x714, 0x60}, + {0x728, 0x4}, + {0x428, 0x0a}, + {0x628, 0x0e}, + {0x03c, 0xb8}, + {0x73c, 0xb8}, + {0x23c, 0xb8}, + {0x43c, 0xb8}, + {0x63c, 0xb8}, + {0x000, 0x91}, + {0x700, 0x80}, + {0x200, 0x91}, + {0x400, 0x91}, + {0x600, 0x80}, + {0x70c, 0xA5}, + {0x60c, 0xA5}, + {0x010, 0x52}, + {0x710, 0x52}, + {0x210, 0x52}, + {0x410, 0x52}, + {0x610, 0x52}, + {0x038, 0xfe}, + {0x738, 0x1f}, + {0x238, 0xfe}, + {0x438, 0xfe}, + {0x638, 0x1f}, + } +}; #endif diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c index 30af06dc64f5..bdee620e8d45 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c @@ -46,7 +46,7 @@ #define NUM_LANES_OFFSET 4 #define CSI_3PHASE_HW 1 -#define MAX_LANES 4 +#define MAX_DPHY_DATA_LN 4 #define CLOCK_OFFSET 0x700 #define CSIPHY_SOF_DEBUG_COUNT 2 @@ -55,6 +55,22 @@ static struct v4l2_file_operations msm_csiphy_v4l2_subdev_fops; +static void msm_csiphy_write_settings( + struct csiphy_device *csiphy_dev, + struct csiphy_settings_t csiphy_settings) +{ + int i = 0; + + for (i = 0; i < MAX_CSIPHY_SETTINGS; i++) { + if (csiphy_settings.settings[i].addr == 0 && + csiphy_settings.settings[i].data == 0) + break; + + msm_camera_io_w(csiphy_settings.settings[i].data, + csiphy_dev->base + csiphy_settings.settings[i].addr); + } +} + static void msm_csiphy_cphy_irq_config( struct csiphy_device *csiphy_dev, struct msm_camera_csiphy_params *csiphy_params) @@ -418,7 +434,7 @@ static int msm_csiphy_2phase_lane_config( csiphybase = csiphy_dev->base; lane_mask = csiphy_params->lane_mask & 0x1f; - for (i = 0; i < MAX_LANES; i++) { + for (i = 0; i < MAX_DPHY_DATA_LN; i++) { if (mask == 0x2) { if (lane_mask & mask) lane_enable |= 0x80; @@ -437,7 +453,7 @@ static int msm_csiphy_2phase_lane_config( csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. mipi_csiphy_3ph_cmn_ctrl6.addr); - for (i = 0, mask = 0x1; i < MAX_LANES; i++) { + for (i = 0, mask = 0x1; i < MAX_DPHY_DATA_LN; i++) { if (!(lane_mask & mask)) { if (mask == 0x2) i--; @@ -562,112 +578,134 @@ static int msm_csiphy_2phase_lane_config_v50( struct csiphy_device *csiphy_dev, struct msm_camera_csiphy_params *csiphy_params) { - uint32_t val = 0, lane_enable = 0, clk_lane, mask = 1; + uint32_t lane_enable = 0, mask = 1; uint16_t lane_mask = 0, i = 0, offset; void __iomem *csiphybase; csiphybase = csiphy_dev->base; lane_mask = csiphy_params->lane_mask & 0x1f; - for (i = 0; i < MAX_LANES; i++) { + + lane_enable = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
+ mipi_csiphy_3ph_cmn_ctrl5.addr); + + /* write settle count and lane_enable */ + for (i = 0; i < MAX_DPHY_DATA_LN; i++) { if (mask == 0x2) { if (lane_mask & mask) lane_enable |= 0x80; i--; - } else if (lane_mask & mask) + offset = CLOCK_OFFSET; + } else if (lane_mask & mask) { lane_enable |= 0x1 << (i<<1); + offset = 0x200*i; + } + + if (lane_mask & mask) + msm_camera_io_w((csiphy_params->settle_cnt & 0xFF), + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg2.addr + offset); mask <<= 1; } - CDBG("%s:%d lane_enable: %d\n", __func__, __LINE__, lane_enable); + CDBG("%s:%d lane_enable: 0x%x\n", __func__, __LINE__, lane_enable); msm_camera_io_w(lane_enable, csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. mipi_csiphy_3ph_cmn_ctrl5.addr); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_cmn_ctrl6.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_cmn_ctrl6.addr); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_cmn_ctrl7.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_cmn_ctrl7.addr); - - for (i = 0, mask = 0x1; i < MAX_LANES; i++) { - if (!(lane_mask & mask)) { - if (mask == 0x2) - i--; - mask <<= 0x1; - continue; - } - if (mask == 0x2) { - val = 4; - offset = CLOCK_OFFSET; - clk_lane = 1; - i--; - } else { - offset = 0x200*i; - val = 0; - clk_lane = 0; - } - if (csiphy_params->combo_mode == 1) { - val |= 0xA; - if (mask == csiphy_dev->ctrl_reg-> - csiphy_reg.combo_clk_mask) { - val |= 0x4; - clk_lane = 1; - } - } - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl11.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl11.addr + offset); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl13.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl13.addr + offset); + /* write mode specific settings */ + if (csiphy_params->combo_mode == 1) + msm_csiphy_write_settings(csiphy_dev, + csiphy_dev->ctrl_reg->csiphy_combo_mode_settings); + else { msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg7.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg7.addr + offset); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg5.data, + mipi_csiphy_3ph_cmn_ctrl6.data, csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg5.addr + offset); - if (clk_lane == 1) - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnck_ctrl10.data, - csiphybase + - csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnck_ctrl10.addr); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl15.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl15.addr + offset); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl0.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl0.addr + offset); + mipi_csiphy_3ph_cmn_ctrl6.addr); msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg1.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg1.addr + offset); - msm_camera_io_w((csiphy_params->settle_cnt & 0xFF), + mipi_csiphy_3ph_cmn_ctrl7.data, csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
- mipi_csiphy_2ph_lnn_cfg2.addr + offset); - if (clk_lane == 1) - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnck_ctrl3.data, csiphybase + - csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnck_ctrl3.addr); + mipi_csiphy_3ph_cmn_ctrl7.addr); msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg4.data, csiphybase + + mipi_csiphy_2ph_lnck_ctrl10.data, + csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_cfg4.addr + offset); + mipi_csiphy_2ph_lnck_ctrl10.addr); msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl14.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_2ph_lnn_ctrl14.addr + offset); - mask <<= 1; + mipi_csiphy_2ph_lnck_ctrl3.data, csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_2ph_lnck_ctrl3.addr); + + for (i = 0, mask = 0x1; i < MAX_DPHY_DATA_LN; i++) { + if (!(lane_mask & mask)) { + if (mask == 0x2) + i--; + mask <<= 0x1; + continue; + } + if (mask == 0x2) { + offset = CLOCK_OFFSET; + i--; + } else { + offset = 0x200*i; + } + + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl11.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl11.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl13.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl13.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg7.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg7.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg5.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg5.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl15.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl15.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl0.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl0.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg1.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg1.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg4.data, csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_cfg4.addr + offset); + msm_camera_io_w(csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. + mipi_csiphy_2ph_lnn_ctrl14.data, + csiphybase + csiphy_dev->ctrl_reg-> + csiphy_3ph_reg. 
+ mipi_csiphy_2ph_lnn_ctrl14.addr + offset); + mask <<= 1; + } } msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params); return 0; @@ -986,19 +1024,20 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev) } CDBG("%s:%d called\n", __func__, __LINE__); + if (csiphy_dev->ref_count++) { + CDBG("%s csiphy refcount = %d\n", __func__, + csiphy_dev->ref_count); + return rc; + } + + CDBG("%s:%d called\n", __func__, __LINE__); if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) { pr_err("%s: csiphy invalid state %d\n", __func__, csiphy_dev->csiphy_state); rc = -EINVAL; return rc; } - CDBG("%s:%d called\n", __func__, __LINE__); - if (csiphy_dev->ref_count++) { - CDBG("%s csiphy refcount = %d\n", __func__, - csiphy_dev->ref_count); - return rc; - } CDBG("%s:%d called\n", __func__, __LINE__); rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY, @@ -1063,6 +1102,14 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev) return rc; } csiphy_dev->csiphy_sof_debug_count = 0; + + CDBG("%s:%d called\n", __func__, __LINE__); + if (csiphy_dev->ref_count++) { + CDBG("%s csiphy refcount = %d\n", __func__, + csiphy_dev->ref_count); + return rc; + } + CDBG("%s:%d called\n", __func__, __LINE__); if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) { pr_err("%s: csiphy invalid state %d\n", __func__, @@ -1070,13 +1117,7 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev) rc = -EINVAL; return rc; } - CDBG("%s:%d called\n", __func__, __LINE__); - if (csiphy_dev->ref_count++) { - CDBG("%s csiphy refcount = %d\n", __func__, - csiphy_dev->ref_count); - return rc; - } CDBG("%s:%d called\n", __func__, __LINE__); rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY, CAM_AHB_SVS_VOTE); @@ -1651,6 +1692,8 @@ static int csiphy_probe(struct platform_device *pdev) new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v5_0; new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V50; new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW; + new_csiphy_dev->ctrl_reg->csiphy_combo_mode_settings = + csiphy_combo_mode_v5_0; } else { pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__, new_csiphy_dev->hw_dts_version); diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h index 4944ac606bc8..aba88da1157e 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h @@ -24,12 +24,17 @@ #define MAX_CSIPHY 3 #define CSIPHY_NUM_CLK_MAX 16 +#define MAX_CSIPHY_SETTINGS 120 struct csiphy_reg_t { uint32_t addr; uint32_t data; }; +struct csiphy_settings_t { + struct csiphy_reg_t settings[MAX_CSIPHY_SETTINGS]; +}; + struct csiphy_reg_parms_t { /*MIPI CSI PHY registers*/ uint32_t mipi_csiphy_lnn_cfg1_addr; @@ -140,6 +145,7 @@ struct csiphy_reg_3ph_parms_t { struct csiphy_ctrl_t { struct csiphy_reg_parms_t csiphy_reg; struct csiphy_reg_3ph_parms_t csiphy_3ph_reg; + struct csiphy_settings_t csiphy_combo_mode_settings; }; enum msm_csiphy_state_t { diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index 3124fd8a1777..5f749bd46273 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -631,11 +631,6 @@ static int32_t msm_flash_release( struct msm_flash_ctrl_t *flash_ctrl) { int32_t rc = 0; - if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_RELEASE) { - pr_err("%s:%d Invalid flash state = %d", 
- __func__, __LINE__, flash_ctrl->flash_state); - return 0; - } rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL); if (rc < 0) { @@ -663,24 +658,49 @@ static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl, rc = msm_flash_init(flash_ctrl, flash_data); break; case CFG_FLASH_RELEASE: - if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) + if (flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) { rc = flash_ctrl->func_tbl->camera_flash_release( flash_ctrl); + } else { + CDBG(pr_fmt("Invalid state : %d\n"), + flash_ctrl->flash_state); + } break; case CFG_FLASH_OFF: - if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) + if ((flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) && + (flash_ctrl->flash_state != MSM_CAMERA_FLASH_OFF)) { rc = flash_ctrl->func_tbl->camera_flash_off( flash_ctrl, flash_data); + if (!rc) + flash_ctrl->flash_state = MSM_CAMERA_FLASH_OFF; + } else { + CDBG(pr_fmt("Invalid state : %d\n"), + flash_ctrl->flash_state); + } break; case CFG_FLASH_LOW: - if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) + if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) || + (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) { rc = flash_ctrl->func_tbl->camera_flash_low( flash_ctrl, flash_data); + if (!rc) + flash_ctrl->flash_state = MSM_CAMERA_FLASH_LOW; + } else { + CDBG(pr_fmt("Invalid state : %d\n"), + flash_ctrl->flash_state); + } break; case CFG_FLASH_HIGH: - if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) + if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) || + (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) { rc = flash_ctrl->func_tbl->camera_flash_high( flash_ctrl, flash_data); + if (!rc) + flash_ctrl->flash_state = MSM_CAMERA_FLASH_HIGH; + } else { + CDBG(pr_fmt("Invalid state : %d\n"), + flash_ctrl->flash_state); + } break; default: rc = -EFAULT; diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h index f6ac16f57080..ad619fa4eb63 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h @@ -27,6 +27,9 @@ enum msm_camera_flash_state_t { MSM_CAMERA_FLASH_INIT, + MSM_CAMERA_FLASH_OFF, + MSM_CAMERA_FLASH_LOW, + MSM_CAMERA_FLASH_HIGH, MSM_CAMERA_FLASH_RELEASE, }; diff --git a/drivers/media/platform/msm/sde/Kconfig b/drivers/media/platform/msm/sde/Kconfig index fcd461bb1167..85f5f4257ddb 100644 --- a/drivers/media/platform/msm/sde/Kconfig +++ b/drivers/media/platform/msm/sde/Kconfig @@ -5,4 +5,13 @@ config MSM_SDE_ROTATOR select VIDEOBUF2_CORE select SW_SYNC if SYNC ---help--- - Enable support of V4L2 rotator driver.
\ No newline at end of file + Enable support of V4L2 rotator driver. + +config MSM_SDE_ROTATOR_EVTLOG_DEBUG + depends on MSM_SDE_ROTATOR + bool "Enable sde rotator debugging" + ---help--- + The SDE rotator debugging provides support to enable rotator debugging + features to: Dump rotator registers during driver errors, panic + driver during fatal errors and enable some rotator driver logging + into an internal buffer (this avoids logging overhead). diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h index b3e81705bdf4..67c6b11329d8 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h @@ -92,6 +92,12 @@ enum sde_bus_clients { SDE_MAX_BUS_CLIENTS }; +enum sde_rot_regdump_access { + SDE_ROT_REGDUMP_READ, + SDE_ROT_REGDUMP_WRITE, + SDE_ROT_REGDUMP_MAX +}; + struct reg_bus_client { char name[MAX_CLIENT_NAME_LEN]; short usecase_ndx; @@ -107,6 +113,21 @@ struct sde_smmu_client { bool domain_attached; }; +struct sde_rot_vbif_debug_bus { + u32 disable_bus_addr; + u32 block_bus_addr; + u32 bit_offset; + u32 block_cnt; + u32 test_pnt_cnt; +}; + +struct sde_rot_regdump { + char *name; + u32 offset; + u32 len; + enum sde_rot_regdump_access access; +}; + struct sde_rot_data_type { u32 mdss_version; @@ -140,6 +161,14 @@ struct sde_rot_data_type { int iommu_attached; int iommu_ref_cnt; + + struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus; + u32 nrt_vbif_dbg_bus_size; + + struct sde_rot_regdump *regdump; + u32 regdump_size; + + void *sde_rot_hw; }; int sde_rotator_base_init(struct sde_rot_data_type **pmdata, diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index e3e71936b8e4..3e5cbdecfba4 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -35,6 +35,7 @@ #include "sde_rotator_r1.h" #include "sde_rotator_r3.h" #include "sde_rotator_trace.h" +#include "sde_rotator_debug.h" /* waiting for hw time out, 3 vsync for 30fps*/ #define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100 @@ -127,6 +128,7 @@ static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus, bus->curr_bw_uc_idx = new_uc_idx; bus->curr_quota_val = quota; + SDEROT_EVTLOG(new_uc_idx, quota); SDEROT_DBG("uc_idx=%d quota=%llu\n", new_uc_idx, quota); ATRACE_BEGIN("msm_bus_scale_req_rot"); ret = msm_bus_scale_client_update_request(bus->bus_hdl, @@ -274,6 +276,7 @@ static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on) return; } + SDEROT_EVTLOG(on); SDEROT_DBG("%s: rotator regulators", on ? "Enable" : "Disable"); ret = sde_rot_enable_vreg(mgr->module_power.vreg_config, mgr->module_power.num_vreg, on); @@ -307,6 +310,7 @@ static int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable) } if (changed) { + SDEROT_EVTLOG(enable); SDEROT_DBG("Rotator clk %s\n", enable ? 
"enable" : "disable"); for (i = 0; i < mgr->num_rot_clk; i++) { clk = mgr->rot_clk[i].clk; @@ -394,6 +398,7 @@ static bool sde_rotator_is_work_pending(struct sde_rot_mgr *mgr, static void sde_rotator_clear_fence(struct sde_rot_entry *entry) { if (entry->input_fence) { + SDEROT_EVTLOG(entry->input_fence, 1111); SDEROT_DBG("sys_fence_put i:%p\n", entry->input_fence); sde_rotator_put_sync_fence(entry->input_fence); entry->input_fence = NULL; @@ -404,6 +409,7 @@ static void sde_rotator_clear_fence(struct sde_rot_entry *entry) if (entry->fenceq && entry->fenceq->timeline) sde_rotator_resync_timeline(entry->fenceq->timeline); + SDEROT_EVTLOG(entry->output_fence, 2222); SDEROT_DBG("sys_fence_put o:%p\n", entry->output_fence); sde_rotator_put_sync_fence(entry->output_fence); entry->output_fence = NULL; @@ -565,6 +571,7 @@ static struct sde_rot_perf *sde_rotator_find_session( static void sde_rotator_release_data(struct sde_rot_entry *entry) { + SDEROT_EVTLOG(entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr); sde_mdp_data_free(&entry->src_buf, true, DMA_TO_DEVICE); sde_mdp_data_free(&entry->dst_buf, true, DMA_FROM_DEVICE); } @@ -719,6 +726,10 @@ static struct sde_rot_hw_resource *sde_rotator_get_hw_resource( } } atomic_inc(&hw->num_active); + SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count, + mgr->rdot_limit, entry->perf->rdot_limit, + mgr->wrot_limit, entry->perf->wrot_limit, + entry->item.session_id, entry->item.sequence_id); SDEROT_DBG("active=%d pending=%d rdot=%u/%u wrot=%u/%u s:%d.%d\n", atomic_read(&hw->num_active), hw->pending_count, mgr->rdot_limit, entry->perf->rdot_limit, @@ -766,6 +777,8 @@ static void sde_rotator_put_hw_resource(struct sde_rot_queue *queue, if (hw_res) wake_up(&hw_res->wait_queue); } + SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count, + entry->item.session_id, entry->item.sequence_id); SDEROT_DBG("active=%d pending=%d s:%d.%d\n", atomic_read(&hw->num_active), hw->pending_count, entry->item.session_id, entry->item.sequence_id); @@ -1125,6 +1138,15 @@ static void sde_rotator_commit_handler(struct work_struct *work) mgr = entry->private->mgr; + SDEROT_EVTLOG( + entry->item.session_id, entry->item.sequence_id, + entry->item.src_rect.x, entry->item.src_rect.y, + entry->item.src_rect.w, entry->item.src_rect.h, + entry->item.dst_rect.x, entry->item.dst_rect.y, + entry->item.dst_rect.w, entry->item.dst_rect.h, + entry->item.flags, + entry->dnsc_factor_w, entry->dnsc_factor_h); + SDEDEV_DBG(mgr->device, "commit handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dnsc:%u/%u\n", entry->item.session_id, entry->item.sequence_id, @@ -1233,11 +1255,13 @@ static void sde_rotator_done_handler(struct work_struct *work) entry->item.flags, entry->dnsc_factor_w, entry->dnsc_factor_h); + SDEROT_EVTLOG(entry->item.session_id, 0); ret = mgr->ops_wait_for_entry(hw, entry); if (ret) { SDEROT_ERR("fail to wait for completion %d\n", ret); atomic_inc(&request->failed_count); } + SDEROT_EVTLOG(entry->item.session_id, 1); if (entry->item.ts) entry->item.ts[SDE_ROTATOR_TS_DONE] = ktime_get(); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c index dae1b51bfaa8..c609dbd2036e 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c @@ -22,6 +22,619 @@ #include "sde_rotator_core.h" #include "sde_rotator_dev.h" +#ifdef CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG +#define SDE_EVTLOG_DEFAULT_ENABLE 1 +#else +#define 
SDE_EVTLOG_DEFAULT_ENABLE 0 +#endif +#define SDE_EVTLOG_DEFAULT_PANIC 1 +#define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM +#define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM + +/* + * evtlog will print this number of entries when it is called through + * sysfs node or panic. This prevents kernel log from evtlog message + * flood. + */ +#define SDE_ROT_EVTLOG_PRINT_ENTRY 256 + +/* + * evtlog keeps this number of entries in memory for debug purpose. This + * number must be greater than print entry to prevent out of bound evtlog + * entry array access. + */ +#define SDE_ROT_EVTLOG_ENTRY (SDE_ROT_EVTLOG_PRINT_ENTRY * 4) +#define SDE_ROT_EVTLOG_MAX_DATA 15 +#define SDE_ROT_EVTLOG_BUF_MAX 512 +#define SDE_ROT_EVTLOG_BUF_ALIGN 32 +#define SDE_ROT_DEBUG_BASE_MAX 10 + +static DEFINE_SPINLOCK(sde_rot_xlock); + +/* + * tlog - EVTLOG entry structure + * @counter - EVTLOG entry counter + * @time - timestamp of EVTLOG entry + * @name - function name of EVTLOG entry + * @line - line number of EVTLOG entry + * @data - EVTLOG data contents + * @data_cnt - number of data contents + * @pid - pid of current calling thread + */ +struct tlog { + u32 counter; + s64 time; + const char *name; + int line; + u32 data[SDE_ROT_EVTLOG_MAX_DATA]; + u32 data_cnt; + int pid; +}; + +/* + * sde_rot_dbg_evtlog - EVTLOG debug data structure + * @logs - EVTLOG entries + * @first - first entry index in the EVTLOG + * @last - last entry index in the EVTLOG + * @curr - curr entry index in the EVTLOG + * @evtlog - EVTLOG debugfs handle + * @evtlog_enable - boolean indicates EVTLOG enable/disable + * @panic_on_err - boolean indicates issue panic after EVTLOG dump + * @enable_reg_dump - control in-log/memory dump for rotator registers + * @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus + * @evtlog_dump_work - schedule work structure for timeout handler + * @work_dump_reg - storage for register dump control in schedule work + * @work_panic - storage for panic control in schedule work + * @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work + * @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping + * @reg_dump_array - memory buffer for rotator registers dumping + */ +struct sde_rot_dbg_evtlog { + struct tlog logs[SDE_ROT_EVTLOG_ENTRY]; + u32 first; + u32 last; + u32 curr; + struct dentry *evtlog; + u32 evtlog_enable; + u32 panic_on_err; + u32 enable_reg_dump; + u32 enable_vbif_dbgbus_dump; + struct work_struct evtlog_dump_work; + bool work_dump_reg; + bool work_panic; + bool work_vbif_dbgbus; + u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */ + u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX]; +} sde_rot_dbg_evtlog; + +/* + * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG + * enable/disable + * @flag - EVTLOG option flag + */ +static inline bool sde_rot_evtlog_is_enabled(u32 flag) +{ + return (flag & sde_rot_dbg_evtlog.evtlog_enable) || + (flag == SDE_ROT_EVTLOG_ALL && + sde_rot_dbg_evtlog.evtlog_enable); +} + +/* + * __vbif_debug_bus - helper function for VBIF debug bus dump + * @head - VBIF debug bus data structure + * @vbif_base - VBIF IO mapped address + * @dump_addr - output buffer for memory dump option + * @in_log - boolean indicates in-log dump option + */ +static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head, + void __iomem *vbif_base, u32 *dump_addr, bool in_log) +{ + int i, j; + u32 val; + + if (!dump_addr && !in_log) + return; + + for (i = 0; i < head->block_cnt; i++) { + writel_relaxed(1 << (i 
+ head->bit_offset), + vbif_base + head->block_bus_addr); + /* make sure that current bus blcok enable */ + wmb(); + for (j = 0; j < head->test_pnt_cnt; j++) { + writel_relaxed(j, vbif_base + head->block_bus_addr + 4); + /* make sure that test point is enabled */ + wmb(); + val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT); + if (dump_addr) { + *dump_addr++ = head->block_bus_addr; + *dump_addr++ = i; + *dump_addr++ = j; + *dump_addr++ = val; + } + if (in_log) + pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n", + head->block_bus_addr, i, j, val); + } + } +} + +/* + * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump + * @bus_dump_flag - dump flag controlling in-log/memory dump option + * @dump_mem - output buffer for memory dump location + */ +static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag, + u32 **dump_mem) +{ + struct sde_rot_data_type *mdata = sde_rot_get_mdata(); + bool in_log, in_mem; + u32 *dump_addr = NULL; + u32 value; + struct sde_rot_vbif_debug_bus *head; + phys_addr_t phys = 0; + int i, list_size = 0; + void __iomem *vbif_base; + struct sde_rot_vbif_debug_bus *dbg_bus; + u32 bus_size; + + pr_info("======== NRT VBIF Debug bus DUMP =========\n"); + vbif_base = mdata->vbif_nrt_io.base; + dbg_bus = mdata->nrt_vbif_dbg_bus; + bus_size = mdata->nrt_vbif_dbg_bus_size; + + if (!vbif_base || !dbg_bus || !bus_size) + return; + + /* allocate memory for each test point */ + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + list_size += (head->block_cnt * head->test_pnt_cnt); + } + + /* 4 bytes * 4 entries for each test point*/ + list_size *= 16; + + in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG); + in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM); + + if (in_mem) { + if (!(*dump_mem)) + *dump_mem = dma_alloc_coherent(&mdata->pdev->dev, + list_size, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n", + __func__, dump_addr, dump_addr + list_size); + } else { + in_mem = false; + pr_err("dump_mem: allocation fails\n"); + } + } + + sde_smmu_ctrl(1); + + value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON); + writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON); + + /* make sure that vbif core is on */ + wmb(); + + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + + writel_relaxed(0, vbif_base + head->disable_bus_addr); + writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL); + /* make sure that other bus is off */ + wmb(); + + __vbif_debug_bus(head, vbif_base, dump_addr, in_log); + if (dump_addr) + dump_addr += (head->block_cnt * head->test_pnt_cnt * 4); + } + + sde_smmu_ctrl(0); + + pr_info("========End VBIF Debug bus=========\n"); +} + +/* + * sde_rot_dump_reg - helper function for dumping rotator register set content + * @dump_name - register set name + * @reg_dump_flag - dumping flag controlling in-log/memory dump location + * @addr - starting address offset for dumping + * @len - range of the register set + * @dump_mem - output buffer for memory dump location option + */ +void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag, u32 addr, + int len, u32 **dump_mem) +{ + struct sde_rot_data_type *mdata = sde_rot_get_mdata(); + bool in_log, in_mem; + u32 *dump_addr = NULL; + phys_addr_t phys = 0; + int i; + + in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG); + in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM); + + pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n", + reg_dump_flag, in_log, in_mem); + + if (len % 16) + len += 16; + len /= 16; + + if (in_mem) { + if 
(!(*dump_mem)) + *dump_mem = dma_alloc_coherent(&mdata->pdev->dev, + len * 16, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + pr_info("%s: start_addr:0x%p end_addr:0x%p reg_addr=0x%X\n", + dump_name, dump_addr, dump_addr + (u32)len * 16, + addr); + } else { + in_mem = false; + pr_err("dump_mem: kzalloc fails!\n"); + } + } + + + for (i = 0; i < len; i++) { + u32 x0, x4, x8, xc; + + x0 = readl_relaxed(mdata->sde_io.base + addr+0x0); + x4 = readl_relaxed(mdata->sde_io.base + addr+0x4); + x8 = readl_relaxed(mdata->sde_io.base + addr+0x8); + xc = readl_relaxed(mdata->sde_io.base + addr+0xc); + + if (in_log) + pr_info("0x%08X : %08x %08x %08x %08x\n", + addr, x0, x4, x8, xc); + + if (dump_addr && in_mem) { + dump_addr[i*4] = x0; + dump_addr[i*4 + 1] = x4; + dump_addr[i*4 + 2] = x8; + dump_addr[i*4 + 3] = xc; + } + + addr += 16; + } +} + +/* + * sde_rot_dump_reg_all - dumping all SDE rotator registers + */ +static void sde_rot_dump_reg_all(void) +{ + struct sde_rot_data_type *mdata = sde_rot_get_mdata(); + struct sde_rot_regdump *head, *regdump; + u32 regdump_size; + int i; + + regdump = mdata->regdump; + regdump_size = mdata->regdump_size; + + if (!regdump || !regdump_size) + return; + + /* Enable clock to rotator if not yet enabled */ + sde_smmu_ctrl(1); + + for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) { + head = ®dump[i]; + + if (head->access == SDE_ROT_REGDUMP_WRITE) { + writel_relaxed(1, mdata->sde_io.base + head->offset); + /* Make sure write go through */ + wmb(); + } else { + sde_rot_dump_reg(head->name, + sde_rot_dbg_evtlog.enable_reg_dump, + head->offset, head->len, + &sde_rot_dbg_evtlog.reg_dump_array[i]); + } + } + + /* Disable rotator clock */ + sde_smmu_ctrl(0); +} + +/* + * __sde_rot_evtlog_dump_calc_range - calculate dump range for EVTLOG + */ +static bool __sde_rot_evtlog_dump_calc_range(void) +{ + static u32 next; + bool need_dump = true; + unsigned long flags; + struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog; + + spin_lock_irqsave(&sde_rot_xlock, flags); + + evtlog->first = next; + + if (evtlog->last == evtlog->first) { + need_dump = false; + goto dump_exit; + } + + if (evtlog->last < evtlog->first) { + evtlog->first %= SDE_ROT_EVTLOG_ENTRY; + if (evtlog->last < evtlog->first) + evtlog->last += SDE_ROT_EVTLOG_ENTRY; + } + + if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) { + pr_warn("evtlog buffer overflow before dump: %d\n", + evtlog->last - evtlog->first); + evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY; + } + next = evtlog->first + 1; + +dump_exit: + spin_unlock_irqrestore(&sde_rot_xlock, flags); + + return need_dump; +} + +/* + * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping + * @evtlog_buf: EVTLOG dump output buffer + * @evtlog_buf_size: EVTLOG output buffer size + */ +static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf, + ssize_t evtlog_buf_size) +{ + int i; + ssize_t off = 0; + struct tlog *log, *prev_log; + unsigned long flags; + + spin_lock_irqsave(&sde_rot_xlock, flags); + + log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first % + SDE_ROT_EVTLOG_ENTRY]; + + prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) % + SDE_ROT_EVTLOG_ENTRY]; + + off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d", + log->name, log->line); + + if (off < SDE_ROT_EVTLOG_BUF_ALIGN) { + memset((evtlog_buf + off), 0x20, + (SDE_ROT_EVTLOG_BUF_ALIGN - off)); + off = SDE_ROT_EVTLOG_BUF_ALIGN; + } + + off += snprintf((evtlog_buf + off), (evtlog_buf_size 
- off), + "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first, + log->time, (log->time - prev_log->time), log->pid); + + for (i = 0; i < log->data_cnt; i++) + off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), + "%x ", log->data[i]); + + off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n"); + + spin_unlock_irqrestore(&sde_rot_xlock, flags); + + return off; +} + +/* + * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer + */ +static void sde_rot_evtlog_dump_all(void) +{ + char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX]; + + while (__sde_rot_evtlog_dump_calc_range()) { + sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX); + pr_info("%s", evtlog_buf); + } +} + +/* + * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump + * @inode: debugfs inode + * @file: file handler + */ +static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file) +{ + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + return 0; +} + +/* + * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump + * @file: file handler + * @buff: user buffer content for debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX]; + + if (__sde_rot_evtlog_dump_calc_range()) { + len = sde_rot_evtlog_dump_entry(evtlog_buf, + SDE_ROT_EVTLOG_BUF_MAX); + if (copy_to_user(buff, evtlog_buf, len)) + return -EFAULT; + *ppos += len; + } + + return len; +} + +/* + * sde_rot_evtlog_dump_write - debugfs write handler for evtlog dump + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_rot_evtlog_dump_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + sde_rot_evtlog_dump_all(); + + sde_rot_dump_reg_all(); + + if (sde_rot_dbg_evtlog.panic_on_err) + panic("evtlog_dump_write"); + + return count; +} + +/* + * sde_rot_evtlog_dump_helper - helper function for evtlog dump + * @dead: boolean indicates panic after dump + * @panic_name: Panic signature name show up in log + * @dump_rot: boolean indicates rotator register dump + * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump + */ +static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name, + bool dump_rot, bool dump_vbif_debug_bus) +{ + sde_rot_evtlog_dump_all(); + + if (dump_rot) + sde_rot_dump_reg_all(); + + if (dump_vbif_debug_bus) + sde_rot_dump_vbif_debug_bus( + sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump, + &sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump); + + if (dead) + panic(panic_name); +} + +/* + * sde_rot_evtlog_debug_work - schedule work function for evtlog dump + * @work: schedule work structure + */ +static void sde_rot_evtlog_debug_work(struct work_struct *work) +{ + sde_rot_evtlog_dump_helper( + sde_rot_dbg_evtlog.work_panic, + "evtlog_workitem", + sde_rot_dbg_evtlog.work_dump_reg, + sde_rot_dbg_evtlog.work_vbif_dbgbus); +} + +/* + * sde_rot_dump_panic - Issue evtlog dump and generic panic + */ +void sde_rot_dump_panic(void) +{ + sde_rot_evtlog_dump_all(); + sde_rot_dump_reg_all(); + + panic("sde_rotator"); +} + +/* + * sde_rot_evtlog_tout_handler - log dump timeout handler + * @queue: boolean indicate putting log dump into queue + * @name: function name having timeout + */ 
+void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...) +{ + int i; + bool dead = false; + bool dump_rot = false; + bool dump_vbif_dbgbus = false; + char *blk_name = NULL; + va_list args; + + if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT)) + return; + + if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work)) + return; + + va_start(args, name); + for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) { + blk_name = va_arg(args, char*); + if (IS_ERR_OR_NULL(blk_name)) + break; + + if (!strcmp(blk_name, "rot")) + dump_rot = true; + + if (!strcmp(blk_name, "vbif_dbg_bus")) + dump_vbif_dbgbus = true; + + if (!strcmp(blk_name, "panic")) + dead = true; + } + va_end(args); + + if (queue) { + /* schedule work to dump later */ + sde_rot_dbg_evtlog.work_panic = dead; + sde_rot_dbg_evtlog.work_dump_reg = dump_rot; + sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus; + schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work); + } else { + sde_rot_evtlog_dump_helper(dead, name, dump_rot, + dump_vbif_dbgbus); + } +} + +/* + * sde_rot_evtlog - log contents into memory for dump analysis + * @name: Name of function calling evtlog + * @line: line number of calling function + * @flag: Log control flag + */ +void sde_rot_evtlog(const char *name, int line, int flag, ...) +{ + unsigned long flags; + int i, val = 0; + va_list args; + struct tlog *log; + + if (!sde_rot_evtlog_is_enabled(flag)) + return; + + spin_lock_irqsave(&sde_rot_xlock, flags); + log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr]; + log->time = ktime_to_us(ktime_get()); + log->name = name; + log->line = line; + log->data_cnt = 0; + log->pid = current->pid; + + va_start(args, flag); + for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) { + + val = va_arg(args, int); + if (val == SDE_ROT_DATA_LIMITER) + break; + + log->data[i] = val; + } + va_end(args); + log->data_cnt = i; + sde_rot_dbg_evtlog.curr = + (sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY; + sde_rot_dbg_evtlog.last++; + + spin_unlock_irqrestore(&sde_rot_xlock, flags); +} + /* * sde_rotator_stat_show - Show statistics on read to this debugfs file * @s: Pointer to sequence file structure @@ -249,6 +862,58 @@ static int sde_rotator_core_create_debugfs( return 0; } +static const struct file_operations sde_rot_evtlog_fops = { + .open = sde_rot_evtlog_dump_open, + .read = sde_rot_evtlog_dump_read, + .write = sde_rot_evtlog_dump_write, +}; + +static int sde_rotator_evtlog_create_debugfs( + struct sde_rot_mgr *mgr, + struct dentry *debugfs_root) +{ + int i; + + sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root); + if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) { + pr_err("debugfs_create_dir fail, error %ld\n", + PTR_ERR(sde_rot_dbg_evtlog.evtlog)); + sde_rot_dbg_evtlog.evtlog = NULL; + return -ENODEV; + } + + INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work, + sde_rot_evtlog_debug_work); + sde_rot_dbg_evtlog.work_panic = false; + + for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++) + sde_rot_dbg_evtlog.logs[i].counter = i; + + debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL, + &sde_rot_evtlog_fops); + debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog, + &sde_rot_dbg_evtlog.evtlog_enable); + debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog, + &sde_rot_dbg_evtlog.panic_on_err); + debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog, + &sde_rot_dbg_evtlog.enable_reg_dump); + debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog, + &sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump); + + 
sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE; + sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC; + sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP; + sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump = + SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP; + + pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n", + sde_rot_dbg_evtlog.evtlog_enable, + sde_rot_dbg_evtlog.panic_on_err, + sde_rot_dbg_evtlog.enable_reg_dump); + + return 0; +} + /* * struct sde_rotator_stat_ops - processed statistics file operations */ @@ -335,6 +1000,12 @@ struct dentry *sde_rotator_create_debugfs( return NULL; } + if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) { + SDEROT_ERR("fail create evtlog debugfs\n"); + debugfs_remove_recursive(debugfs_root); + return NULL; + } + return debugfs_root; } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h index 2ed1b759f3e9..dcda54274fad 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h @@ -16,6 +16,32 @@ #include <linux/types.h> #include <linux/dcache.h> +#define SDE_ROT_DATA_LIMITER (-1) +#define SDE_ROT_EVTLOG_TOUT_DATA_LIMITER (NULL) + +enum sde_rot_dbg_reg_dump_flag { + SDE_ROT_DBG_DUMP_IN_LOG = BIT(0), + SDE_ROT_DBG_DUMP_IN_MEM = BIT(1), +}; + +enum sde_rot_dbg_evtlog_flag { + SDE_ROT_EVTLOG_DEFAULT = BIT(0), + SDE_ROT_EVTLOG_IOMMU = BIT(1), + SDE_ROT_EVTLOG_DBG = BIT(6), + SDE_ROT_EVTLOG_ALL = BIT(7) +}; + +#define SDEROT_EVTLOG(...) sde_rot_evtlog(__func__, __LINE__, \ + SDE_ROT_EVTLOG_DEFAULT, ##__VA_ARGS__, SDE_ROT_DATA_LIMITER) + +#define SDEROT_EVTLOG_TOUT_HANDLER(...) \ + sde_rot_evtlog_tout_handler(false, __func__, ##__VA_ARGS__, \ + SDE_ROT_EVTLOG_TOUT_DATA_LIMITER) + +void sde_rot_evtlog(const char *name, int line, int flag, ...); +void sde_rot_dump_panic(void); +void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...); + struct sde_rotator_device; #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index 76fd674d161d..548131393835 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -34,6 +34,7 @@ #include "sde_rotator_r3_hwio.h" #include "sde_rotator_r3_debug.h" #include "sde_rotator_trace.h" +#include "sde_rotator_debug.h" /* XIN mapping */ #define XIN_SSPP 0 @@ -198,6 +199,29 @@ static u32 sde_hw_rotator_output_pixfmts[] = { SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC, }; +static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = { + {0x214, 0x21c, 16, 1, 0x10}, /* arb clients */ + {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */ + {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */ +}; + +static struct sde_rot_regdump sde_rot_r3_regdump[] = { + { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ }, + { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ }, + { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ }, + { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100, + SDE_ROT_REGDUMP_READ }, + /* + * Need to perform a SW reset to REGDMA in order to access the + * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE. + * REGDMA RAM should be dump at last. 
+ */ + { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1, + SDE_ROT_REGDUMP_WRITE }, + { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000, + SDE_ROT_REGDUMP_READ }, +}; + /* Invalid software timestamp value for initialization */ #define SDE_REGDMA_SWTS_INVALID (~0) @@ -332,6 +356,8 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot) REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET), SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_FSM_STATE)); + + SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic"); } /** @@ -1484,6 +1510,10 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw, &entry->dst_buf); } + SDEROT_EVTLOG(flags, item->input.width, item->input.height, + item->output.width, item->output.height, + entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr); + if (mdata->default_ot_rd_limit) { struct sde_mdp_set_ot_params ot_params; @@ -1677,6 +1707,13 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot) set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map); } + mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3; + mdata->nrt_vbif_dbg_bus_size = + ARRAY_SIZE(nrt_vbif_dbg_bus_r3); + + mdata->regdump = sde_rot_r3_regdump; + mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump); + return 0; } @@ -2202,6 +2239,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr) clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk, CLKFLAG_NORETAIN_PERIPH); + mdata->sde_rot_hw = rot; return 0; error_hw_rev_init: if (rot->irq_num >= 0) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c index 6cc975e22cd4..7bbd8aa53342 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c @@ -33,6 +33,7 @@ #include "sde_rotator_util.h" #include "sde_rotator_io_util.h" #include "sde_rotator_smmu.h" +#include "sde_rotator_debug.h" #define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec" #define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec" @@ -332,6 +333,8 @@ int sde_smmu_ctrl(int enable) int rc = 0; mutex_lock(&sde_smmu_ref_cnt_lock); + SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt, + mdata->iommu_attached); SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n", __builtin_return_address(0), enable, mdata->iommu_ref_cnt, mdata->iommu_attached); @@ -407,9 +410,10 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain, sde_smmu = (struct sde_smmu_client *)token; - /* TODO: trigger rotator panic and dump */ - SDEROT_ERR("TODO: trigger rotator panic and dump, iova=0x%08lx\n", - iova); + /* trigger rotator panic and dump */ + SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova); + + sde_rot_dump_panic(); return rc; } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 566441e9c546..f0a3875a8f28 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -90,6 +90,7 @@ static void msm_comm_generate_session_error(struct msm_vidc_inst *inst); static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst); static void handle_session_error(enum hal_command_response cmd, void *data); static void msm_vidc_print_running_insts(struct msm_vidc_core *core); +static void msm_comm_print_debug_info(struct msm_vidc_inst *inst); bool msm_comm_turbo_session(struct msm_vidc_inst *inst) { @@ -905,7 +906,7 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst, 
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); dprintk(VIDC_ERR, "sess resp timeout can potentially crash the system\n"); - + msm_comm_print_debug_info(inst); BUG_ON(inst->core->resources.debug_timeout); rc = -EIO; } else { @@ -1601,6 +1602,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data) struct msm_vidc_cb_cmd_done *response = data; struct msm_vidc_core *core = NULL; struct hfi_device *hdev = NULL; + struct msm_vidc_inst *inst = NULL; int rc = 0; subsystem_crashed("venus"); @@ -1640,6 +1642,19 @@ static void handle_sys_error(enum hal_command_response cmd, void *data) dprintk(VIDC_ERR, "SYS_ERROR can potentially crash the system\n"); + /* + * For SYS_ERROR, there will not be any inst pointer. + * Just grab one of the inst from instances list and + * use it. + */ + + mutex_lock(&core->lock); + inst = list_first_entry(&core->instances, + struct msm_vidc_inst, list); + mutex_unlock(&core->lock); + + msm_comm_print_debug_info(inst); + BUG_ON(core->resources.debug_timeout); } @@ -2450,6 +2465,7 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst) call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); dprintk(VIDC_ERR, "ABORT timeout can potentially crash the system\n"); + msm_comm_print_debug_info(inst); BUG_ON(inst->core->resources.debug_timeout); rc = -EBUSY; @@ -2522,6 +2538,7 @@ int msm_comm_check_core_init(struct msm_vidc_core *core) { int rc = 0; struct hfi_device *hdev; + struct msm_vidc_inst *inst = NULL; mutex_lock(&core->lock); if (core->state >= VIDC_CORE_INIT_DONE) { @@ -2540,6 +2557,17 @@ int msm_comm_check_core_init(struct msm_vidc_core *core) call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); dprintk(VIDC_ERR, "SYS_INIT timeout can potentially crash the system\n"); + /* + * For SYS_INIT, there will not be any inst pointer. + * Just grab one of the inst from instances list and + * use it. + */ + inst = list_first_entry(&core->instances, + struct msm_vidc_inst, list); + + mutex_unlock(&core->lock); + msm_comm_print_debug_info(inst); + mutex_lock(&core->lock); BUG_ON(core->resources.debug_timeout); rc = -EIO; @@ -4017,6 +4045,8 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype, call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); dprintk(VIDC_ERR, "SESS_PROP timeout can potentially crash the system\n"); + if (inst->core->resources.debug_timeout) + msm_comm_print_debug_info(inst); BUG_ON(inst->core->resources.debug_timeout); rc = -ETIMEDOUT; @@ -5232,3 +5262,92 @@ int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a) exit: return rc; } + +void msm_comm_print_inst_info(struct msm_vidc_inst *inst) +{ + struct buffer_info *temp; + struct internal_buf *buf; + int i = 0; + bool is_decode = false; + enum vidc_ports port; + + if (!inst) { + dprintk(VIDC_ERR, "%s - invalid param %p\n", + __func__, inst); + return; + } + + is_decode = inst->session_type == MSM_VIDC_DECODER; + port = is_decode ? OUTPUT_PORT : CAPTURE_PORT; + dprintk(VIDC_ERR, + "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n", + is_decode ? "Decode" : "Encode", inst->fmts[port]->name, + inst->prop.height[port], inst->prop.width[port], + inst->prop.fps, inst->prop.bitrate, + !inst->bit_depth ? 
"8" : "10"); + + dprintk(VIDC_ERR, + "---Buffer details for inst: %p of type: %d---\n", + inst, inst->session_type); + mutex_lock(&inst->registeredbufs.lock); + dprintk(VIDC_ERR, "registered buffer list:\n"); + list_for_each_entry(temp, &inst->registeredbufs.list, list) + for (i = 0; i < temp->num_planes; i++) + dprintk(VIDC_ERR, + "type: %d plane: %d addr: %pa size: %d\n", + temp->type, i, &temp->device_addr[i], + temp->size[i]); + + mutex_unlock(&inst->registeredbufs.lock); + + mutex_lock(&inst->scratchbufs.lock); + dprintk(VIDC_ERR, "scratch buffer list:\n"); + list_for_each_entry(buf, &inst->scratchbufs.list, list) + dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", + buf->buffer_type, &buf->handle->device_addr, + buf->handle->size); + mutex_unlock(&inst->scratchbufs.lock); + + mutex_lock(&inst->persistbufs.lock); + dprintk(VIDC_ERR, "persist buffer list:\n"); + list_for_each_entry(buf, &inst->persistbufs.list, list) + dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", + buf->buffer_type, &buf->handle->device_addr, + buf->handle->size); + mutex_unlock(&inst->persistbufs.lock); + + mutex_lock(&inst->outputbufs.lock); + dprintk(VIDC_ERR, "dpb buffer list:\n"); + list_for_each_entry(buf, &inst->outputbufs.list, list) + dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", + buf->buffer_type, &buf->handle->device_addr, + buf->handle->size); + mutex_unlock(&inst->outputbufs.lock); +} + +static void msm_comm_print_debug_info(struct msm_vidc_inst *inst) +{ + struct msm_vidc_core *core = NULL; + struct msm_vidc_inst *temp = NULL; + + if (!inst || !inst->core) { + dprintk(VIDC_ERR, "%s - invalid param %p %p\n", + __func__, inst, core); + return; + } + core = inst->core; + + dprintk(VIDC_ERR, "Venus core frequency = %lu", + msm_comm_get_clock_rate(core)); + dprintk(VIDC_ERR, "Printing instance info that caused Error\n"); + msm_comm_print_inst_info(inst); + dprintk(VIDC_ERR, "Printing remaining instances info\n"); + mutex_lock(&core->lock); + list_for_each_entry(temp, &core->instances, list) { + /* inst already printed above. 
Hence don't repeat.*/ + if (temp == inst) + continue; + msm_comm_print_inst_info(temp); + } + mutex_unlock(&core->lock); +} diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 337760508eb1..eac7f658eb31 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -98,4 +98,5 @@ int msm_comm_ctrl_deinit(struct msm_vidc_inst *inst); void msm_comm_cleanup_internal_buffers(struct msm_vidc_inst *inst); int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a); bool msm_comm_turbo_session(struct msm_vidc_inst *inst); +void msm_comm_print_inst_info(struct msm_vidc_inst *inst); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index 1bdc5bf2c93d..25fccab99fb3 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -1238,11 +1238,6 @@ int msm_vidc_smmu_fault_handler(struct iommu_domain *domain, { struct msm_vidc_core *core = token; struct msm_vidc_inst *inst; - struct buffer_info *temp; - struct internal_buf *buf; - int i = 0; - bool is_decode = false; - enum vidc_ports port; if (!domain || !core) { dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n", @@ -1257,52 +1252,7 @@ int msm_vidc_smmu_fault_handler(struct iommu_domain *domain, mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { - is_decode = inst->session_type == MSM_VIDC_DECODER; - port = is_decode ? OUTPUT_PORT : CAPTURE_PORT; - dprintk(VIDC_ERR, - "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n", - is_decode ? "Decode" : "Encode", inst->fmts[port]->name, - inst->prop.height[port], inst->prop.width[port], - inst->prop.fps, inst->prop.bitrate, - !inst->bit_depth ? 
"8" : "10"); - - dprintk(VIDC_ERR, - "---Buffer details for inst: %pK of type: %d---\n", - inst, inst->session_type); - mutex_lock(&inst->registeredbufs.lock); - dprintk(VIDC_ERR, "registered buffer list:\n"); - list_for_each_entry(temp, &inst->registeredbufs.list, list) - for (i = 0; i < temp->num_planes; i++) - dprintk(VIDC_ERR, - "type: %d plane: %d addr: %pa size: %d\n", - temp->type, i, &temp->device_addr[i], - temp->size[i]); - - mutex_unlock(&inst->registeredbufs.lock); - - mutex_lock(&inst->scratchbufs.lock); - dprintk(VIDC_ERR, "scratch buffer list:\n"); - list_for_each_entry(buf, &inst->scratchbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); - mutex_unlock(&inst->scratchbufs.lock); - - mutex_lock(&inst->persistbufs.lock); - dprintk(VIDC_ERR, "persist buffer list:\n"); - list_for_each_entry(buf, &inst->persistbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); - mutex_unlock(&inst->persistbufs.lock); - - mutex_lock(&inst->outputbufs.lock); - dprintk(VIDC_ERR, "dpb buffer list:\n"); - list_for_each_entry(buf, &inst->outputbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); - mutex_unlock(&inst->outputbufs.lock); + msm_comm_print_inst_info(inst); } core->smmu_fault_handled = true; mutex_unlock(&core->lock); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h index 23d84e9f7f7a..f6120dc7d1d5 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h @@ -16,6 +16,7 @@ #define DT_PARSE #include <linux/of.h> #include "msm_vidc_resources.h" +#include "msm_vidc_common.h" void msm_vidc_free_platform_resources( struct msm_vidc_platform_resources *res); diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index 5a51fbcf9318..e0fb31de38ff 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -119,7 +119,7 @@ static inline bool __core_in_valid_state(struct venus_hfi_device *device) return device->state != VENUS_STATE_DEINIT; } -static void __dump_packet(u8 *packet) +static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level) { u32 c = 0, packet_size = *(u32 *)packet; const int row_size = 32; @@ -132,7 +132,7 @@ static void __dump_packet(u8 *packet) packet_size % row_size : row_size; hex_dump_to_buffer(packet + c * row_size, bytes_to_read, row_size, 4, row, sizeof(row), false); - dprintk(VIDC_PKT, "%s\n", row); + dprintk(log_level, "%s\n", row); } } @@ -342,7 +342,7 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet, if (msm_vidc_debug & VIDC_PKT) { dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo); - __dump_packet(packet); + __dump_packet(packet, VIDC_PKT); } packet_size_in_words = (*(u32 *)packet) >> 2; @@ -548,7 +548,7 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet, if (msm_vidc_debug & VIDC_PKT) { dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo); - __dump_packet(packet); + __dump_packet(packet, VIDC_PKT); } return rc; @@ -2517,7 +2517,6 @@ static int venus_hfi_session_clean(void *session) mutex_lock(&device->lock); __session_clean(sess_close); - __flush_debug_queue(device, NULL); mutex_unlock(&device->lock); return 0; @@ -3337,6 +3336,7 @@ 
static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet) { bool local_packet = false; enum vidc_msg_prio log_level = VIDC_FW; + unsigned int pending_packet_count = 0; if (!device) { dprintk(VIDC_ERR, "%s: Invalid params\n", __func__); @@ -3361,6 +3361,23 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet) log_level = VIDC_ERR; } + /* + * In FATAL situation, print all the pending messages in msg + * queue. This is useful for debugging. At this time, message + * queues may be corrupted. Hence don't trust them and just print + * first max_packets packets. + */ + + if (local_packet) { + dprintk(VIDC_ERR, + "Printing all pending messages in message Queue\n"); + while (!__iface_msgq_read(device, packet) && + pending_packet_count < max_packets) { + __dump_packet(packet, log_level); + pending_packet_count++; + } + } + while (!__iface_dbgq_read(device, packet)) { struct hfi_msg_sys_coverage_packet *pkt = (struct hfi_msg_sys_coverage_packet *) packet; diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c index 0c5754341991..b6a476cd882d 100644 --- a/drivers/mfd/wcd9xxx-irq.c +++ b/drivers/mfd/wcd9xxx-irq.c @@ -450,10 +450,22 @@ int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res) { int i, ret; u8 irq_level[wcd9xxx_res->num_irq_regs]; + struct irq_domain *domain; + struct device_node *pnode; mutex_init(&wcd9xxx_res->irq_lock); mutex_init(&wcd9xxx_res->nested_irq_lock); + pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node); + if (unlikely(!pnode)) + return -EINVAL; + + domain = irq_find_host(pnode); + if (unlikely(!domain)) + return -EINVAL; + + wcd9xxx_res->domain = domain; + wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res); if (!wcd9xxx_res->irq) { pr_warn("%s: irq driver is not yet initialized\n", __func__); @@ -553,7 +565,6 @@ void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res) if (wcd9xxx_res->irq) { disable_irq_wake(wcd9xxx_res->irq); free_irq(wcd9xxx_res->irq, wcd9xxx_res); - /* Release parent's of node */ wcd9xxx_res->irq = 0; wcd9xxx_irq_put_upstream_irq(wcd9xxx_res); } @@ -626,19 +637,14 @@ wcd9xxx_irq_add_domain(struct device_node *node, static struct wcd9xxx_irq_drv_data * wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res) { - struct device_node *pnode; struct irq_domain *domain; - pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node); - /* Shouldn't happen */ - if (unlikely(!pnode)) - return NULL; + domain = wcd9xxx_res->domain; - domain = irq_find_host(pnode); - if (unlikely(!domain)) + if (domain) + return domain->host_data; + else return NULL; - - return (struct wcd9xxx_irq_drv_data *)domain->host_data; } static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset) @@ -669,10 +675,6 @@ static unsigned int wcd9xxx_irq_get_upstream_irq( { struct wcd9xxx_irq_drv_data *data; - /* Hold parent's of node */ - if (!of_node_get(of_irq_find_parent(wcd9xxx_res->dev->of_node))) - return -EINVAL; - data = wcd9xxx_get_irq_drv_d(wcd9xxx_res); if (!data) { pr_err("%s: interrupt controller is not registerd\n", __func__); @@ -686,8 +688,7 @@ static unsigned int wcd9xxx_irq_get_upstream_irq( static void wcd9xxx_irq_put_upstream_irq( struct wcd9xxx_core_resource *wcd9xxx_res) { - /* Hold parent's of node */ - of_node_put(of_irq_find_parent(wcd9xxx_res->dev->of_node)); + wcd9xxx_res->domain = NULL; } static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq) diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig index 
a62297e913d2..9c73960f01ff 100644 --- a/drivers/misc/qcom/Kconfig +++ b/drivers/misc/qcom/Kconfig @@ -1,6 +1,7 @@ config MSM_QDSP6V2_CODECS bool "Audio QDSP6V2 APR support" depends on MSM_SMD + select SND_SOC_QDSP6V2 help Enable Audio codecs with APR IPC protocol support between application processor and QDSP6 for B-family. APR is @@ -9,6 +10,7 @@ config MSM_QDSP6V2_CODECS config MSM_ULTRASOUND bool "QDSP6V2 HW Ultrasound support" + select SND_SOC_QDSP6V2 help Enable HW Ultrasound support in QDSP6V2. QDSP6V2 can support HW encoder & decoder and diff --git a/drivers/misc/qcom/qdsp6v2/aac_in.c b/drivers/misc/qcom/qdsp6v2/aac_in.c index c9d5dbb0b313..7176c114f85b 100644 --- a/drivers/misc/qcom/qdsp6v2/aac_in.c +++ b/drivers/misc/qcom/qdsp6v2/aac_in.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -421,6 +421,8 @@ static long aac_in_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_aac_enc_config cfg; struct msm_audio_aac_enc_config32 cfg_32; + memset(&cfg_32, 0, sizeof(cfg_32)); + cmd = AUDIO_GET_AAC_ENC_CONFIG; rc = aac_in_ioctl_shared(file, cmd, &cfg); if (rc) { diff --git a/drivers/misc/qcom/qdsp6v2/amrnb_in.c b/drivers/misc/qcom/qdsp6v2/amrnb_in.c index eb92137f0671..1bb441bd2ff4 100644 --- a/drivers/misc/qcom/qdsp6v2/amrnb_in.c +++ b/drivers/misc/qcom/qdsp6v2/amrnb_in.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2010-2012, 2014 The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2012, 2014, 2016 The Linux Foundation. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -221,6 +222,8 @@ static long amrnb_in_compat_ioctl(struct file *file, struct msm_audio_amrnb_enc_config_v2 *amrnb_config; struct msm_audio_amrnb_enc_config_v2_32 amrnb_config_32; + memset(&amrnb_config_32, 0, sizeof(amrnb_config_32)); + amrnb_config = (struct msm_audio_amrnb_enc_config_v2 *)audio->enc_cfg; amrnb_config_32.band_mode = amrnb_config->band_mode; diff --git a/drivers/misc/qcom/qdsp6v2/amrwb_in.c b/drivers/misc/qcom/qdsp6v2/amrwb_in.c index 4cea3dc63389..4f94ed2673e6 100644 --- a/drivers/misc/qcom/qdsp6v2/amrwb_in.c +++ b/drivers/misc/qcom/qdsp6v2/amrwb_in.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2011-2012, 2014 The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2012, 2014, 2016 The Linux Foundation. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -216,6 +217,8 @@ static long amrwb_in_compat_ioctl(struct file *file, struct msm_audio_amrwb_enc_config *amrwb_config; struct msm_audio_amrwb_enc_config_32 amrwb_config_32; + memset(&amrwb_config_32, 0, sizeof(amrwb_config_32)); + amrwb_config = (struct msm_audio_amrwb_enc_config *)audio->enc_cfg; amrwb_config_32.band_mode = amrwb_config->band_mode; diff --git a/drivers/misc/qcom/qdsp6v2/audio_alac.c b/drivers/misc/qcom/qdsp6v2/audio_alac.c index 9748db30fac3..3de204c1ebc8 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_alac.c +++ b/drivers/misc/qcom/qdsp6v2/audio_alac.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -196,6 +196,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_alac_config *alac_config; struct msm_audio_alac_config_32 alac_config_32; + memset(&alac_config_32, 0, sizeof(alac_config_32)); + alac_config = (struct msm_audio_alac_config *)audio->codec_cfg; alac_config_32.frameLength = alac_config->frameLength; alac_config_32.compatVersion = diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c index ee5991177687..bfd730017d41 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c +++ b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -205,6 +205,10 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_amrwbplus_config_v2 *amrwbplus_config; struct msm_audio_amrwbplus_config_v2_32 amrwbplus_config_32; + + memset(&amrwbplus_config_32, 0, + sizeof(amrwbplus_config_32)); + amrwbplus_config = (struct msm_audio_amrwbplus_config_v2 *) audio->codec_cfg; diff --git a/drivers/misc/qcom/qdsp6v2/audio_ape.c b/drivers/misc/qcom/qdsp6v2/audio_ape.c index b4c2ddb947de..670ec555b8c6 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_ape.c +++ b/drivers/misc/qcom/qdsp6v2/audio_ape.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -180,6 +180,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_ape_config *ape_config; struct msm_audio_ape_config_32 ape_config_32; + memset(&ape_config_32, 0, sizeof(ape_config_32)); + ape_config = (struct msm_audio_ape_config *)audio->codec_cfg; ape_config_32.compatibleVersion = ape_config->compatibleVersion; ape_config_32.compressionLevel = diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c index 3a8834446ea4..3632fc2b961b 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c +++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c @@ -630,6 +630,8 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, case AUDIO_EFFECTS_GET_BUF_AVAIL32: { struct msm_hwacc_buf_avail32 buf_avail; + memset(&buf_avail, 0, sizeof(buf_avail)); + buf_avail.input_num_avail = atomic_read(&effects->in_count); buf_avail.output_num_avail = atomic_read(&effects->out_count); pr_debug("%s: write buf avail: %d, read buf avail: %d\n", diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c index a15fd87c7be8..91bbba176dfd 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c +++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -302,6 +302,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_aac_config *aac_config; struct msm_audio_aac_config32 aac_config_32; + memset(&aac_config_32, 0, sizeof(aac_config_32)); + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; aac_config_32.format = aac_config->format; aac_config_32.audio_object = aac_config->audio_object; diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c index 5c23da7f8857..f7ad8f61f2e7 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c +++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c @@ -1936,6 +1936,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, case AUDIO_GET_CONFIG_32: { struct msm_audio_config32 cfg_32; mutex_lock(&audio->lock); + memset(&cfg_32, 0, sizeof(cfg_32)); cfg_32.buffer_size = audio->pcm_cfg.buffer_size; cfg_32.buffer_count = audio->pcm_cfg.buffer_count; cfg_32.channel_count = audio->pcm_cfg.channel_count; @@ -2032,6 +2033,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, audio->buf_cfg.frames_per_buf); mutex_lock(&audio->lock); + memset(&cfg_32, 0, sizeof(cfg_32)); cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable; cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf; if (copy_to_user((void *)arg, &cfg_32, diff --git a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c index 8d96b99d8f84..21ad33b7fd5d 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c +++ b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -217,6 +217,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_wmapro_config *wmapro_config; struct msm_audio_wmapro_config32 wmapro_config_32; + memset(&wmapro_config_32, 0, sizeof(wmapro_config_32)); + wmapro_config = (struct msm_audio_wmapro_config *)audio->codec_cfg; wmapro_config_32.armdatareqthr = wmapro_config->armdatareqthr; diff --git a/drivers/misc/qcom/qdsp6v2/evrc_in.c b/drivers/misc/qcom/qdsp6v2/evrc_in.c index 2f931be226c6..aab8e27c0094 100644 --- a/drivers/misc/qcom/qdsp6v2/evrc_in.c +++ b/drivers/misc/qcom/qdsp6v2/evrc_in.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -224,6 +224,8 @@ static long evrc_in_compat_ioctl(struct file *file, struct msm_audio_evrc_enc_config32 cfg_32; struct msm_audio_evrc_enc_config *enc_cfg; + memset(&cfg_32, 0, sizeof(cfg_32)); + enc_cfg = audio->enc_cfg; cfg_32.cdma_rate = enc_cfg->cdma_rate; cfg_32.min_bit_rate = enc_cfg->min_bit_rate; diff --git a/drivers/misc/qcom/qdsp6v2/qcelp_in.c b/drivers/misc/qcom/qdsp6v2/qcelp_in.c index b5d5ad113722..aabf5d33a507 100644 --- a/drivers/misc/qcom/qdsp6v2/qcelp_in.c +++ b/drivers/misc/qcom/qdsp6v2/qcelp_in.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -225,6 +225,8 @@ static long qcelp_in_compat_ioctl(struct file *file, struct msm_audio_qcelp_enc_config32 cfg_32; struct msm_audio_qcelp_enc_config *enc_cfg; + memset(&cfg_32, 0, sizeof(cfg_32)); + enc_cfg = (struct msm_audio_qcelp_enc_config *)audio->enc_cfg; cfg_32.cdma_rate = enc_cfg->cdma_rate; cfg_32.min_bit_rate = enc_cfg->min_bit_rate; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 14ed8e775e6a..0542ba51445f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2337,9 +2337,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) switch (host->timing) { /* HS400 tuning is done in HS200 mode */ case MMC_TIMING_MMC_HS400: - err = -EINVAL; - goto out_unlock; - + if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) { + err = -EINVAL; + goto out_unlock; + } case MMC_TIMING_MMC_HS200: /* * Periodic re-tuning for HS400 is not expected to be needed, so diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 6c42ca14d2fd..9753fbb596cf 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include <linux/gpio.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinmux.h> @@ -39,6 +40,8 @@ #define PMIC_GPIO_SUBTYPE_GPIOC_4CH 0x5 #define PMIC_GPIO_SUBTYPE_GPIO_8CH 0x9 #define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd +#define PMIC_GPIO_SUBTYPE_GPIO_LV 0x10 +#define PMIC_GPIO_SUBTYPE_GPIO_MV 0x11 #define PMIC_MPP_REG_RT_STS 0x10 #define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1 @@ -47,8 +50,11 @@ #define PMIC_GPIO_REG_MODE_CTL 0x40 #define PMIC_GPIO_REG_DIG_VIN_CTL 0x41 #define PMIC_GPIO_REG_DIG_PULL_CTL 0x42 +#define PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL 0x44 +#define PMIC_GPIO_REG_DIG_IN_CTL 0x43 #define PMIC_GPIO_REG_DIG_OUT_CTL 0x45 #define PMIC_GPIO_REG_EN_CTL 0x46 +#define PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL 0x4A /* PMIC_GPIO_REG_MODE_CTL */ #define PMIC_GPIO_REG_MODE_VALUE_SHIFT 0x1 @@ -57,6 +63,13 @@ #define PMIC_GPIO_REG_MODE_DIR_SHIFT 4 #define PMIC_GPIO_REG_MODE_DIR_MASK 0x7 +#define PMIC_GPIO_MODE_DIGITAL_INPUT 0 +#define PMIC_GPIO_MODE_DIGITAL_OUTPUT 1 +#define PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT 2 +#define PMIC_GPIO_MODE_ANALOG_PASS_THRU 3 + +#define PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK 0x3 + /* PMIC_GPIO_REG_DIG_VIN_CTL */ #define PMIC_GPIO_REG_VIN_SHIFT 0 #define PMIC_GPIO_REG_VIN_MASK 0x7 @@ -68,6 +81,16 @@ #define PMIC_GPIO_PULL_DOWN 4 #define PMIC_GPIO_PULL_DISABLE 5 +/* PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL for LV/MV */ +#define PMIC_GPIO_LV_MV_OUTPUT_INVERT 0x80 +#define PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT 7 +#define PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK 0xF + +/* PMIC_GPIO_REG_DIG_IN_CTL */ +#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN 0x80 +#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK 0x7 +#define PMIC_GPIO_DIG_IN_DTEST_SEL_MASK 0xf + /* PMIC_GPIO_REG_DIG_OUT_CTL */ #define PMIC_GPIO_REG_OUT_STRENGTH_SHIFT 0 #define PMIC_GPIO_REG_OUT_STRENGTH_MASK 0x3 @@ -87,9 +110,29 @@ #define PMIC_GPIO_PHYSICAL_OFFSET 1 +/* PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL */ +#define PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK 0x3 + /* Qualcomm specific pin configurations */ #define PMIC_GPIO_CONF_PULL_UP (PIN_CONFIG_END + 1) #define PMIC_GPIO_CONF_STRENGTH (PIN_CONFIG_END + 2) +#define PMIC_GPIO_CONF_ATEST (PIN_CONFIG_END + 3) +#define PMIC_GPIO_CONF_DTEST_BUFFER (PIN_CONFIG_END + 4) + +/* The index of each function in pmic_gpio_functions[] array */ +enum pmic_gpio_func_index { + PMIC_GPIO_FUNC_INDEX_NORMAL = 0x00, + PMIC_GPIO_FUNC_INDEX_PAIRED = 0x01, + PMIC_GPIO_FUNC_INDEX_FUNC1 = 0x02, + PMIC_GPIO_FUNC_INDEX_FUNC2 = 0x03, + PMIC_GPIO_FUNC_INDEX_FUNC3 = 0x04, + PMIC_GPIO_FUNC_INDEX_FUNC4 = 0x05, + PMIC_GPIO_FUNC_INDEX_DTEST1 = 0x06, + PMIC_GPIO_FUNC_INDEX_DTEST2 = 0x07, + PMIC_GPIO_FUNC_INDEX_DTEST3 = 0x08, + PMIC_GPIO_FUNC_INDEX_DTEST4 = 0x09, + PMIC_GPIO_FUNC_INDEX_ANALOG = 0x10, +}; /** * struct pmic_gpio_pad - keep current GPIO settings @@ -101,12 +144,16 @@ * open-drain or open-source mode. * @output_enabled: Set to true if GPIO output logic is enabled. * @input_enabled: Set to true if GPIO input buffer logic is enabled. + * @lv_mv_type: Set to true if GPIO subtype is GPIO_LV(0x10) or GPIO_MV(0x11). * @num_sources: Number of power-sources supported by this GPIO. * @power_source: Current power-source used. * @buffer_type: Push-pull, open-drain or open-source. 
* @pullup: Constant current which flow trough GPIO output buffer. * @strength: No, Low, Medium, High * @function: See pmic_gpio_functions[] + * @atest: the ATEST selection for GPIO analog-pass-through mode + * @dtest_buffer: the DTEST buffer selection for digital input mode, + * the default value is INT_MAX if not used. */ struct pmic_gpio_pad { u16 base; @@ -116,12 +163,15 @@ struct pmic_gpio_pad { bool have_buffer; bool output_enabled; bool input_enabled; + bool lv_mv_type; unsigned int num_sources; unsigned int power_source; unsigned int buffer_type; unsigned int pullup; unsigned int strength; unsigned int function; + unsigned int atest; + unsigned int dtest_buffer; }; struct pmic_gpio_state { @@ -134,12 +184,15 @@ struct pmic_gpio_state { static const struct pinconf_generic_params pmic_gpio_bindings[] = { {"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP, 0}, {"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH, 0}, + {"qcom,atest", PMIC_GPIO_CONF_ATEST, 0}, + {"qcom,dtest-buffer", PMIC_GPIO_CONF_DTEST_BUFFER, 0}, }; #ifdef CONFIG_DEBUG_FS static const struct pin_config_item pmic_conf_items[ARRAY_SIZE(pmic_gpio_bindings)] = { PCONFDUMP(PMIC_GPIO_CONF_PULL_UP, "pull up strength", NULL, true), PCONFDUMP(PMIC_GPIO_CONF_STRENGTH, "drive-strength", NULL, true), + PCONFDUMP(PMIC_GPIO_CONF_ATEST, "atest", NULL, true), }; #endif @@ -151,11 +204,25 @@ static const char *const pmic_gpio_groups[] = { "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", }; +/* + * Treat LV/MV GPIO analog-pass-through mode as a function, add it + * to the end of the function list. Add placeholder for the reserved + * functions defined in LV/MV OUTPUT_SOURCE_SEL register. + */ static const char *const pmic_gpio_functions[] = { - PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED, - PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2, - PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2, - PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4, + [PMIC_GPIO_FUNC_INDEX_NORMAL] = PMIC_GPIO_FUNC_NORMAL, + [PMIC_GPIO_FUNC_INDEX_PAIRED] = PMIC_GPIO_FUNC_PAIRED, + [PMIC_GPIO_FUNC_INDEX_FUNC1] = PMIC_GPIO_FUNC_FUNC1, + [PMIC_GPIO_FUNC_INDEX_FUNC2] = PMIC_GPIO_FUNC_FUNC2, + [PMIC_GPIO_FUNC_INDEX_FUNC3] = PMIC_GPIO_FUNC_FUNC3, + [PMIC_GPIO_FUNC_INDEX_FUNC4] = PMIC_GPIO_FUNC_FUNC4, + [PMIC_GPIO_FUNC_INDEX_DTEST1] = PMIC_GPIO_FUNC_DTEST1, + [PMIC_GPIO_FUNC_INDEX_DTEST2] = PMIC_GPIO_FUNC_DTEST2, + [PMIC_GPIO_FUNC_INDEX_DTEST3] = PMIC_GPIO_FUNC_DTEST3, + [PMIC_GPIO_FUNC_INDEX_DTEST4] = PMIC_GPIO_FUNC_DTEST4, + "reserved-a", "reserved-b", "reserved-c", + "reserved-d", "reserved-e", "reserved-f", + [PMIC_GPIO_FUNC_INDEX_ANALOG] = PMIC_GPIO_FUNC_ANALOG, }; static inline struct pmic_gpio_state *to_gpio_state(struct gpio_chip *chip) @@ -252,21 +319,74 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function, pad->function = function; - val = 0; + val = PMIC_GPIO_MODE_DIGITAL_INPUT; if (pad->output_enabled) { if (pad->input_enabled) - val = 2; + val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT; else - val = 1; + val = PMIC_GPIO_MODE_DIGITAL_OUTPUT; + } + + if (function > PMIC_GPIO_FUNC_INDEX_DTEST4 && + function < PMIC_GPIO_FUNC_INDEX_ANALOG) { + pr_err("reserved function: %s hasn't been enabled\n", + pmic_gpio_functions[function]); + return -EINVAL; } - val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; - val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; - val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + if (pad->lv_mv_type) { + if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) { + val = PMIC_GPIO_MODE_ANALOG_PASS_THRU; + ret = 
pmic_gpio_write(state, pad, + PMIC_GPIO_REG_MODE_CTL, val); + if (ret < 0) + return ret; - ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val); - if (ret < 0) - return ret; + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL, + pad->atest); + if (ret < 0) + return ret; + } else { + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_MODE_CTL, val); + if (ret < 0) + return ret; + + val = pad->out_value + << PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT; + val |= pad->function + & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK; + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val); + if (ret < 0) + return ret; + } + } else { + /* + * GPIO not of LV/MV subtype doesn't have "func3", "func4" + * "analog" functions, and "dtest1" to "dtest4" functions + * have register value 2 bits lower than the function index + * in pmic_gpio_functions[]. + */ + if (function == PMIC_GPIO_FUNC_INDEX_FUNC3 + || function == PMIC_GPIO_FUNC_INDEX_FUNC4 + || function == PMIC_GPIO_FUNC_INDEX_ANALOG) { + return -EINVAL; + } else if (function >= PMIC_GPIO_FUNC_INDEX_DTEST1 && + function <= PMIC_GPIO_FUNC_INDEX_DTEST4) { + pad->function -= (PMIC_GPIO_FUNC_INDEX_DTEST1 - + PMIC_GPIO_FUNC_INDEX_FUNC3); + } + + val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; + val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; + val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + + ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val); + if (ret < 0) + return ret; + } val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT; @@ -326,6 +446,12 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev, case PMIC_GPIO_CONF_STRENGTH: arg = pad->strength; break; + case PMIC_GPIO_CONF_ATEST: + arg = pad->atest; + break; + case PMIC_GPIO_CONF_DTEST_BUFFER: + arg = pad->dtest_buffer; + break; default: return -EINVAL; } @@ -379,7 +505,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, pad->is_enabled = false; break; case PIN_CONFIG_POWER_SOURCE: - if (arg > pad->num_sources) + if (arg >= pad->num_sources) return -EINVAL; pad->power_source = arg; break; @@ -400,6 +526,18 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, return -EINVAL; pad->strength = arg; break; + case PMIC_GPIO_CONF_ATEST: + if (arg > PMIC_GPIO_AOUT_ATEST4) + return -EINVAL; + pad->atest = arg; + break; + case PMIC_GPIO_CONF_DTEST_BUFFER: + if ((pad->lv_mv_type && arg > PMIC_GPIO_DIN_DTEST4) + || (!pad->lv_mv_type && arg > + PMIC_GPIO_DIG_IN_DTEST_SEL_MASK)) + return -EINVAL; + pad->dtest_buffer = arg; + break; default: return -EINVAL; } @@ -424,19 +562,64 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, if (ret < 0) return ret; - val = 0; + val = PMIC_GPIO_MODE_DIGITAL_INPUT; if (pad->output_enabled) { if (pad->input_enabled) - val = 2; + val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT; else - val = 1; + val = PMIC_GPIO_MODE_DIGITAL_OUTPUT; + } + + if (pad->dtest_buffer != INT_MAX) { + val = pad->dtest_buffer; + if (pad->lv_mv_type) + val |= PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN; + + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_DIG_IN_CTL, val); + if (ret < 0) + return ret; } - val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; - val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; - val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + if (pad->lv_mv_type) { + if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) { + val = PMIC_GPIO_MODE_ANALOG_PASS_THRU; + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_MODE_CTL, val); + if 
(ret < 0) + return ret; + + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL, + pad->atest); + if (ret < 0) + return ret; + } else { + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_MODE_CTL, val); + if (ret < 0) + return ret; + + val = pad->out_value + << PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT; + val |= pad->function + & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK; + ret = pmic_gpio_write(state, pad, + PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val); + if (ret < 0) + return ret; + } + } else { + val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; + val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; + val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + + ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val); + if (ret < 0) + return ret; + } - return pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val); + return ret; } static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, @@ -444,7 +627,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, { struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev); struct pmic_gpio_pad *pad; - int ret, val; + int ret, val, function; static const char *const biases[] = { "pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA", @@ -475,14 +658,28 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; pad->out_value = ret; } + /* + * For GPIO not of LV/MV subtypes, the register value of + * the function mapping from "dtest1" to "dtest4" is 2 bits + * lower than the function index in pmic_gpio_functions[]. + */ + if (!pad->lv_mv_type && + pad->function >= PMIC_GPIO_FUNC_INDEX_FUNC3) { + function = pad->function + (PMIC_GPIO_FUNC_INDEX_DTEST1 + - PMIC_GPIO_FUNC_INDEX_FUNC3); + } else { + function = pad->function; + } seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); - seq_printf(s, " %-7s", pmic_gpio_functions[pad->function]); + seq_printf(s, " %-7s", pmic_gpio_functions[function]); seq_printf(s, " vin-%d", pad->power_source); seq_printf(s, " %-27s", biases[pad->pullup]); seq_printf(s, " %-10s", buffer_types[pad->buffer_type]); seq_printf(s, " %-4s", pad->out_value ? 
"high" : "low"); seq_printf(s, " %-7s", strengths[pad->strength]); + if (pad->dtest_buffer != INT_MAX) + seq_printf(s, " dtest buffer %d", pad->dtest_buffer); } } @@ -622,40 +819,72 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, case PMIC_GPIO_SUBTYPE_GPIOC_8CH: pad->num_sources = 8; break; + case PMIC_GPIO_SUBTYPE_GPIO_LV: + pad->num_sources = 1; + pad->have_buffer = true; + pad->lv_mv_type = true; + break; + case PMIC_GPIO_SUBTYPE_GPIO_MV: + pad->num_sources = 2; + pad->have_buffer = true; + pad->lv_mv_type = true; + break; default: dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype); return -ENODEV; } - val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL); - if (val < 0) - return val; + if (pad->lv_mv_type) { + val = pmic_gpio_read(state, pad, + PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL); + if (val < 0) + return val; + + pad->out_value = !!(val & PMIC_GPIO_LV_MV_OUTPUT_INVERT); + pad->function = val & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK; - pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL); + if (val < 0) + return val; + + dir = val & PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK; + } else { + val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL); + if (val < 0) + return val; + + pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT; + + dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT; + dir &= PMIC_GPIO_REG_MODE_DIR_MASK; + pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; + pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK; + } - dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT; - dir &= PMIC_GPIO_REG_MODE_DIR_MASK; switch (dir) { - case 0: + case PMIC_GPIO_MODE_DIGITAL_INPUT: pad->input_enabled = true; pad->output_enabled = false; break; - case 1: + case PMIC_GPIO_MODE_DIGITAL_OUTPUT: pad->input_enabled = false; pad->output_enabled = true; break; - case 2: + case PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT: pad->input_enabled = true; pad->output_enabled = true; break; + case PMIC_GPIO_MODE_ANALOG_PASS_THRU: + if (pad->lv_mv_type) + pad->function = PMIC_GPIO_FUNC_INDEX_ANALOG; + else + return -ENODEV; + break; default: dev_err(state->dev, "unknown GPIO direction\n"); return -ENODEV; } - pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; - pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK; - val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL); if (val < 0) return val; @@ -670,6 +899,17 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, pad->pullup = val >> PMIC_GPIO_REG_PULL_SHIFT; pad->pullup &= PMIC_GPIO_REG_PULL_MASK; + val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_IN_CTL); + if (val < 0) + return val; + + if (pad->lv_mv_type && (val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN)) + pad->dtest_buffer = val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK; + else if (!pad->lv_mv_type) + pad->dtest_buffer = val & PMIC_GPIO_DIG_IN_DTEST_SEL_MASK; + else + pad->dtest_buffer = INT_MAX; + val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL); if (val < 0) return val; @@ -680,6 +920,13 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, pad->buffer_type = val >> PMIC_GPIO_REG_OUT_TYPE_SHIFT; pad->buffer_type &= PMIC_GPIO_REG_OUT_TYPE_MASK; + if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) { + val = pmic_gpio_read(state, pad, + PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL); + if (val < 0) + return val; + pad->atest = val & PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK; + } /* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */ pad->is_enabled = true; return 0; @@ -693,18 +940,19 @@ static int 
pmic_gpio_probe(struct platform_device *pdev) struct pmic_gpio_pad *pad, *pads; struct pmic_gpio_state *state; int ret, npins, i; - u32 res[2]; + u32 reg; - ret = of_property_read_u32_array(dev->of_node, "reg", res, 2); + ret = of_property_read_u32(dev->of_node, "reg", ®); if (ret < 0) { - dev_err(dev, "missing base address and/or range"); + dev_err(dev, "missing base address"); return ret; } - npins = res[1] / PMIC_GPIO_ADDRESS_RANGE; - + npins = platform_irq_count(pdev); if (!npins) return -EINVAL; + if (npins < 0) + return npins; BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups)); @@ -752,7 +1000,7 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (pad->irq < 0) return pad->irq; - pad->base = res[0] + i * PMIC_GPIO_ADDRESS_RANGE; + pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE; ret = pmic_gpio_populate(state, pad); if (ret < 0) @@ -805,6 +1053,7 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */ { .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */ { .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */ + { .compatible = "qcom,spmi-gpio" }, /* Generic */ { }, }; diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 9ce0e30e33e8..73547abc5cf5 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include <linux/gpio.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinmux.h> @@ -87,6 +88,10 @@ #define PMIC_MPP_REG_AIN_ROUTE_SHIFT 0 #define PMIC_MPP_REG_AIN_ROUTE_MASK 0x7 +/* PMIC_MPP_REG_SINK_CTL */ +#define PMIC_MPP_REG_CURRENT_SINK_MASK 0x7 +#define MPP_CURRENT_SINK_MA_STEP_SIZE 5 + #define PMIC_MPP_MODE_DIGITAL_INPUT 0 #define PMIC_MPP_MODE_DIGITAL_OUTPUT 1 #define PMIC_MPP_MODE_DIGITAL_BIDIR 2 @@ -106,6 +111,7 @@ #define PMIC_MPP_CONF_ANALOG_LEVEL (PIN_CONFIG_END + 2) #define PMIC_MPP_CONF_DTEST_SELECTOR (PIN_CONFIG_END + 3) #define PMIC_MPP_CONF_PAIRED (PIN_CONFIG_END + 4) +#define PMIC_MPP_CONF_DTEST_BUFFER (PIN_CONFIG_END + 5) /** * struct pmic_mpp_pad - keep current MPP settings @@ -124,6 +130,7 @@ * @function: See pmic_mpp_functions[]. 
* @drive_strength: Amount of current in sink mode * @dtest: DTEST route selector + * @dtest_buffer: the DTEST buffer selection for digital input mode */ struct pmic_mpp_pad { u16 base; @@ -141,6 +148,7 @@ struct pmic_mpp_pad { unsigned int function; unsigned int drive_strength; unsigned int dtest; + unsigned int dtest_buffer; }; struct pmic_mpp_state { @@ -155,6 +163,7 @@ static const struct pinconf_generic_params pmic_mpp_bindings[] = { {"qcom,analog-level", PMIC_MPP_CONF_ANALOG_LEVEL, 0}, {"qcom,dtest", PMIC_MPP_CONF_DTEST_SELECTOR, 0}, {"qcom,paired", PMIC_MPP_CONF_PAIRED, 0}, + {"qcom,dtest-buffer", PMIC_MPP_CONF_DTEST_BUFFER, 0}, }; #ifdef CONFIG_DEBUG_FS @@ -163,6 +172,7 @@ static const struct pin_config_item pmic_conf_items[] = { PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true), PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true), PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false), + PCONFDUMP(PMIC_MPP_CONF_DTEST_BUFFER, "dtest buffer", NULL, true), }; #endif @@ -392,6 +402,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, case PMIC_MPP_CONF_ANALOG_LEVEL: arg = pad->aout_level; break; + case PMIC_MPP_CONF_DTEST_BUFFER: + arg = pad->dtest_buffer; + break; default: return -EINVAL; } @@ -457,7 +470,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, pad->dtest = arg; break; case PIN_CONFIG_DRIVE_STRENGTH: - arg = pad->drive_strength; + pad->drive_strength = arg; break; case PMIC_MPP_CONF_AMUX_ROUTE: if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4) @@ -470,6 +483,15 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, case PMIC_MPP_CONF_PAIRED: pad->paired = !!arg; break; + case PMIC_MPP_CONF_DTEST_BUFFER: + /* + * 0xf is the max value which selects + * 4 dtest rails simultaneously + */ + if (arg > 0xf) + return -EINVAL; + pad->dtest_buffer = arg; + break; default: return -EINVAL; } @@ -481,6 +503,11 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, if (ret < 0) return ret; + val = pad->dtest_buffer; + ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_IN_CTL, val); + if (ret < 0) + return ret; + val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT; ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val); @@ -497,6 +524,16 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, if (ret < 0) return ret; + val = 0; + if (pad->drive_strength >= MPP_CURRENT_SINK_MA_STEP_SIZE) + val = DIV_ROUND_UP(pad->drive_strength, + MPP_CURRENT_SINK_MA_STEP_SIZE) - 1; + + val &= PMIC_MPP_REG_CURRENT_SINK_MASK; + ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, val); + if (ret < 0) + return ret; + ret = pmic_mpp_write_mode_ctl(state, pad); if (ret < 0) return ret; @@ -544,6 +581,8 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev, seq_printf(s, " dtest%d", pad->dtest); if (pad->paired) seq_puts(s, " paired"); + if (pad->dtest_buffer) + seq_printf(s, " dtest buffer %d", pad->dtest_buffer); } } @@ -741,7 +780,7 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state, sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK; if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST) - pad->dtest = sel + 1; + pad->dtest = sel - PMIC_MPP_SELECTOR_DTEST_FIRST + 1; else if (sel == PMIC_MPP_SELECTOR_PAIRED) pad->paired = true; @@ -752,6 +791,12 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state, pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT; pad->power_source &= PMIC_MPP_REG_VIN_MASK; + val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_IN_CTL); + if (val < 0) 
+ return val; + + pad->dtest_buffer = val; + val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL); if (val < 0) return val; @@ -770,7 +815,8 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state, if (val < 0) return val; - pad->drive_strength = val; + val &= PMIC_MPP_REG_CURRENT_SINK_MASK; + pad->drive_strength = (val + 1) * MPP_CURRENT_SINK_MA_STEP_SIZE; val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL); if (val < 0) @@ -795,17 +841,19 @@ static int pmic_mpp_probe(struct platform_device *pdev) struct pmic_mpp_pad *pad, *pads; struct pmic_mpp_state *state; int ret, npins, i; - u32 res[2]; + u32 reg; - ret = of_property_read_u32_array(dev->of_node, "reg", res, 2); + ret = of_property_read_u32(dev->of_node, "reg", ®); if (ret < 0) { - dev_err(dev, "missing base address and/or range"); + dev_err(dev, "missing base address"); return ret; } - npins = res[1] / PMIC_MPP_ADDRESS_RANGE; + npins = platform_irq_count(pdev); if (!npins) return -EINVAL; + if (npins < 0) + return npins; BUG_ON(npins > ARRAY_SIZE(pmic_mpp_groups)); @@ -854,7 +902,7 @@ static int pmic_mpp_probe(struct platform_device *pdev) if (pad->irq < 0) return pad->irq; - pad->base = res[0] + i * PMIC_MPP_ADDRESS_RANGE; + pad->base = reg + i * PMIC_MPP_ADDRESS_RANGE; ret = pmic_mpp_populate(state, pad); if (ret < 0) @@ -908,6 +956,7 @@ static const struct of_device_id pmic_mpp_of_match[] = { { .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */ { .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */ { .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */ + { .compatible = "qcom,spmi-mpp" }, /* Generic */ { }, }; diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index 19a3c3bc2f1f..e51176ec83d2 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -23,6 +23,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <dt-bindings/pinctrl/qcom,pmic-gpio.h> @@ -650,11 +651,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl, } static const struct of_device_id pm8xxx_gpio_of_match[] = { - { .compatible = "qcom,pm8018-gpio", .data = (void *)6 }, - { .compatible = "qcom,pm8038-gpio", .data = (void *)12 }, - { .compatible = "qcom,pm8058-gpio", .data = (void *)40 }, - { .compatible = "qcom,pm8917-gpio", .data = (void *)38 }, - { .compatible = "qcom,pm8921-gpio", .data = (void *)44 }, + { .compatible = "qcom,pm8018-gpio" }, + { .compatible = "qcom,pm8038-gpio" }, + { .compatible = "qcom,pm8058-gpio" }, + { .compatible = "qcom,pm8917-gpio" }, + { .compatible = "qcom,pm8921-gpio" }, + { .compatible = "qcom,ssbi-gpio" }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match); @@ -665,14 +667,19 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) struct pinctrl_pin_desc *pins; struct pm8xxx_gpio *pctrl; int ret; - int i; + int i, npins; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); + npins = platform_irq_count(pdev); + if (!npins) + return -EINVAL; + if (npins < 0) + return npins; + pctrl->npins = npins; pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index b868ef1766a0..e9f01de51e18 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -23,6 +23,7 @@ 
#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <dt-bindings/pinctrl/qcom,pmic-mpp.h> @@ -741,11 +742,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl, } static const struct of_device_id pm8xxx_mpp_of_match[] = { - { .compatible = "qcom,pm8018-mpp", .data = (void *)6 }, - { .compatible = "qcom,pm8038-mpp", .data = (void *)6 }, - { .compatible = "qcom,pm8917-mpp", .data = (void *)10 }, - { .compatible = "qcom,pm8821-mpp", .data = (void *)4 }, - { .compatible = "qcom,pm8921-mpp", .data = (void *)12 }, + { .compatible = "qcom,pm8018-mpp" }, + { .compatible = "qcom,pm8038-mpp" }, + { .compatible = "qcom,pm8917-mpp" }, + { .compatible = "qcom,pm8821-mpp" }, + { .compatible = "qcom,pm8921-mpp" }, + { .compatible = "qcom,ssbi-mpp" }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match); @@ -756,14 +758,19 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) struct pinctrl_pin_desc *pins; struct pm8xxx_mpp *pctrl; int ret; - int i; + int i, npins; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); + npins = platform_irq_count(pdev); + if (!npins) + return -EINVAL; + if (npins < 0) + return npins; + pctrl->npins = npins; pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c index 6addf14d7126..a02247d3e938 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -2435,6 +2435,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params) int res; struct ipa_rm_create_params mhi_prod_params; struct ipa_rm_create_params mhi_cons_params; + struct ipa_rm_perf_profile profile; IPA_MHI_FUNC_ENTRY(); @@ -2506,6 +2507,14 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params) goto fail_create_rm_prod; } + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 1000; + res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile); + if (res) { + IPA_MHI_ERR("fail to set profile to MHI_PROD\n"); + goto fail_perf_rm_prod; + } + /* Create CONS in IPA RM */ memset(&mhi_cons_params, 0, sizeof(mhi_cons_params)); mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS; @@ -2518,6 +2527,14 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params) goto fail_create_rm_cons; } + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 1000; + res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile); + if (res) { + IPA_MHI_ERR("fail to set profile to MHI_CONS\n"); + goto fail_perf_rm_cons; + } + /* Initialize uC interface */ ipa_uc_mhi_init(ipa_mhi_uc_ready_cb, ipa_mhi_uc_wakeup_request_cb); @@ -2530,7 +2547,10 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params) IPA_MHI_FUNC_EXIT(); return 0; +fail_perf_rm_cons: + ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS); fail_create_rm_cons: +fail_perf_rm_prod: ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); fail_create_rm_prod: destroy_workqueue(ipa_mhi_client_ctx->wq); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index c0ac544fa271..695c8bc4cbc0 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -2397,7 +2397,7 @@ begin: if (skb->len < IPA_PKT_STATUS_SIZE) { WARN_ON(sys->prev_skb != 
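A minimal probe-time sketch of the pin-counting pattern used in the probe hunks above, where the number of pins is derived from the number of IRQs the platform device carries rather than from match data or a register range; the helper name is hypothetical.

#include <linux/platform_device.h>

static int example_count_pins(struct platform_device *pdev)
{
        int npins = platform_irq_count(pdev);

        if (npins < 0)          /* may be -EPROBE_DEFER; propagate it */
                return npins;
        if (!npins)             /* no interrupts described: nothing to register */
                return -EINVAL;

        return npins;
}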
NULL); IPADBG("status straddles buffer\n"); - sys->prev_skb = skb; + sys->prev_skb = skb_copy(skb, GFP_KERNEL); sys->len_partial = skb->len; return rc; } @@ -2482,7 +2482,7 @@ begin: !status->exception) { WARN_ON(sys->prev_skb != NULL); IPADBG("Ins header in next buffer\n"); - sys->prev_skb = skb; + sys->prev_skb = skb_copy(skb, GFP_KERNEL); sys->len_partial = skb->len; return rc; } diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 2420dd78b4c0..1be9a6745531 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -2394,18 +2394,20 @@ static void rmnet_ipa_get_stats_and_update(bool reset) } rc = ipa_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } - if (!rc) { - memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); - msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; - msg_meta.msg_len = - sizeof(struct ipa_get_data_stats_resp_msg_v01); - rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); - if (rc) { - IPAWANERR("ipa2_send_msg failed: %d\n", rc); - kfree(resp); - return; - } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; } } @@ -2454,18 +2456,20 @@ static void rmnet_ipa_get_network_stats_and_update(void) req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id; rc = ipa_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc); + kfree(resp); + return; + } - if (!rc) { - memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); - msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; - msg_meta.msg_len = - sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); - rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); - if (rc) { - IPAWANERR("ipa2_send_msg failed: %d\n", rc); - kfree(resp); - return; - } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; } } diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index aebdaab3ac77..2cd08d77df6e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2467,18 +2467,20 @@ static void rmnet_ipa_get_stats_and_update(void) req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; rc = ipa3_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } - if (!rc) { - memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); - msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; - msg_meta.msg_len = - sizeof(struct ipa_get_data_stats_resp_msg_v01); - rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); - if (rc) { - IPAWANERR("ipa_send_msg failed: %d\n", rc); - kfree(resp); - return; - } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, 
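A hypothetical helper condensing the partial-status handling change above: the driver now stashes its own copy of the short skb rather than holding the original pointer, so the caller's buffer lifetime is not extended. The NULL check here is illustrative and not part of the patch.

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_stash_partial(struct sk_buff **prev_skb, struct sk_buff *skb)
{
        struct sk_buff *copy = skb_copy(skb, GFP_KERNEL);

        if (!copy)
                return -ENOMEM;

        *prev_skb = copy;       /* caller remains free to release @skb */
        return 0;
}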
rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; } } @@ -2527,18 +2529,20 @@ static void rmnet_ipa_get_network_stats_and_update(void) req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id; rc = ipa3_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc); + kfree(resp); + return; + } - if (!rc) { - memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); - msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; - msg_meta.msg_len = - sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); - rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); - if (rc) { - IPAWANERR("ipa_send_msg failed: %d\n", rc); - kfree(resp); - return; - } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; } } diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile index e1686e608906..c20fd2b42487 100644 --- a/drivers/platform/msm/ipa/test/Makefile +++ b/drivers/platform/msm/ipa/test/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o -ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o +ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o diff --git a/drivers/platform/msm/ipa/test/ipa_test_dma.c b/drivers/platform/msm/ipa/test/ipa_test_dma.c new file mode 100644 index 000000000000..af1b584b0d8c --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_dma.c @@ -0,0 +1,931 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/ipa.h> +#include "../ipa_v3/ipa_i.h" +#include "ipa_ut_framework.h" + +#define IPA_TEST_DMA_WQ_NAME_BUFF_SZ 64 +#define IPA_TEST_DMA_MT_TEST_NUM_WQ 500 +#define IPA_TEST_DMA_MEMCPY_BUFF_SIZE 16384 +#define IPA_TEST_DMA_MAX_PKT_SIZE 0xFF00 +#define IPA_DMA_TEST_LOOP_NUM 1000 +#define IPA_DMA_TEST_INT_LOOP_NUM 50 +#define IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM 128 +#define IPA_DMA_RUN_TEST_UNIT_IN_LOOP(test_unit, iters, rc, args...) 
\ + do { \ + int __i; \ + for (__i = 0; __i < iters; __i++) { \ + IPA_UT_LOG(#test_unit " START iter %d\n", __i); \ + rc = test_unit(args); \ + if (!rc) \ + continue; \ + IPA_UT_LOG(#test_unit " failed %d\n", rc); \ + break; \ + } \ + } while (0) + +/** + * struct ipa_test_dma_async_user_data - user_data structure for async memcpy + * @src_mem: source memory buffer + * @dest_mem: destination memory buffer + * @call_serial_number: Id of the caller + * @copy_done: Completion object + */ +struct ipa_test_dma_async_user_data { + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + int call_serial_number; + struct completion copy_done; +}; + +/** + * ipa_test_dma_setup() - Suite setup function + */ +static int ipa_test_dma_setup(void **ppriv) +{ + int rc; + + IPA_UT_DBG("Start Setup\n"); + + if (!ipa3_ctx) { + IPA_UT_ERR("No IPA ctx\n"); + return -EINVAL; + } + + rc = ipa_dma_init(); + if (rc) + IPA_UT_ERR("Fail to init ipa_dma - return code %d\n", rc); + else + IPA_UT_DBG("ipa_dma_init() Completed successfully!\n"); + + *ppriv = NULL; + + return rc; +} + +/** + * ipa_test_dma_teardown() - Suite teardown function + */ +static int ipa_test_dma_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + ipa_dma_destroy(); + return 0; +} + +static int ipa_test_dma_alloc_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest, + int size) +{ + int i; + static int val = 1; + int rc; + + val++; + src->size = size; + src->base = dma_alloc_coherent(ipa3_ctx->pdev, src->size, + &src->phys_base, GFP_KERNEL); + if (!src->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + return -ENOMEM; + } + + dest->size = size; + dest->base = dma_alloc_coherent(ipa3_ctx->pdev, dest->size, + &dest->phys_base, GFP_KERNEL); + if (!dest->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + rc = -ENOMEM; + goto fail_alloc_dest; + } + + memset(dest->base, 0, dest->size); + for (i = 0; i < src->size; i++) + memset(src->base + i, (val + i) & 0xFF, 1); + rc = memcmp(dest->base, src->base, dest->size); + if (rc == 0) { + IPA_UT_LOG("dest & src buffers are equal\n"); + IPA_UT_TEST_FAIL_REPORT("dest & src buffers are equal"); + rc = -EFAULT; + goto fail_buf_cmp; + } + + return 0; + +fail_buf_cmp: + dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +fail_alloc_dest: + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + return rc; +} + +static void ipa_test_dma_destroy_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest) +{ + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +} + +/** + * ipa_test_dma_memcpy_sync() - memcpy in sync mode + * + * @size: buffer size + * @expect_fail: test expects the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. sync memcpy src to dst via dma + * 3. 
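A self-contained userspace analog of the run-in-loop macro defined above, showing how a test unit is repeated until the first failure; the fake test and iteration count are placeholders.

#include <stdio.h>

#define RUN_TEST_IN_LOOP(test_unit, iters, rc, ...)                     \
        do {                                                            \
                int __i;                                                \
                for (__i = 0; __i < (iters); __i++) {                   \
                        rc = test_unit(__VA_ARGS__);                    \
                        if (!rc)                                        \
                                continue;                               \
                        printf(#test_unit " failed at iter %d: %d\n",   \
                               __i, rc);                                \
                        break;                                          \
                }                                                       \
        } while (0)

static int fake_memcpy_test(int size)
{
        return size > 0 ? 0 : -1;       /* pretend positive sizes pass */
}

int main(void)
{
        int rc;

        RUN_TEST_IN_LOOP(fake_memcpy_test, 10, rc, 4096);
        printf("final rc = %d\n", rc);
        return rc;
}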
compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_sync(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + rc = ipa_dma_sync_memcpy(dest_mem.phys_base, src_mem.phys_base, size); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to sync memcpy - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("sync memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "sync memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +static void ipa_test_dma_async_memcpy_cb(void *comp_obj) +{ + struct completion *xfer_done; + + if (!comp_obj) { + IPA_UT_ERR("Invalid Input\n"); + return; + } + xfer_done = (struct completion *)comp_obj; + complete(xfer_done); +} + +static void ipa_test_dma_async_memcpy_cb_user_data(void *user_param) +{ + int rc; + int i; + u8 *src; + u8 *dest; + struct ipa_test_dma_async_user_data *udata = + (struct ipa_test_dma_async_user_data *)user_param; + + if (!udata) { + IPA_UT_ERR("Invalid user param\n"); + return; + } + + rc = memcmp(udata->dest_mem.base, udata->src_mem.base, + udata->src_mem.size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equal sn=%d\n", + udata->call_serial_number); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equal"); + src = udata->src_mem.base; + dest = udata->dest_mem.base; + for (i = 0; i < udata->src_mem.size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_ERR("byte: %d 0x%x != 0x%x\n", i, + *(src + i), *(dest + i)); + } + } + return; + } + + IPA_UT_LOG("Notify on async memcopy sn=%d\n", + udata->call_serial_number); + complete(&(udata->copy_done)); +} + +/** + * ipa_test_dma_memcpy_async() - memcpy in async mode + * + * @size: buffer size + * @expect_fail: test expected the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. async memcpy src to dst via dma and wait for completion + * 3. 
compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_async(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + struct completion xfer_done; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + init_completion(&xfer_done); + rc = ipa_dma_async_memcpy(dest_mem.phys_base, src_mem.phys_base, size, + ipa_test_dma_async_memcpy_cb, &xfer_done); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to initiate async memcpy - rc=%d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy initiate failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("async memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "async memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + wait_for_completion(&xfer_done); + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +/** + * ipa_test_dma_sync_async_memcpy() - memcpy in sync and then async mode + * + * @size: buffer size + * + * To be run during tests + * 1. several sync memcopy in row + * 2. several async memcopy - + * back-to-back (next async try initiated after prev is completed) + */ +static int ipa_test_dma_sync_async_memcpy(int size) +{ + int rc; + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("sync memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcopy fail"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("async memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcopy fail"); + return rc; + } + + return 0; +} + +/** + * TEST: test control API - enable/disable dma + * 1. enable dma + * 2. disable dma + */ +static int ipa_test_dma_control_api(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: memcpy before dma enable + * + * 1. sync memcpy - should fail + * 2. 
async memcpy - should fail + */ +static int ipa_test_dma_memcpy_before_enable(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("sync memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("async memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Async memory copy + * + * 1. dma enable + * 2. async memcpy + * 3. dma disable + */ +static int ipa_test_dma_async_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of sync memory copy + * + * 1. dma enable + * 2. sync memcpy in loop - in row + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of async memory copy + * + * 1. dma enable + * 2. async memcpy in loop - back-to-back + * next async copy is initiated once previous one completed + * 3. 
dma disable + */ +static int ipa_test_dma_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of interleaved sync and async memory copy + * + * 1. dma enable + * 2. sync and async memcpy in loop - interleaved + * 3. dma disable + */ +static int ipa_test_dma_interleaved_sync_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_sync_async_memcpy, + IPA_DMA_TEST_INT_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG( + "Iterations of interleaved sync async memcpy failed rc=%d\n" + , rc); + IPA_UT_TEST_FAIL_REPORT( + "Iterations of interleaved sync async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +static atomic_t ipa_test_dma_mt_test_pass; + +static void ipa_test_dma_wrapper_test_one_sync(struct work_struct *work) +{ + int rc; + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("fail sync memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail sync memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +static void ipa_test_dma_wrapper_test_one_async(struct work_struct *work) +{ + int rc; + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("fail async memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail async memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +/** + * TEST: Multiple threads running sync and sync mem copy + * + * 1. dma enable + * 2. In-loop + * 2.1 create wq for sync memcpy + * 2.2 create wq for async memcpy + * 2.3 queue sync memcpy work + * 2.4 queue async memcoy work + * 3. In-loop + * 3.1 flush and destroy wq sync + * 3.2 flush and destroy wq async + * 3. 
dma disable + */ +static int ipa_test_dma_mt_sync_async(void *priv) +{ + int rc; + int i; + static struct workqueue_struct *wq_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct workqueue_struct *wq_async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct work_struct work_async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct work_struct work_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + char buff[IPA_TEST_DMA_WQ_NAME_BUFF_SZ]; + + memset(wq_sync, 0, sizeof(wq_sync)); + memset(wq_sync, 0, sizeof(wq_async)); + memset(work_async, 0, sizeof(work_async)); + memset(work_sync, 0, sizeof(work_sync)); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + atomic_set(&ipa_test_dma_mt_test_pass, 0); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + snprintf(buff, sizeof(buff), "ipa_test_dmaSwq%d", i); + wq_sync[i] = create_singlethread_workqueue(buff); + if (!wq_sync[i]) { + IPA_UT_ERR("failed to create sync wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipa_test_dmaAwq%d", i); + wq_async[i] = create_singlethread_workqueue(buff); + if (!wq_async[i]) { + IPA_UT_ERR("failed to create async wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + + INIT_WORK(&work_sync[i], ipa_test_dma_wrapper_test_one_sync); + queue_work(wq_sync[i], &work_sync[i]); + INIT_WORK(&work_async[i], ipa_test_dma_wrapper_test_one_async); + queue_work(wq_async[i], &work_async[i]); + } + + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + flush_workqueue(wq_sync[i]); + destroy_workqueue(wq_sync[i]); + flush_workqueue(wq_async[i]); + destroy_workqueue(wq_async[i]); + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if ((2 * IPA_TEST_DMA_MT_TEST_NUM_WQ) != + atomic_read(&ipa_test_dma_mt_test_pass)) { + IPA_UT_LOG( + "Multi-threaded sync/async memcopy failed passed=%d\n" + , atomic_read(&ipa_test_dma_mt_test_pass)); + IPA_UT_TEST_FAIL_REPORT( + "Multi-threaded sync/async memcopy failed"); + return -EFAULT; + } + + return 0; + +fail_create_wq: + (void)ipa_dma_disable(); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + if (wq_sync[i]) + destroy_workqueue(wq_sync[i]); + if (wq_async[i]) + destroy_workqueue(wq_async[i]); + } + + return rc; +} + +/** + * TEST: Several parallel async memory copy iterations + * + * 1. create several user_data structures - one per iteration + * 2. allocate buffs. Give slice for each iteration + * 3. iterations of async mem copy + * 4. wait for all to complete + * 5. 
dma disable + */ +static int ipa_test_dma_parallel_async_memcpy_in_loop(void *priv) +{ + int rc; + struct ipa_test_dma_async_user_data *udata; + struct ipa_mem_buffer all_src_mem; + struct ipa_mem_buffer all_dest_mem; + int i; + bool is_fail = false; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + udata = kzalloc(IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM * + sizeof(struct ipa_test_dma_async_user_data), GFP_KERNEL); + if (!udata) { + IPA_UT_ERR("fail allocate user_data array\n"); + (void)ipa_dma_disable(); + return -ENOMEM; + } + + rc = ipa_test_dma_alloc_buffs(&all_src_mem, &all_dest_mem, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + kfree(udata); + (void)ipa_dma_disable(); + return rc; + } + + for (i = 0 ; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) { + udata[i].src_mem.size = + IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM; + udata[i].src_mem.base = all_src_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].src_mem.phys_base = all_src_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].dest_mem.size = + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.base = all_dest_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.phys_base = all_dest_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].call_serial_number = i + 1; + init_completion(&(udata[i].copy_done)); + rc = ipa_dma_async_memcpy(udata[i].dest_mem.phys_base, + udata[i].src_mem.phys_base, + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM), + ipa_test_dma_async_memcpy_cb_user_data, &udata[i]); + if (rc) { + IPA_UT_LOG("async memcpy initiation fail i=%d rc=%d\n", + i, rc); + is_fail = true; + } + } + + for (i = 0; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) + wait_for_completion(&udata[i].copy_done); + + ipa_test_dma_destroy_buffs(&all_src_mem, &all_dest_mem); + kfree(udata); + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if (is_fail) { + IPA_UT_LOG("async memcopy failed\n"); + IPA_UT_TEST_FAIL_REPORT("async memcopy failed"); + return -EFAULT; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy with max packet size + * 3. 
dma disable + */ +static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MAX_PKT_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI", + ipa_test_dma_setup, ipa_test_dma_teardown) +{ + IPA_UT_ADD_TEST(control_api, + "Control API", + ipa_test_dma_control_api, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(memcpy_before_enable, + "Call memcpy before dma enable and expect it to fail", + ipa_test_dma_memcpy_before_enable, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy, + "Sync memory copy", + ipa_test_dma_sync_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy, + "Async memory copy", + ipa_test_dma_async_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_in_loop, + "Several sync memory copy iterations", + ipa_test_dma_sync_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy_in_loop, + "Several async memory copy iterations", + ipa_test_dma_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(interleaved_sync_async_memcpy_in_loop, + "Several interleaved sync and async memory copy iterations", + ipa_test_dma_interleaved_sync_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(multi_threaded_multiple_sync_async_memcpy, + "Several multi-threaded sync and async memory copy iterations", + ipa_test_dma_mt_sync_async, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(parallel_async_memcpy_in_loop, + "Several parallel async memory copy iterations", + ipa_test_dma_parallel_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_max_pkt_size, + "Sync memory copy with max packet size", + ipa_test_dma_sync_memcpy_max_pkt_size, + true, IPA_HW_v3_0, IPA_HW_MAX), +} IPA_UT_DEFINE_SUITE_END(dma); diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h index 944800f8e4be..bbb8b771ba0b 100644 --- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h +++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h @@ -21,6 +21,7 @@ * No importance for order. 
*/ IPA_UT_DECLARE_SUITE(mhi); +IPA_UT_DECLARE_SUITE(dma); IPA_UT_DECLARE_SUITE(example); @@ -31,6 +32,7 @@ IPA_UT_DECLARE_SUITE(example); IPA_UT_DEFINE_ALL_SUITES_START { IPA_UT_REGISTER_SUITE(mhi), + IPA_UT_REGISTER_SUITE(dma), IPA_UT_REGISTER_SUITE(example), } IPA_UT_DEFINE_ALL_SUITES_END; diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index b1c3441b285a..545a1e684b25 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -227,6 +227,8 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(usb_otg), POWER_SUPPLY_ATTR(battery_charging_enabled), POWER_SUPPLY_ATTR(charging_enabled), + POWER_SUPPLY_ATTR(step_charging_enabled), + POWER_SUPPLY_ATTR(step_charging_step), POWER_SUPPLY_ATTR(pin_enabled), POWER_SUPPLY_ATTR(input_suspend), POWER_SUPPLY_ATTR(input_voltage_regulation), diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c index 2f439b11089f..0be535194b49 100644 --- a/drivers/power/qcom-charger/qpnp-smb2.c +++ b/drivers/power/qcom-charger/qpnp-smb2.c @@ -324,6 +324,7 @@ static enum power_supply_property smb2_usb_props[] = { POWER_SUPPLY_PROP_PD_ALLOWED, POWER_SUPPLY_PROP_PD_ACTIVE, POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, + POWER_SUPPLY_PROP_INPUT_CURRENT_NOW, }; static int smb2_usb_get_prop(struct power_supply *psy, @@ -383,6 +384,9 @@ static int smb2_usb_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED: rc = smblib_get_prop_input_current_settled(chg, val); break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW: + rc = smblib_get_prop_usb_current_now(chg, val); + break; default: pr_err("get prop %d is not supported\n", psp); rc = -EINVAL; @@ -582,42 +586,51 @@ static enum power_supply_property smb2_batt_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL, + POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, }; static int smb2_batt_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { + int rc; struct smb_charger *chg = power_supply_get_drvdata(psy); switch (psp) { case POWER_SUPPLY_PROP_STATUS: - smblib_get_prop_batt_status(chg, val); + rc = smblib_get_prop_batt_status(chg, val); break; case POWER_SUPPLY_PROP_HEALTH: - smblib_get_prop_batt_health(chg, val); + rc = smblib_get_prop_batt_health(chg, val); break; case POWER_SUPPLY_PROP_PRESENT: - smblib_get_prop_batt_present(chg, val); + rc = smblib_get_prop_batt_present(chg, val); break; case POWER_SUPPLY_PROP_INPUT_SUSPEND: - smblib_get_prop_input_suspend(chg, val); + rc = smblib_get_prop_input_suspend(chg, val); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: - smblib_get_prop_batt_charge_type(chg, val); + rc = smblib_get_prop_batt_charge_type(chg, val); break; case POWER_SUPPLY_PROP_CAPACITY: - smblib_get_prop_batt_capacity(chg, val); + rc = smblib_get_prop_batt_capacity(chg, val); break; case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: - smblib_get_prop_system_temp_level(chg, val); + rc = smblib_get_prop_system_temp_level(chg, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP: + rc = smblib_get_prop_charger_temp(chg, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + rc = smblib_get_prop_charger_temp_max(chg, val); break; default: pr_err("batt power supply prop %d not supported\n", psp); return -EINVAL; } - return 0; + return rc; } static int smb2_batt_set_prop(struct power_supply *psy, @@ -1219,7 +1232,11 @@ static int smb2_probe(struct platform_device 
*pdev) return -EINVAL; } - smblib_init(chg); + rc = smblib_init(chg); + if (rc < 0) { + pr_err("Smblib_init failed rc=%d\n", rc); + goto cleanup; + } rc = smb2_parse_dt(chip); if (rc < 0) { diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c index 15f07ee7d36c..ee4f65430d8b 100644 --- a/drivers/power/qcom-charger/smb-lib.c +++ b/drivers/power/qcom-charger/smb-lib.c @@ -12,6 +12,7 @@ #include <linux/device.h> #include <linux/regmap.h> +#include <linux/iio/consumer.h> #include <linux/power_supply.h> #include <linux/regulator/driver.h> #include <linux/irq.h> @@ -1172,12 +1173,20 @@ int smblib_get_prop_usb_online(struct smb_charger *chg, int smblib_get_prop_usb_voltage_now(struct smb_charger *chg, union power_supply_propval *val) { - if (chg->vbus_present) - val->intval = MICRO_5V; - else - val->intval = 0; + int rc = 0; - return 0; + rc = smblib_get_prop_usb_present(chg, val); + if (rc < 0 || !val->intval) + return rc; + + if (!chg->iio.usbin_v_chan || + PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER) + chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v"); + + if (IS_ERR(chg->iio.usbin_v_chan)) + return PTR_ERR(chg->iio.usbin_v_chan); + + return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval); } int smblib_get_prop_usb_current_max(struct smb_charger *chg, @@ -1187,6 +1196,59 @@ int smblib_get_prop_usb_current_max(struct smb_charger *chg, return 0; } +int smblib_get_prop_usb_current_now(struct smb_charger *chg, + union power_supply_propval *val) +{ + int rc = 0; + + rc = smblib_get_prop_usb_present(chg, val); + if (rc < 0 || !val->intval) + return rc; + + if (!chg->iio.usbin_i_chan || + PTR_ERR(chg->iio.usbin_i_chan) == -EPROBE_DEFER) + chg->iio.usbin_i_chan = iio_channel_get(chg->dev, "usbin_i"); + + if (IS_ERR(chg->iio.usbin_i_chan)) + return PTR_ERR(chg->iio.usbin_i_chan); + + return iio_read_channel_processed(chg->iio.usbin_i_chan, &val->intval); +} + +int smblib_get_prop_charger_temp(struct smb_charger *chg, + union power_supply_propval *val) +{ + int rc; + + if (!chg->iio.temp_chan || + PTR_ERR(chg->iio.temp_chan) == -EPROBE_DEFER) + chg->iio.temp_chan = iio_channel_get(chg->dev, "charger_temp"); + + if (IS_ERR(chg->iio.temp_chan)) + return PTR_ERR(chg->iio.temp_chan); + + rc = iio_read_channel_processed(chg->iio.temp_chan, &val->intval); + val->intval /= 100; + return rc; +} + +int smblib_get_prop_charger_temp_max(struct smb_charger *chg, + union power_supply_propval *val) +{ + int rc; + + if (!chg->iio.temp_max_chan || + PTR_ERR(chg->iio.temp_max_chan) == -EPROBE_DEFER) + chg->iio.temp_max_chan = iio_channel_get(chg->dev, + "charger_temp_max"); + if (IS_ERR(chg->iio.temp_max_chan)) + return PTR_ERR(chg->iio.temp_max_chan); + + rc = iio_read_channel_processed(chg->iio.temp_max_chan, &val->intval); + val->intval /= 100; + return rc; +} + int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg, union power_supply_propval *val) { @@ -1992,7 +2054,7 @@ done: vote(chg->awake_votable, PL_VOTER, false, 0); } -int smblib_create_votables(struct smb_charger *chg) +static int smblib_create_votables(struct smb_charger *chg) { int rc = 0; @@ -2086,6 +2148,42 @@ int smblib_create_votables(struct smb_charger *chg) return rc; } +static void smblib_destroy_votables(struct smb_charger *chg) +{ + if (chg->usb_suspend_votable) + destroy_votable(chg->usb_suspend_votable); + if (chg->dc_suspend_votable) + destroy_votable(chg->dc_suspend_votable); + if (chg->fcc_max_votable) + destroy_votable(chg->fcc_max_votable); + if (chg->fcc_votable) 
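A hypothetical helper distilling the lazy IIO channel handling used by the new smblib getters above: acquire the consumer channel on first use, retry if the provider deferred earlier, then read a processed value.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

static int example_read_channel(struct device *dev, struct iio_channel **chan,
                                const char *name, int *out)
{
        if (!*chan || PTR_ERR(*chan) == -EPROBE_DEFER)
                *chan = iio_channel_get(dev, name);

        if (IS_ERR(*chan))
                return PTR_ERR(*chan);

        return iio_read_channel_processed(*chan, out);
}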
+ destroy_votable(chg->fcc_votable); + if (chg->fv_votable) + destroy_votable(chg->fv_votable); + if (chg->usb_icl_votable) + destroy_votable(chg->usb_icl_votable); + if (chg->dc_icl_votable) + destroy_votable(chg->dc_icl_votable); + if (chg->pd_allowed_votable) + destroy_votable(chg->pd_allowed_votable); + if (chg->awake_votable) + destroy_votable(chg->awake_votable); + if (chg->pl_disable_votable) + destroy_votable(chg->pl_disable_votable); +} + +static void smblib_iio_deinit(struct smb_charger *chg) +{ + if (!IS_ERR_OR_NULL(chg->iio.temp_chan)) + iio_channel_release(chg->iio.temp_chan); + if (!IS_ERR_OR_NULL(chg->iio.temp_max_chan)) + iio_channel_release(chg->iio.temp_max_chan); + if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan)) + iio_channel_release(chg->iio.usbin_i_chan); + if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan)) + iio_channel_release(chg->iio.usbin_v_chan); +} + int smblib_init(struct smb_charger *chg) { int rc = 0; @@ -2130,18 +2228,19 @@ int smblib_init(struct smb_charger *chg) int smblib_deinit(struct smb_charger *chg) { - destroy_votable(chg->usb_suspend_votable); - destroy_votable(chg->dc_suspend_votable); - destroy_votable(chg->fcc_max_votable); - destroy_votable(chg->fcc_votable); - destroy_votable(chg->fv_votable); - destroy_votable(chg->usb_icl_votable); - destroy_votable(chg->dc_icl_votable); - destroy_votable(chg->pd_allowed_votable); - destroy_votable(chg->awake_votable); - destroy_votable(chg->pl_disable_votable); - - power_supply_unreg_notifier(&chg->nb); + switch (chg->mode) { + case PARALLEL_MASTER: + power_supply_unreg_notifier(&chg->nb); + smblib_destroy_votables(chg); + break; + case PARALLEL_SLAVE: + break; + default: + dev_err(chg->dev, "Unsupported mode %d\n", chg->mode); + return -EINVAL; + } + + smblib_iio_deinit(chg); return 0; } diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h index 974dbc7f85dd..1b0c221b4764 100644 --- a/drivers/power/qcom-charger/smb-lib.h +++ b/drivers/power/qcom-charger/smb-lib.h @@ -88,10 +88,18 @@ struct parallel_params { int slave_fcc; }; +struct smb_iio { + struct iio_channel *temp_chan; + struct iio_channel *temp_max_chan; + struct iio_channel *usbin_i_chan; + struct iio_channel *usbin_v_chan; +}; + struct smb_charger { struct device *dev; struct regmap *regmap; struct smb_params param; + struct smb_iio iio; int *debug_mask; enum smb_mode mode; @@ -236,6 +244,8 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_usb_current_max(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_usb_current_now(struct smb_charger *chg, + union power_supply_propval *val); int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_typec_mode(struct smb_charger *chg, @@ -246,6 +256,10 @@ int smblib_get_prop_pd_allowed(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_input_current_settled(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_charger_temp(struct smb_charger *chg, + union power_supply_propval *val); +int smblib_get_prop_charger_temp_max(struct smb_charger *chg, + union power_supply_propval *val); int smblib_set_prop_usb_current_max(struct smb_charger *chg, const union power_supply_propval *val); int smblib_set_prop_usb_voltage_min(struct smb_charger *chg, diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c index cc72772dba88..a77b0bbc193c 
100644 --- a/drivers/power/qcom-charger/smb138x-charger.c +++ b/drivers/power/qcom-charger/smb138x-charger.c @@ -267,6 +267,8 @@ static enum power_supply_property smb138x_batt_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, }; static int smb138x_batt_get_prop(struct power_supply *psy, @@ -296,6 +298,12 @@ static int smb138x_batt_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_CAPACITY: rc = smblib_get_prop_batt_capacity(chg, val); break; + case POWER_SUPPLY_PROP_CHARGER_TEMP: + rc = smblib_get_prop_charger_temp(chg, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + rc = smblib_get_prop_charger_temp_max(chg, val); + break; default: pr_err("batt power supply get prop %d not supported\n", prop); @@ -381,6 +389,8 @@ static enum power_supply_property smb138x_parallel_props[] = { POWER_SUPPLY_PROP_INPUT_SUSPEND, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, }; static int smb138x_parallel_get_prop(struct power_supply *psy, @@ -415,6 +425,12 @@ static int smb138x_parallel_get_prop(struct power_supply *psy, rc = smblib_get_charge_param(chg, &chg->param.fcc, &val->intval); break; + case POWER_SUPPLY_PROP_CHARGER_TEMP: + rc = smblib_get_prop_charger_temp(chg, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + rc = smblib_get_prop_charger_temp_max(chg, val); + break; default: pr_err("parallel power supply get prop %d not supported\n", prop); diff --git a/drivers/power/qcom/debug_core.c b/drivers/power/qcom/debug_core.c index d3620bbbeafa..ccef04ae9eb2 100644 --- a/drivers/power/qcom/debug_core.c +++ b/drivers/power/qcom/debug_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,6 +22,8 @@ #include "soc/qcom/msm-core.h" #define MAX_PSTATES 50 +#define NUM_OF_PENTRY 3 /* number of variables for ptable node */ +#define NUM_OF_EENTRY 2 /* number of variables for enable node */ enum arg_offset { CPU_OFFSET, @@ -82,15 +84,28 @@ static struct debugfs_blob_wrapper help_msg = { }; -static void add_to_ptable(uint64_t *arg) +static void add_to_ptable(unsigned int *arg) { struct core_debug *node; int i, cpu = arg[CPU_OFFSET]; + uint32_t freq = arg[FREQ_OFFSET]; + uint32_t power = arg[POWER_OFFSET]; if (!cpu_possible(cpu)) return; + if ((freq == 0) || (power == 0)) { + pr_warn("Incorrect power data\n"); + return; + } + node = &per_cpu(c_dgfs, cpu); + + if (node->len >= MAX_PSTATES) { + pr_warn("Dropped ptable update - no space left.\n"); + return; + } + if (!node->head) { node->head = kzalloc(sizeof(struct cpu_pstate_pwr) * (MAX_PSTATES + 1), @@ -98,24 +113,18 @@ static void add_to_ptable(uint64_t *arg) if (!node->head) return; } - for (i = 0; i < MAX_PSTATES; i++) { - if (node->head[i].freq == arg[FREQ_OFFSET]) { - node->head[i].power = arg[POWER_OFFSET]; + + for (i = 0; i < node->len; i++) { + if (node->head[i].freq == freq) { + node->head[i].power = power; return; } - if (node->head[i].freq == 0) - break; - } - - if (i == MAX_PSTATES) { - pr_warn("Dropped ptable update - no space left.\n"); - return; } /* Insert a new frequency (may need to move things around to keep in ascending order). 
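A standalone analog of the hardened ptable insert above: reject incomplete entries, update an existing frequency in place, refuse to grow past the fixed capacity, and keep the table sorted by frequency. The units in the example are illustrative.

#include <stdio.h>

#define MAX_PSTATES 50

struct pstate { unsigned int freq, power; };

static int ptable_insert(struct pstate *tbl, int *len,
                         unsigned int freq, unsigned int power)
{
        int i;

        if (!freq || !power)
                return -1;              /* reject incomplete entries */

        for (i = 0; i < *len; i++) {
                if (tbl[i].freq == freq) {
                        tbl[i].power = power;   /* update in place */
                        return 0;
                }
        }

        if (*len >= MAX_PSTATES)
                return -1;              /* no space left */

        /* shift larger frequencies up to keep ascending order */
        for (i = *len; i > 0 && tbl[i - 1].freq > freq; i--)
                tbl[i] = tbl[i - 1];

        tbl[i].freq = freq;
        tbl[i].power = power;
        (*len)++;
        return 0;
}

int main(void)
{
        struct pstate tbl[MAX_PSTATES];
        int len = 0, i;

        ptable_insert(tbl, &len, 1200000, 300);
        ptable_insert(tbl, &len, 300000, 50);
        ptable_insert(tbl, &len, 600000, 120);
        for (i = 0; i < len; i++)
                printf("%u kHz -> %u mW\n", tbl[i].freq, tbl[i].power);
        return 0;
}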
*/ for (i = MAX_PSTATES - 1; i > 0; i--) { - if (node->head[i-1].freq > arg[FREQ_OFFSET]) { + if (node->head[i-1].freq > freq) { node->head[i].freq = node->head[i-1].freq; node->head[i].power = node->head[i-1].power; } else if (node->head[i-1].freq != 0) { @@ -123,23 +132,29 @@ static void add_to_ptable(uint64_t *arg) } } - node->head[i].freq = arg[FREQ_OFFSET]; - node->head[i].power = arg[POWER_OFFSET]; - node->len++; + if (node->len < MAX_PSTATES) { + node->head[i].freq = freq; + node->head[i].power = power; + node->len++; + } if (node->ptr) node->ptr->len = node->len; } -static int split_ptable_args(char *line, uint64_t *arg) +static int split_ptable_args(char *line, unsigned int *arg, uint32_t n) { char *args; int i; int ret = 0; - for (i = 0; line; i++) { + for (i = 0; i < n; i++) { + if (!line) + break; args = strsep(&line, " "); - ret = kstrtoull(args, 10, &arg[i]); + ret = kstrtouint(args, 10, &arg[i]); + if (ret) + return ret; } return ret; } @@ -149,7 +164,7 @@ static ssize_t msm_core_ptable_write(struct file *file, { char *kbuf; int ret; - uint64_t arg[3]; + unsigned int arg[3]; if (len == 0) return 0; @@ -163,7 +178,7 @@ static ssize_t msm_core_ptable_write(struct file *file, goto done; } kbuf[len] = '\0'; - ret = split_ptable_args(kbuf, arg); + ret = split_ptable_args(kbuf, arg, NUM_OF_PENTRY); if (!ret) { add_to_ptable(arg); ret = len; @@ -201,7 +216,7 @@ static int msm_core_ptable_read(struct seq_file *m, void *data) seq_printf(m, "--- CPU%d - Live numbers at %ldC---\n", cpu, node->ptr->temp); print_table(m, msm_core_data[cpu].ptable, - msm_core_data[cpu].len); + node->driver_len); } } return 0; @@ -212,7 +227,7 @@ static ssize_t msm_core_enable_write(struct file *file, { char *kbuf; int ret; - uint64_t arg[3]; + unsigned int arg[3]; int cpu; if (len == 0) @@ -227,7 +242,7 @@ static ssize_t msm_core_enable_write(struct file *file, goto done; } kbuf[len] = '\0'; - ret = split_ptable_args(kbuf, arg); + ret = split_ptable_args(kbuf, arg, NUM_OF_EENTRY); if (ret) goto done; cpu = arg[CPU_OFFSET]; diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c index fe5dbbeac15e..b0439871c41a 100644 --- a/drivers/regulator/cpr3-mmss-regulator.c +++ b/drivers/regulator/cpr3-mmss-regulator.c @@ -242,8 +242,8 @@ static const int msmcobalt_v2_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = { #define MSMCOBALT_MMSS_CPR_SENSOR_COUNT 35 -#define MSMCOBALT_MMSS_AGING_SENSOR_ID 17 -#define MSMCOBALT_MMSS_AGING_BYPASS_MASK0 0 +#define MSMCOBALT_MMSS_AGING_SENSOR_ID 29 +#define MSMCOBALT_MMSS_AGING_BYPASS_MASK0 (GENMASK(23, 0)) #define MSMCOBALT_MMSS_MAX_TEMP_POINTS 3 #define MSMCOBALT_MMSS_TEMP_SENSOR_ID_START 12 diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c index 6e8db03fe16e..232373092746 100644 --- a/drivers/regulator/cpr3-regulator.c +++ b/drivers/regulator/cpr3-regulator.c @@ -3523,7 +3523,7 @@ cleanup: if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { rc2 = cpr3_ctrl_clear_cpr4_config(ctrl); - if (rc) { + if (rc2) { cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", rc2); rc = rc2; @@ -3725,6 +3725,17 @@ static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl) return 0; } + /* + * Verify that the aging possible register (if specified) has an + * acceptable value. 
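A userspace analog of the bounded argument parser above, splitting a "cpu freq power" style line into at most n unsigned integers and failing on any non-numeric token; strtoul stands in for kstrtouint.

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int split_args(char *line, unsigned int *arg, unsigned int n)
{
        unsigned int i;
        char *tok, *end;

        for (i = 0; i < n; i++) {
                tok = strsep(&line, " ");
                if (!tok)
                        break;
                errno = 0;
                arg[i] = (unsigned int)strtoul(tok, &end, 10);
                if (errno || end == tok || *end)
                        return -1;
        }
        return 0;
}

int main(void)
{
        char buf[] = "2 1400000 750";
        unsigned int arg[3] = {0};

        if (split_args(buf, arg, 3))
                return 1;
        printf("cpu=%u freq=%u power=%u\n", arg[0], arg[1], arg[2]);
        return 0;
}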
+ */ + if (ctrl->aging_possible_reg) { + reg = readl_relaxed(ctrl->aging_possible_reg); + reg &= ctrl->aging_possible_mask; + if (reg != ctrl->aging_possible_val) + return 0; + } + restore_current_corner = kcalloc(vreg_count, sizeof(*restore_current_corner), GFP_KERNEL); restore_vreg_enabled = kcalloc(vreg_count, @@ -3798,7 +3809,7 @@ static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl) max_aging_volt = max(max_aging_volt, aging_volt); } else { cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n", - rc, CPR3_AGING_RETRY_COUNT); + j, rc); ctrl->aging_failed = true; ctrl->aging_required = false; goto cleanup; @@ -5992,6 +6003,21 @@ int cpr3_regulator_register(struct platform_device *pdev, } ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res)); + if (ctrl->aging_possible_mask) { + /* + * Aging possible register address is required if an aging + * possible mask has been specified. + */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "aging_allowed"); + if (!res || !res->start) { + cpr3_err(ctrl, "CPR aging allowed address is missing\n"); + return -ENXIO; + } + ctrl->aging_possible_reg = devm_ioremap(dev, res->start, + resource_size(res)); + } + if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) { ctrl->irq = platform_get_irq_byname(pdev, "cpr"); if (ctrl->irq < 0) { diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h index 0907518722df..8897def3ef76 100644 --- a/drivers/regulator/cpr3-regulator.h +++ b/drivers/regulator/cpr3-regulator.h @@ -532,6 +532,9 @@ struct cpr3_panic_regs_info { * that this CPR3 controller manages. * @cpr_ctrl_base: Virtual address of the CPR3 controller base register * @fuse_base: Virtual address of fuse row 0 + * @aging_possible_reg: Virtual address of an optional platform-specific + * register that must be ready to determine if it is + * possible to perform an aging measurement. * @list: list head used in a global cpr3-regulator list so that * cpr3-regulator structs can be found easily in RAM dumps * @thread: Array of CPR3 threads managed by the CPR3 controller @@ -671,6 +674,11 @@ struct cpr3_panic_regs_info { * @aging_sensor: Array of CPR3 aging sensors which are used to perform * aging measurements at a runtime. * @aging_sensor_count: Number of elements in the aging_sensor array + * @aging_possible_mask: Optional bitmask used to mask off the + * aging_possible_reg register. + * @aging_possible_val: Optional value that the masked aging_possible_reg + * register must have in order for a CPR aging measurement + * to be possible. * @step_quot_fixed: Fixed step quotient value used for target quotient * adjustment if use_dynamic_step_quot is not set. 
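A hypothetical helper condensing the optional aging gate introduced above: when an aging-possible register has been mapped, its masked value must match the expected pattern before a measurement is attempted.

#include <linux/io.h>

static bool example_aging_possible(void __iomem *reg, u32 mask, u32 expected)
{
        if (!reg)
                return true;    /* no gate configured: always allowed */

        return (readl_relaxed(reg) & mask) == expected;
}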
* This parameter is only relevant for CPR4 controllers @@ -721,6 +729,7 @@ struct cpr3_controller { int ctrl_id; void __iomem *cpr_ctrl_base; void __iomem *fuse_base; + void __iomem *aging_possible_reg; struct list_head list; struct cpr3_thread *thread; int thread_count; @@ -784,6 +793,8 @@ struct cpr3_controller { bool aging_failed; struct cpr3_aging_sensor_info *aging_sensor; int aging_sensor_count; + u32 aging_possible_mask; + u32 aging_possible_val; u32 step_quot_fixed; u32 initial_temp_band; diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c index 9d55e9af2e7c..51179f28fcf5 100644 --- a/drivers/regulator/cpr3-util.c +++ b/drivers/regulator/cpr3-util.c @@ -1169,6 +1169,24 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl) of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-aging-ref-voltage", &ctrl->aging_ref_volt); + /* Aging possible bitmask is optional */ + ctrl->aging_possible_mask = 0; + of_property_read_u32(ctrl->dev->of_node, + "qcom,cpr-aging-allowed-reg-mask", + &ctrl->aging_possible_mask); + + if (ctrl->aging_possible_mask) { + /* + * Aging possible register value required if bitmask is + * specified + */ + rc = cpr3_parse_ctrl_u32(ctrl, + "qcom,cpr-aging-allowed-reg-value", + &ctrl->aging_possible_val, 0, UINT_MAX); + if (rc) + return rc; + } + if (of_find_property(ctrl->dev->of_node, "clock-names", NULL)) { ctrl->core_clk = devm_clk_get(ctrl->dev, "core_clk"); if (IS_ERR(ctrl->core_clk)) { diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c index 083459f96ac4..284180b0e72f 100644 --- a/drivers/regulator/cprh-kbss-regulator.c +++ b/drivers/regulator/cprh-kbss-regulator.c @@ -69,8 +69,9 @@ struct cprh_msmcobalt_kbss_fuses { /* * Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0. + * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1. */ -#define CPRH_MSMCOBALT_KBSS_FUSE_COMBO_COUNT 8 +#define CPRH_MSMCOBALT_KBSS_FUSE_COMBO_COUNT 16 /* * Constants which define the name of each fuse corner. 
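A small sketch of the optional-property-with-required-companion parsing used above; the property names here are placeholders, not the qcom bindings.

#include <linux/of.h>

static int example_parse_gate(struct device_node *np, u32 *mask, u32 *val)
{
        *mask = 0;
        of_property_read_u32(np, "vendor,gate-mask", mask);     /* optional */
        if (!*mask)
                return 0;

        /* once a mask is given, the expected value becomes mandatory */
        return of_property_read_u32(np, "vendor,gate-value", val);
}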
@@ -206,11 +207,19 @@ msmcobalt_v1_kbss_fuse_ref_volt[MSMCOBALT_KBSS_FUSE_CORNERS] = { * Open loop voltage fuse reference voltages in microvolts for MSMCOBALT v2 */ static const int -msmcobalt_v2_kbss_fuse_ref_volt[MSMCOBALT_KBSS_FUSE_CORNERS] = { - 688000, - 756000, - 828000, - 1056000, +msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = { + [MSMCOBALT_KBSS_POWER_CLUSTER_ID] = { + 688000, + 756000, + 828000, + 1056000, + }, + [MSMCOBALT_KBSS_PERFORMANCE_CLUSTER_ID] = { + 756000, + 756000, + 828000, + 1056000, + }, }; #define MSMCOBALT_KBSS_FUSE_STEP_VOLT 10000 @@ -391,7 +400,7 @@ static int cprh_msmcobalt_kbss_calculate_open_loop_voltages( { struct device_node *node = vreg->of_node; struct cprh_msmcobalt_kbss_fuses *fuse = vreg->platform_fuses; - int i, j, soc_revision, rc = 0; + int i, j, soc_revision, id, rc = 0; bool allow_interpolation; u64 freq_low, volt_low, freq_high, volt_high; const int *ref_volt; @@ -407,13 +416,12 @@ static int cprh_msmcobalt_kbss_calculate_open_loop_voltages( goto done; } + id = vreg->thread->ctrl->ctrl_id; soc_revision = vreg->thread->ctrl->soc_revision; if (soc_revision == 1) ref_volt = msmcobalt_v1_kbss_fuse_ref_volt; - else if (soc_revision == 2) - ref_volt = msmcobalt_v2_kbss_fuse_ref_volt; else - ref_volt = msmcobalt_v2_kbss_fuse_ref_volt; + ref_volt = msmcobalt_v2_kbss_fuse_ref_volt[id]; for (i = 0; i < vreg->fuse_corner_count; i++) { fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse( diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index bfa82ca64499..a3bcfb42ca6a 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -996,7 +996,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) } } -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) +static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) { int err = 0; @@ -1027,7 +1027,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) vote = ufs_qcom_get_bus_vote(host, mode); if (vote >= 0) - err = ufs_qcom_set_bus_vote(host, vote); + err = __ufs_qcom_set_bus_vote(host, vote); else err = vote; @@ -1038,6 +1038,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) return err; } +static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int vote, err; + + /* + * In case ufs_qcom_init() is not yet done, simply ignore. + * This ufs_qcom_set_bus_vote() shall be called from + * ufs_qcom_init() after init is done. + */ + if (!host) + return 0; + + if (on) { + vote = host->bus_vote.saved_vote; + if (vote == host->bus_vote.min_bw_vote) + ufs_qcom_update_bus_bw_vote(host); + } else { + vote = host->bus_vote.min_bw_vote; + } + + err = __ufs_qcom_set_bus_vote(host, vote); + if (err) + dev_err(hba->dev, "%s: set bus vote failed %d\n", + __func__, err); + + return err; +} + static ssize_t show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, char *buf) @@ -1403,7 +1432,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; - int vote = 0; /* * In case ufs_qcom_init() is not yet done, simply ignore. 
@@ -1428,9 +1456,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); - vote = host->bus_vote.saved_vote; - if (vote == host->bus_vote.min_bw_vote) - ufs_qcom_update_bus_bw_vote(host); err = ufs_qcom_ice_resume(host); if (err) @@ -1449,14 +1474,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); } - vote = host->bus_vote.min_bw_vote; } - err = ufs_qcom_set_bus_vote(host, vote); - if (err) - dev_err(hba->dev, "%s: set bus vote failed %d\n", - __func__, err); - out: return err; } @@ -2011,6 +2030,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); + ufs_qcom_set_bus_vote(hba, true); ufs_qcom_setup_clocks(hba, true, false); if (hba->dev->id < MAX_UFS_QCOM_HOSTS) @@ -2521,6 +2541,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .full_reset = ufs_qcom_full_reset, .update_sec_cfg = ufs_qcom_update_sec_cfg, .get_scale_down_gear = ufs_qcom_get_scale_down_gear, + .set_bus_vote = ufs_qcom_set_bus_vote, .dbg_register_dump = ufs_qcom_dump_dbg_regs, #ifdef CONFIG_DEBUG_FS .add_debugfs = ufs_qcom_dbg_add_debugfs, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ce779d760c69..a49b3c7bc4ef 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7555,6 +7555,13 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on, if (!head || list_empty(head)) goto out; + /* call vendor specific bus vote before enabling the clocks */ + if (on) { + ret = ufshcd_vops_set_bus_vote(hba, on); + if (ret) + return ret; + } + /* * vendor specific setup_clocks ops may depend on clocks managed by * this standard driver hence call the vendor specific setup_clocks @@ -7593,11 +7600,24 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on, * this standard driver hence call the vendor specific setup_clocks * after enabling the clocks managed here. */ - if (on) + if (on) { ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context); + if (ret) + goto out; + } + + /* + * call vendor specific bus vote to remove the vote after + * disabling the clocks. 
+ */ + if (!on) + ret = ufshcd_vops_set_bus_vote(hba, on); out: if (ret) { + if (on) + /* Can't do much if this fails */ + (void) ufshcd_vops_set_bus_vote(hba, false); list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 552d50081e3f..b79bebb58dcd 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -309,6 +309,7 @@ struct ufs_pwr_mode_info { * @update_sec_cfg: called to restore host controller secure configuration * @get_scale_down_gear: called to get the minimum supported gear to * scale down + * @set_bus_vote: called to vote for the required bus bandwidth * @add_debugfs: used to add debugfs entries * @remove_debugfs: used to remove debugfs entries */ @@ -335,6 +336,7 @@ struct ufs_hba_variant_ops { void (*dbg_register_dump)(struct ufs_hba *hba); int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg); u32 (*get_scale_down_gear)(struct ufs_hba *); + int (*set_bus_vote)(struct ufs_hba *, bool); #ifdef CONFIG_DEBUG_FS void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root); void (*remove_debugfs)(struct ufs_hba *hba); @@ -1259,6 +1261,13 @@ static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba) return UFS_HS_G1; } +static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on) +{ + if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote) + return hba->var->vops->set_bus_vote(hba, on); + return 0; +} + #ifdef CONFIG_DEBUG_FS static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba, struct dentry *root) diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c index ea4dd2ce4e1d..fbde0d318584 100644 --- a/drivers/slimbus/slim-msm-ngd.c +++ b/drivers/slimbus/slim-msm-ngd.c @@ -167,6 +167,7 @@ static int ngd_qmi_available(struct notifier_block *n, unsigned long code, SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code); switch (code) { case QMI_SERVER_ARRIVE: + atomic_set(&dev->ssr_in_progress, 0); schedule_work(&dev->dsp.dom_up); break; default: @@ -214,6 +215,8 @@ static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code, switch (code) { case SUBSYS_BEFORE_SHUTDOWN: case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01: + SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code); + atomic_set(&dev->ssr_in_progress, 1); /* wait for current transaction */ mutex_lock(&dev->tx_lock); /* make sure autosuspend is not called until ADSP comes up*/ @@ -866,7 +869,7 @@ static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc, } if (dev->bulk.size > dev->bulk.buf_sz) { void *temp = krealloc(dev->bulk.base, dev->bulk.size, - GFP_KERNEL); + GFP_KERNEL | GFP_DMA); if (!temp) { ret = -ENOMEM; goto retpath; @@ -1316,8 +1319,10 @@ hw_init_retry: if (ret) { SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n", ret, retries); - msm_slim_qmi_power_request(dev, false); - if (retries < INIT_MX_RETRIES) { + if (!atomic_read(&dev->ssr_in_progress)) + msm_slim_qmi_power_request(dev, false); + if (retries < INIT_MX_RETRIES && + !atomic_read(&dev->ssr_in_progress)) { retries++; goto hw_init_retry; } @@ -1416,7 +1421,8 @@ capability_retry: SLIM_WARN(dev, "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n", retries, laddr, cfg); - if (retries < INIT_MX_RETRIES) { + if ((retries < INIT_MX_RETRIES) && + !atomic_read(&dev->ssr_in_progress)) { retries++; goto capability_retry; } @@ -1683,7 +1689,7 @@ static int ngd_slim_probe(struct platform_device *pdev) /* 
typical txn numbers and size used in bulk operation */ dev->bulk.buf_sz = SLIM_MAX_TXNS * 8; - dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL); + dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA); if (!dev->bulk.base) { ret = -ENOMEM; goto err_nobulk; @@ -1780,6 +1786,7 @@ static int ngd_slim_probe(struct platform_device *pdev) dev->ee = 1; dev->irq = irq->start; dev->bam.irq = bam_irq->start; + atomic_set(&dev->ssr_in_progress, 0); if (rxreg_access) dev->use_rx_msgqs = MSM_MSGQ_DISABLED; @@ -1938,6 +1945,7 @@ static int ngd_slim_runtime_resume(struct device *device) struct platform_device *pdev = to_platform_device(device); struct msm_slim_ctrl *dev = platform_get_drvdata(pdev); int ret = 0; + mutex_lock(&dev->tx_lock); if (dev->state >= MSM_CTRL_ASLEEP) ret = ngd_slim_power_up(dev, false); diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h index 7616e714299c..65b9fae8040b 100644 --- a/drivers/slimbus/slim-msm.h +++ b/drivers/slimbus/slim-msm.h @@ -314,6 +314,7 @@ struct msm_slim_ctrl { void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf); u32 current_rx_buf[10]; int current_count; + atomic_t ssr_in_progress; }; struct msm_sat_chan { diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 0e74093eeb2b..f884f829b51c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -417,6 +417,15 @@ config ICNSS control messages to FW over QMI channel. It is also responsible for handling WLAN PD restart notifications. +config ICNSS_DEBUG + bool "ICNSS debug support" + depends on ICNSS + ---help--- + Say 'Y' here to enable ICNSS driver debug support. Debug support + primarily consists of logs consisting of information related to + hardware register access and enabling BUG_ON for certain cases to aid + the debugging. 
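The new ICNSS_DEBUG option is a compile-time switch: with it set, the driver builds in verbose register logging and a BUG_ON-style assertion (the real definitions appear in the icnss.c hunks further down); with it clear, those expand to nothing. A hedged, self-contained sketch of that gating pattern, using illustrative names rather than the driver's exact macros:

#include <linux/bug.h>
#include <linux/printk.h>

#ifdef CONFIG_ICNSS_DEBUG
#define EXAMPLE_ASSERT(cond)						\
	do {								\
		if (!(cond)) {						\
			pr_err("EXAMPLE ASSERT at line %d\n", __LINE__);\
			BUG();						\
		}							\
	} while (0)
#define example_reg_dbg(fmt, ...)	pr_debug("REG: " fmt, ##__VA_ARGS__)
#else
/* Option disabled: both compile away, so hot register paths pay no cost. */
#define EXAMPLE_ASSERT(cond)		do { } while (0)
#define example_reg_dbg(fmt, ...)	do { } while (0)
#endif

Keeping the assertion behind a dedicated Kconfig bool lets debug builds crash loudly at the failing call site, while production builds only log the error path and continue.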
+ config MSM_SECURE_BUFFER bool "Helper functions for securing buffers through TZ" help diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index e9e65ea443dd..fb8437d7a7c3 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -70,7 +70,7 @@ obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o -obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o +obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o obj-$(CONFIG_SOC_BUS) += socinfo.o obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/ obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index 20a4a3c7fdf0..f00570aa5fe8 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -4113,6 +4113,37 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx( return xprt_ptr; } +static struct channel_ctx *get_first_ch_ctx( + struct glink_core_xprt_ctx *xprt_ctx) +{ + unsigned long flags; + struct channel_ctx *ctx; + + spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags); + if (!list_empty(&xprt_ctx->channels)) { + ctx = list_first_entry(&xprt_ctx->channels, + struct channel_ctx, port_list_node); + rwref_get(&ctx->ch_state_lhb2); + } else { + ctx = NULL; + } + spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags); + return ctx; +} + +static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr, + struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx) +{ + unsigned long flags, d_flags; + + spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); + spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); + rwref_get(&dummy_xprt_ctx->xprt_state_lhb0); + list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels); + spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags); + spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); +} + /** * glink_core_channel_cleanup() - cleanup all channels for the transport * @@ -4123,7 +4154,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx( static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr) { unsigned long flags, d_flags; - struct channel_ctx *ctx, *tmp_ctx; + struct channel_ctx *ctx; struct channel_lcid *temp_lcid, *temp_lcid1; struct glink_core_xprt_ctx *dummy_xprt_ctx; @@ -4132,29 +4163,18 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr) GLINK_ERR("%s: Dummy Transport creation failed\n", __func__); return; } - rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0); rwref_read_get(&xprt_ptr->xprt_state_lhb0); - spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); - spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); - - list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels, - port_list_node) { + ctx = get_first_ch_ctx(xprt_ptr); + while (ctx) { rwref_write_get_atomic(&ctx->ch_state_lhb2, true); if (ctx->local_open_state == GLINK_CHANNEL_OPENED || ctx->local_open_state == GLINK_CHANNEL_OPENING) { - rwref_get(&dummy_xprt_ctx->xprt_state_lhb0); - list_move_tail(&ctx->port_list_node, - &dummy_xprt_ctx->channels); ctx->transport_ptr = dummy_xprt_ctx; rwref_write_put(&ctx->ch_state_lhb2); + glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx); } else { /* local state is in either CLOSED or CLOSING */ - spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, - flags); - spin_unlock_irqrestore( - 
&dummy_xprt_ctx->xprt_ctx_lock_lhb1, - d_flags); glink_core_remote_close_common(ctx, true); if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) glink_core_ch_close_ack_common(ctx, true); @@ -4162,22 +4182,21 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr) if (ch_is_fully_closed(ctx)) glink_delete_ch_from_list(ctx, false); rwref_write_put(&ctx->ch_state_lhb2); - spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, - d_flags); - spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); } + rwref_put(&ctx->ch_state_lhb2); + ctx = get_first_ch_ctx(xprt_ptr); } + spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags); list_for_each_entry_safe(temp_lcid, temp_lcid1, &xprt_ptr->free_lcid_list, list_node) { list_del(&temp_lcid->list_node); kfree(&temp_lcid->list_node); } - dummy_xprt_ctx->dummy_in_use = false; spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags); - spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); rwref_read_put(&xprt_ptr->xprt_state_lhb0); spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags); + dummy_xprt_ctx->dummy_in_use = false; while (!list_empty(&dummy_xprt_ctx->channels)) { ctx = list_first_entry(&dummy_xprt_ctx->channels, struct channel_ctx, port_list_node); diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 26b1cad9d5aa..aaca82f87159 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -44,11 +44,11 @@ #include "wlan_firmware_service_v01.h" -#define ICNSS_PANIC 1 #define WLFW_TIMEOUT_MS 3000 #define WLFW_SERVICE_INS_ID_V01 0 #define MAX_PROP_SIZE 32 -#define NUM_LOG_PAGES 4 +#define NUM_LOG_PAGES 10 +#define NUM_REG_LOG_PAGES 4 /* * Registers: MPM2_PSHOLD @@ -57,9 +57,12 @@ #define MPM_WCSSAON_CONFIG_OFFSET 0x18 #define MPM_WCSSAON_CONFIG_ARES_N BIT(0) #define MPM_WCSSAON_CONFIG_WLAN_DISABLE BIT(1) +#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD BIT(6) +#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL BIT(7) #define MPM_WCSSAON_CONFIG_FORCE_ACTIVE BIT(14) #define MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE BIT(19) #define MPM_WCSSAON_CONFIG_DISCONNECT_CLR BIT(21) +#define MPM_WCSSAON_CONFIG_M2W_CLAMP_EN BIT(22) /* * Registers: WCSS_SR_SHADOW_REGISTERS @@ -148,6 +151,10 @@ #define ICNSS_HW_REG_RETRY 10 +#define WCSS_HM_A_PMM_HW_VERSION_V10 0x40000000 +#define WCSS_HM_A_PMM_HW_VERSION_V20 0x40010000 +#define WCSS_HM_A_PMM_HW_VERSION_Q10 0x40010001 + #define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN" #define ICNSS_WLAN_SERVICE_NAME "wlan/fw" @@ -156,6 +163,15 @@ ipc_log_string(icnss_ipc_log_context, _x); \ } while (0) +#ifdef CONFIG_ICNSS_DEBUG +#define icnss_ipc_log_long_string(_x...) do { \ + if (icnss_ipc_log_long_context) \ + ipc_log_string(icnss_ipc_log_long_context, _x); \ + } while (0) +#else +#define icnss_ipc_log_long_string(_x...) +#endif + #define icnss_pr_err(_fmt, ...) do { \ pr_err(_fmt, ##__VA_ARGS__); \ icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \ @@ -180,7 +196,13 @@ ##__VA_ARGS__); \ } while (0) -#ifdef ICNSS_PANIC +#define icnss_reg_dbg(_fmt, ...) 
do { \ + pr_debug(_fmt, ##__VA_ARGS__); \ + icnss_ipc_log_long_string("REG: " pr_fmt(_fmt), \ + ##__VA_ARGS__); \ + } while (0) + +#ifdef CONFIG_ICNSS_DEBUG #define ICNSS_ASSERT(_condition) do { \ if (!(_condition)) { \ icnss_pr_err("ASSERT at line %d\n", \ @@ -215,6 +237,10 @@ module_param(quirks, ulong, 0600); void *icnss_ipc_log_context; +#ifdef CONFIG_ICNSS_DEBUG +void *icnss_ipc_log_long_context; +#endif + #define ICNSS_EVENT_PENDING 2989 enum icnss_driver_event_type { @@ -383,7 +409,7 @@ static u32 icnss_hw_read_reg(void *base, u32 offset) { u32 rdata = readl_relaxed(base + offset); - icnss_pr_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata); + icnss_reg_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata); return rdata; } @@ -395,7 +421,7 @@ static void icnss_hw_write_reg_field(void *base, u32 offset, u32 mask, u32 val) val = (rdata & ~mask) | (val << shift); - icnss_pr_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n", + icnss_reg_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n", offset, rdata, val); icnss_hw_write_reg(base, offset, val); @@ -414,12 +440,12 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val, rdata = readl_relaxed(base + offset); - icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", + icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", offset, val, rdata, mask); while ((rdata & mask) != val) { if (retry != 0 && r >= retry) { - icnss_pr_err(" POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", + icnss_pr_err("POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", offset, val, rdata, mask); return -EIO; @@ -430,8 +456,8 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val, rdata = readl_relaxed(base + offset); if (retry) - icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", - offset, val, rdata, mask); + icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n", + offset, val, rdata, mask); } @@ -758,6 +784,32 @@ static void icnss_hw_top_level_reset(struct icnss_priv *priv) ICNSS_HW_REG_RETRY); } +static void icnss_hw_io_reset(struct icnss_priv *priv, bool on) +{ + u32 hw_version = priv->soc_info.soc_id; + + if (on && !test_bit(ICNSS_FW_READY, &priv->state)) + return; + + icnss_pr_dbg("HW io reset: %s, SoC: 0x%x, state: 0x%lx\n", + on ? 
"ON" : "OFF", priv->soc_info.soc_id, priv->state); + + if (hw_version == WCSS_HM_A_PMM_HW_VERSION_V10 || + hw_version == WCSS_HM_A_PMM_HW_VERSION_V20) { + icnss_hw_write_reg_field(priv->mpm_config_va, + MPM_WCSSAON_CONFIG_OFFSET, + MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL, 0); + icnss_hw_write_reg_field(priv->mpm_config_va, + MPM_WCSSAON_CONFIG_OFFSET, + MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD, on); + } else if (hw_version == WCSS_HM_A_PMM_HW_VERSION_Q10) { + icnss_hw_write_reg_field(priv->mpm_config_va, + MPM_WCSSAON_CONFIG_OFFSET, + MPM_WCSSAON_CONFIG_M2W_CLAMP_EN, + on); + } +} + int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv) { u32 rdata; @@ -1123,7 +1175,7 @@ static int icnss_hw_power_on(struct icnss_priv *priv) int ret = 0; unsigned long flags; - icnss_pr_dbg("Power on: state: 0x%lx\n", priv->state); + icnss_pr_dbg("HW Power on: state: 0x%lx\n", priv->state); spin_lock_irqsave(&priv->on_off_lock, flags); if (test_bit(ICNSS_POWER_ON, &priv->state)) { @@ -1143,6 +1195,8 @@ static int icnss_hw_power_on(struct icnss_priv *priv) icnss_hw_top_level_release_reset(priv); + icnss_hw_io_reset(penv, 1); + return ret; out: clear_bit(ICNSS_POWER_ON, &priv->state); @@ -1157,7 +1211,7 @@ static int icnss_hw_power_off(struct icnss_priv *priv) if (test_bit(HW_ALWAYS_ON, &quirks)) return 0; - icnss_pr_dbg("Power off: 0x%lx\n", priv->state); + icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state); spin_lock_irqsave(&priv->on_off_lock, flags); if (!test_bit(ICNSS_POWER_ON, &priv->state)) { @@ -1167,6 +1221,8 @@ static int icnss_hw_power_off(struct icnss_priv *priv) clear_bit(ICNSS_POWER_ON, &priv->state); spin_unlock_irqrestore(&priv->on_off_lock, flags); + icnss_hw_io_reset(penv, 0); + icnss_hw_reset(priv); icnss_clk_deinit(priv); @@ -1191,6 +1247,8 @@ int icnss_power_on(struct device *dev) return -EINVAL; } + icnss_pr_dbg("Power On: 0x%lx\n", priv->state); + return icnss_hw_power_on(priv); } EXPORT_SYMBOL(icnss_power_on); @@ -1205,6 +1263,8 @@ int icnss_power_off(struct device *dev) return -EINVAL; } + icnss_pr_dbg("Power Off: 0x%lx\n", priv->state); + return icnss_hw_power_off(priv); } EXPORT_SYMBOL(icnss_power_off); @@ -1235,11 +1295,11 @@ int icnss_map_msa_permissions(struct icnss_priv *priv, u32 index) ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems, dest_vmids, dest_perms, dest_nelems); if (ret) { - icnss_pr_err("region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n", + icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n", index, &addr, size, ret); goto out; } - icnss_pr_dbg("hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n", + icnss_pr_dbg("Hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n", index, source_vmlist[0], dest_nelems, dest_vmids[0], dest_vmids[1], dest_vmids[2]); out: @@ -1271,7 +1331,7 @@ int icnss_unmap_msa_permissions(struct icnss_priv *priv, u32 index) ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems, dest_vmids, dest_perms, dest_nelems); if (ret) { - icnss_pr_err("region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n", + icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n", index, &addr, size, ret); goto out; } @@ -1310,16 +1370,14 @@ static void icnss_remove_msa_permissions(struct icnss_priv *priv) static int wlfw_msa_mem_info_send_sync_msg(void) { - int ret = 0; + int ret; int i; struct wlfw_msa_info_req_msg_v01 req; struct wlfw_msa_info_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv 
|| !penv->wlfw_clnt) { - ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; icnss_pr_dbg("Sending MSA mem info, state: 0x%lx\n", penv->state); @@ -1342,15 +1400,14 @@ static int wlfw_msa_mem_info_send_sync_msg(void) ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req), &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - icnss_pr_err("Send req failed %d\n", ret); + icnss_pr_err("Send MSA Mem info req failed %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", + icnss_pr_err("QMI MSA Mem info request failed %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.msa_info_err++; goto out; } @@ -1361,7 +1418,6 @@ static int wlfw_msa_mem_info_send_sync_msg(void) icnss_pr_err("Invalid memory region length received: %d\n", resp.mem_region_info_len); ret = -EINVAL; - penv->stats.msa_info_err++; goto out; } @@ -1379,7 +1435,11 @@ static int wlfw_msa_mem_info_send_sync_msg(void) penv->icnss_mem_region[i].secure_flag); } + return 0; + out: + penv->stats.msa_info_err++; + ICNSS_ASSERT(false); return ret; } @@ -1390,10 +1450,8 @@ static int wlfw_msa_ready_send_sync_msg(void) struct wlfw_msa_ready_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { - ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; icnss_pr_dbg("Sending MSA ready request message, state: 0x%lx\n", penv->state); @@ -1413,20 +1471,23 @@ static int wlfw_msa_ready_send_sync_msg(void) ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req), &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - penv->stats.msa_ready_err++; - icnss_pr_err("Send req failed %d\n", ret); + icnss_pr_err("Send MSA ready req failed %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", + icnss_pr_err("QMI MSA ready request failed %d %d\n", resp.resp.result, resp.resp.error); - penv->stats.msa_ready_err++; ret = resp.resp.result; goto out; } penv->stats.msa_ready_resp++; + + return 0; + out: + penv->stats.msa_ready_err++; + ICNSS_ASSERT(false); return ret; } @@ -1437,10 +1498,8 @@ static int wlfw_ind_register_send_sync_msg(void) struct wlfw_ind_register_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { - ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; icnss_pr_dbg("Sending indication register message, state: 0x%lx\n", penv->state); @@ -1468,21 +1527,24 @@ static int wlfw_ind_register_send_sync_msg(void) ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req), &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); - penv->stats.ind_register_resp++; if (ret < 0) { - icnss_pr_err("Send req failed %d\n", ret); - penv->stats.ind_register_err++; + icnss_pr_err("Send indication register req failed %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", + icnss_pr_err("QMI indication register request failed %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.ind_register_err++; goto out; } + penv->stats.ind_register_resp++; + + return 0; + out: + penv->stats.ind_register_err++; + ICNSS_ASSERT(false); return ret; } @@ -1493,10 +1555,8 @@ static int wlfw_cap_send_sync_msg(void) struct wlfw_cap_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { 
- ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; icnss_pr_dbg("Sending capability message, state: 0x%lx\n", penv->state); @@ -1515,16 +1575,14 @@ static int wlfw_cap_send_sync_msg(void) &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - icnss_pr_err("Send req failed %d\n", ret); - penv->stats.cap_err++; + icnss_pr_err("Send capability req failed %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", + icnss_pr_err("QMI capability request failed %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.cap_err++; goto out; } @@ -1546,7 +1604,12 @@ static int wlfw_cap_send_sync_msg(void) penv->board_info.board_id, penv->soc_info.soc_id, penv->fw_version_info.fw_version, penv->fw_version_info.fw_build_timestamp); + + return 0; + out: + penv->stats.cap_err++; + ICNSS_ASSERT(false); return ret; } @@ -1557,10 +1620,8 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode) struct wlfw_wlan_mode_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { - ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; /* During recovery do not send mode request for WLAN OFF as * FW not able to process it. @@ -1592,20 +1653,24 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode) &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - icnss_pr_err("Send req failed %d\n", ret); - penv->stats.mode_req_err++; + icnss_pr_err("Send mode req failed, mode: %d ret: %d\n", + mode, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", - resp.resp.result, resp.resp.error); + icnss_pr_err("QMI mode request failed mode: %d, %d %d\n", + mode, resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.mode_req_err++; goto out; } penv->stats.mode_resp++; + + return 0; + out: + penv->stats.mode_req_err++; + ICNSS_ASSERT(false); return ret; } @@ -1616,10 +1681,8 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data) struct wlfw_wlan_cfg_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { + if (!penv || !penv->wlfw_clnt) return -ENODEV; - goto out; - } icnss_pr_dbg("Sending config request, state: 0x%lx\n", penv->state); @@ -1641,20 +1704,23 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data) &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - icnss_pr_err("Send req failed %d\n", ret); - penv->stats.cfg_req_err++; + icnss_pr_err("Send config req failed %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", + icnss_pr_err("QMI config request failed %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.cfg_req_err++; goto out; } penv->stats.cfg_resp++; + + return 0; + out: + penv->stats.cfg_req_err++; + ICNSS_ASSERT(false); return ret; } @@ -1665,10 +1731,8 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log) struct wlfw_ini_resp_msg_v01 resp; struct msg_desc req_desc, resp_desc; - if (!penv || !penv->wlfw_clnt) { - ret = -ENODEV; - goto out; - } + if (!penv || !penv->wlfw_clnt) + return -ENODEV; icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log: %d\n", penv->state, enable_fw_log); @@ -1692,20 +1756,24 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log) ret = 
qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req), &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS); if (ret < 0) { - icnss_pr_err("send req failed %d\n", ret); - penv->stats.ini_req_err++; + icnss_pr_err("Send INI req failed fw_log: %d, ret: %d\n", + enable_fw_log, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI request failed %d %d\n", - resp.resp.result, resp.resp.error); + icnss_pr_err("QMI INI request failed fw_log: %d, %d %d\n", + enable_fw_log, resp.resp.result, resp.resp.error); ret = resp.resp.result; - penv->stats.ini_req_err++; goto out; } penv->stats.ini_resp++; + + return 0; + out: + penv->stats.ini_req_err++; + ICNSS_ASSERT(false); return ret; } @@ -1789,12 +1857,11 @@ static int icnss_driver_event_server_arrive(void *data) goto out; } - ret = qmi_connect_to_service(penv->wlfw_clnt, - WLFW_SERVICE_ID_V01, - WLFW_SERVICE_VERS_V01, - WLFW_SERVICE_INS_ID_V01); + ret = qmi_connect_to_service(penv->wlfw_clnt, WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01); if (ret < 0) { - icnss_pr_err("Server not found : %d\n", ret); + icnss_pr_err("QMI WLAN Service not found : %d\n", ret); goto fail; } @@ -1815,10 +1882,8 @@ static int icnss_driver_event_server_arrive(void *data) goto fail; ret = wlfw_ind_register_send_sync_msg(); - if (ret < 0) { - icnss_pr_err("Failed to send indication message: %d\n", ret); + if (ret < 0) goto err_power_on; - } if (!penv->msa_va) { icnss_pr_err("Invalid MSA address\n"); @@ -1827,27 +1892,21 @@ static int icnss_driver_event_server_arrive(void *data) } ret = wlfw_msa_mem_info_send_sync_msg(); - if (ret < 0) { - icnss_pr_err("Failed to send MSA info: %d\n", ret); + if (ret < 0) goto err_power_on; - } + ret = icnss_setup_msa_permissions(penv); - if (ret < 0) { - icnss_pr_err("Failed to setup msa permissions: %d\n", - ret); + if (ret < 0) goto err_power_on; - } + ret = wlfw_msa_ready_send_sync_msg(); - if (ret < 0) { - icnss_pr_err("Failed to send MSA ready : %d\n", ret); + if (ret < 0) goto err_setup_msa; - } ret = wlfw_cap_send_sync_msg(); - if (ret < 0) { - icnss_pr_err("Failed to get capability: %d\n", ret); + if (ret < 0) goto err_setup_msa; - } + return ret; err_setup_msa: @@ -2680,14 +2739,10 @@ int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config, sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req.shadow_reg_len); ret = wlfw_wlan_cfg_send_sync_msg(&req); - if (ret) { - icnss_pr_err("Failed to send cfg, ret = %d\n", ret); + if (ret) goto out; - } skip: ret = wlfw_wlan_mode_send_sync_msg(mode); - if (ret) - icnss_pr_err("Failed to send mode, ret = %d\n", ret); out: if (test_bit(SKIP_QMI, &quirks)) ret = 0; @@ -3416,6 +3471,8 @@ static int icnss_probe(struct platform_device *pdev) return -EEXIST; } + icnss_pr_dbg("Platform driver probe\n"); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -3515,9 +3572,8 @@ static int icnss_probe(struct platform_device *pdev) } else { priv->smmu_iova_start = res->start; priv->smmu_iova_len = resource_size(res); - icnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n", - &priv->smmu_iova_start, - priv->smmu_iova_len); + icnss_pr_dbg("SMMU IOVA start: %pa, len: %zu\n", + &priv->smmu_iova_start, priv->smmu_iova_len); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -3527,7 +3583,7 @@ static int icnss_probe(struct platform_device *pdev) } else { priv->smmu_iova_ipa_start = res->start; priv->smmu_iova_ipa_len = resource_size(res); - icnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 
%zu\n", + icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zu\n", &priv->smmu_iova_ipa_start, priv->smmu_iova_ipa_len); } @@ -3688,6 +3744,26 @@ static struct platform_driver icnss_driver = { }, }; +#ifdef CONFIG_ICNSS_DEBUG +static void __init icnss_ipc_log_long_context_init(void) +{ + icnss_ipc_log_long_context = ipc_log_context_create(NUM_REG_LOG_PAGES, + "icnss_long", 0); + if (!icnss_ipc_log_long_context) + icnss_pr_err("Unable to create register log context\n"); +} + +static void __exit icnss_ipc_log_long_context_destroy(void) +{ + ipc_log_context_destroy(icnss_ipc_log_long_context); + icnss_ipc_log_long_context = NULL; +} +#else + +static void __init icnss_ipc_log_long_context_init(void) { } +static void __exit icnss_ipc_log_long_context_destroy(void) { } +#endif + static int __init icnss_initialize(void) { icnss_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES, @@ -3695,6 +3771,8 @@ static int __init icnss_initialize(void) if (!icnss_ipc_log_context) icnss_pr_err("Unable to create log context\n"); + icnss_ipc_log_long_context_init(); + return platform_driver_register(&icnss_driver); } @@ -3703,6 +3781,8 @@ static void __exit icnss_exit(void) platform_driver_unregister(&icnss_driver); ipc_log_context_destroy(icnss_ipc_log_context); icnss_ipc_log_context = NULL; + + icnss_ipc_log_long_context_destroy(); } diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c new file mode 100644 index 000000000000..5e187d5df8b3 --- /dev/null +++ b/drivers/soc/qcom/icnss_utils.c @@ -0,0 +1,132 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/slab.h> + +#define ICNSS_MAX_CH_NUM 45 + +static DEFINE_MUTEX(unsafe_channel_list_lock); +static DEFINE_MUTEX(dfs_nol_info_lock); + +static struct icnss_unsafe_channel_list { + u16 unsafe_ch_count; + u16 unsafe_ch_list[ICNSS_MAX_CH_NUM]; +} unsafe_channel_list; + +static struct icnss_dfs_nol_info { + void *dfs_nol_info; + u16 dfs_nol_info_len; +} dfs_nol_info; + +int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count) +{ + mutex_lock(&unsafe_channel_list_lock); + if ((!unsafe_ch_list) || (ch_count > ICNSS_MAX_CH_NUM)) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + unsafe_channel_list.unsafe_ch_count = ch_count; + + if (ch_count != 0) { + memcpy( + (char *)unsafe_channel_list.unsafe_ch_list, + (char *)unsafe_ch_list, ch_count * sizeof(u16)); + } + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(icnss_set_wlan_unsafe_channel); + +int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, + u16 *ch_count, u16 buf_len) +{ + mutex_lock(&unsafe_channel_list_lock); + if (!unsafe_ch_list || !ch_count) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) { + mutex_unlock(&unsafe_channel_list_lock); + return -ENOMEM; + } + + *ch_count = unsafe_channel_list.unsafe_ch_count; + memcpy( + (char *)unsafe_ch_list, + (char *)unsafe_channel_list.unsafe_ch_list, + unsafe_channel_list.unsafe_ch_count * sizeof(u16)); + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(icnss_get_wlan_unsafe_channel); + +int icnss_wlan_set_dfs_nol(const void *info, u16 info_len) +{ + void *temp; + struct icnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } + + temp = kmalloc(info_len, GFP_KERNEL); + if (!temp) { + mutex_unlock(&dfs_nol_info_lock); + return -ENOMEM; + } + + memcpy(temp, info, info_len); + dfs_info = &dfs_nol_info; + kfree(dfs_info->dfs_nol_info); + + dfs_info->dfs_nol_info = temp; + dfs_info->dfs_nol_info_len = info_len; + mutex_unlock(&dfs_nol_info_lock); + + return 0; +} +EXPORT_SYMBOL(icnss_wlan_set_dfs_nol); + +int icnss_wlan_get_dfs_nol(void *info, u16 info_len) +{ + int len; + struct icnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } + + dfs_info = &dfs_nol_info; + + if (dfs_info->dfs_nol_info == NULL || + dfs_info->dfs_nol_info_len == 0) { + mutex_unlock(&dfs_nol_info_lock); + return -ENOENT; + } + + len = min(info_len, dfs_info->dfs_nol_info_len); + + memcpy(info, dfs_info->dfs_nol_info, len); + mutex_unlock(&dfs_nol_info_lock); + + return len; +} +EXPORT_SYMBOL(icnss_wlan_get_dfs_nol); diff --git a/drivers/soc/qcom/irq-helper.c b/drivers/soc/qcom/irq-helper.c index 7bb371f7991e..370801291230 100644 --- a/drivers/soc/qcom/irq-helper.c +++ b/drivers/soc/qcom/irq-helper.c @@ -78,10 +78,12 @@ IRQ_HELPER_ATTR(irq_blacklist_on, 0444, show_deploy, NULL); static struct irq_helper *irq_h; +/* Do not call this API in an atomic context */ int irq_blacklist_on(void) { bool flag = false; + might_sleep(); if (!irq_h) { pr_err("%s: init function is not called", __func__); return -EPERM; @@ -103,10 +105,12 @@ int irq_blacklist_on(void) } EXPORT_SYMBOL(irq_blacklist_on); +/* Do not call this API in an atomic context */ int irq_blacklist_off(void) { bool flag = false; + might_sleep(); if (!irq_h) { pr_err("%s: 
init function is not called", __func__); return -EPERM; diff --git a/drivers/soc/qcom/remoteqdss.c b/drivers/soc/qcom/remoteqdss.c index e66ca587adca..5e2a5babdcc8 100644 --- a/drivers/soc/qcom/remoteqdss.c +++ b/drivers/soc/qcom/remoteqdss.c @@ -28,8 +28,8 @@ static struct dentry *remoteqdss_dir; #define REMOTEQDSS_ERR(fmt, ...) \ pr_debug("%s: " fmt, __func__, ## __VA_ARGS__) -#define REMOTEQDSS_ERR_CALLER(fmt, ...) \ - pr_debug("%pf: " fmt, __builtin_return_address(1), ## __VA_ARGS__) +#define REMOTEQDSS_ERR_CALLER(fmt, caller, ...) \ + pr_debug("%pf: " fmt, caller, ## __VA_ARGS__) struct qdss_msg_translation { u64 val; @@ -97,7 +97,7 @@ struct remoteqdss_query_swentity_fmt { /* msgs is a null terminated array */ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs, - u64 err) + u64 err, const void *caller) { static DEFINE_RATELIMIT_STATE(rl, 5 * HZ, 2); struct qdss_msg_translation *msg; @@ -110,12 +110,13 @@ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs, for (msg = msgs; msg->msg; msg++) { if (err == msg->val && __ratelimit(&rl)) { - REMOTEQDSS_ERR_CALLER("0x%llx: %s\n", err, msg->msg); + REMOTEQDSS_ERR_CALLER("0x%llx: %s\n", caller, err, + msg->msg); return; } } - REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", err); + REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", caller, err); } /* Shared across all remoteqdss scm functions */ @@ -160,7 +161,7 @@ static void free_remoteqdss_data(struct remoteqdss_data *data) } static int remoteqdss_do_scm_call(struct scm_desc *desc, - dma_addr_t addr, size_t size) + dma_addr_t addr, size_t size, const void *caller) { int ret; @@ -175,7 +176,7 @@ static int remoteqdss_do_scm_call(struct scm_desc *desc, if (ret) return ret; - remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0]); + remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0], caller); ret = desc->ret[0] ? 
-EINVAL : 0; return ret; } @@ -194,7 +195,8 @@ static int remoteqdss_scm_query_swtrace(void *priv, u64 *val) fmt->subsys_id = data->id; fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); *val = desc.ret[1]; dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); @@ -216,7 +218,8 @@ static int remoteqdss_scm_filter_swtrace(void *priv, u64 val) fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE; fmt->state = (uint32_t)val; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; @@ -241,7 +244,8 @@ static int remoteqdss_scm_query_tag(void *priv, u64 *val) fmt->subsys_id = data->id; fmt->cmd_id = CMD_ID_QUERY_SWEVENT_TAG; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); *val = desc.ret[1]; dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); @@ -268,7 +272,8 @@ static int remoteqdss_scm_query_swevent(void *priv, u64 *val) fmt->h.cmd_id = CMD_ID_QUERY_SWEVENT; fmt->event_group = data->sw_event_group; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); *val = desc.ret[1]; dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); @@ -291,7 +296,8 @@ static int remoteqdss_scm_filter_swevent(void *priv, u64 val) fmt->event_group = data->sw_event_group; fmt->event_mask = (uint32_t)val; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; @@ -317,7 +323,8 @@ static int remoteqdss_scm_query_swentity(void *priv, u64 *val) fmt->h.cmd_id = CMD_ID_QUERY_SWENTITY; fmt->entity_group = data->sw_entity_group; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); *val = desc.ret[1]; dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); @@ -340,7 +347,8 @@ static int remoteqdss_scm_filter_swentity(void *priv, u64 val) fmt->entity_group = data->sw_entity_group; fmt->entity_mask = (uint32_t)val; - ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt), + __builtin_return_address(0)); dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c index 5eaf2db32d21..03a1591e5b09 100644 --- a/drivers/soc/qcom/rpm-smd.c +++ b/drivers/soc/qcom/rpm-smd.c @@ -795,45 +795,23 @@ static int msm_rpm_read_sleep_ack(void) { int ret; char buf[MAX_ERR_BUFFER_SIZE] = {0}; - uint32_t msg_id; if (glink_enabled) ret = msm_rpm_glink_rx_poll(glink_data->glink_handle); else { ret = msm_rpm_read_smd_data(buf); - if (!ret) { - /* - * Mimic Glink behavior to ensure that the - * data is read and the msg is removed from - * the wait list. We should have gotten here - * only when there are no drivers waiting on - * ACKs. msm_rpm_get_entry_from_msg_id() - * return non-NULL only then. 
- */ - msg_id = msm_rpm_get_msg_id_from_ack(buf); - msm_rpm_process_ack(msg_id, 0); + if (!ret) ret = smd_is_pkt_avail(msm_rpm_data.ch_info); - } } return ret; } -static void msm_rpm_flush_noack_messages(void) -{ - while (!list_empty(&msm_rpm_wait_list)) { - if (!msm_rpm_read_sleep_ack()) - break; - } -} - static int msm_rpm_flush_requests(bool print) { struct rb_node *t; int ret; int count = 0; - msm_rpm_flush_noack_messages(); - for (t = rb_first(&tr_root); t; t = rb_next(t)) { struct slp_buf *s = rb_entry(t, struct slp_buf, node); @@ -1102,18 +1080,14 @@ static void msm_rpm_notify(void *data, unsigned event) bool msm_rpm_waiting_for_ack(void) { - bool ret = false; + bool ret; unsigned long flags; - struct msm_rpm_wait_data *elem = NULL; spin_lock_irqsave(&msm_rpm_list_lock, flags); - elem = list_first_entry_or_null(&msm_rpm_wait_list, - struct msm_rpm_wait_data, list); - if (elem) - ret = !elem->delete_on_ack; + ret = list_empty(&msm_rpm_wait_list); spin_unlock_irqrestore(&msm_rpm_list_lock, flags); - return ret; + return !ret; } static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id) diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 23e32214756a..ea4f557fcd70 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -538,6 +538,9 @@ static struct msm_soc_info cpu_of_id[] = { /* falcon ID */ [317] = {MSM_CPU_FALCON, "MSMFALCON"}, + /* triton ID */ + [318] = {MSM_CPU_TRITON, "MSMTRITON"}, + /* Uninitialized IDs are not known to run Linux. MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are considered as unknown CPU. */ @@ -1207,6 +1210,10 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 317; strlcpy(dummy_socinfo.build_id, "msmfalcon - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_msmtriton()) { + dummy_socinfo.id = 318; + strlcpy(dummy_socinfo.build_id, "msmtriton - ", + sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_apqcobalt()) { dummy_socinfo.id = 319; strlcpy(dummy_socinfo.build_id, "apqcobalt - ", diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c index 0ed8a6533e00..76d941ceb77e 100644 --- a/drivers/soc/qcom/subsystem_restart.c +++ b/drivers/soc/qcom/subsystem_restart.c @@ -746,6 +746,28 @@ static void subsys_stop(struct subsys_device *subsys) notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL); } +int subsystem_set_fwname(const char *name, const char *fw_name) +{ + struct subsys_device *subsys; + + if (!name) + return -EINVAL; + + if (!fw_name) + return -EINVAL; + + subsys = find_subsys(name); + if (!subsys) + return -EINVAL; + + pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name); + strlcpy(subsys->desc->fw_name, fw_name, + sizeof(subsys->desc->fw_name)); + + return 0; +} +EXPORT_SYMBOL(subsystem_set_fwname); + void *__subsystem_get(const char *name, const char *fw_name) { struct subsys_device *subsys; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 0bdb3d76cc8d..9fdb06e08d4b 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -198,13 +198,13 @@ struct msm_port { static void msm_write(struct uart_port *port, unsigned int val, unsigned int off) { - writel_relaxed(val, port->membase + off); + writel_relaxed_no_log(val, port->membase + off); } static unsigned int msm_read(struct uart_port *port, unsigned int off) { - return readl_relaxed(port->membase + off); + return readl_relaxed_no_log(port->membase + off); } /* diff --git 
a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 8189096f95c2..83a265f0211e 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -45,6 +45,7 @@ #include <linux/msm-bus.h> #include <linux/irq.h> #include <linux/extcon.h> +#include <linux/reset.h> #include "power.h" #include "core.h" @@ -159,6 +160,7 @@ struct dwc3_msm { struct clk *utmi_clk_src; struct clk *bus_aggr_clk; struct clk *cfg_ahb_clk; + struct reset_control *core_reset; struct regulator *dwc3_gdsc; struct usb_phy *hs_phy, *ss_phy; @@ -1517,19 +1519,19 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert) clk_disable_unprepare(mdwc->sleep_clk); clk_disable_unprepare(mdwc->core_clk); clk_disable_unprepare(mdwc->iface_clk); - ret = clk_reset(mdwc->core_clk, CLK_RESET_ASSERT); + ret = reset_control_assert(mdwc->core_reset); if (ret) - dev_err(mdwc->dev, "dwc3 core_clk assert failed\n"); + dev_err(mdwc->dev, "dwc3 core_reset assert failed\n"); } else { dev_dbg(mdwc->dev, "block_reset DEASSERT\n"); - ret = clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT); + ret = reset_control_deassert(mdwc->core_reset); + if (ret) + dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n"); ndelay(200); clk_prepare_enable(mdwc->iface_clk); clk_prepare_enable(mdwc->core_clk); clk_prepare_enable(mdwc->sleep_clk); clk_prepare_enable(mdwc->utmi_clk); - if (ret) - dev_err(mdwc->dev, "dwc3 core_clk deassert failed\n"); enable_irq(mdwc->pwr_event_irq); } @@ -2041,11 +2043,16 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) { dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__); dwc3_msm_config_gdsc(mdwc, 1); - - clk_reset(mdwc->core_clk, CLK_RESET_ASSERT); + ret = reset_control_assert(mdwc->core_reset); + if (ret) + dev_err(mdwc->dev, "%s:core_reset assert failed\n", + __func__); /* HW requires a short delay for reset to take place properly */ usleep_range(1000, 1200); - clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT); + ret = reset_control_deassert(mdwc->core_reset); + if (ret) + dev_err(mdwc->dev, "%s:core_reset deassert failed\n", + __func__); clk_prepare_enable(mdwc->sleep_clk); } @@ -2366,6 +2373,17 @@ static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc) mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX); } + mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset"); + if (IS_ERR(mdwc->core_reset)) { + dev_err(mdwc->dev, "failed to get core_reset\n"); + return PTR_ERR(mdwc->core_reset); + } + + /* + * Get Max supported clk frequency for USB Core CLK and request + * to set the same. 
+ */ + mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX); if (IS_ERR_VALUE(mdwc->core_clk_rate)) { dev_err(mdwc->dev, "fail to get core clk max freq.\n"); } else { @@ -3191,7 +3209,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on) static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA) { - union power_supply_propval pval = {1000 * mA}; + union power_supply_propval pval = {0}; int ret; if (mdwc->charging_disabled) @@ -3208,9 +3226,14 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA) } } + power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval); + if (pval.intval != POWER_SUPPLY_TYPE_USB) + return 0; + dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA); /* Set max current limit in uA */ + pval.intval = 1000 * mA; ret = power_supply_set_property(mdwc->usb_psy, POWER_SUPPLY_PROP_CURRENT_MAX, &pval); if (ret) { diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 3d17fd93c787..a99405261306 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -27,6 +27,14 @@ #define SSUSB_GADGET_VBUS_DRAW_UNITS 8 #define HSUSB_GADGET_VBUS_DRAW_UNITS 2 +/* + * Based on enumerated USB speed, draw power with set_config and resume + * HSUSB: 500mA, SSUSB: 900mA + */ +#define USB_VBUS_DRAW(speed)\ + (speed == USB_SPEED_SUPER ?\ + SSUSB_GADGET_VBUS_DRAW : CONFIG_USB_GADGET_VBUS_DRAW) + static bool enable_l1_for_hs; module_param(enable_l1_for_hs, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(enable_l1_for_hs, "Enable support for L1 LPM for HS devices"); @@ -749,7 +757,6 @@ static int set_config(struct usb_composite_dev *cdev, struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c = NULL; int result = -EINVAL; - unsigned power = gadget_is_otg(gadget) ? 8 : 100; int tmp; /* @@ -863,14 +870,8 @@ static int set_config(struct usb_composite_dev *cdev, } } - /* Allow 900mA to draw with Super-Speed */ - if (gadget->speed == USB_SPEED_SUPER) - power = SSUSB_GADGET_VBUS_DRAW; - else - power = CONFIG_USB_GADGET_VBUS_DRAW; - done: - usb_gadget_vbus_draw(gadget, power); + usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed)); if (result >= 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; return result; @@ -2319,7 +2320,6 @@ void composite_resume(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; - u16 maxpower; int ret; unsigned long flags; @@ -2352,10 +2352,7 @@ void composite_resume(struct usb_gadget *gadget) f->resume(f); } - maxpower = cdev->config->MaxPower; - - usb_gadget_vbus_draw(gadget, maxpower ? 
- maxpower : CONFIG_USB_GADGET_VBUS_DRAW); + usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed)); } spin_unlock_irqrestore(&cdev->lock, flags); diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index e32348d17b26..80c9928e948e 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -36,6 +36,7 @@ MODULE_PARM_DESC(num_out_bufs, static struct workqueue_struct *ipa_usb_wq; +static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis); static void ipa_disconnect_handler(struct gsi_data_port *d_port); static int gsi_ctrl_send_notification(struct f_gsi *gsi, enum gsi_ctrl_notify_state); @@ -553,6 +554,7 @@ static void ipa_work_handler(struct work_struct *w) struct device *dev; struct device *gad_dev; struct f_gsi *gsi; + bool block_db; event = read_event(d_port); @@ -665,7 +667,27 @@ static void ipa_work_handler(struct work_struct *w) } break; case STATE_CONNECTED: - if (event == EVT_DISCONNECTED) { + if (event == EVT_DISCONNECTED || event == EVT_HOST_NRDY) { + if (peek_event(d_port) == EVT_HOST_READY) { + read_event(d_port); + log_event_dbg("%s: NO_OP NRDY_RDY", __func__); + break; + } + + if (event == EVT_HOST_NRDY) { + log_event_dbg("%s: ST_CON_HOST_NRDY\n", + __func__); + block_db = true; + /* stop USB ringing doorbell to GSI(OUT_EP) */ + usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, + GSI_EP_OP_SET_CLR_BLOCK_DBL); + gsi_rndis_ipa_reset_trigger(gsi); + usb_gsi_ep_op(d_port->in_ep, NULL, + GSI_EP_OP_ENDXFER); + usb_gsi_ep_op(d_port->out_ep, NULL, + GSI_EP_OP_ENDXFER); + } + ipa_disconnect_work_handler(d_port); d_port->sm_state = STATE_INITIALIZED; usb_gadget_autopm_put_async(d_port->gadget); @@ -1269,7 +1291,7 @@ static void gsi_rndis_open(struct f_gsi *rndis) rndis_signal_connect(rndis->params); } -void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis) +static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis) { unsigned long flags; @@ -1301,12 +1323,11 @@ void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param) d_port = &rndis->d_port; - if (enable) { - gsi_rndis_ipa_reset_trigger(rndis); - usb_gsi_ep_op(d_port->in_ep, NULL, GSI_EP_OP_ENDXFER); - usb_gsi_ep_op(d_port->out_ep, NULL, GSI_EP_OP_ENDXFER); - post_event(d_port, EVT_DISCONNECTED); + if (enable) { + log_event_dbg("%s: posting HOST_NRDY\n", __func__); + post_event(d_port, EVT_HOST_NRDY); } else { + log_event_dbg("%s: posting HOST_READY\n", __func__); post_event(d_port, EVT_HOST_READY); } @@ -1359,6 +1380,12 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi, return -ENODEV; } + if (atomic_inc_return(&gsi->c_port.notify_count) != 1) { + log_event_dbg("delay ep_queue: notify req is busy %d", + atomic_read(&gsi->c_port.notify_count)); + return 0; + } + event = req->buf; switch (state) { @@ -1414,12 +1441,6 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi, log_event_dbg("send Notify type %02x", event->bNotificationType); - if (atomic_inc_return(&gsi->c_port.notify_count) != 1) { - log_event_dbg("delay ep_queue: notify req is busy %d", - atomic_read(&gsi->c_port.notify_count)); - return 0; - } - return queue_notification_request(gsi); } @@ -1469,7 +1490,8 @@ static void gsi_rndis_response_available(void *_rndis) { struct f_gsi *gsi = _rndis; - gsi_ctrl_send_notification(gsi, GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE); + gsi_ctrl_send_notification(gsi, + GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE); } static void gsi_rndis_command_complete(struct usb_ep *ep, @@ -2056,6 +2078,8 @@ static void gsi_suspend(struct usb_function *f) 
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w); } + log_event_dbg("%s: notify_count = %d\n", + __func__, atomic_read(&gsi->c_port.notify_count)); log_event_dbg("gsi suspended"); } @@ -2080,6 +2104,21 @@ static void gsi_resume(struct usb_function *f) else remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup; + if (gsi->c_port.notify && !gsi->c_port.notify->desc) + config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify); + + log_event_dbg("%s: notify_count = %d\n", + __func__, atomic_read(&gsi->c_port.notify_count)); + + /* Send notification to host for RMNET, RNDIS and MBIM Interface */ + if ((gsi->prot_id == IPA_USB_MBIM || + gsi->prot_id == IPA_USB_RNDIS || + gsi->prot_id == IPA_USB_RMNET) && + (atomic_read(&gsi->c_port.notify_count) >= 1)) { + log_event_dbg("%s: force_queue\n", __func__); + queue_notification_request(gsi); + } + if (!remote_wakeup_allowed) { /* Configure EPs for GSI */ @@ -2110,11 +2149,6 @@ static void gsi_resume(struct usb_function *f) post_event(&gsi->d_port, EVT_RESUMED); queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w); - - if (gsi->c_port.notify && !gsi->c_port.notify->desc) - config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify); - - atomic_set(&gsi->c_port.notify_count, 0); log_event_dbg("%s: completed", __func__); } diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 76b25445c6ad..dd73dfe5dcab 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1014,6 +1014,7 @@ struct net_device *gether_setup_name_default(const char *netname) spin_lock_init(&dev->lock); spin_lock_init(&dev->req_lock); INIT_WORK(&dev->work, eth_work); + INIT_WORK(&dev->rx_work, process_rx_w); INIT_LIST_HEAD(&dev->tx_reqs); INIT_LIST_HEAD(&dev->rx_reqs); diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index b011efe189e7..1bb7082be8e6 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -1257,8 +1257,11 @@ static void usbpd_sm(struct work_struct *w) POWER_SUPPLY_PROP_PD_ACTIVE, &val); if (pd->current_pr == PR_SRC) { - regulator_disable(pd->vconn); regulator_disable(pd->vbus); + if (pd->vconn_enabled) { + regulator_disable(pd->vconn); + pd->vconn_enabled = false; + } } if (pd->current_dr == DR_UFP) diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index b222c3c5f7b4..4ecdc350fbd4 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -27,6 +27,7 @@ #include <linux/regulator/machine.h> #include <linux/usb/phy.h> #include <linux/usb/msm_hsusb.h> +#include <linux/reset.h> #define QUSB2PHY_PWR_CTRL1 0x210 #define PWR_CTRL1_POWR_DOWN BIT(0) @@ -76,7 +77,7 @@ struct qusb_phy { struct clk *ref_clk_src; struct clk *ref_clk; struct clk *cfg_ahb_clk; - struct clk *phy_reset; + struct reset_control *phy_reset; struct regulator *vdd; struct regulator *vdda33; @@ -370,14 +371,19 @@ static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, static void qusb_phy_host_init(struct usb_phy *phy) { u8 reg; + int ret; struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); dev_dbg(phy->dev, "%s\n", __func__); /* Perform phy reset */ - clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__); usleep_range(100, 150); - clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + 
dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq, qphy->host_init_seq_len, 0); @@ -411,9 +417,13 @@ static int qusb_phy_init(struct usb_phy *phy) qusb_phy_enable_clocks(qphy, true); /* Perform phy reset */ - clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__); usleep_range(100, 150); - clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); if (qphy->emulation) { if (qphy->emu_init_seq) @@ -531,6 +541,7 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); u32 linestate = 0, intr_mask = 0; + int ret; if (qphy->suspended && suspend) { dev_dbg(phy->dev, "%s: USB PHY is already suspended\n", @@ -578,9 +589,15 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) qusb_phy_enable_clocks(qphy, false); } else { /* Cable disconnect case */ - clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset assert failed\n", + __func__); usleep_range(100, 150); - clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset deassert failed\n", + __func__); /* enable clock bypass */ writel_relaxed(0x90, @@ -622,7 +639,10 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) */ wmb(); - clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset deassert failed\n", + __func__); qusb_phy_enable_power(qphy, true, true); qusb_phy_enable_clocks(qphy, true); @@ -802,7 +822,7 @@ static int qusb_phy_probe(struct platform_device *pdev) } } - qphy->phy_reset = devm_clk_get(dev, "phy_reset"); + qphy->phy_reset = devm_reset_control_get(dev, "phy_reset"); if (IS_ERR(qphy->phy_reset)) return PTR_ERR(qphy->phy_reset); diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c index af7ec03314f5..5456b9e8733b 100644 --- a/drivers/usb/phy/phy-msm-qusb.c +++ b/drivers/usb/phy/phy-msm-qusb.c @@ -27,6 +27,7 @@ #include <linux/regulator/machine.h> #include <linux/usb/phy.h> #include <linux/usb/msm_hsusb.h> +#include <linux/reset.h> #define QUSB2PHY_PLL_STATUS 0x38 #define QUSB2PHY_PLL_LOCK BIT(5) @@ -112,7 +113,7 @@ struct qusb_phy { struct clk *ref_clk_src; struct clk *ref_clk; struct clk *cfg_ahb_clk; - struct clk *phy_reset; + struct reset_control *phy_reset; struct regulator *vdd; struct regulator *vdda33; @@ -443,9 +444,13 @@ static int qusb_phy_init(struct usb_phy *phy) } /* Perform phy reset */ - clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__); usleep_range(100, 150); - clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); if (qphy->emulation) { if (qphy->emu_init_seq) @@ -858,7 +863,7 @@ static int qusb_phy_probe(struct platform_device *pdev) if (IS_ERR(qphy->cfg_ahb_clk)) return PTR_ERR(qphy->cfg_ahb_clk); - qphy->phy_reset = devm_clk_get(dev, "phy_reset"); + qphy->phy_reset = devm_reset_control_get(dev, 
"phy_reset"); if (IS_ERR(qphy->phy_reset)) return PTR_ERR(qphy->phy_reset); @@ -1011,8 +1016,11 @@ static int qusb_phy_probe(struct platform_device *pdev) * not used, there is leakage current seen with QUSB PHY related voltage * rail. Hence keep QUSB PHY into reset state explicitly here. */ - if (hold_phy_reset) - clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); + if (hold_phy_reset) { + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(dev, "%s:phy_reset assert failed\n", __func__); + } ret = usb_add_phy_dev(&qphy->phy); if (ret) diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 87235a5a4f99..fc61e3172d0b 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -25,6 +25,7 @@ #include <linux/usb/msm_hsusb.h> #include <linux/clk.h> #include <linux/clk/msm-clk.h> +#include <linux/reset.h> enum core_ldo_levels { CORE_LEVEL_NONE = 0, @@ -87,9 +88,10 @@ struct msm_ssphy_qmp { struct clk *aux_clk; struct clk *cfg_ahb_clk; struct clk *pipe_clk; - struct clk *phy_reset; - struct clk *phy_phy_reset; bool power_enabled; + struct reset_control *phy_reset; + struct reset_control *phy_phy_reset; + bool clk_enabled; bool cable_connected; bool in_suspend; @@ -371,56 +373,39 @@ static int msm_ssphy_qmp_reset(struct usb_phy *uphy) dev_dbg(uphy->dev, "Resetting QMP phy\n"); /* Assert USB3 PHY reset */ - if (phy->phy_phy_reset) { - ret = clk_reset(phy->phy_phy_reset, CLK_RESET_ASSERT); - if (ret) { - dev_err(uphy->dev, "phy_phy reset assert failed\n"); - goto exit; - } - } else { - ret = clk_reset(phy->pipe_clk, CLK_RESET_ASSERT); - if (ret) { - dev_err(uphy->dev, "pipe_clk reset assert failed\n"); - goto exit; - } + ret = reset_control_assert(phy->phy_phy_reset); + if (ret) { + dev_err(uphy->dev, "phy_phy_reset assert failed\n"); + goto exit; } /* Assert USB3 PHY CSR reset */ - ret = clk_reset(phy->phy_reset, CLK_RESET_ASSERT); + ret = reset_control_assert(phy->phy_reset); if (ret) { - dev_err(uphy->dev, "phy_reset clk assert failed\n"); + dev_err(uphy->dev, "phy_reset assert failed\n"); goto deassert_phy_phy_reset; } /* Deassert USB3 PHY CSR reset */ - ret = clk_reset(phy->phy_reset, CLK_RESET_DEASSERT); + ret = reset_control_deassert(phy->phy_reset); if (ret) { - dev_err(uphy->dev, "phy_reset clk deassert failed\n"); + dev_err(uphy->dev, "phy_reset deassert failed\n"); goto deassert_phy_phy_reset; } /* Deassert USB3 PHY reset */ - if (phy->phy_phy_reset) { - ret = clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT); - if (ret) { - dev_err(uphy->dev, "phy_phy reset deassert failed\n"); - goto exit; - } - } else { - ret = clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT); - if (ret) { - dev_err(uphy->dev, "pipe_clk reset deassert failed\n"); - goto exit; - } + ret = reset_control_deassert(phy->phy_phy_reset); + if (ret) { + dev_err(uphy->dev, "phy_phy_reset deassert failed\n"); + goto exit; } return 0; deassert_phy_phy_reset: - if (phy->phy_phy_reset) - clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT); - else - clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT); + ret = reset_control_deassert(phy->phy_phy_reset); + if (ret) + dev_err(uphy->dev, "phy_phy_reset deassert failed\n"); exit: phy->in_suspend = false; @@ -594,26 +579,18 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) goto err; } - if (of_property_match_string(pdev->dev.of_node, - "clock-names", "phy_reset") >= 0) { - phy->phy_reset = clk_get(&pdev->dev, "phy_reset"); - if (IS_ERR(phy->phy_reset)) { - ret = PTR_ERR(phy->phy_reset); - phy->phy_reset = 
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 87235a5a4f99..fc61e3172d0b 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -25,6 +25,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/clk.h>
 #include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
 
 enum core_ldo_levels {
 	CORE_LEVEL_NONE = 0,
@@ -87,9 +88,10 @@ struct msm_ssphy_qmp {
 	struct clk		*aux_clk;
 	struct clk		*cfg_ahb_clk;
 	struct clk		*pipe_clk;
-	struct clk		*phy_reset;
-	struct clk		*phy_phy_reset;
 	bool			power_enabled;
+	struct reset_control	*phy_reset;
+	struct reset_control	*phy_phy_reset;
+
 	bool			clk_enabled;
 	bool			cable_connected;
 	bool			in_suspend;
@@ -371,56 +373,39 @@ static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
 	dev_dbg(uphy->dev, "Resetting QMP phy\n");
 
 	/* Assert USB3 PHY reset */
-	if (phy->phy_phy_reset) {
-		ret = clk_reset(phy->phy_phy_reset, CLK_RESET_ASSERT);
-		if (ret) {
-			dev_err(uphy->dev, "phy_phy reset assert failed\n");
-			goto exit;
-		}
-	} else {
-		ret = clk_reset(phy->pipe_clk, CLK_RESET_ASSERT);
-		if (ret) {
-			dev_err(uphy->dev, "pipe_clk reset assert failed\n");
-			goto exit;
-		}
+	ret = reset_control_assert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset assert failed\n");
+		goto exit;
 	}
 
 	/* Assert USB3 PHY CSR reset */
-	ret = clk_reset(phy->phy_reset, CLK_RESET_ASSERT);
+	ret = reset_control_assert(phy->phy_reset);
 	if (ret) {
-		dev_err(uphy->dev, "phy_reset clk assert failed\n");
+		dev_err(uphy->dev, "phy_reset assert failed\n");
 		goto deassert_phy_phy_reset;
 	}
 
 	/* Deassert USB3 PHY CSR reset */
-	ret = clk_reset(phy->phy_reset, CLK_RESET_DEASSERT);
+	ret = reset_control_deassert(phy->phy_reset);
 	if (ret) {
-		dev_err(uphy->dev, "phy_reset clk deassert failed\n");
+		dev_err(uphy->dev, "phy_reset deassert failed\n");
 		goto deassert_phy_phy_reset;
 	}
 
 	/* Deassert USB3 PHY reset */
-	if (phy->phy_phy_reset) {
-		ret = clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT);
-		if (ret) {
-			dev_err(uphy->dev, "phy_phy reset deassert failed\n");
-			goto exit;
-		}
-	} else {
-		ret = clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT);
-		if (ret) {
-			dev_err(uphy->dev, "pipe_clk reset deassert failed\n");
-			goto exit;
-		}
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+		goto exit;
 	}
 
 	return 0;
 
 deassert_phy_phy_reset:
-	if (phy->phy_phy_reset)
-		clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT);
-	else
-		clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT);
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret)
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
 
 exit:
 	phy->in_suspend = false;
@@ -594,26 +579,18 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	if (of_property_match_string(pdev->dev.of_node,
-		"clock-names", "phy_reset") >= 0) {
-		phy->phy_reset = clk_get(&pdev->dev, "phy_reset");
-		if (IS_ERR(phy->phy_reset)) {
-			ret = PTR_ERR(phy->phy_reset);
-			phy->phy_reset = NULL;
-			dev_dbg(dev, "failed to get phy_reset\n");
-			goto err;
-		}
+	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(phy->phy_reset)) {
+		ret = PTR_ERR(phy->phy_reset);
+		dev_dbg(dev, "failed to get phy_reset\n");
+		goto err;
 	}
 
-	if (of_property_match_string(pdev->dev.of_node,
-		"clock-names", "phy_phy_reset") >= 0) {
-		phy->phy_phy_reset = clk_get(dev, "phy_phy_reset");
-		if (IS_ERR(phy->phy_phy_reset)) {
-			ret = PTR_ERR(phy->phy_phy_reset);
-			phy->phy_phy_reset = NULL;
-			dev_dbg(dev, "phy_phy_reset unavailable\n");
-			goto err;
-		}
+	phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
+	if (IS_ERR(phy->phy_phy_reset)) {
+		ret = PTR_ERR(phy->phy_phy_reset);
+		dev_dbg(dev, "failed to get phy_phy_reset\n");
+		goto err;
 	}
 
 	of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
index 8061d8e6b9fb..75b2f0b18522 100644
--- a/drivers/video/adf/adf_client.c
+++ b/drivers/video/adf/adf_client.c
@@ -305,8 +305,10 @@ static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
 	}
 
 done:
-	if (ret < 0)
+	if (ret < 0) {
 		adf_buffer_mapping_cleanup(mapping, buf);
+		memset(mapping, 0, sizeof(*mapping));
+	}
 
 	return ret;
 }
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index c8b415df4bce..f25a6e185051 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1434,10 +1434,54 @@ static ssize_t mdss_dp_rda_connected(struct device *dev,
 	return ret;
 }
 
+static ssize_t mdss_dp_sysfs_wta_s3d_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret, s3d_mode;
+	struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+
+	if (!dp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+	ret = kstrtoint(buf, 10, &s3d_mode);
+	if (ret) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	dp->s3d_mode = s3d_mode;
+	ret = strnlen(buf, PAGE_SIZE);
+	DEV_DBG("%s: %d\n", __func__, dp->s3d_mode);
+end:
+	return ret;
+}
+
+static ssize_t mdss_dp_sysfs_rda_s3d_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+
+	if (!dp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", dp->s3d_mode);
+	DEV_DBG("%s: '%d'\n", __func__, dp->s3d_mode);
+
+	return ret;
+}
+
 static DEVICE_ATTR(connected, S_IRUGO, mdss_dp_rda_connected, NULL);
+static DEVICE_ATTR(s3d_mode, S_IRUGO | S_IWUSR, mdss_dp_sysfs_rda_s3d_mode,
+	mdss_dp_sysfs_wta_s3d_mode);
 
 static struct attribute *mdss_dp_fs_attrs[] = {
 	&dev_attr_connected.attr,
+	&dev_attr_s3d_mode.attr,
 	NULL,
 };
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index b724aa655424..9a8534677c5e 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -404,6 +404,7 @@ struct mdss_dp_drv_pdata {
 	struct mutex pd_msg_mutex;
 	struct mutex hdcp_mutex;
 	bool cable_connected;
+	u32 s3d_mode;
 	u32 aux_cmd_busy;
 	u32 aux_cmd_i2c;
 	int aux_trans_num;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index fe6ce30d0c89..e8d68059581f 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -27,7 +27,6 @@
 #include "mdss_dba_utils.h"
 
 #define DT_CMD_HDR 6
-#define MIN_REFRESH_RATE 48
 #define DEFAULT_MDP_TRANSFER_TIME 14000
 
 #define VSYNC_DELAY msecs_to_jiffies(17)
@@ -1907,10 +1906,10 @@ static int mdss_dsi_set_refresh_rate_range(struct device_node *pan_node,
 			__func__, __LINE__);
 
 		/*
-		 * Since min refresh rate is not specified when dynamic
-		 * fps is enabled, using minimum as 30
+		 * If min refresh rate is not specified, set it to the
+		 * default panel refresh rate.
 		 */
-		pinfo->min_fps = MIN_REFRESH_RATE;
+		pinfo->min_fps = pinfo->mipi.frame_rate;
 		rc = 0;
 	}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index f35156a2cfcb..cd842cecc945 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1364,6 +1364,7 @@ static void mdss_mdp_memory_retention_enter(void)
 		}
 	}
 
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
 	if (mdss_mdp_clk) {
 		clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
 		clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
@@ -1375,6 +1376,7 @@ static void mdss_mdp_memory_retention_enter(void)
 		clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
 		clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_PERIPH);
 	}
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
 }
 
 static void mdss_mdp_memory_retention_exit(void)
@@ -1396,7 +1398,7 @@ static void mdss_mdp_memory_retention_exit(void)
 		}
 	}
 
-
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
 	if (mdss_mdp_clk) {
 		clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
 		clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
@@ -1408,6 +1410,7 @@ static void mdss_mdp_memory_retention_exit(void)
 		clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
 		clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_CLEAR);
 	}
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
 }
 
 /**
