Diffstat (limited to 'drivers/scsi/ufs/ufs-qcom.c')
| -rw-r--r-- | drivers/scsi/ufs/ufs-qcom.c | 1717 |
1 file changed, 1513 insertions, 204 deletions
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 2b779a55f699..e1509b2bad19 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2017, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,15 +14,33 @@ #include <linux/time.h> #include <linux/of.h> +#include <linux/iopoll.h> #include <linux/platform_device.h> -#include <linux/phy/phy.h> +#ifdef CONFIG_QCOM_BUS_SCALING +#include <linux/msm-bus.h> +#endif + +#include <soc/qcom/scm.h> +#include <linux/phy/phy.h> #include <linux/phy/phy-qcom-ufs.h> + #include "ufshcd.h" #include "ufshcd-pltfrm.h" #include "unipro.h" #include "ufs-qcom.h" #include "ufshci.h" +#include "ufs_quirks.h" +#include "ufs-qcom-ice.h" +#include "ufs-qcom-debugfs.h" +#include <linux/clk/msm-clk.h> + +#define MAX_PROP_SIZE 32 +#define VDDP_REF_CLK_MIN_UV 1200000 +#define VDDP_REF_CLK_MAX_UV 1200000 +/* TODO: further tuning for this parameter may be required */ +#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */ + #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) @@ -44,18 +62,24 @@ enum { static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); +static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg); static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, u32 clk_cycles); +static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host); static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, char *prefix) { print_hex_dump(KERN_ERR, prefix, len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, - 16, 4, (void __force *)hba->mmio_base + offset, - len * 4, false); + 16, 4, hba->mmio_base + offset, len * 4, false); +} + +static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, + char *prefix, void *priv) +{ + ufs_qcom_dump_regs(hba, offset, len, prefix); } static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) @@ -78,13 +102,10 @@ static int ufs_qcom_host_clk_get(struct device *dev, int err = 0; clk = devm_clk_get(dev, name); - if (IS_ERR(clk)) { + if (IS_ERR(clk)) err = PTR_ERR(clk); - dev_err(dev, "%s: failed to get %s err %d", - __func__, name, err); - } else { + else *clk_out = clk; - } return err; } @@ -106,9 +127,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) if (!host->is_lane_clks_enabled) return; - clk_disable_unprepare(host->tx_l1_sync_clk); + if (host->tx_l1_sync_clk) + clk_disable_unprepare(host->tx_l1_sync_clk); clk_disable_unprepare(host->tx_l0_sync_clk); - clk_disable_unprepare(host->rx_l1_sync_clk); + if (host->rx_l1_sync_clk) + clk_disable_unprepare(host->rx_l1_sync_clk); clk_disable_unprepare(host->rx_l0_sync_clk); host->is_lane_clks_enabled = false; @@ -132,21 +155,20 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) if (err) goto disable_rx_l0; - err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", - host->rx_l1_sync_clk); - if (err) - goto disable_tx_l0; - - err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", - host->tx_l1_sync_clk); - if (err) - goto disable_rx_l1; + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", + host->rx_l1_sync_clk); + if (err) + goto disable_tx_l0; + /* The tx lane1 clk could be muxed, hence keep this optional */ + if (host->tx_l1_sync_clk) + ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", + host->tx_l1_sync_clk); + } host->is_lane_clks_enabled = true; goto out; -disable_rx_l1: - clk_disable_unprepare(host->rx_l1_sync_clk); disable_tx_l0: clk_disable_unprepare(host->tx_l0_sync_clk); disable_rx_l0: @@ -162,42 +184,34 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk", &host->rx_l0_sync_clk); - if (err) + if (err) { + dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d", + __func__, err); goto out; + } err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk", &host->tx_l0_sync_clk); - if (err) - goto out; - - err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", - &host->rx_l1_sync_clk); - if (err) - goto out; - - err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", - &host->tx_l1_sync_clk); - -out: - return err; -} - -static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; - u32 tx_lanes; - int err = 0; - - err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); - if (err) + if (err) { + dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d", + __func__, err); goto out; + } - err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); - if (err) - dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", - __func__); + /* In case of single lane per direction, don't read lane1 clocks */ + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", + &host->rx_l1_sync_clk); + if (err) { + dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d", + __func__, err); + goto out; + } + /* The tx lane1 clk could be muxed, hence keep 
this optional */ + ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", + &host->tx_l1_sync_clk); + } out: return err; } @@ -267,9 +281,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); if (ret) { - dev_err(hba->dev, - "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n", - __func__, ret); + dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", + __func__, ret); goto out; } @@ -290,8 +303,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) ret = ufs_qcom_phy_is_pcs_ready(phy); if (ret) - dev_err(hba->dev, - "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", + dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", __func__, ret); ufs_qcom_select_unipro_mode(host); @@ -307,15 +319,65 @@ out: * in a specific operation, UTP controller CGCs are by default disabled and * this function enables them (after every UFS link startup) to save some power * leakage. + * + * UFS host controller v3.0.0 onwards has internal clock gating mechanism + * in Qunipro, enable them to save additional power. */ -static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) +static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + /* Enable UTP internal clock gating */ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ mb(); + + /* Enable Qunipro internal clock gating if supported */ + if (!ufs_qcom_cap_qunipro_clk_gating(host)) + goto out; + + /* Enable all the mask bits */ + err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK, + DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK, + PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL); +out: + return err; +} + +static void ufs_qcom_force_mem_config(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + + /* + * Configure the behavior of ufs clocks core and peripheral + * memory state when they are turned off. + * This configuration is required to allow retaining + * ICE crypto configuration (including keys) when + * core_clk_ice is turned off, and powering down + * non-ICE RAMs of host controller. + */ + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core_clk_ice")) + clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM); + else + clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM); + clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH); + clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR); + } } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -326,6 +388,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: + ufs_qcom_force_mem_config(hba); ufs_qcom_power_up_sequence(hba); /* * The PHY PLL output is the source of tx/rx lane symbol @@ -333,12 +396,19 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * is initialized. 
*/ err = ufs_qcom_enable_lane_clks(host); + if (!err && host->ice.pdev) { + err = ufs_qcom_ice_init(host); + if (err) { + dev_err(hba->dev, "%s: ICE init failed (%d)\n", + __func__, err); + err = -EINVAL; + } + } + break; case POST_CHANGE: /* check if UFS PHY moved from DISABLED to HIBERN8 */ err = ufs_qcom_check_hibern8(hba); - ufs_qcom_enable_hw_clk_gating(hba); - break; default: dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); @@ -351,8 +421,9 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, /** * Returns zero for success and non-zero in case of a failure */ -static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, - u32 hs, u32 rate, bool update_link_startup_timer) +static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, + u32 hs, u32 rate, bool update_link_startup_timer, + bool is_pre_scale_up) { int ret = 0; struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -386,9 +457,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG & * UFS_REG_PA_LINK_STARTUP_TIMER * But UTP controller uses SYS1CLK_1US_REG register for Interrupt - * Aggregation logic. + * Aggregation / Auto hibern8 logic. */ - if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) + if (ufs_qcom_cap_qunipro(host) && + (!(ufshcd_is_intr_aggr_allowed(hba) || + ufshcd_is_auto_hibern8_supported(hba)))) goto out; if (gear == 0) { @@ -397,8 +470,12 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, } list_for_each_entry(clki, &hba->clk_list_head, list) { - if (!strcmp(clki->name, "core_clk")) - core_clk_rate = clk_get_rate(clki->clk); + if (!strcmp(clki->name, "core_clk")) { + if (is_pre_scale_up) + core_clk_rate = clki->max_freq; + else + core_clk_rate = clk_get_rate(clki->clk); + } } /* If frequency is smaller than 1MHz, set to 1MHz */ @@ -495,70 +572,247 @@ out: return ret; } +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, + u32 hs, u32 rate, bool update_link_startup_timer) +{ + return __ufs_qcom_cfg_timers(hba, gear, hs, rate, + update_link_startup_timer, false); +} + +static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + u32 unipro_ver; + int err = 0; + + if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", + __func__); + err = -EINVAL; + goto out; + } + + /* make sure RX LineCfg is enabled before link startup */ + err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true); + if (err) + goto out; + + if (ufs_qcom_cap_qunipro(host)) { + /* + * set unipro core clock cycles to 150 & clear clock divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); + if (err) + goto out; + } + + err = ufs_qcom_enable_hw_clk_gating(hba); + if (err) + goto out; + + /* + * Some UFS devices (and may be host) have issues if LCC is + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. 
+ */ + unipro_ver = ufshcd_get_local_unipro_ver(hba); + if (unipro_ver != UFS_UNIPRO_VER_1_41) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), + 0); + if (err) + goto out; + + if (!ufs_qcom_cap_qunipro_clk_gating(host)) + goto out; + + /* Enable all the mask bits */ + err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK, + SAVECONFIGTIME_MODE_MASK, + PA_VS_CONFIG_REG1); +out: + return err; +} + +static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + u32 tx_lanes; + int err = 0; + + err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); + if (err) + goto out; + + err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); + if (err) { + dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", + __func__); + goto out; + } + + /* + * Some UFS devices send incorrect LineCfg data as part of power mode + * change sequence which may cause host PHY to go into bad state. + * Disabling Rx LineCfg of host PHY should help avoid this. + */ + if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41) + err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false); + if (err) { + dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n", + __func__); + goto out; + } + + /* + * UFS controller has *clk_req output to GCC, for each one if the clocks + * entering it. When *clk_req for a specific clock is de-asserted, + * a corresponding clock from GCC is stopped. UFS controller de-asserts + * *clk_req outputs when it is in Auto Hibernate state only if the + * Clock request feature is enabled. + * Enable the Clock request feature: + * - Enable HW clock control for UFS clocks in GCC (handled by the + * clock driver as part of clk_prepare_enable). + * - Set the AH8_CFG.*CLK_REQ register bits to 1. + */ + if (ufshcd_is_auto_hibern8_supported(hba)) + ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) | + UFS_HW_CLK_CTRL_EN, + UFS_AH8_CFG); + /* + * Make sure clock request feature gets enabled for HW clk gating + * before further operations. + */ + mb(); + +out: + return err; +} + static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { int err = 0; - struct ufs_qcom_host *host = ufshcd_get_variant(hba); switch (status) { case PRE_CHANGE: - if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, - 0, true)) { - dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", - __func__); - err = -EINVAL; - goto out; - } - - if (ufs_qcom_cap_qunipro(host)) - /* - * set unipro core clock cycles to 150 & clear clock - * divider - */ - err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, - 150); - + err = ufs_qcom_link_startup_pre_change(hba); break; case POST_CHANGE: - ufs_qcom_link_startup_post_change(hba); + err = ufs_qcom_link_startup_post_change(hba); break; default: break; } -out: return err; } -static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) + +static int ufs_qcom_config_vreg(struct device *dev, + struct ufs_vreg *vreg, bool on) { - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; int ret = 0; + struct regulator *reg; + int min_uV, uA_load; - if (ufs_qcom_is_link_off(hba)) { - /* - * Disable the tx/rx lane symbol clocks before PHY is - * powered down as the PLL source should be disabled - * after downstream clocks are disabled. 
- */ - ufs_qcom_disable_lane_clks(host); - phy_power_off(phy); - - /* Assert PHY soft reset */ - ufs_qcom_assert_reset(hba); + if (!vreg) { + WARN_ON(1); + ret = -EINVAL; goto out; } + reg = vreg->reg; + if (regulator_count_voltages(reg) > 0) { + uA_load = on ? vreg->max_uA : 0; + ret = regulator_set_load(vreg->reg, uA_load); + if (ret) + goto out; + + min_uV = on ? vreg->min_uV : 0; + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); + if (ret) { + dev_err(dev, "%s: %s set voltage failed, err=%d\n", + __func__, vreg->name, ret); + goto out; + } + } +out: + return ret; +} + +static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (vreg->enabled) + return ret; + + ret = ufs_qcom_config_vreg(dev, vreg, true); + if (ret) + goto out; + + ret = regulator_enable(vreg->reg); + if (ret) + goto out; + + vreg->enabled = true; +out: + return ret; +} + +static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg->enabled) + return ret; + + ret = regulator_disable(vreg->reg); + if (ret) + goto out; + + ret = ufs_qcom_config_vreg(dev, vreg, false); + if (ret) + goto out; + + vreg->enabled = false; +out: + return ret; +} + +static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int ret = 0; + /* - * If UniPro link is not active, PHY ref_clk, main PHY analog power - * rail and low noise analog power rail for PLL can be switched off. + * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog + * power rail and low noise analog power rail for PLL can be + * switched off. */ if (!ufs_qcom_is_link_active(hba)) { ufs_qcom_disable_lane_clks(host); phy_power_off(phy); + + if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba)) + ret = ufs_qcom_disable_vreg(hba->dev, + host->vddp_ref_clk); + ufs_qcom_ice_suspend(host); + + if (ufs_qcom_is_link_off(hba)) { + /* Assert PHY soft reset */ + ufs_qcom_assert_reset(hba); + goto out; + } } + /* Unvote PM QoS */ + ufs_qcom_pm_qos_suspend(host); out: return ret; @@ -577,16 +831,146 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; } + if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 || + hba->spm_lvl > UFS_PM_LVL_3)) + ufs_qcom_enable_vreg(hba->dev, + host->vddp_ref_clk); + err = ufs_qcom_enable_lane_clks(host); if (err) goto out; + err = ufs_qcom_ice_resume(host); + if (err) { + dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n", + __func__, err); + goto out; + } + hba->is_sys_suspended = false; out: return err; } +static int ufs_qcom_full_reset(struct ufs_hba *hba) +{ + int ret = -ENOTSUPP; + + if (!hba->core_reset) { + dev_err(hba->dev, "%s: failed, err = %d\n", __func__, + ret); + goto out; + } + + ret = reset_control_assert(hba->core_reset); + if (ret) { + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. 
+ */ + usleep_range(200, 210); + + ret = reset_control_deassert(hba->core_reset); + if (ret) + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + +out: + return ret; +} + +#ifdef CONFIG_SCSI_UFS_QCOM_ICE +static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct request *req; + int ret; + + if (lrbp->cmd && lrbp->cmd->request) + req = lrbp->cmd->request; + else + return 0; + + /* Use request LBA as the DUN value */ + if (req->bio) + *dun = (req->bio->bi_iter.bi_sector) >> + UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; + + ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable); + + return ret; +} + +static +int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + int err = 0; + + if (!host->ice.pdev || + !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI) + goto out; + + err = ufs_qcom_ice_cfg_start(host, lrbp->cmd); +out: + return err; +} + +static +int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, struct request *req) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI) + goto out; + + err = ufs_qcom_ice_cfg_end(host, req); +out: + return err; +} + +static +int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + if (!host->ice.pdev) + goto out; + + err = ufs_qcom_ice_reset(host); +out: + return err; +} + +static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!status) + return -EINVAL; + + return ufs_qcom_ice_get_status(host, status); +} +#else /* !CONFIG_SCSI_UFS_QCOM_ICE */ +#define ufs_qcom_crypto_req_setup NULL +#define ufs_qcom_crytpo_engine_cfg_start NULL +#define ufs_qcom_crytpo_engine_cfg_end NULL +#define ufs_qcom_crytpo_engine_reset NULL +#define ufs_qcom_crypto_engine_get_status NULL +#endif /* CONFIG_SCSI_UFS_QCOM_ICE */ + struct ufs_qcom_dev_params { u32 pwm_rx_gear; /* pwm rx gear to work in */ u32 pwm_tx_gear; /* pwm tx gear to work in */ @@ -685,7 +1069,7 @@ static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, return 0; } -#ifdef CONFIG_MSM_BUS_SCALING +#ifdef CONFIG_QCOM_BUS_SCALING static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, const char *speed_mode) { @@ -739,7 +1123,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) } } -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) +static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) { int err = 0; @@ -770,7 +1154,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) vote = ufs_qcom_get_bus_vote(host, mode); if (vote >= 0) - err = ufs_qcom_set_bus_vote(host, vote); + err = __ufs_qcom_set_bus_vote(host, vote); else err = vote; @@ -781,6 +1165,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) return err; } +static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int vote, err; + + /* + * In case ufs_qcom_init() is not yet done, simply ignore. + * This ufs_qcom_set_bus_vote() shall be called from + * ufs_qcom_init() after init is done. 
+ */ + if (!host) + return 0; + + if (on) { + vote = host->bus_vote.saved_vote; + if (vote == host->bus_vote.min_bw_vote) + ufs_qcom_update_bus_bw_vote(host); + } else { + vote = host->bus_vote.min_bw_vote; + } + + err = __ufs_qcom_set_bus_vote(host, vote); + if (err) + dev_err(hba->dev, "%s: set bus vote failed %d\n", + __func__, err); + + return err; +} + static ssize_t show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, char *buf) @@ -851,13 +1264,13 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host) out: return err; } -#else /* CONFIG_MSM_BUS_SCALING */ +#else /* CONFIG_QCOM_BUS_SCALING */ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) { return 0; } -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) +static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on) { return 0; } @@ -866,7 +1279,10 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host) { return 0; } -#endif /* CONFIG_MSM_BUS_SCALING */ +static inline void msm_bus_scale_unregister_client(uint32_t cl) +{ +} +#endif /* CONFIG_QCOM_BUS_SCALING */ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) { @@ -953,6 +1369,18 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, ufs_qcom_cap.hs_rx_gear = UFS_HS_G2; } + /* + * Platforms using QRBTCv2 phy must limit link to PWM Gear-1 + * and SLOW mode to successfully bring up the link. + */ + if (!strcmp(ufs_qcom_phy_name(phy), "ufs_phy_qrbtc_v2")) { + ufs_qcom_cap.tx_lanes = 1; + ufs_qcom_cap.rx_lanes = 1; + ufs_qcom_cap.pwm_rx_gear = UFS_PWM_G1; + ufs_qcom_cap.pwm_tx_gear = UFS_PWM_G1; + ufs_qcom_cap.desired_working_mode = SLOW; + } + ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap, dev_max_params, dev_req_params); @@ -962,6 +1390,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, goto out; } + /* enable the device ref clock before changing to HS mode */ + if (!ufshcd_is_hs_mode(&hba->pwr_info) && + ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, true); break; case POST_CHANGE: if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, @@ -989,6 +1421,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); ufs_qcom_update_bus_bw_vote(host); + + /* disable the device ref clock if entered PWM mode */ + if (ufshcd_is_hs_mode(&hba->pwr_info) && + !ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, false); break; default: ret = -EINVAL; @@ -998,6 +1435,34 @@ out: return ret; } +static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) +{ + int err; + u32 pa_vs_config_reg1; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + &pa_vs_config_reg1); + if (err) + goto out; + + /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + (pa_vs_config_reg1 | (1 << 12))); + +out: + return err; +} + +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) +{ + int err = 0; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) + err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + + return err; +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -1021,12 +1486,12 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - if (host->hw_ver.major == 0x01) { - hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS - | 
UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP - | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE; + if (host->hw_ver.major == 0x1) { + hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS + | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP + | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE); - if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001) + if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001) hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; @@ -1041,34 +1506,59 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP); } + + if (host->disable_lpm) + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; } static void ufs_qcom_set_caps(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; - hba->caps |= UFSHCD_CAP_CLK_SCALING; + if (!host->disable_lpm) { + hba->caps |= UFSHCD_CAP_CLK_GATING; + hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + hba->caps |= UFSHCD_CAP_CLK_SCALING; + } hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; if (host->hw_ver.major >= 0x2) { + if (!host->disable_lpm) + hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8; host->caps = UFS_QCOM_CAP_QUNIPRO | UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE; } + if (host->hw_ver.major >= 0x3) { + host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING; + /* + * The UFS PHY attached to v3.0.0 controller supports entering + * deeper low power state of SVS2. This lets the controller + * run at much lower clock frequencies for saving power. + * Assuming this and any future revisions of the controller + * support this capability. Need to revist this assumption if + * any future platform with this core doesn't support the + * capability, as there will be no benefit running at lower + * frequencies then. + */ + host->caps |= UFS_QCOM_CAP_SVS2; + } } /** * ufs_qcom_setup_clocks - enables/disable clocks * @hba: host controller instance * @on: If true, enable clocks else disable them. + * @is_gating_context: If true then it means this function is called from + * aggressive clock gating context and we may only need to gate off important + * clocks. If false then make sure to gate off all clocks. * * Returns 0 on success, non-zero on failure. */ -static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) +static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, + bool is_gating_context) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; - int vote = 0; /* * In case ufs_qcom_init() is not yet done, simply ignore. @@ -1090,30 +1580,426 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) ufs_qcom_phy_disable_iface_clk(host->generic_phy); goto out; } - vote = host->bus_vote.saved_vote; - if (vote == host->bus_vote.min_bw_vote) - ufs_qcom_update_bus_bw_vote(host); + /* enable the device ref clock for HS mode*/ + if (ufshcd_is_hs_mode(&hba->pwr_info)) + ufs_qcom_dev_ref_clk_ctrl(host, true); + err = ufs_qcom_ice_resume(host); + if (err) + goto out; } else { + err = ufs_qcom_ice_suspend(host); + if (err) + goto out; /* M-PHY RMMI interface clocks can be turned off */ ufs_qcom_phy_disable_iface_clk(host->generic_phy); - if (!ufs_qcom_is_link_active(hba)) + /* + * If auto hibern8 is supported then the link will already + * be in hibern8 state and the ref clock can be gated. 
+ */ + if (ufshcd_is_auto_hibern8_supported(hba) || + !ufs_qcom_is_link_active(hba)) { + /* turn off UFS local PHY ref_clk */ + ufs_qcom_phy_disable_ref_clk(host->generic_phy); /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); - - vote = host->bus_vote.min_bw_vote; + } } - err = ufs_qcom_set_bus_vote(host, vote); - if (err) - dev_err(hba->dev, "%s: set bus vote failed %d\n", - __func__, err); - out: return err; } +#ifdef CONFIG_SMP /* CONFIG_SMP */ +static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu) +{ + int i; + + if (cpu >= 0 && cpu < num_possible_cpus()) + for (i = 0; i < host->pm_qos.num_groups; i++) + if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask)) + return i; + + return host->pm_qos.default_cpu; +} + +static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req) +{ + unsigned long flags; + struct ufs_qcom_host *host; + struct ufs_qcom_pm_qos_cpu_group *group; + + if (!hba || !req) + return; + + host = ufshcd_get_variant(hba); + if (!host->pm_qos.groups) + return; + + group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)]; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!host->pm_qos.is_enabled) + goto out; + + group->active_reqs++; + if (group->state != PM_QOS_REQ_VOTE && + group->state != PM_QOS_VOTED) { + group->state = PM_QOS_REQ_VOTE; + queue_work(host->pm_qos.workq, &group->vote_work); + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +/* hba->host->host_lock is assumed to be held by caller */ +static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu) +{ + struct ufs_qcom_pm_qos_cpu_group *group; + + if (!host->pm_qos.groups || !host->pm_qos.is_enabled) + return; + + group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)]; + + if (--group->active_reqs) + return; + group->state = PM_QOS_REQ_UNVOTE; + queue_work(host->pm_qos.workq, &group->unvote_work); +} + +static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req, + bool should_lock) +{ + unsigned long flags = 0; + + if (!hba || !req) + return; + + if (should_lock) + spin_lock_irqsave(hba->host->host_lock, flags); + __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu); + if (should_lock) + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufs_qcom_pm_qos_vote_work(struct work_struct *work) +{ + struct ufs_qcom_pm_qos_cpu_group *group = + container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work); + struct ufs_qcom_host *host = group->host; + unsigned long flags; + + spin_lock_irqsave(host->hba->host->host_lock, flags); + + if (!host->pm_qos.is_enabled || !group->active_reqs) { + spin_unlock_irqrestore(host->hba->host->host_lock, flags); + return; + } + + group->state = PM_QOS_VOTED; + spin_unlock_irqrestore(host->hba->host->host_lock, flags); + + pm_qos_update_request(&group->req, group->latency_us); +} + +static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work) +{ + struct ufs_qcom_pm_qos_cpu_group *group = container_of(work, + struct ufs_qcom_pm_qos_cpu_group, unvote_work); + struct ufs_qcom_host *host = group->host; + unsigned long flags; + + /* + * Check if new requests were submitted in the meantime and do not + * unvote if so. 
+ */ + spin_lock_irqsave(host->hba->host->host_lock, flags); + + if (!host->pm_qos.is_enabled || group->active_reqs) { + spin_unlock_irqrestore(host->hba->host->host_lock, flags); + return; + } + + group->state = PM_QOS_UNVOTED; + spin_unlock_irqrestore(host->hba->host->host_lock, flags); + + pm_qos_update_request_timeout(&group->req, + group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US); +} + +static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev->parent); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled); +} + +static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev->parent); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + unsigned long value; + unsigned long flags; + bool enable; + int i; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + enable = !!value; + + /* + * Must take the spinlock and save irqs before changing the enabled + * flag in order to keep correctness of PM QoS release. + */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (enable == host->pm_qos.is_enabled) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; + } + host->pm_qos.is_enabled = enable; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (!enable) + for (i = 0; i < host->pm_qos.num_groups; i++) { + cancel_work_sync(&host->pm_qos.groups[i].vote_work); + cancel_work_sync(&host->pm_qos.groups[i].unvote_work); + spin_lock_irqsave(hba->host->host_lock, flags); + host->pm_qos.groups[i].state = PM_QOS_UNVOTED; + host->pm_qos.groups[i].active_reqs = 0; + spin_unlock_irqrestore(hba->host->host_lock, flags); + pm_qos_update_request(&host->pm_qos.groups[i].req, + PM_QOS_DEFAULT_VALUE); + } + + return count; +} + +static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev->parent); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int ret; + int i; + int offset = 0; + + for (i = 0; i < host->pm_qos.num_groups; i++) { + ret = snprintf(&buf[offset], PAGE_SIZE, + "cpu group #%d(mask=0x%lx): %d\n", i, + host->pm_qos.groups[i].mask.bits[0], + host->pm_qos.groups[i].latency_us); + if (ret > 0) + offset += ret; + else + break; + } + + return offset; +} + +static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev->parent); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + unsigned long value; + unsigned long flags; + char *strbuf; + char *strbuf_copy; + char *token; + int i; + int ret; + + /* reserve one byte for null termination */ + strbuf = kmalloc(count + 1, GFP_KERNEL); + if (!strbuf) + return -ENOMEM; + strbuf_copy = strbuf; + strlcpy(strbuf, buf, count + 1); + + for (i = 0; i < host->pm_qos.num_groups; i++) { + token = strsep(&strbuf, ","); + if (!token) + break; + + ret = kstrtoul(token, 0, &value); + if (ret) + break; + + spin_lock_irqsave(hba->host->host_lock, flags); + host->pm_qos.groups[i].latency_us = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + kfree(strbuf_copy); + return count; +} + +static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host) +{ + struct device_node *node = host->hba->dev->of_node; + struct 
device_attribute *attr; + int ret = 0; + int num_groups; + int num_values; + char wq_name[sizeof("ufs_pm_qos_00")]; + int i; + + num_groups = of_property_count_u32_elems(node, + "qcom,pm-qos-cpu-groups"); + if (num_groups <= 0) + goto no_pm_qos; + + num_values = of_property_count_u32_elems(node, + "qcom,pm-qos-cpu-group-latency-us"); + if (num_values <= 0) + goto no_pm_qos; + + if (num_values != num_groups || num_groups > num_possible_cpus()) { + dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n", + __func__, num_groups, num_values, num_possible_cpus()); + goto no_pm_qos; + } + + host->pm_qos.num_groups = num_groups; + host->pm_qos.groups = kcalloc(host->pm_qos.num_groups, + sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL); + if (!host->pm_qos.groups) + return -ENOMEM; + + for (i = 0; i < host->pm_qos.num_groups; i++) { + u32 mask; + + ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups", + i, &mask); + if (ret) + goto free_groups; + host->pm_qos.groups[i].mask.bits[0] = mask; + if (!cpumask_subset(&host->pm_qos.groups[i].mask, + cpu_possible_mask)) { + dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n", + __func__, mask); + goto free_groups; + } + + ret = of_property_read_u32_index(node, + "qcom,pm-qos-cpu-group-latency-us", i, + &host->pm_qos.groups[i].latency_us); + if (ret) + goto free_groups; + + host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES; + host->pm_qos.groups[i].req.cpus_affine = + host->pm_qos.groups[i].mask; + host->pm_qos.groups[i].state = PM_QOS_UNVOTED; + host->pm_qos.groups[i].active_reqs = 0; + host->pm_qos.groups[i].host = host; + + INIT_WORK(&host->pm_qos.groups[i].vote_work, + ufs_qcom_pm_qos_vote_work); + INIT_WORK(&host->pm_qos.groups[i].unvote_work, + ufs_qcom_pm_qos_unvote_work); + } + + ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu", + &host->pm_qos.default_cpu); + if (ret || host->pm_qos.default_cpu > num_possible_cpus()) + host->pm_qos.default_cpu = 0; + + /* + * Use a single-threaded workqueue to assure work submitted to the queue + * is performed in order. Consider the following 2 possible cases: + * + * 1. A new request arrives and voting work is scheduled for it. Before + * the voting work is performed the request is finished and unvote + * work is also scheduled. + * 2. A request is finished and unvote work is scheduled. Before the + * work is performed a new request arrives and voting work is also + * scheduled. + * + * In both cases a vote work and unvote work wait to be performed. + * If ordering is not guaranteed, then the end state might be the + * opposite of the desired state. 
+ */ + snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos", + host->hba->host->host_no); + host->pm_qos.workq = create_singlethread_workqueue(wq_name); + if (!host->pm_qos.workq) { + dev_err(host->hba->dev, "%s: failed to create the workqueue\n", + __func__); + ret = -ENOMEM; + goto free_groups; + } + + /* Initialization was ok, add all PM QoS requests */ + for (i = 0; i < host->pm_qos.num_groups; i++) + pm_qos_add_request(&host->pm_qos.groups[i].req, + PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + + /* PM QoS latency sys-fs attribute */ + attr = &host->pm_qos.latency_attr; + attr->show = ufs_qcom_pm_qos_latency_show; + attr->store = ufs_qcom_pm_qos_latency_store; + sysfs_attr_init(&attr->attr); + attr->attr.name = "pm_qos_latency_us"; + attr->attr.mode = S_IRUGO | S_IWUSR; + if (device_create_file(host->hba->var->dev, attr)) + dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n"); + + /* PM QoS enable sys-fs attribute */ + attr = &host->pm_qos.enable_attr; + attr->show = ufs_qcom_pm_qos_enable_show; + attr->store = ufs_qcom_pm_qos_enable_store; + sysfs_attr_init(&attr->attr); + attr->attr.name = "pm_qos_enable"; + attr->attr.mode = S_IRUGO | S_IWUSR; + if (device_create_file(host->hba->var->dev, attr)) + dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n"); + + host->pm_qos.is_enabled = true; + + return 0; + +free_groups: + kfree(host->pm_qos.groups); +no_pm_qos: + host->pm_qos.groups = NULL; + return ret ? ret : -ENOTSUPP; +} + +static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host) +{ + int i; + + if (!host->pm_qos.groups) + return; + + for (i = 0; i < host->pm_qos.num_groups; i++) + flush_work(&host->pm_qos.groups[i].unvote_work); +} + +static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host) +{ + int i; + + if (!host->pm_qos.groups) + return; + + for (i = 0; i < host->pm_qos.num_groups; i++) + pm_qos_remove_request(&host->pm_qos.groups[i].req); + destroy_workqueue(host->pm_qos.workq); + + kfree(host->pm_qos.groups); + host->pm_qos.groups = NULL; +} +#endif /* CONFIG_SMP */ + #define ANDROID_BOOT_DEV_MAX 30 static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; @@ -1126,6 +2012,69 @@ static int __init get_android_boot_dev(char *str) __setup("androidboot.bootdevice=", get_android_boot_dev); #endif +/* + * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled. 
+ */ +static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host) +{ + struct device_node *node = host->hba->dev->of_node; + + host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm"); + if (host->disable_lpm) + pr_info("%s: will disable all LPM modes\n", __func__); +} + +static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name, + struct ufs_vreg **out_vreg) +{ + int ret = 0; + char prop_name[MAX_PROP_SIZE]; + struct ufs_vreg *vreg = NULL; + struct device *dev = host->hba->dev; + struct device_node *np = dev->of_node; + + if (!np) { + dev_err(dev, "%s: non DT initialization\n", __func__); + goto out; + } + + snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); + if (!of_parse_phandle(np, prop_name, 0)) { + dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", + __func__, prop_name); + ret = -ENODEV; + goto out; + } + + vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); + if (!vreg) + return -ENOMEM; + + vreg->name = name; + + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); + ret = of_property_read_u32(np, prop_name, &vreg->max_uA); + if (ret) { + dev_err(dev, "%s: unable to find %s err %d\n", + __func__, prop_name, ret); + goto out; + } + + vreg->reg = devm_regulator_get(dev, vreg->name); + if (IS_ERR(vreg->reg)) { + ret = PTR_ERR(vreg->reg); + dev_err(dev, "%s: %s get failed, err=%d\n", + __func__, vreg->name, ret); + } + vreg->min_uV = VDDP_REF_CLK_MIN_UV; + vreg->max_uV = VDDP_REF_CLK_MAX_UV; + +out: + if (!ret) + *out_vreg = vreg; + return ret; +} + /** * ufs_qcom_init - bind phy with controller * @hba: host controller instance @@ -1144,9 +2093,6 @@ static int ufs_qcom_init(struct ufs_hba *hba) struct ufs_qcom_host *host; struct resource *res; - if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) - return -ENODEV; - host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { err = -ENOMEM; @@ -1156,21 +2102,58 @@ static int ufs_qcom_init(struct ufs_hba *hba) /* Make a two way bind between the qcom host and the hba */ host->hba = hba; + spin_lock_init(&host->ice_work_lock); + ufshcd_set_variant(hba, host); - /* - * voting/devoting device ref_clk source is time consuming hence - * skip devoting it during aggressive clock gating. This clock - * will still be gated off during runtime suspend. - */ + err = ufs_qcom_ice_get_dev(host); + if (err == -EPROBE_DEFER) { + /* + * UFS driver might be probed before ICE driver does. + * In that case we would like to return EPROBE_DEFER code + * in order to delay its probing. + */ + dev_err(dev, "%s: required ICE device not probed yet err = %d\n", + __func__, err); + goto out_host_free; + + } else if (err == -ENODEV) { + /* + * ICE device is not enabled in DTS file. No need for further + * initialization of ICE driver. + */ + dev_warn(dev, "%s: ICE device is not enabled", + __func__); + } else if (err) { + dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n", + __func__, err); + goto out_host_free; + } + host->generic_phy = devm_phy_get(dev, "ufsphy"); - if (IS_ERR(host->generic_phy)) { + if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) { + /* + * UFS driver might be probed before the phy driver does. + * In that case we would like to return EPROBE_DEFER code. + */ + err = -EPROBE_DEFER; + dev_warn(dev, "%s: required phy device. hasn't probed yet. 
err = %d\n", + __func__, err); + goto out_host_free; + } else if (IS_ERR(host->generic_phy)) { err = PTR_ERR(host->generic_phy); dev_err(dev, "%s: PHY get failed %d\n", __func__, err); goto out; } + err = ufs_qcom_pm_qos_init(host); + if (err) + dev_info(dev, "%s: PM QoS will be disabled\n", __func__); + + /* restore the secure configuration */ + ufs_qcom_update_sec_cfg(hba, true); + err = ufs_qcom_bus_register(host); if (err) goto out_host_free; @@ -1206,19 +2189,33 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_phy_save_controller_version(host->generic_phy, host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step); + err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk", + &host->vddp_ref_clk); phy_init(host->generic_phy); err = phy_power_on(host->generic_phy); if (err) goto out_unregister_bus; + if (host->vddp_ref_clk) { + err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk); + if (err) { + dev_err(dev, "%s: failed enabling ref clk supply: %d\n", + __func__, err); + goto out_disable_phy; + } + } err = ufs_qcom_init_lane_clks(host); if (err) - goto out_disable_phy; + goto out_disable_vddp; + ufs_qcom_parse_lpm(host); + if (host->disable_lpm) + pm_runtime_forbid(host->hba->dev); ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); - ufs_qcom_setup_clocks(hba, true); + ufs_qcom_set_bus_vote(hba, true); + ufs_qcom_setup_clocks(hba, true, false); if (hba->dev->id < MAX_UFS_QCOM_HOSTS) ufs_qcom_hosts[hba->dev->id] = host; @@ -1234,10 +2231,14 @@ static int ufs_qcom_init(struct ufs_hba *hba) goto out; +out_disable_vddp: + if (host->vddp_ref_clk) + ufs_qcom_disable_vreg(dev, host->vddp_ref_clk); out_disable_phy: phy_power_off(host->generic_phy); out_unregister_bus: phy_exit(host->generic_phy); + msm_bus_scale_unregister_client(host->bus_vote.client_handle); out_host_free: devm_kfree(dev, host); ufshcd_set_variant(hba, NULL); @@ -1249,8 +2250,10 @@ static void ufs_qcom_exit(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + msm_bus_scale_unregister_client(host->bus_vote.client_handle); ufs_qcom_disable_lane_clks(host); phy_power_off(host->generic_phy); + ufs_qcom_pm_qos_remove(host); } static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, @@ -1281,105 +2284,292 @@ out: return err; } -static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) +static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable) { - /* nothing to do as of now */ - return 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int err = 0; + + /* The default low power mode configuration is SVS2 */ + if (!ufs_qcom_cap_svs2(host)) + goto out; + + if (!((host->hw_ver.major == 0x3) && + (host->hw_ver.minor == 0x0) && + (host->hw_ver.step == 0x0))) + goto out; + + /* + * The link should be put in hibern8 state before + * configuring the PHY to enter/exit SVS2 mode. 
+ */ + err = ufshcd_uic_hibern8_enter(hba); + if (err) + goto out; + + err = ufs_qcom_phy_configure_lpm(phy, enable); + if (err) + goto out; + + err = ufshcd_uic_hibern8_exit(hba); +out: + return err; } -static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) +static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *attr = &host->dev_req_params; + int err = 0; if (!ufs_qcom_cap_qunipro(host)) - return 0; + goto out; + + err = ufs_qcom_configure_lpm(hba, false); + if (err) + goto out; + + if (attr) + __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx, + attr->hs_rate, false, true); /* set unipro core clock cycles to 150 and clear clock divider */ - return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); +out: + return err; } static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err; - u32 core_clk_ctrl_reg; if (!ufs_qcom_cap_qunipro(host)) return 0; - err = ufshcd_dme_get(hba, - UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), - &core_clk_ctrl_reg); - - /* make sure CORE_CLK_DIV_EN is cleared */ - if (!err && - (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { - core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; - err = ufshcd_dme_set(hba, - UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), - core_clk_ctrl_reg); - } - - return err; + return ufs_qcom_configure_lpm(hba, true); } static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *attr = &host->dev_req_params; + int err = 0; if (!ufs_qcom_cap_qunipro(host)) return 0; - /* set unipro core clock cycles to 75 and clear clock divider */ - return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); + if (attr) + ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx, + attr->hs_rate, false); + + if (ufs_qcom_cap_svs2(host)) + /* + * For SVS2 set unipro core clock cycles to 37 and + * clear clock divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37); + else + /* + * For SVS set unipro core clock cycles to 75 and + * clear clock divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); + + return err; } static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; int err = 0; - if (status == PRE_CHANGE) { + switch (status) { + case PRE_CHANGE: if (scale_up) err = ufs_qcom_clk_scale_up_pre_change(hba); else err = ufs_qcom_clk_scale_down_pre_change(hba); - } else { - if (scale_up) - err = ufs_qcom_clk_scale_up_post_change(hba); - else + break; + case POST_CHANGE: + if (!scale_up) err = ufs_qcom_clk_scale_down_post_change(hba); - if (err || !dev_req_params) - goto out; - - ufs_qcom_cfg_timers(hba, - dev_req_params->gear_rx, - dev_req_params->pwr_rx, - dev_req_params->hs_rate, - false); ufs_qcom_update_bus_bw_vote(host); + break; + default: + dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); + err = -EINVAL; + break; } -out: return err; } +/* + * This function should be called to restore the security configuration of UFS + * register space after coming out of UFS host core power collapse. 
+ * + * @hba: host controller instance + * @restore_sec_cfg: Set "true" if secure configuration needs to be restored + * and set "false" when secure configuration is lost. + */ +static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg) +{ + int ret = 0; + u64 scm_ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* scm command buffer structrue */ + struct msm_scm_cmd_buf { + unsigned int device_id; + unsigned int spare; + } cbuf = {0}; + #define RESTORE_SEC_CFG_CMD 0x2 + #define UFS_TZ_DEV_ID 19 + + if (!host || !hba->vreg_info.vdd_hba || + !(host->sec_cfg_updated ^ restore_sec_cfg)) { + return 0; + } else if (host->caps & + UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE) { + return 0; + } else if (!restore_sec_cfg) { + /* + * Clear the flag so next time when this function is called + * with restore_sec_cfg set to true, we can restore the secure + * configuration. + */ + host->sec_cfg_updated = false; + goto out; + } else if (hba->clk_gating.state != CLKS_ON) { + /* + * Clocks should be ON to restore the host controller secure + * configuration. + */ + goto out; + } + + /* + * If we are here, Host controller clocks are running, Host controller + * power collapse feature is supported and Host controller has just came + * out of power collapse. + */ + cbuf.device_id = UFS_TZ_DEV_ID; + ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret); + if (ret || scm_ret) { + dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n", + __func__, ret, scm_ret); + if (!ret) + ret = scm_ret; + } else { + host->sec_cfg_updated = true; + } + +out: + dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n", + __func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret); + return ret; +} + + +static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (ufs_qcom_cap_svs2(host)) + return UFS_HS_G1; + /* Default SVS support @ HS G2 frequencies*/ + return UFS_HS_G2; +} + +void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv, + void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs, + char *str, void *priv)) +{ + u32 reg; + struct ufs_qcom_host *host; + + if (unlikely(!hba)) { + pr_err("%s: hba is NULL\n", __func__); + return; + } + if (unlikely(!print_fn)) { + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); + return; + } + + host = ufshcd_get_variant(hba); + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) + return; + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); + + reg = ufshcd_readl(hba, REG_UFS_CFG1); + reg |= UFS_BIT(17); + ufshcd_writel(hba, reg, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); + + /* clear bit 17 - UTP_DBG_RAMS_EN */ + ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, 
UFS_DBG_RD_REG_TXUC); + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); +} + +static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) +{ + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, + UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); + } else { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); + } +} + static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) { /* provide a legal default configuration */ - host->testbus.select_major = TSTBUS_UAWM; - host->testbus.select_minor = 1; + host->testbus.select_major = TSTBUS_UNIPRO; + host->testbus.select_minor = 37; } -static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) +bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, + u8 select_major, u8 select_minor) { - if (host->testbus.select_major >= TSTBUS_MAX) { + if (select_major >= TSTBUS_MAX) { dev_err(host->hba->dev, "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", - __func__, host->testbus.select_major); + __func__, select_major); return false; } @@ -1388,28 +2578,33 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) * mappings of select_minor, since there is no harm in * configuring a non-existent select_minor */ - if (host->testbus.select_minor > 0x1F) { + if (select_minor > 0xFF) { dev_err(host->hba->dev, "%s: 0x%05X is not a legal testbus option\n", - __func__, host->testbus.select_minor); + __func__, select_minor); return false; } return true; } +/* + * The caller of this function must make sure that the controller + * is out of runtime suspend and appropriate clocks are enabled + * before accessing. 
+ */ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) { - int reg; - int offset; + int reg = 0; + int offset = 0, ret = 0, testbus_sel_offset = 19; u32 mask = TEST_BUS_SUB_SEL_MASK; + unsigned long flags; + struct ufs_hba *hba; if (!host) return -EINVAL; - - if (!ufs_qcom_testbus_cfg_is_ok(host)) - return -EPERM; - + hba = host->hba; + spin_lock_irqsave(hba->host->host_lock, flags); switch (host->testbus.select_major) { case TSTBUS_UAWM: reg = UFS_TEST_BUS_CTRL_0; @@ -1457,7 +2652,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) break; case TSTBUS_UNIPRO: reg = UFS_UNIPRO_CFG; - offset = 1; + offset = 20; + mask = 0xFFF; break; /* * No need for a default case, since @@ -1466,19 +2662,27 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) */ } mask <<= offset; - - pm_runtime_get_sync(host->hba->dev); - ufshcd_hold(host->hba, false); - ufshcd_rmwl(host->hba, TEST_BUS_SEL, - (u32)host->testbus.select_major << 19, + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (reg) { + ufshcd_rmwl(host->hba, TEST_BUS_SEL, + (u32)host->testbus.select_major << testbus_sel_offset, REG_UFS_CFG1); - ufshcd_rmwl(host->hba, mask, + ufshcd_rmwl(host->hba, mask, (u32)host->testbus.select_minor << offset, reg); - ufshcd_release(host->hba); - pm_runtime_put_sync(host->hba->dev); - - return 0; + } else { + dev_err(hba->dev, "%s: Problem setting minor\n", __func__); + ret = -EINVAL; + goto out; + } + ufs_qcom_enable_test_bus(host); + /* + * Make sure the test bus configuration is + * committed before returning. + */ + mb(); +out: + return ret; } static void ufs_qcom_testbus_read(struct ufs_hba *hba) @@ -1486,13 +2690,73 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba) ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); } -static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) +static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) { + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + u32 *testbus = NULL; + int i, nminor = 256, testbus_len = nminor * sizeof(u32); + + testbus = kmalloc(testbus_len, GFP_KERNEL); + if (!testbus) + return; + + host->testbus.select_major = TSTBUS_UNIPRO; + for (i = 0; i < nminor; i++) { + host->testbus.select_minor = i; + ufs_qcom_testbus_config(host); + testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); + } + print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET, + 16, 4, testbus, testbus_len, false); + kfree(testbus); +} + +static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + u32 *testbus = NULL; + int i, nminor = 32, testbus_len = nminor * sizeof(u32); + + testbus = kmalloc(testbus_len, GFP_KERNEL); + if (!testbus) + return; + + host->testbus.select_major = TSTBUS_UTP_HCI; + for (i = 0; i < nminor; i++) { + host->testbus.select_minor = i; + ufs_qcom_testbus_config(host); + testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); + } + print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET, + 16, 4, testbus, testbus_len, false); + kfree(testbus); +} + +static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers "); + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); + + if (no_sleep) + return; + /* sleep a bit intermittently as we are dumping too much data */ + usleep_range(1000, 1100); ufs_qcom_testbus_read(hba); + usleep_range(1000, 
1100); + ufs_qcom_print_unipro_testbus(hba); + usleep_range(1000, 1100); + ufs_qcom_print_utp_hci_testbus(hba); + usleep_range(1000, 1100); + ufs_qcom_phy_dbg_register_dump(phy); + usleep_range(1000, 1100); + ufs_qcom_ice_print_regs(host); } + /** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1500,7 +2764,6 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) * handshake during initialization. */ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { - .name = "qcom", .init = ufs_qcom_init, .exit = ufs_qcom_exit, .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, @@ -1509,9 +2772,37 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .hce_enable_notify = ufs_qcom_hce_enable_notify, .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, + .apply_dev_quirks = ufs_qcom_apply_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, + .full_reset = ufs_qcom_full_reset, + .update_sec_cfg = ufs_qcom_update_sec_cfg, + .get_scale_down_gear = ufs_qcom_get_scale_down_gear, + .set_bus_vote = ufs_qcom_set_bus_vote, .dbg_register_dump = ufs_qcom_dump_dbg_regs, +#ifdef CONFIG_DEBUG_FS + .add_debugfs = ufs_qcom_dbg_add_debugfs, +#endif +}; + +static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = { + .crypto_req_setup = ufs_qcom_crypto_req_setup, + .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start, + .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end, + .crypto_engine_reset = ufs_qcom_crytpo_engine_reset, + .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status, +}; + +static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { + .req_start = ufs_qcom_pm_qos_req_start, + .req_end = ufs_qcom_pm_qos_req_end, +}; + +static struct ufs_hba_variant ufs_hba_qcom_variant = { + .name = "qcom", + .vops = &ufs_hba_qcom_vops, + .crypto_vops = &ufs_hba_crypto_variant_ops, + .pm_qos_vops = &ufs_hba_pm_qos_variant_ops, }; /** @@ -1524,9 +2815,27 @@ static int ufs_qcom_probe(struct platform_device *pdev) { int err; struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + /* + * On qcom platforms, bootdevice is the primary storage + * device. This device can either be eMMC or UFS. + * The type of device connected is detected at runtime. + * So, if an eMMC device is connected, and this function + * is invoked, it would turn-off the regulator if it detects + * that the storage device is not ufs. + * These regulators are turned ON by the bootloaders & turning + * them off without sending PON may damage the connected device. + * Hence, check for the connected device early-on & don't turn-off + * the regulators. + */ + if (of_property_read_bool(np, "non-removable") && + strlen(android_boot_dev) && + strcmp(android_boot_dev, dev_name(dev))) + return -ENODEV; /* Perform generic probe */ - err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); + err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant); if (err) dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); |
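The ICE hooks added in this patch derive the crypto DUN (data unit number) from the request's starting LBA, shifted right by UFS_QCOM_ICE_TR_DATA_UNIT_4_KB. A minimal standalone C sketch of that arithmetic follows; the shift value of 3 (eight 512-byte sectors per 4 KB data unit) is an assumption for illustration and is not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: eight 512-byte sectors per 4 KB crypto data unit. */
#define ICE_TR_DATA_UNIT_4_KB_SHIFT 3

/* Same arithmetic as ufs_qcom_crypto_req_setup(): DUN = starting sector >> shift. */
static uint64_t sector_to_dun(uint64_t bi_sector)
{
	return bi_sector >> ICE_TR_DATA_UNIT_4_KB_SHIFT;
}

int main(void)
{
	/* Sectors that fall inside the same 4 KB unit map to the same DUN. */
	printf("sector 8  -> DUN %llu\n", (unsigned long long)sector_to_dun(8));
	printf("sector 15 -> DUN %llu\n", (unsigned long long)sector_to_dun(15));
	printf("sector 16 -> DUN %llu\n", (unsigned long long)sector_to_dun(16));
	return 0;
}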
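The reworked __ufs_qcom_cfg_timers() above programs SYS1CLK_1US with the number of core_clk cycles per microsecond, and its pre-scale-up variant uses the clock's max_freq because the clock has not been scaled up yet when the timers are reprogrammed. A rough standalone sketch of that calculation, assuming plain integer division and the 1 MHz floor mentioned in the hunk:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/*
 * Rough model of the SYS1CLK_1US value: the number of core_clk cycles that
 * fit in one microsecond. The pre-scale-up path would feed in max_freq
 * instead of the currently programmed rate.
 */
static unsigned long core_clk_cycles_per_us(unsigned long core_clk_rate_hz)
{
	/* "If frequency is smaller than 1MHz, set to 1MHz" (see hunk above). */
	if (core_clk_rate_hz < USEC_PER_SEC)
		core_clk_rate_hz = USEC_PER_SEC;

	return core_clk_rate_hz / USEC_PER_SEC;
}

int main(void)
{
	printf("150 MHz  core_clk -> %lu cycles/us\n",
	       core_clk_cycles_per_us(150000000UL));
	printf("19.2 MHz core_clk -> %lu cycles/us\n",
	       core_clk_cycles_per_us(19200000UL));
	return 0;
}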
