diff options
| author | Talel Shenhar <tatias@codeaurora.org> | 2015-05-18 12:12:48 +0300 |
|---|---|---|
| committer | Subhash Jadavani <subhashj@codeaurora.org> | 2016-05-31 15:26:23 -0700 |
| commit | ae479fa29fc8cc9fd3ab36dde46a0638da2a4fcc (patch) | |
| tree | 24577d0ae0e0f274af20de06c28ca9e126a01c3a | |
| parent | cff87b3cda7f31e08f6c539661e3236889831a9e (diff) | |
mmc: core: devfreq: migrate to devfreq based clock scaling
This change adds the use of devfreq to MMC.
Both eMMC and SD cards will use it.
For some workloads, such as video playback, it isn't
necessary for these cards to run at high speed.
Running at lower frequency, for example 52MHz, in such
cases can still meet the deadlines for data transfers.
Scaling down the clock frequency dynamically has power
savings not only because the bus is running at lower frequency
but also has an advantage of scaling down the system core
voltage, if supported.
Provide ondemand clock scaling support, similar to the
cpufreq ondemand governor, with two thresholds,
up_threshold and down_threshold, to decide whether to
increase the frequency or scale it down respectively.
The sampling interval is in the order of milliseconds.
If the sampling interval is too low, frequent switching of
frequencies can lead to high power consumption; if the
sampling interval is too high, the clock scaling logic
would take a long time to realize that the underlying
hardware (controller and card) is busy and scale up
the clocks.
Change-Id: I58ddbd93648ded82b304411956e035fb353cd97e
Signed-off-by: Talel Shenhar <tatias@codeaurora.org>
[subhashj@codeaurora.org: fixed trivial merge conflicts & compilation
errors]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
| -rw-r--r-- | Documentation/devicetree/bindings/mmc/sdhci-msm.txt | 10 | ||||
| -rw-r--r-- | drivers/mmc/core/core.c | 936 | ||||
| -rw-r--r-- | drivers/mmc/core/core.h | 6 | ||||
| -rw-r--r-- | drivers/mmc/core/debugfs.c | 48 | ||||
| -rw-r--r-- | drivers/mmc/core/host.c | 66 | ||||
| -rw-r--r-- | drivers/mmc/core/mmc.c | 13 | ||||
| -rw-r--r-- | drivers/mmc/core/sd.c | 3 | ||||
| -rw-r--r-- | drivers/mmc/host/sdhci-msm.c | 10 | ||||
| -rw-r--r-- | include/linux/mmc/card.h | 4 | ||||
| -rw-r--r-- | include/linux/mmc/host.h | 65 |
10 files changed, 697 insertions, 464 deletions
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt index b9dcecdf2cf8..a95ff2d27c94 100644 --- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt +++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt @@ -34,6 +34,14 @@ Optional Properties: "HS200_1p2v" - indicates that host can support HS200 at 1.2v. "DDR_1p8v" - indicates that host can support DDR mode at 1.8v. "DDR_1p2v" - indicates that host can support DDR mode at 1.2v. + - qcom,devfreq,freq-table - specifies supported frequencies for clock scaling. + Clock scaling logic shall toggle between these frequencies based + on card load. In case the defined frequencies are over or below + the supported card frequencies, they will be overridden + during card init. In case this entry is not supplied, + the driver will construct one based on the card + supported max and min frequencies. + The frequencies must be ordered from lowest to highest. In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage). - qcom,<supply>-always-on - specifies whether supply should be kept "on" always. 
@@ -102,6 +110,8 @@ Example: qcom,vdd-io-voltage-level = <1800000 2950000>; qcom,vdd-io-current-level = <6 22000>; + qcom,devfreq,freq-table = <52000000 200000000>; + pinctrl-names = "active", "sleep"; pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>; pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_on &sdc1_data_on>; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index da5927ed5772..0340980c0b77 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/completion.h> +#include <linux/devfreq.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/pagemap.h> @@ -53,8 +54,6 @@ /* If the device is not responding */ #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ -static void mmc_clk_scaling(struct mmc_host *host, bool from_wq); - /* * Background operations can take a long time, depending on the housekeeping * operations the card has to perform. @@ -127,41 +126,539 @@ static inline void mmc_should_fail_request(struct mmc_host *host, #endif /* CONFIG_FAIL_MMC_REQUEST */ -static inline void -mmc_clk_scaling_update_state(struct mmc_host *host, struct mmc_request *mrq) +static bool mmc_is_data_request(struct mmc_request *mmc_request) { - if (mrq) { - switch (mrq->cmd->opcode) { - case MMC_READ_SINGLE_BLOCK: - case MMC_READ_MULTIPLE_BLOCK: - case MMC_WRITE_BLOCK: - case MMC_WRITE_MULTIPLE_BLOCK: - host->clk_scaling.invalid_state = false; - break; - default: - host->clk_scaling.invalid_state = true; - break; + switch (mmc_request->cmd->opcode) { + case MMC_READ_SINGLE_BLOCK: + case MMC_READ_MULTIPLE_BLOCK: + case MMC_WRITE_BLOCK: + case MMC_WRITE_MULTIPLE_BLOCK: + return true; + default: + return false; + } +} + +static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed) +{ + unsigned long flags; + struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling; + + if (!clk_scaling->enable) + return; + + if 
(lock_needed) + spin_lock_irqsave(&clk_scaling->lock, flags); + + clk_scaling->start_busy = ktime_get(); + clk_scaling->is_busy_started = true; + + if (lock_needed) + spin_unlock_irqrestore(&clk_scaling->lock, flags); +} + +static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed) +{ + unsigned long flags; + struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling; + + if (!clk_scaling->enable) + return; + + if (lock_needed) + spin_lock_irqsave(&clk_scaling->lock, flags); + + if (!clk_scaling->is_busy_started) { + WARN_ON(1); + goto out; + } + + clk_scaling->total_busy_time_us += + ktime_to_us(ktime_sub(ktime_get(), + clk_scaling->start_busy)); + pr_debug("%s: accumulated busy time is %lu usec\n", + mmc_hostname(host), clk_scaling->total_busy_time_us); + clk_scaling->is_busy_started = false; + +out: + if (lock_needed) + spin_unlock_irqrestore(&clk_scaling->lock, flags); +} + +/** + * mmc_disable_devfreq_clk_scaling() - Disable clock scaling + * @host: pointer to mmc host structure + * + * Disables clock scaling aggresively + */ +void mmc_disable_clk_scaling(struct mmc_host *host) +{ + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return; + } + pr_debug("%s: disabling clock scaling\n", mmc_hostname(host)); + mmc_exit_clk_scaling(host); + +} +EXPORT_SYMBOL(mmc_disable_clk_scaling); + +/** + * mmc_can_scale_clk() - Check clock scaling capability + * @host: pointer to mmc host structure + */ +bool mmc_can_scale_clk(struct mmc_host *host) +{ + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return false; + } + + return host->caps2 & MMC_CAP2_CLK_SCALE; +} +EXPORT_SYMBOL(mmc_can_scale_clk); + +static int mmc_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *status) +{ + unsigned long flags; + struct mmc_host *host = container_of(dev, struct mmc_host, class_dev); + struct mmc_devfeq_clk_scaling *clk_scaling; + + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return -EINVAL; + 
} + + clk_scaling = &host->clk_scaling; + + if (!clk_scaling->enable) + return 0; + + spin_lock_irqsave(&clk_scaling->lock, flags); + + /* accumulate the busy time of ongoing work */ + memset(status, 0, sizeof(*status)); + if (clk_scaling->is_busy_started) { + mmc_clk_scaling_stop_busy(host, false); + mmc_clk_scaling_start_busy(host, false); + } + + status->busy_time = clk_scaling->total_busy_time_us; + status->total_time = ktime_to_us(ktime_sub(ktime_get(), + clk_scaling->measure_interval_start)); + clk_scaling->total_busy_time_us = 0; + status->current_frequency = clk_scaling->curr_freq; + clk_scaling->measure_interval_start = ktime_get(); + + pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n", + mmc_hostname(host), + (status->busy_time*100)/status->total_time, + status->total_time, status->busy_time, + status->current_frequency); + + spin_unlock_irqrestore(&clk_scaling->lock, flags); + + return 0; +} + +static bool mmc_is_vaild_state_for_clk_scaling(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + u32 status; + + /* + * If the current partition type is RPMB, clock switching may not + * work properly as sending tuning command (CMD21) is illegal in + * this mode. 
+ */ + if (!card || (mmc_card_mmc(card) && + card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)) + return false; + + if (mmc_send_status(card, &status)) { + pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); + return false; + } + + return R1_CURRENT_STATE(status) == R1_STATE_TRAN; +} + +int mmc_clk_update_freq(struct mmc_host *host, + unsigned long freq, enum mmc_load state) +{ + int err = 0; + + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return -EINVAL; + } + + if (host->ops->notify_load) { + err = host->ops->notify_load(host, state); + if (err) { + pr_err("%s: %s: fail on notify_load\n", + mmc_hostname(host), __func__); + goto out; } - } else { - /* - * force clock scaling transitions, - * if other conditions are met - */ - host->clk_scaling.invalid_state = false; } - return; + if (freq != host->clk_scaling.curr_freq) { + if (!mmc_is_vaild_state_for_clk_scaling(host)) { + pr_debug("%s: invalid state for clock scaling - skipping", + mmc_hostname(host)); + err = -EAGAIN; + goto error; + } + + err = host->bus_ops->change_bus_speed(host, &freq); + if (!err) + host->clk_scaling.curr_freq = freq; + else + pr_err("%s: %s: failed (%d) at freq=%lu\n", + mmc_hostname(host), __func__, err, freq); + } +error: + if (err) { + /* restore previous state */ + if (host->ops->notify_load) + if (host->ops->notify_load(host, + host->clk_scaling.state)) + pr_err("%s: %s: fail on notify_load restore\n", + mmc_hostname(host), __func__); + } +out: + return err; +} +EXPORT_SYMBOL(mmc_clk_update_freq); + +static int mmc_devfreq_set_target(struct device *dev, + unsigned long *freq, u32 devfreq_flags) +{ + struct mmc_host *host = container_of(dev, struct mmc_host, class_dev); + struct mmc_devfeq_clk_scaling *clk_scaling; + int err = 0; + int abort; + unsigned long flags; + + if (!(host && freq)) { + pr_err("%s: unexpected host/freq parameter\n", __func__); + err = -EINVAL; + goto out; + } + clk_scaling = &host->clk_scaling; + + if (!clk_scaling->enable) + goto 
out; + + pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host), + *freq, current->comm); + + if ((clk_scaling->curr_freq == *freq) || + clk_scaling->skip_clk_scale_freq_update) + goto out; + + /* No need to scale the clocks if they are gated */ + if (!host->ios.clock) + goto out; + + spin_lock_irqsave(&clk_scaling->lock, flags); + if (clk_scaling->clk_scaling_in_progress) { + pr_debug("%s: clocks scaling is already in-progress by mmc thread\n", + mmc_hostname(host)); + spin_unlock_irqrestore(&clk_scaling->lock, flags); + goto out; + } + clk_scaling->need_freq_change = true; + clk_scaling->target_freq = *freq; + clk_scaling->state = *freq < clk_scaling->curr_freq ? + MMC_LOAD_LOW : MMC_LOAD_HIGH; + spin_unlock_irqrestore(&clk_scaling->lock, flags); + + abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort); + if (abort) + goto out; + + /* + * In case we were able to claim host there is no need to + * defer the frequency change. It will be done now + */ + clk_scaling->need_freq_change = false; + + mmc_host_clk_hold(host); + err = mmc_clk_update_freq(host, *freq, clk_scaling->state); + if (err && err != -EAGAIN) + pr_err("%s: clock scale to %lu failed with error %d\n", + mmc_hostname(host), *freq, err); + else + pr_debug("%s: clock change to %lu finished successfully (%s)\n", + mmc_hostname(host), *freq, current->comm); + + + mmc_host_clk_release(host); + mmc_release_host(host); +out: + return err; +} + +static void mmc_deferred_scaling(struct mmc_host *host) +{ + unsigned long flags; + unsigned long target_freq; + int err; + + if (!host->clk_scaling.enable) + return; + + spin_lock_irqsave(&host->clk_scaling.lock, flags); + + if (host->clk_scaling.clk_scaling_in_progress || + !(host->clk_scaling.need_freq_change)) { + spin_unlock_irqrestore(&host->clk_scaling.lock, flags); + return; + } + + + atomic_inc(&host->clk_scaling.devfreq_abort); + target_freq = host->clk_scaling.target_freq; + host->clk_scaling.clk_scaling_in_progress = true; + 
host->clk_scaling.need_freq_change = false; + spin_unlock_irqrestore(&host->clk_scaling.lock, flags); + pr_debug("%s: doing deferred frequency change (%lu) (%s)\n", + mmc_hostname(host), + target_freq, current->comm); + + err = mmc_clk_update_freq(host, target_freq, + host->clk_scaling.state); + if (err && err != -EAGAIN) + pr_err("%s: failed on deferred scale clocks (%d)\n", + mmc_hostname(host), err); + else + pr_debug("%s: clocks were successfully scaled to %lu (%s)\n", + mmc_hostname(host), + target_freq, current->comm); + host->clk_scaling.clk_scaling_in_progress = false; + atomic_dec(&host->clk_scaling.devfreq_abort); +} + +static int mmc_devfreq_create_freq_table(struct mmc_host *host) +{ + int i; + struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling; + + pr_debug("%s: supported: lowest=%lu, highest=%lu\n", + mmc_hostname(host), + host->card->clk_scaling_lowest, + host->card->clk_scaling_highest); + + if (!clk_scaling->freq_table) { + pr_debug("%s: no frequency table defined - setting default\n", + mmc_hostname(host)); + clk_scaling->freq_table = kzalloc( + 2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL); + if (!clk_scaling->freq_table) + return -ENOMEM; + clk_scaling->freq_table[0] = host->card->clk_scaling_lowest; + clk_scaling->freq_table[1] = host->card->clk_scaling_highest; + clk_scaling->freq_table_sz = 2; + goto out; + } + + if (host->card->clk_scaling_lowest > + clk_scaling->freq_table[0]) + pr_debug("%s: frequency table undershot possible freq\n", + mmc_hostname(host)); + + for (i = 0; i < clk_scaling->freq_table_sz; i++) { + if (clk_scaling->freq_table[i] <= + host->card->clk_scaling_highest) + continue; + clk_scaling->freq_table[i] = + host->card->clk_scaling_highest; + clk_scaling->freq_table_sz = i + 1; + pr_debug("%s: frequency table overshot possible freq (%d)\n", + mmc_hostname(host), clk_scaling->freq_table[i]); + break; + } + +out: + clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table; + 
clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz; + + for (i = 0; i < clk_scaling->freq_table_sz; i++) + pr_debug("%s: freq[%d] = %u\n", + mmc_hostname(host), i, clk_scaling->freq_table[i]); + + return 0; } -static inline void mmc_update_clk_scaling(struct mmc_host *host) +/** + * mmc_init_devfreq_clk_scaling() - Initialize clock scaling + * @host: pointer to mmc host structure + * + * Initialize clock scaling for supported hosts. It is assumed that the caller + * ensure clock is running at maximum possible frequency before calling this + * function. Shall use struct devfreq_simple_ondemand_data to configure + * governor. + */ +int mmc_init_clk_scaling(struct mmc_host *host) { - if (host->clk_scaling.enable && !host->clk_scaling.invalid_state) { - host->clk_scaling.busy_time_us += - ktime_to_us(ktime_sub(ktime_get(), - host->clk_scaling.start_busy)); - host->clk_scaling.start_busy = ktime_get(); + int err; + + if (!host || !host->card) { + pr_err("%s: unexpected host/card parameters\n", + __func__); + return -EINVAL; + } + + if (!mmc_can_scale_clk(host) || + !host->bus_ops->change_bus_speed) { + pr_debug("%s: clock scaling is not supported\n", + mmc_hostname(host)); + return 0; } + + pr_debug("registering %s dev (%p) to devfreq", + mmc_hostname(host), + mmc_classdev(host)); + + if (host->clk_scaling.devfreq) { + pr_err("%s: dev is already registered for dev %p\n", + mmc_hostname(host), + mmc_dev(host)); + return -EPERM; + } + spin_lock_init(&host->clk_scaling.lock); + atomic_set(&host->clk_scaling.devfreq_abort, 0); + host->clk_scaling.curr_freq = host->ios.clock; + host->clk_scaling.clk_scaling_in_progress = false; + host->clk_scaling.need_freq_change = false; + host->clk_scaling.is_busy_started = false; + + host->clk_scaling.devfreq_profile.polling_ms = + host->clk_scaling.polling_delay_ms; + host->clk_scaling.devfreq_profile.get_dev_status = + mmc_devfreq_get_dev_status; + host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target; + 
host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock; + + host->clk_scaling.ondemand_gov_data.simple_scaling = true; + host->clk_scaling.ondemand_gov_data.upthreshold = + host->clk_scaling.upthreshold; + host->clk_scaling.ondemand_gov_data.downdifferential = + host->clk_scaling.downthreshold; + + err = mmc_devfreq_create_freq_table(host); + if (err) { + pr_err("%s: fail to create devfreq frequency table\n", + mmc_hostname(host)); + return err; + } + + pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n", + mmc_hostname(host), + host->clk_scaling.ondemand_gov_data.upthreshold, + host->clk_scaling.ondemand_gov_data.downdifferential, + host->clk_scaling.devfreq_profile.polling_ms); + host->clk_scaling.devfreq = devfreq_add_device( + mmc_classdev(host), + &host->clk_scaling.devfreq_profile, + "simple_ondemand", + &host->clk_scaling.ondemand_gov_data); + if (!host->clk_scaling.devfreq) { + pr_err("%s: unable to register with devfreq\n", + mmc_hostname(host)); + return -EPERM; + } + + pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n", + mmc_hostname(host), + dev_name(mmc_classdev(host)), + mmc_classdev(host), + host->clk_scaling.devfreq, + host->ios.clock); + + host->clk_scaling.enable = true; + + return err; } +EXPORT_SYMBOL(mmc_init_clk_scaling); + +/** + * mmc_exit_devfreq_clk_scaling() - Disable clock scaling + * @host: pointer to mmc host structure + * + * Disable clock scaling permanently. 
+ */ +int mmc_exit_clk_scaling(struct mmc_host *host) +{ + int err; + + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return -EINVAL; + } + + if (!mmc_can_scale_clk(host)) + return 0; + + if (!host->clk_scaling.devfreq) { + pr_err("%s: no devfreq is assosiated with this device\n", + mmc_hostname(host)); + return -EPERM; + } + + host->clk_scaling.enable = false; + atomic_inc(&host->clk_scaling.devfreq_abort); + err = devfreq_suspend_device(host->clk_scaling.devfreq); + if (err) { + pr_err("%s: failed to suspend devfreq\n", mmc_hostname(host)); + return err; + } + pr_debug("%s: devfreq suspended\n", mmc_hostname(host)); + + err = devfreq_remove_device(host->clk_scaling.devfreq); + if (err) { + pr_err("%s: remove devfreq failed\n", mmc_hostname(host)); + return err; + } + + host->clk_scaling.devfreq = NULL; + atomic_set(&host->clk_scaling.devfreq_abort, 1); + mmc_reset_clk_scale_stats(host); + pr_debug("%s: devfreq was removed\n", mmc_hostname(host)); + + return 0; +} +EXPORT_SYMBOL(mmc_exit_clk_scaling); + + +/** + * mmc_reset_clk_scale_stats() - reset clock scaling statistics + * @host: pointer to mmc host structure + */ +void mmc_reset_clk_scale_stats(struct mmc_host *host) +{ + unsigned long flags; + + if (!host) { + pr_err("bad host parameter\n"); + WARN_ON(1); + return; + } + + spin_lock_irqsave(&host->clk_scaling.lock, flags); + host->clk_scaling.total_busy_time_us = 0; + spin_unlock_irqrestore(&host->clk_scaling.lock, flags); + +} +EXPORT_SYMBOL(mmc_reset_clk_scale_stats); + /** * mmc_request_done - finish processing an MMC request * @host: MMC host which completed request @@ -177,8 +674,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) #ifdef CONFIG_MMC_PERF_PROFILING ktime_t diff; #endif - if (host->card) - mmc_update_clk_scaling(host); + + if (host->clk_scaling.is_busy_started) + mmc_clk_scaling_stop_busy(host, true); /* Flag re-tuning needed on CRC errors */ if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && @@ -365,20 
+863,9 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) mmc_host_clk_hold(host); led_trigger_event(host->led, LED_FULL); - if (host->card && host->clk_scaling.enable) { - /* - * Check if we need to scale the clocks. Clocks - * will be scaled up immediately if necessary - * conditions are satisfied. Scaling down the - * frequency will be done after current thread - * releases host. - */ - mmc_clk_scaling_update_state(host, mrq); - if (!host->clk_scaling.invalid_state) { - mmc_clk_scaling(host, false); - host->clk_scaling.start_busy = ktime_get(); - } - } + mmc_deferred_scaling(host); + if (mmc_is_data_request(mrq)) + mmc_clk_scaling_start_busy(host, true); __mmc_start_request(host, mrq); @@ -2710,367 +3197,6 @@ int mmc_hw_reset(struct mmc_host *host) } EXPORT_SYMBOL(mmc_hw_reset); -/** - * mmc_reset_clk_scale_stats() - reset clock scaling statistics - * @host: pointer to mmc host structure - */ -void mmc_reset_clk_scale_stats(struct mmc_host *host) -{ - host->clk_scaling.busy_time_us = 0; - host->clk_scaling.window_time = jiffies; -} -EXPORT_SYMBOL_GPL(mmc_reset_clk_scale_stats); - -/** - * mmc_get_max_frequency() - get max. frequency supported - * @host: pointer to mmc host structure - * - * Returns max. frequency supported by card/host. If the - * timing mode is SDR50/SDR104/HS200/DDR50 return appropriate - * max. frequency in these modes else, use the current frequency. - * Also, allow host drivers to overwrite the frequency in case - * they support "get_max_frequency" host ops. 
- */ -unsigned long mmc_get_max_frequency(struct mmc_host *host) -{ - unsigned long freq; - unsigned char timing; - - if (host->ops && host->ops->get_max_frequency) { - freq = host->ops->get_max_frequency(host); - goto out; - } - - if (mmc_card_hs400(host->card)) - timing = MMC_TIMING_MMC_HS400; - else - timing = host->ios.timing; - - switch (timing) { - case MMC_TIMING_UHS_SDR50: - freq = UHS_SDR50_MAX_DTR; - break; - case MMC_TIMING_UHS_SDR104: - freq = UHS_SDR104_MAX_DTR; - break; - case MMC_TIMING_MMC_HS200: - freq = MMC_HS200_MAX_DTR; - break; - case MMC_TIMING_UHS_DDR50: - freq = UHS_DDR50_MAX_DTR; - break; - case MMC_TIMING_MMC_HS400: - freq = MMC_HS200_MAX_DTR; - break; - default: - mmc_host_clk_hold(host); - freq = host->ios.clock; - mmc_host_clk_release(host); - break; - } - -out: - return freq; -} -EXPORT_SYMBOL_GPL(mmc_get_max_frequency); - -/** - * mmc_get_min_frequency() - get min. frequency supported - * @host: pointer to mmc host structure - * - * Returns min. frequency supported by card/host which doesn't impair - * performance for most usecases. If the timing mode is SDR50/SDR104/HS200 - * return 50MHz value. If timing mode is DDR50 return 25MHz so that - * throughput would be equivalent to SDR50/SDR104 in 50MHz. Also, allow - * host drivers to overwrite the frequency in case they support - * "get_min_frequency" host ops. 
- */ -static unsigned long mmc_get_min_frequency(struct mmc_host *host) -{ - unsigned long freq; - - if (host->ops && host->ops->get_min_frequency) { - freq = host->ops->get_min_frequency(host); - goto out; - } - - switch (host->ios.timing) { - case MMC_TIMING_UHS_SDR50: - case MMC_TIMING_UHS_SDR104: - freq = UHS_SDR25_MAX_DTR; - break; - case MMC_TIMING_MMC_HS200: - freq = MMC_HIGH_52_MAX_DTR; - break; - case MMC_TIMING_MMC_HS400: - freq = MMC_HIGH_52_MAX_DTR; - break; - case MMC_TIMING_UHS_DDR50: - freq = UHS_DDR50_MAX_DTR / 2; - break; - default: - mmc_host_clk_hold(host); - freq = host->ios.clock; - mmc_host_clk_release(host); - break; - } - -out: - return freq; -} - -/* - * Scale down clocks to minimum frequency supported. - * The delayed work re-arms itself in case it cannot - * claim the host. - */ -static void mmc_clk_scale_work(struct work_struct *work) -{ - struct mmc_host *host = container_of(work, struct mmc_host, - clk_scaling.work.work); - - if (!host->card || !host->bus_ops || - !host->bus_ops->change_bus_speed || - !host->clk_scaling.enable || !host->ios.clock) - goto out; - - mmc_clk_scaling(host, true); - mmc_release_host(host); -out: - return; -} - -static bool mmc_is_vaild_state_for_clk_scaling(struct mmc_host *host) -{ - struct mmc_card *card = host->card; - u32 status; - bool ret = false; - - /* - * If the current partition type is RPMB, clock switching may not - * work properly as sending tuning command (CMD21) is illegal in - * this mode. 
- */ - if (!card || (mmc_card_mmc(card) && - card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) - || host->clk_scaling.invalid_state) - goto out; - - if (mmc_send_status(card, &status)) { - pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); - goto out; - } - - switch (R1_CURRENT_STATE(status)) { - case R1_STATE_TRAN: - ret = true; - break; - default: - break; - } -out: - return ret; -} - -static int mmc_clk_update_freq(struct mmc_host *host, - unsigned long freq, enum mmc_load state) -{ - int err = 0; - - if (host->ops->notify_load) { - err = host->ops->notify_load(host, state); - if (err) - goto out; - } - - if (freq != host->clk_scaling.curr_freq) { - if (!mmc_is_vaild_state_for_clk_scaling(host)) { - err = -EAGAIN; - goto error; - } - - err = host->bus_ops->change_bus_speed(host, &freq); - if (!err) - host->clk_scaling.curr_freq = freq; - else - pr_err("%s: %s: failed (%d) at freq=%lu\n", - mmc_hostname(host), __func__, err, freq); - } -error: - if (err) { - /* restore previous state */ - if (host->ops->notify_load) - host->ops->notify_load(host, host->clk_scaling.state); - } -out: - return err; -} - -/** - * mmc_clk_scaling() - clock scaling decision algorithm - * @host: pointer to mmc host structure - * @from_wq: variable that specifies the context in which - * mmc_clk_scaling() is called. - * - * Calculate load percentage based on host busy time - * and total sampling interval and decide clock scaling - * based on scale up/down thresholds. - * If load is greater than up threshold increase the - * frequency to maximum as supported by host. Else, - * if load is less than down threshold, scale down the - * frequency to minimum supported by the host. Otherwise, - * retain current frequency and do nothing. 
- */ -static void mmc_clk_scaling(struct mmc_host *host, bool from_wq) -{ - int err = 0; - struct mmc_card *card = host->card; - unsigned long total_time_ms = 0; - unsigned long busy_time_ms = 0; - unsigned long freq; - unsigned int up_threshold = host->clk_scaling.up_threshold; - unsigned int down_threshold = host->clk_scaling.down_threshold; - bool queue_scale_down_work = false; - enum mmc_load state; - - if (!card || !host->bus_ops || !host->bus_ops->change_bus_speed) { - pr_err("%s: %s: invalid entry\n", mmc_hostname(host), __func__); - goto out; - } - - /* Check if the clocks are already gated. */ - if (!host->ios.clock) - goto out; - - if (time_is_after_jiffies(host->clk_scaling.window_time + - msecs_to_jiffies(host->clk_scaling.polling_delay_ms))) - goto out; - - /* handle time wrap */ - total_time_ms = jiffies_to_msecs((long)jiffies - - (long)host->clk_scaling.window_time); - - /* Check if we re-enter during clock switching */ - if (unlikely(host->clk_scaling.in_progress)) - goto out; - - host->clk_scaling.in_progress = true; - - busy_time_ms = host->clk_scaling.busy_time_us / USEC_PER_MSEC; - - freq = host->clk_scaling.curr_freq; - state = host->clk_scaling.state; - - /* - * Note that the max. and min. frequency should be based - * on the timing modes that the card and host handshake - * during initialization. 
- */ - if ((busy_time_ms * 100 > total_time_ms * up_threshold)) { - freq = mmc_get_max_frequency(host); - state = MMC_LOAD_HIGH; - } else if ((busy_time_ms * 100 < total_time_ms * down_threshold)) { - if (!from_wq) - queue_scale_down_work = true; - freq = mmc_get_min_frequency(host); - state = MMC_LOAD_LOW; - } - - if (state != host->clk_scaling.state) { - if (!queue_scale_down_work) { - if (!from_wq) - cancel_delayed_work_sync( - &host->clk_scaling.work); - err = mmc_clk_update_freq(host, freq, state); - if (!err) - host->clk_scaling.state = state; - else if (err == -EAGAIN) - goto no_reset_stats; - } else { - /* - * We hold claim host while queueing the scale down - * work, so delay atleast one timer tick to release - * host and re-claim while scaling down the clocks. - */ - queue_delayed_work(system_wq, - &host->clk_scaling.work, 1); - goto no_reset_stats; - } - } - - mmc_reset_clk_scale_stats(host); -no_reset_stats: - host->clk_scaling.in_progress = false; -out: - return; -} - -/** - * mmc_disable_clk_scaling() - Disable clock scaling - * @host: pointer to mmc host structure - * - * Disables clock scaling temporarily by setting enable - * property to false. To disable completely, one also - * need to set 'initialized' variable to false. - */ -void mmc_disable_clk_scaling(struct mmc_host *host) -{ - if (host->clk_scaling.initialized) { - cancel_delayed_work_sync(&host->clk_scaling.work); - host->clk_scaling.enable = false; - } -} -EXPORT_SYMBOL_GPL(mmc_disable_clk_scaling); - -/** - * mmc_can_scale_clk() - Check if clock scaling is initialized - * @host: pointer to mmc host structure - */ -bool mmc_can_scale_clk(struct mmc_host *host) -{ - return host->clk_scaling.initialized; -} -EXPORT_SYMBOL_GPL(mmc_can_scale_clk); - -/** - * mmc_init_clk_scaling() - Initialize clock scaling - * @host: pointer to mmc host structure - * - * Initialize clock scaling for supported hosts. 
- * It is assumed that the caller ensure clock is - * running at maximum possible frequency before - * calling this function. - */ -void mmc_init_clk_scaling(struct mmc_host *host) -{ - if (!host->card || !(host->caps2 & MMC_CAP2_CLK_SCALE)) - return; - - INIT_DELAYED_WORK(&host->clk_scaling.work, mmc_clk_scale_work); - host->clk_scaling.curr_freq = mmc_get_max_frequency(host); - if (host->ops->notify_load) - host->ops->notify_load(host, MMC_LOAD_HIGH); - host->clk_scaling.state = MMC_LOAD_HIGH; - mmc_reset_clk_scale_stats(host); - host->clk_scaling.enable = true; - host->clk_scaling.initialized = true; - pr_debug("%s: clk scaling enabled\n", mmc_hostname(host)); -} -EXPORT_SYMBOL_GPL(mmc_init_clk_scaling); - -/** - * mmc_exit_clk_scaling() - Disable clock scaling - * @host: pointer to mmc host structure - * - * Disable clock scaling permanently. - */ -void mmc_exit_clk_scaling(struct mmc_host *host) -{ - cancel_delayed_work_sync(&host->clk_scaling.work); - memset(&host->clk_scaling, 0, sizeof(host->clk_scaling)); -} -EXPORT_SYMBOL_GPL(mmc_exit_clk_scaling); - static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) { host->f_init = freq; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 9bf804ee35a2..c4ab8ce9fd90 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -41,6 +41,8 @@ void mmc_init_erase(struct mmc_card *card); void mmc_set_chip_select(struct mmc_host *host, int mode); void mmc_set_clock(struct mmc_host *host, unsigned int hz); +int mmc_clk_update_freq(struct mmc_host *host, + unsigned long freq, enum mmc_load state); void mmc_gate_clock(struct mmc_host *host); void mmc_ungate_clock(struct mmc_host *host); void mmc_set_ungated(struct mmc_host *host); @@ -94,8 +96,8 @@ void mmc_init_context_info(struct mmc_host *host); extern void mmc_disable_clk_scaling(struct mmc_host *host); extern bool mmc_can_scale_clk(struct mmc_host *host); -extern void mmc_init_clk_scaling(struct mmc_host *host); -extern void 
mmc_exit_clk_scaling(struct mmc_host *host); +extern int mmc_init_clk_scaling(struct mmc_host *host); +extern int mmc_exit_clk_scaling(struct mmc_host *host); extern void mmc_reset_clk_scale_stats(struct mmc_host *host); extern unsigned long mmc_get_max_frequency(struct mmc_host *host); diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 1479a96dfee4..9f75060d3b0c 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -234,6 +234,45 @@ static int mmc_clock_opt_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set, "%llu\n"); +#include <linux/delay.h> + +static int mmc_scale_get(void *data, u64 *val) +{ + struct mmc_host *host = data; + + *val = host->clk_scaling.curr_freq; + + return 0; +} + +static int mmc_scale_set(void *data, u64 val) +{ + int err = 0; + struct mmc_host *host = data; + + mmc_claim_host(host); + mmc_host_clk_hold(host); + + /* change frequency from sysfs manually */ + err = mmc_clk_update_freq(host, val, host->clk_scaling.state); + if (err == -EAGAIN) + err = 0; + else if (err) + pr_err("%s: clock scale to %llu failed with error %d\n", + mmc_hostname(host), val, err); + else + pr_debug("%s: clock change to %llu finished successfully (%s)\n", + mmc_hostname(host), val, current->comm); + + mmc_host_clk_release(host); + mmc_release_host(host); + + return err; +} + +DEFINE_SIMPLE_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set, + "%llu\n"); + static int mmc_max_clock_get(void *data, u64 *val) { struct mmc_host *host = data; @@ -300,6 +339,15 @@ void mmc_add_host_debugfs(struct mmc_host *host) &mmc_max_clock_fops)) goto err_node; + if (!debugfs_create_file("scale", S_IRUSR | S_IWUSR, root, host, + &mmc_scale_fops)) + goto err_node; + + if (!debugfs_create_bool("skip_clk_scale_freq_update", + S_IRUSR | S_IWUSR, root, + &host->clk_scaling.skip_clk_scale_freq_update)) + goto err_node; + #ifdef CONFIG_MMC_CLKGATE if (!debugfs_create_u32("clk_delay", (S_IRUSR 
| S_IWUSR), root, &host->clk_delay)) diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index b0e8a7b69774..2871f21803bd 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -34,6 +34,9 @@ #include "pwrseq.h" #define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev) +#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35 +#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5 +#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100 static DEFINE_IDR(mmc_host_idr); static DEFINE_SPINLOCK(mmc_host_lock); @@ -638,45 +641,27 @@ static ssize_t store_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mmc_host *host = cls_dev_to_mmc_host(dev); - unsigned long value, freq; - int retval = -EINVAL; + unsigned long value; - if (!host) - goto out; + if (!host || kstrtoul(buf, 0, &value)) + return -EINVAL; mmc_claim_host(host); - if (!host->card || kstrtoul(buf, 0, &value)) - goto err; - - if (value && !mmc_can_scale_clk(host)) { + if (!value && host->clk_scaling.enable) { + /* turning off clock scaling */ + mmc_exit_clk_scaling(host); + host->caps2 &= ~MMC_CAP2_CLK_SCALE; + } else if (value) { + /* starting clock scaling, will restart if already started */ host->caps2 |= MMC_CAP2_CLK_SCALE; + if (host->clk_scaling.enable) + mmc_exit_clk_scaling(host); mmc_init_clk_scaling(host); - - if (!mmc_can_scale_clk(host)) { - host->caps2 &= ~MMC_CAP2_CLK_SCALE; - goto err; - } - } else if (!value && mmc_can_scale_clk(host)) { - host->caps2 &= ~MMC_CAP2_CLK_SCALE; - mmc_disable_clk_scaling(host); - - /* Set to max. 
frequency, since we are disabling */ - if (host->bus_ops && host->bus_ops->change_bus_speed) { - freq = mmc_get_max_frequency(host); - if (host->bus_ops->change_bus_speed(host, &freq)) - goto err; - } - if (host->ops->notify_load && - host->ops->notify_load(host, MMC_LOAD_HIGH)) - goto err; - host->clk_scaling.state = MMC_LOAD_HIGH; - host->clk_scaling.initialized = false; } - retval = count; -err: + mmc_release_host(host); -out: - return retval; + + return count; } static ssize_t show_up_threshold(struct device *dev, @@ -687,7 +672,7 @@ static ssize_t show_up_threshold(struct device *dev, if (!host) return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.up_threshold); + return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold); } #define MAX_PERCENTAGE 100 @@ -700,7 +685,7 @@ static ssize_t store_up_threshold(struct device *dev, if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE)) return -EINVAL; - host->clk_scaling.up_threshold = value; + host->clk_scaling.upthreshold = value; pr_debug("%s: clkscale_up_thresh set to %lu\n", mmc_hostname(host), value); @@ -716,7 +701,7 @@ static ssize_t show_down_threshold(struct device *dev, return -EINVAL; return snprintf(buf, PAGE_SIZE, "%d\n", - host->clk_scaling.down_threshold); + host->clk_scaling.downthreshold); } static ssize_t store_down_threshold(struct device *dev, @@ -728,7 +713,7 @@ static ssize_t store_down_threshold(struct device *dev, if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE)) return -EINVAL; - host->clk_scaling.down_threshold = value; + host->clk_scaling.downthreshold = value; pr_debug("%s: clkscale_down_thresh set to %lu\n", mmc_hostname(host), value); @@ -867,15 +852,16 @@ int mmc_add_host(struct mmc_host *host) led_trigger_register_simple(dev_name(&host->class_dev), &host->led); + host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD; + host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD; + 
host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC; + host->clk_scaling.skip_clk_scale_freq_update = false; + #ifdef CONFIG_DEBUG_FS mmc_add_host_debugfs(host); #endif mmc_host_clk_sysfs_init(host); - host->clk_scaling.up_threshold = 35; - host->clk_scaling.down_threshold = 5; - host->clk_scaling.polling_delay_ms = 100; - err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp); if (err) pr_err("%s: failed to create clk scale sysfs group with err %d\n", diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index d08629b4c385..dda6600a25d9 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1809,6 +1809,16 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } } + card->clk_scaling_lowest = host->f_min; + if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) || + (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) + card->clk_scaling_highest = card->ext_csd.hs200_max_dtr; + else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) || + (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52)) + card->clk_scaling_highest = card->ext_csd.hs_max_dtr; + else + card->clk_scaling_highest = card->csd.max_dtr; + /* * Choose the power class with selected bus interface */ @@ -2110,7 +2120,8 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) * Disable clock scaling before suspend and enable it after resume so * as to avoid clock scaling decisions kicking in during this window. 
*/ - mmc_disable_clk_scaling(host); + if (mmc_can_scale_clk(host)) + mmc_disable_clk_scaling(host); if (mmc_card_doing_bkops(host->card)) { err = mmc_stop_bkops(host->card); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 8fefdc9bf153..c7aaa9bbb1be 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1101,6 +1101,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, } } + card->clk_scaling_highest = mmc_sd_get_max_clock(card); + card->clk_scaling_lowest = host->f_min; + host->card = card; return 0; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index e44c5c4c1043..c60b2a9ddcd6 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1405,6 +1405,16 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, pdata->cpu_dma_latency_us = cpu_dma_latency; else pdata->cpu_dma_latency_us = MSM_MMC_DEFAULT_CPU_DMA_LATENCY; + + if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table", + &msm_host->mmc->clk_scaling.freq_table, + &msm_host->mmc->clk_scaling.freq_table_sz, 0)) + pr_debug("%s: no clock scaling frequencies were supplied\n", + dev_name(dev)); + else if (!msm_host->mmc->clk_scaling.freq_table || + !msm_host->mmc->clk_scaling.freq_table_sz) + dev_err(dev, "bad dts clock scaling frequencies\n"); + if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates", &clk_table, &clk_table_len, 0)) { dev_err(dev, "failed parsing supported clock rates\n"); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 59ef4e717beb..ae943ccb331e 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -275,6 +275,10 @@ struct mmc_card { struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ u32 ocr; /* the current OCR setting */ + unsigned long clk_scaling_lowest; /* lowest scaleable + * frequency */ + unsigned long clk_scaling_highest; /* highest scaleable + * frequency */ unsigned int rca; /* relative card address 
of device */ unsigned int type; /* card type */ #define MMC_TYPE_MMC 0 /* MMC card */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 2ba8ba249e00..5afbac264ddd 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -15,6 +15,7 @@ #include <linux/timer.h> #include <linux/sched.h> #include <linux/device.h> +#include <linux/devfreq.h> #include <linux/fault-inject.h> #include <linux/mmc/core.h> @@ -224,9 +225,57 @@ enum dev_state { DEV_RESUMED, }; +/** + * struct mmc_devfeq_clk_scaling - main context for MMC clock scaling logic + * + * @lock: spinlock to protect statistics + * @devfreq: struct that represents mmc-host as a client for devfreq + * @devfreq_profile: MMC device profile, mostly polling interval and callbacks + * @ondemand_gov_data: struct supplied to ondemand governor (thresholds) + * @state: load state, can be HIGH or LOW. used to notify mmc_host_ops callback + * @start_busy: timestamp taken once a data request is started + * @measure_interval_start: timestamp taken once a measure interval is started + * @devfreq_abort: flag to sync between different contexts relevant to devfreq + * @skip_clk_scale_freq_update: flag that enables/disables frequency change + * @freq_table_sz: table size of frequencies supplied to devfreq + * @freq_table: frequencies table supplied to devfreq + * @curr_freq: current frequency + * @polling_delay_ms: polling interval for status collection used by devfreq + * @upthreshold: up-threshold supplied to ondemand governor + * @downthreshold: down-threshold supplied to ondemand governor + * @need_freq_change: flag indicating if a frequency change is required + * @clk_scaling_in_progress: flag indicating if there's ongoing frequency change + * @is_busy_started: flag indicating if a request is handled by the HW + * @enable: flag indicating if the clock scaling logic is enabled for this host + */ +struct mmc_devfeq_clk_scaling { + spinlock_t lock; + struct devfreq *devfreq; + struct 
devfreq_dev_profile devfreq_profile; + struct devfreq_simple_ondemand_data ondemand_gov_data; + enum mmc_load state; + ktime_t start_busy; + ktime_t measure_interval_start; + atomic_t devfreq_abort; + bool skip_clk_scale_freq_update; + int freq_table_sz; + u32 *freq_table; + unsigned long total_busy_time_us; + unsigned long target_freq; + unsigned long curr_freq; + unsigned long polling_delay_ms; + unsigned int upthreshold; + unsigned int downthreshold; + bool need_freq_change; + bool clk_scaling_in_progress; + bool is_busy_started; + bool enable; +}; + struct mmc_host { struct device *parent; struct device class_dev; + struct mmc_devfeq_clk_scaling clk_scaling; int index; const struct mmc_host_ops *ops; struct mmc_pwrseq *pwrseq; @@ -441,22 +490,6 @@ struct mmc_host { } perf; bool perf_enable; #endif - struct { - unsigned long busy_time_us; - unsigned long window_time; - unsigned long curr_freq; - unsigned long polling_delay_ms; - unsigned int up_threshold; - unsigned int down_threshold; - ktime_t start_busy; - bool enable; - bool initialized; - bool in_progress; - /* freq. transitions are not allowed in invalid state */ - bool invalid_state; - struct delayed_work work; - enum mmc_load state; - } clk_scaling; enum dev_state dev_status; bool wakeup_on_idle; unsigned long private[0] ____cacheline_aligned; |
