22 files changed, 315 insertions, 224 deletions
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 96518dca0ec1..818b213f355d 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1192,41 +1192,49 @@
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 2>;
 			qcom,secure-context-bank;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 8>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb2 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 9>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb3 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 10>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb4 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 11>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb6 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 5>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb7 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 6>;
+			dma-coherent;
 		};
 		qcom,msm_fastrpc_compute_cb8 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
 			iommus = <&lpass_q6_smmu 7>;
+			dma-coherent;
 		};
 	};
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 63dc23387133..67c1207d35be 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -163,6 +163,7 @@ struct fastrpc_smmu {
 	int enabled;
 	int faults;
 	int secure;
+	int coherent;
 };
 
 struct fastrpc_session_ctx {
@@ -1129,6 +1130,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
+		if (ctx->fl->sctx->smmu.coherent)
+			continue;
 		if (map && map->uncached)
 			continue;
 		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
@@ -1141,7 +1144,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
 		rpra[inh + i].h = ctx->lpra[inh + i].h;
 	}
-	dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
+	if (!ctx->fl->sctx->smmu.coherent)
+		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
 bail:
 	return err;
 }
@@ -1372,13 +1376,15 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		goto bail;
 	}
 
-	inv_args_pre(ctx);
-	if (FASTRPC_MODE_SERIAL == mode)
-		inv_args(ctx);
+	if (!fl->sctx->smmu.coherent) {
+		inv_args_pre(ctx);
+		if (mode == FASTRPC_MODE_SERIAL)
+			inv_args(ctx);
+	}
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
 	if (err)
 		goto bail;
-	if (FASTRPC_MODE_PARALLEL == mode)
+	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
 		inv_args(ctx);
 wait:
 	if (kernel)
@@ -2301,6 +2307,8 @@ static int fastrpc_cb_probe(struct device *dev)
 	sess = &chan->session[chan->sesscount];
 	sess->smmu.cb = iommuspec.args[0];
 	sess->used = 0;
+	sess->smmu.coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
 	sess->smmu.secure = of_property_read_bool(dev->of_node,
 						"qcom,secure-context-bank");
 	if (sess->smmu.secure)
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 05272118af16..46e791b3cb99 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -2374,7 +2374,7 @@ static struct mux_clk gcc_debug_mux = {
 		{ &debug_cpu_clk.c, 0x00c0 },
 		{ &snoc_clk.c, 0x0000 },
 		{ &cnoc_clk.c, 0x000e },
-		{ &bimc_clk.c, 0x00a9 },
+		{ &bimc_clk.c, 0x014e },
 		{ &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
 		{ &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
 		{ &gcc_usb30_master_clk.c, 0x003e },
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index d6cdbbc78827..d29fd60719c9 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -202,6 +202,8 @@ enum clk_osm_trace_packet_id {
 #define TRACE_CTRL_EN_MASK		BIT(0)
 #define TRACE_CTRL_ENABLE		1
 #define TRACE_CTRL_DISABLE		0
+#define TRACE_CTRL_ENABLE_WDOG_STATUS		BIT(30)
+#define TRACE_CTRL_ENABLE_WDOG_STATUS_MASK	BIT(30)
 #define TRACE_CTRL_PACKET_TYPE_MASK	BVAL(2, 1, 3)
 #define TRACE_CTRL_PACKET_TYPE_SHIFT	1
 #define TRACE_CTRL_PERIODIC_TRACE_EN_MASK	BIT(3)
@@ -221,6 +223,11 @@ enum clk_osm_trace_packet_id {
 #define PERFCL_EFUSE_SHIFT	29
 #define PERFCL_EFUSE_MASK	0x7
 
+#define MSMCOBALTV1_PWRCL_BOOT_RATE	1478400000
+#define MSMCOBALTV1_PERFCL_BOOT_RATE	1536000000
+#define MSMCOBALTV2_PWRCL_BOOT_RATE	1555200000
+#define MSMCOBALTV2_PERFCL_BOOT_RATE	1728000000
+
 static void __iomem *virt_base;
 static void __iomem *debug_base;
@@ -2687,6 +2694,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	if (msmcobalt_v2) {
+		/* Enable OSM WDOG registers */
+		clk_osm_masked_write_reg(&pwrcl_clk,
+					TRACE_CTRL_ENABLE_WDOG_STATUS,
+					TRACE_CTRL,
+					TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+		clk_osm_masked_write_reg(&perfcl_clk,
+					TRACE_CTRL_ENABLE_WDOG_STATUS,
+					TRACE_CTRL,
+					TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+	}
+
 	/*
 	 * The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
 	 * frequency before enabling OSM. LUT index 0 is always sourced from
@@ -2700,18 +2719,22 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
 	}
 	clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
 
-	/* Set 300MHz index */
-	rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+	/* Set boot rate */
+	rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
+			MSMCOBALTV1_PWRCL_BOOT_RATE :
+			MSMCOBALTV2_PWRCL_BOOT_RATE);
 	if (rc) {
-		dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+		dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
 			rc);
 		clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
 		return rc;
 	}
 
-	rc = clk_set_rate(&perfcl_clk.c, init_rate);
+	rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
+			MSMCOBALTV1_PERFCL_BOOT_RATE :
+			MSMCOBALTV2_PERFCL_BOOT_RATE);
 	if (rc) {
-		dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+		dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
 			rc);
 		clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
 		return rc;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index e9d16426d4a5..94d828027f20 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1243,86 +1243,6 @@ static bool regulators_left_on(struct kgsl_device *device)
 	return false;
 }
 
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
-	int i, ret;
-
-	if (!adreno_is_a540(adreno_dev))
-		return;
-
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
-		return;
-
-	for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
-		/* reset throttled cycles ivalue */
-		adreno_dev->busy_data.throttle_cycles[i] = 0;
-
-		if (adreno_dev->gpmu_throttle_counters[i] != 0)
-			continue;
-		ret = adreno_perfcounter_get(adreno_dev,
-			KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
-			ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
-			&adreno_dev->gpmu_throttle_counters[i],
-			NULL,
-			PERFCOUNTER_FLAG_KERNEL);
-		WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
-			ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
-	}
-}
-
-/* FW driven idle 10% throttle */
-#define IDLE_10PCT 0
-/* number of cycles when clock is throttled by 50% (CRC) */
-#define CRC_50PCT 1
-/* number of cycles when clock is throttled by more than 50% (CRC) */
-#define CRC_MORE50PCT 2
-/* number of cycles when clock is throttle by less than 50% (CRC) */
-#define CRC_LESS50PCT 3
-
-static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
-{
-	int i, adj;
-	uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
-	struct adreno_busy_data *busy = &adreno_dev->busy_data;
-
-	if (!adreno_is_a540(adreno_dev))
-		return 0;
-
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
-		return 0;
-
-	if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
-		return 0;
-
-	for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
-		if (!adreno_dev->gpmu_throttle_counters[i])
-			return 0;
-
-		th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
-			adreno_dev->gpmu_throttle_counters[i],
-			&busy->throttle_cycles[i]);
-	}
-	adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
-	adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
-
-	trace_kgsl_clock_throttling(
-		th[IDLE_10PCT], th[CRC_50PCT],
-		th[CRC_MORE50PCT], th[CRC_LESS50PCT],
-		adj);
-	return adj;
-}
-
-static void _update_threshold_count(struct adreno_device *adreno_dev,
-	uint64_t adj)
-{
-	if (adreno_is_a530(adreno_dev))
-		kgsl_regread(KGSL_DEVICE(adreno_dev),
-			adreno_dev->lm_threshold_count,
-			&adreno_dev->lm_threshold_cross);
-	else if (adreno_is_a540(adreno_dev))
-		adreno_dev->lm_threshold_cross = adj;
-}
-
 static void _set_secvid(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1419,8 +1339,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 		}
 	}
 
-	if (device->pwrctrl.bus_control) {
+	if (device->pwrctrl.bus_control) {
 		/* VBIF waiting for RAM */
 		if (adreno_dev->starved_ram_lo == 0) {
 			ret = adreno_perfcounter_get(adreno_dev,
@@ -1456,20 +1376,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 	adreno_dev->busy_data.vbif_ram_cycles = 0;
 	adreno_dev->busy_data.vbif_starved_ram = 0;
 
-	if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
-		&& adreno_dev->lm_threshold_count == 0) {
-
-		ret = adreno_perfcounter_get(adreno_dev,
-			KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
-			&adreno_dev->lm_threshold_count, NULL,
-			PERFCOUNTER_FLAG_KERNEL);
-		/* Ignore noncritical ret - used for debugfs */
-		if (ret)
-			adreno_dev->lm_threshold_count = 0;
-	}
-
-	_setup_throttling_counters(adreno_dev);
-
 	/* Restore performance counter registers with saved values */
 	adreno_perfcounter_restore(adreno_dev);
@@ -2576,27 +2482,6 @@ static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
 	return ticks / freq;
 }
 
-static unsigned int counter_delta(struct kgsl_device *device,
-	unsigned int reg, unsigned int *counter)
-{
-	unsigned int val;
-	unsigned int ret = 0;
-
-	/* Read the value */
-	kgsl_regread(device, reg, &val);
-
-	/* Return 0 for the first read */
-	if (*counter != 0) {
-		if (val < *counter)
-			ret = (0xFFFFFFFF - *counter) + val;
-		else
-			ret = val - *counter;
-	}
-
-	*counter = val;
-	return ret;
-}
-
 /**
  * adreno_power_stats() - Reads the counters needed for freq decisions
  * @device: Pointer to device whose counters are read
@@ -2608,6 +2493,7 @@ static void adreno_power_stats(struct kgsl_device *device,
 				struct kgsl_power_stats *stats)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct adreno_busy_data *busy = &adreno_dev->busy_data;
 	uint64_t adj = 0;
@@ -2621,8 +2507,11 @@ static void adreno_power_stats(struct kgsl_device *device,
 		gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
 			&busy->gpu_busy);
 
-		adj = _read_throttling_counters(adreno_dev);
-		gpu_busy += adj;
+		if (gpudev->read_throttling_counters) {
+			adj = gpudev->read_throttling_counters(adreno_dev);
+			gpu_busy += adj;
+		}
+
 		stats->busy_time = adreno_ticks_to_us(gpu_busy,
			kgsl_pwrctrl_active_freq(pwr));
 	}
@@ -2643,8 +2532,9 @@ static void adreno_power_stats(struct kgsl_device *device,
 		stats->ram_time = ram_cycles;
 		stats->ram_wait = starved_ram;
 	}
-	if (adreno_dev->lm_threshold_count)
-		_update_threshold_count(adreno_dev, adj);
+	if (adreno_dev->lm_threshold_count &&
+		gpudev->count_throttles)
+		gpudev->count_throttles(adreno_dev, adj);
 }
 
 static unsigned int adreno_gpuid(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 295a3d80d476..0f3403cb0095 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -756,6 +756,10 @@ struct adreno_gpudev {
 	void (*pwrlevel_change_settings)(struct adreno_device *,
				unsigned int prelevel, unsigned int postlevel,
				bool post);
+	uint64_t (*read_throttling_counters)(struct adreno_device *);
+	void (*count_throttles)(struct adreno_device *, uint64_t adj);
+	int (*enable_pwr_counters)(struct adreno_device *,
				unsigned int counter);
 	unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
				struct adreno_ringbuffer *rb, unsigned int *,
				struct kgsl_context *);
@@ -1466,4 +1470,24 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
 	spin_unlock_irqrestore(&rb->preempt_lock, flags);
 }
 
+static inline unsigned int counter_delta(struct kgsl_device *device,
+			unsigned int reg, unsigned int *counter)
+{
+	unsigned int val;
+	unsigned int ret = 0;
+
+	/* Read the value */
+	kgsl_regread(device, reg, &val);
+
+	/* Return 0 for the first read */
+	if (*counter != 0) {
+		if (val < *counter)
+			ret = (0xFFFFFFFF - *counter) + val;
+		else
+			ret = val - *counter;
+	}
+
+	*counter = val;
+	return ret;
+}
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index e67bb92c0c28..2891940b8f5b 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -27,6 +27,7 @@
 #include "kgsl_sharedmem.h"
 #include "kgsl_log.h"
 #include "kgsl.h"
+#include "kgsl_trace.h"
 #include "adreno_a5xx_packets.h"
 
 static int zap_ucode_loaded;
@@ -1543,6 +1544,76 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
 	}
 }
 
+static void a5xx_count_throttles(struct adreno_device *adreno_dev,
+	uint64_t adj)
+{
+	if (adreno_is_a530(adreno_dev))
+		kgsl_regread(KGSL_DEVICE(adreno_dev),
+			adreno_dev->lm_threshold_count,
+			&adreno_dev->lm_threshold_cross);
+	else if (adreno_is_a540(adreno_dev))
+		adreno_dev->lm_threshold_cross = adj;
+}
+
+static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+		unsigned int counter)
+{
+	/*
+	 * On 5XX we have to emulate the PWR counters which are physically
+	 * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
+	 * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
+	 * to take away too many of the generic RBBM counters.
+	 */
+
+	if (counter == 0)
+		return -EINVAL;
+
+	kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+	return 0;
+}
+
+/* FW driven idle 10% throttle */
+#define IDLE_10PCT 0
+/* number of cycles when clock is throttled by 50% (CRC) */
+#define CRC_50PCT 1
+/* number of cycles when clock is throttled by more than 50% (CRC) */
+#define CRC_MORE50PCT 2
+/* number of cycles when clock is throttle by less than 50% (CRC) */
+#define CRC_LESS50PCT 3
+
+static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
+{
+	int i, adj;
+	uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
+	struct adreno_busy_data *busy = &adreno_dev->busy_data;
+
+	if (!adreno_is_a540(adreno_dev))
+		return 0;
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+		return 0;
+
+	if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
+		return 0;
+
+	for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+		if (!adreno_dev->gpmu_throttle_counters[i])
+			return 0;
+
+		th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
+			adreno_dev->gpmu_throttle_counters[i],
+			&busy->throttle_cycles[i]);
+	}
+	adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
+	adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
+
+	trace_kgsl_clock_throttling(
+		th[IDLE_10PCT], th[CRC_50PCT],
+		th[CRC_MORE50PCT], th[CRC_LESS50PCT],
+		adj);
+	return adj;
+}
 
 static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
 {
@@ -1599,12 +1670,44 @@ static void a5xx_gpmu_reset(struct work_struct *work)
 	/* Soft reset of the GPMU block */
 	kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
 
+	/* GPU comes up in secured mode, make it unsecured by default */
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+		kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+	a5xx_gpmu_init(adreno_dev);
+
 out:
 	mutex_unlock(&device->mutex);
 }
 
+static void _setup_throttling_counters(struct adreno_device *adreno_dev)
+{
+	int i, ret;
+
+	if (!adreno_is_a540(adreno_dev))
+		return;
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+		return;
+
+	for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+		/* reset throttled cycles ivalue */
+		adreno_dev->busy_data.throttle_cycles[i] = 0;
+
+		if (adreno_dev->gpmu_throttle_counters[i] != 0)
+			continue;
+		ret = adreno_perfcounter_get(adreno_dev,
+			KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
+			ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
+			&adreno_dev->gpmu_throttle_counters[i],
+			NULL,
+			PERFCOUNTER_FLAG_KERNEL);
+		WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
+			ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
+	}
+}
+
 /*
  * a5xx_start() - Device start
  * @adreno_dev: Pointer to adreno device
@@ -1616,6 +1719,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int bit;
+	int ret;
+
+	if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
+		&& adreno_dev->lm_threshold_count == 0) {
+
+		ret = adreno_perfcounter_get(adreno_dev,
+			KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
+			&adreno_dev->lm_threshold_count, NULL,
+			PERFCOUNTER_FLAG_KERNEL);
+		/* Ignore noncritical ret - used for debugfs */
+		if (ret)
+			adreno_dev->lm_threshold_count = 0;
+	}
+
+	_setup_throttling_counters(adreno_dev);
 
 	adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
			ARRAY_SIZE(a5xx_vbif_platforms));
@@ -1921,11 +2039,6 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
 static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
 {
 	int ret;
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
-	/* GPU comes up in secured mode, make it unsecured by default */
-	if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
-		kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
 
 	/* Set up LM before initializing the GPMU */
 	a5xx_lm_init(adreno_dev);
@@ -2246,20 +2359,10 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
 	if (ret)
 		return ret;
 
-	/* Set up LM before initializing the GPMU */
-	a5xx_lm_init(adreno_dev);
-
-	/* Enable SPTP based power collapse before enabling GPMU */
-	a5xx_enable_pc(adreno_dev);
-
-	/* Program the GPMU */
-	ret = a5xx_gpmu_start(adreno_dev);
+	ret = a5xx_gpmu_init(adreno_dev);
 	if (ret)
 		return ret;
 
-	/* Enable limits management */
-	a5xx_lm_enable(adreno_dev);
-
 	a5xx_post_start(adreno_dev);
 
 	return 0;
@@ -3421,6 +3524,9 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
 	.regulator_enable = a5xx_regulator_enable,
 	.regulator_disable = a5xx_regulator_disable,
 	.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
+	.read_throttling_counters = a5xx_read_throttling_counters,
+	.count_throttles = a5xx_count_throttles,
+	.enable_pwr_counters = a5xx_enable_pwr_counters,
 	.preemption_pre_ibsubmit =
			a5xx_preemption_pre_ibsubmit,
 	.preemption_yield_enable = a5xx_preemption_yield_enable,
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 8e354d71a291..42f8119ad8b4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -598,28 +598,6 @@ int adreno_perfcounter_put(struct adreno_device *adreno_dev,
 	return -EINVAL;
 }
 
-static int _perfcounter_enable_pwr(struct adreno_device *adreno_dev,
-	unsigned int counter)
-{
-	/* PWR counters enabled by default on A3XX/A4XX so nothing to do */
-	if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev))
-		return 0;
-
-	/*
-	 * On 5XX we have to emulate the PWR counters which are physically
-	 * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
-	 * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
-	 * to take away too many of the generic RBBM counters.
-	 */
-
-	if (counter == 0)
-		return -EINVAL;
-
-	kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
-	return 0;
-}
-
 static void _perfcounter_enable_vbif(struct adreno_device *adreno_dev,
		struct adreno_perfcounters *counters, unsigned int counter,
		unsigned int countable)
@@ -771,6 +749,7 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
 	unsigned int group, unsigned int counter, unsigned int countable)
 {
 	struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (counters == NULL)
 		return -EINVAL;
@@ -786,7 +765,9 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
 		/* alwayson counter is global, so init value is 0 */
 		break;
 	case KGSL_PERFCOUNTER_GROUP_PWR:
-		return _perfcounter_enable_pwr(adreno_dev, counter);
+		if (gpudev->enable_pwr_counters)
+			return gpudev->enable_pwr_counters(adreno_dev, counter);
+		return 0;
 	case KGSL_PERFCOUNTER_GROUP_VBIF:
 		if (countable > VBIF2_PERF_CNT_SEL_MASK)
 			return -EINVAL;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 0e62811bf41b..3cd1c38f8f37 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -599,8 +599,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
 			goto dcvs_decision_done;
 		}
 		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
-				instance_count > 1))
+			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+			instance_count > 1)
 			is_dcvs_supported = false;
 	}
 	if (inst->session_type == MSM_VIDC_ENCODER) {
@@ -617,8 +617,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
 			goto dcvs_decision_done;
 		}
 		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
-				instance_count > 1))
+			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+			instance_count > 1)
 			is_dcvs_supported = false;
 	}
 dcvs_decision_done:
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index e0fb31de38ff..8332c7f4db43 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3336,7 +3336,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
 {
 	bool local_packet = false;
 	enum vidc_msg_prio log_level = VIDC_FW;
-	unsigned int pending_packet_count = 0;
 
 	if (!device) {
 		dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3361,23 +3360,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
 		log_level = VIDC_ERR;
 	}
 
-	/*
-	 * In FATAL situation, print all the pending messages in msg
-	 * queue. This is useful for debugging. At this time, message
-	 * queues may be corrupted. Hence don't trust them and just print
-	 * first max_packets packets.
-	 */
-
-	if (local_packet) {
-		dprintk(VIDC_ERR,
-			"Printing all pending messages in message Queue\n");
-		while (!__iface_msgq_read(device, packet) &&
-				pending_packet_count < max_packets) {
-			__dump_packet(packet, log_level);
-			pending_packet_count++;
-		}
-	}
-
 	while (!__iface_dbgq_read(device, packet)) {
 		struct hfi_msg_sys_coverage_packet *pkt =
			(struct hfi_msg_sys_coverage_packet *) packet;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 0eab77d27760..50c387ec785d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1420,6 +1420,7 @@ static ssize_t ipa_read_nat4(struct file *file,
 	u16 enable, tbl_entry, flag;
 	u32 no_entrys = 0;
 
+	mutex_lock(&ipa_ctx->nat_mem.lock);
 	value = ipa_ctx->nat_mem.public_ip_addr;
 	pr_err(
				"Table IP Address:%d.%d.%d.%d\n",
@@ -1573,6 +1574,7 @@ static ssize_t ipa_read_nat4(struct file *file,
 		}
 	}
 	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa_ctx->nat_mem.lock);
 
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3915f652d87b..25e5e3b74f26 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1478,6 +1478,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
 	u16 enable, tbl_entry, flag;
 	u32 no_entrys = 0;
 
+	mutex_lock(&ipa3_ctx->nat_mem.lock);
 	value = ipa3_ctx->nat_mem.public_ip_addr;
 	pr_err(
				"Table IP Address:%d.%d.%d.%d\n",
@@ -1631,6 +1632,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
 		}
 	}
 	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa3_ctx->nat_mem.lock);
 
 	return 0;
 }
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 545a1e684b25..8af1eb66c699 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -269,6 +269,7 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(pd_active),
 	POWER_SUPPLY_ATTR(charger_temp),
 	POWER_SUPPLY_ATTR(charger_temp_max),
+	POWER_SUPPLY_ATTR(parallel_disable),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index ee576d300054..8aaeb095db3c 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -342,6 +342,7 @@ static enum power_supply_property smb2_usb_props[] = {
 	POWER_SUPPLY_PROP_PD_ACTIVE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -404,6 +405,10 @@ static int smb2_usb_get_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
 		rc = smblib_get_prop_usb_current_now(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		val->intval = get_client_vote(chg->pl_disable_votable,
+					      USER_VOTER);
+		break;
 	default:
 		pr_err("get prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -448,6 +453,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_PD_ACTIVE:
 		rc = smblib_set_prop_pd_active(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -462,6 +470,7 @@ static int smb2_usb_prop_is_writeable(struct power_supply *psy,
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
 		return 1;
 	default:
 		break;
@@ -678,7 +687,7 @@ static int smb2_batt_get_prop(struct power_supply *psy,
 		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_DONE:
-		val->intval = chg->chg_done;
+		rc = smblib_get_prop_batt_charge_done(chg, val);
 		break;
 	default:
 		pr_err("batt power supply prop %d not supported\n", psp);
@@ -710,9 +719,6 @@ static int smb2_batt_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = smblib_set_prop_batt_capacity(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_CHARGE_DONE:
-		chg->chg_done = val->intval;
-		break;
 	default:
 		rc = -EINVAL;
 	}
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 0067ec5c2ca2..ce76260be6f6 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -1165,6 +1165,24 @@ int smblib_get_prop_step_chg_step(struct smb_charger *chg,
 	return rc;
 }
 
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	val->intval = (stat == TERMINATE_CHARGE);
+	return 0;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -1749,6 +1767,22 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
 	return rc;
 }
 
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				union power_supply_propval *pval)
+{
+	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+	if (IS_ERR(chg->iio.batt_i_chan))
+		return PTR_ERR(chg->iio.batt_i_chan);
+
+	return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
+
 /**********************
  * INTERRUPT HANDLERS *
  **********************/
@@ -1793,7 +1827,6 @@ static void smblib_pl_handle_chg_state_change(struct smb_charger *chg, u8 stat)
 irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
 {
-	union power_supply_propval pval = {0, };
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 	u8 stat;
@@ -1810,9 +1843,6 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
 	stat = stat & BATTERY_CHARGER_STATUS_MASK;
 	smblib_pl_handle_chg_state_change(chg, stat);
-	pval.intval = (stat == TERMINATE_CHARGE);
-	power_supply_set_property(chg->batt_psy, POWER_SUPPLY_PROP_CHARGE_DONE,
-		&pval);
 	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
@@ -2440,6 +2470,8 @@ static void smblib_iio_deinit(struct smb_charger *chg)
 		iio_channel_release(chg->iio.usbin_i_chan);
 	if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
 		iio_channel_release(chg->iio.usbin_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		iio_channel_release(chg->iio.batt_i_chan);
 }
 
 int smblib_init(struct smb_charger *chg)
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 5b4c2016adc8..00975e6c1285 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -103,6 +103,7 @@ struct smb_iio {
 	struct iio_channel	*temp_max_chan;
 	struct iio_channel	*usbin_i_chan;
 	struct iio_channel	*usbin_v_chan;
+	struct iio_channel	*batt_i_chan;
 };
 
 struct smb_charger {
@@ -233,6 +234,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
				union power_supply_propval *val);
 int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_batt_health(struct smb_charger *chg,
				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
@@ -301,6 +304,9 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
				const union power_supply_propval *val);
 int smblib_set_prop_pd_active(struct smb_charger *chg,
				const union power_supply_propval *val);
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
 #endif /* __SMB2_CHARGER_H */
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 33d759be9aeb..e8ec2f49f7eb 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -48,8 +48,8 @@ static struct smb_params v1_params = {
 		.name	= "fast charge current",
 		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
 		.min_u	= 0,
-		.max_u	= 5000000,
-		.step_u	= 50000,
+		.max_u	= 4500000,
+		.step_u	= 25000,
 	},
 	.fv = {
 		.name	= "float voltage",
@@ -395,6 +395,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CHARGER_TEMP,
 	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 };
@@ -431,6 +432,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 		rc = smblib_get_charge_param(chg, &chg->param.fcc,
					     &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_slave_current_now(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
 		rc = smblib_get_prop_charger_temp(chg, val);
 		break;
@@ -1125,6 +1129,15 @@ static int smb138x_slave_probe(struct smb138x *chip)
 		return rc;
 	}
 
+	/* enable parallel current sensing */
+	rc = smblib_masked_write(chg, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	/* keep at the end of probe, ready to serve before notifying others */
 	rc = smb138x_init_parallel_psy(chip);
 	if (rc < 0) {
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index 03a1591e5b09..242071f52811 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -967,8 +967,10 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
 	cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
 
-	if (!cdata->client_buf)
-		goto cdata_alloc_fail;
+	if (!cdata->client_buf) {
+		pr_warn("Cannot allocate memory for client_buf\n");
+		goto client_buf_alloc_fail;
+	}
 
 	set_set_type(cdata->client_buf, set);
 	set_rsc_type(cdata->client_buf, rsc_type);
@@ -997,6 +999,8 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
 buf_alloc_fail:
 	kfree(cdata->kvp);
 kvp_alloc_fail:
+	kfree(cdata->client_buf);
+client_buf_alloc_fail:
 	kfree(cdata);
 cdata_alloc_fail:
 	return NULL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4ad994972b19..805c5e1931e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -421,7 +421,16 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
 	if (dep->endpoint.ep_type == EP_TYPE_GSI)
 		return;
 
-	if (dep->trb_pool && dep->trb_pool_dma) {
+	/*
+	 * Clean up ep ring to avoid getting xferInProgress due to stale trbs
+	 * with HWO bit set from previous composition when update transfer cmd
+	 * is issued.
+	 */
+	if (dep->number > 1 && dep->trb_pool && dep->trb_pool_dma) {
+		memset(&dep->trb_pool[0], 0,
+			sizeof(struct dwc3_trb) * dep->num_trbs);
+		dbg_event(dep->number, "Clr_TRB", 0);
+
 		dma_free_coherent(dwc->dev,
				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
				dep->trb_pool, dep->trb_pool_dma);
@@ -723,17 +732,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
			(dep->number & 1) ? "in" : "out");
 	}
 
-	/*
-	 * Clean up ep ring of non-control endpoint to avoid getting xferInProgress
-	 * due to stale trbs with HWO bit set from previous composition when update
-	 * transfer cmd is issued.
-	 */
-	if (dep->number > 1 && dep->trb_pool) {
-		memset(&dep->trb_pool[0], 0,
-			sizeof(struct dwc3_trb) * dep->num_trbs);
-		dbg_event(dep->number, "Clr_TRB", 0);
-	}
-
 	return 0;
 }
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 768c44d9ea8b..0ae23ddbc528 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,10 +144,10 @@ the appropriate macros. */
 /* This needs to be modified manually now, when we add
 a new RANGE of SSIDs to the msg_mask_tbl */
 #define MSG_MASK_TBL_CNT		25
-#define APPS_EVENT_LAST_ID		0x0B14
+#define APPS_EVENT_LAST_ID		0x0B2A
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			118
+#define MSG_SSID_0_LAST			120
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -163,7 +163,7 @@ the appropriate macros. */
 #define MSG_SSID_7			4600
 #define MSG_SSID_7_LAST			4615
 #define MSG_SSID_8			5000
-#define MSG_SSID_8_LAST			5032
+#define MSG_SSID_8_LAST			5033
 #define MSG_SSID_9			5500
 #define MSG_SSID_9_LAST			5516
 #define MSG_SSID_10			6000
@@ -193,7 +193,7 @@ the appropriate macros. */
 #define MSG_SSID_22			10350
 #define MSG_SSID_22_LAST		10377
 #define MSG_SSID_23			10400
-#define MSG_SSID_23_LAST		10415
+#define MSG_SSID_23_LAST		10416
 #define MSG_SSID_24			0xC000
 #define MSG_SSID_24_LAST		0xC063
 
@@ -336,7 +336,9 @@ static const uint32_t msg_bld_masks_0[] = {
	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
 	MSG_LVL_MED,
 	MSG_LVL_MED,
-	MSG_LVL_HIGH
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
 };
 
 static const uint32_t msg_bld_masks_1[] = {
@@ -535,7 +537,8 @@ static const uint32_t msg_bld_masks_8[] = {
 	MSG_LVL_MED,
 	MSG_LVL_MED,
 	MSG_LVL_MED,
-	MSG_LVL_MED
+	MSG_LVL_MED,
+	MSG_LVL_HIGH
 };
 
 static const uint32_t msg_bld_masks_9[] = {
@@ -848,13 +851,14 @@ static const uint32_t msg_bld_masks_23[] = {
 	MSG_LVL_LOW,
 	MSG_LVL_LOW,
 	MSG_LVL_LOW,
+	MSG_LVL_LOW,
 	MSG_LVL_LOW
 };
 
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1966,	/* EQUIP ID 1 */
+	0x1A02,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 03853d956b41..c477f60c3f01 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -218,6 +218,7 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_PD_ACTIVE,
 	POWER_SUPPLY_PROP_CHARGER_TEMP,
 	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 3e47e0641780..23b7c76ff2d8 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -416,7 +416,7 @@ country EE: DFS-ETSI
	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country EG: DFS-ETSI
-	(2402 - 2482 @ 40), (20)
+	(2402 - 2482 @ 20), (20)
	(5170 - 5250 @ 20), (23)
	(5250 - 5330 @ 20), (23), DFS
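Editor's note: the qpnp-smb2.c and power_supply.h hunks above expose a new POWER_SUPPLY_PROP_PARALLEL_DISABLE property on the USB supply, which the driver maps to a USER_VOTER vote on pl_disable_votable. The following is a minimal, illustrative sketch (not part of the diff) of how another kernel client could drive that property through the standard power-supply API; the supply name "usb" and the helper function name are assumptions for the example.

/*
 * Illustrative sketch only -- not part of the diff above. Toggles the new
 * POWER_SUPPLY_PROP_PARALLEL_DISABLE property, which qpnp-smb2.c routes to a
 * USER_VOTER vote on pl_disable_votable. The supply name "usb" and the
 * function name example_set_parallel_disable() are assumptions.
 */
#include <linux/errno.h>
#include <linux/power_supply.h>

static int example_set_parallel_disable(bool disable)
{
	union power_supply_propval val = { .intval = disable ? 1 : 0 };
	struct power_supply *usb_psy;
	int rc;

	usb_psy = power_supply_get_by_name("usb");
	if (!usb_psy)
		return -ENODEV;

	/* Handled by smb2_usb_set_prop(): vote(chg->pl_disable_votable, USER_VOTER, ...) */
	rc = power_supply_set_property(usb_psy,
				       POWER_SUPPLY_PROP_PARALLEL_DISABLE, &val);

	power_supply_put(usb_psy);
	return rc;
}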
