From d8cd9dca9bd9ab27f0cd0b0fca43fc7493b4198d Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 9 Mar 2016 13:51:36 -0700 Subject: msm: kgsl: Correct the order of preemption packets Current order: IB1 batch, timestamp writes, SRM=NULL, CP_YIELD_ENABLE, CP_CONTEXT_SWITCH_YIELD Correct order: IB1 batch, SRM=NULL, CP_YIELD_ENABLE, timestamp writes, CP_CONTEXT_SWITCH_YIELD Reason: if preemption is initiated after the last checkpoint but before SET_RENDER_MODE == NULL is executed, all of the PM4s starting at the preamble of the check point will be replayed up to the SRM == NULL, including an attempt to re-timestamp/ re-retire the last batch of IBs. If what was intended here was to make sure that the IB batch would be retired once then the SET_RENDER_MODE == NULL and CP_YIELD_ENABLE should be placed immediately after IB_PFE packets and before the time stamping PM4 packets in the ring buffer. CRs-Fixed: 990078 Change-Id: I04a1a44f12dd3a09c50b4fe39e14a2bd636b24de Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno.h | 1 + drivers/gpu/msm/adreno_a5xx.c | 39 ++++++++++++++++++++++--------------- drivers/gpu/msm/adreno_ringbuffer.c | 15 +++++++++++--- 3 files changed, 36 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index f739783ebd84..816185e9aad4 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -721,6 +721,7 @@ struct adreno_gpudev { struct adreno_ringbuffer *, unsigned int *, struct kgsl_context *, uint64_t cond_addr, struct kgsl_memobj_node *); + int (*preemption_yield_enable)(unsigned int *); int (*preemption_post_ibsubmit)(struct adreno_device *, struct adreno_ringbuffer *, unsigned int *, struct kgsl_context *); diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 9d37c86aee0f..42cbb07c4b30 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -317,10 +317,6 @@ static int a5xx_preemption_token(struct adreno_device *adreno_dev, { 
unsigned int *cmds_orig = cmds; - /* Enable yield in RB only */ - *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1); - *cmds++ = 1; - *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4); cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr); *cmds++ = 1; @@ -411,18 +407,11 @@ static int a5xx_preemption_pre_ibsubmit( } /* - * a5xx_preemption_post_ibsubmit() - Below PM4 commands are + * a5xx_preemption_yield_enable() - Below PM4 commands are * added after every cmdbatch submission. */ -static int a5xx_preemption_post_ibsubmit( - struct adreno_device *adreno_dev, - struct adreno_ringbuffer *rb, unsigned int *cmds, - struct kgsl_context *context) +static int a5xx_preemption_yield_enable(unsigned int *cmds) { - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - unsigned int *cmds_orig = cmds; - unsigned int ctx_id = context ? context->id : 0; - /* * SRM -- set render mode (ex binning, direct render etc) * SRM is set by UMD usually at start of IB to tell CP the type of @@ -437,11 +426,27 @@ static int a5xx_preemption_post_ibsubmit( *cmds++ = 0; *cmds++ = 0; - cmds += a5xx_preemption_token(adreno_dev, rb, cmds, + *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1); + *cmds++ = 1; + + return 8; +} + +/* + * a5xx_preemption_post_ibsubmit() - Below PM4 commands are + * added after every cmdbatch submission. + */ +static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, unsigned int *cmds, + struct kgsl_context *context) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int ctx_id = context ? 
context->id : 0; + + return a5xx_preemption_token(adreno_dev, rb, cmds, device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(ctx_id, preempted)); - return cmds - cmds_orig; } static void a5xx_platform_setup(struct adreno_device *adreno_dev) @@ -4182,8 +4187,10 @@ struct adreno_gpudev adreno_a5xx_gpudev = { .regulator_disable = a5xx_regulator_disable, .pwrlevel_change_settings = a5xx_pwrlevel_change_settings, .preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit, + .preemption_yield_enable = + a5xx_preemption_yield_enable, .preemption_post_ibsubmit = - a5xx_preemption_post_ibsubmit, + a5xx_preemption_post_ibsubmit, .preemption_token = a5xx_preemption_token, .preemption_init = a5xx_preemption_init, .preemption_schedule = a5xx_preemption_schedule, diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index a397a3e83cf4..dc1fbdb64317 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -520,7 +520,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (gpudev->preemption_post_ibsubmit && adreno_is_preemption_enabled(adreno_dev)) - total_sizedwords += 13; + total_sizedwords += 5; /* * a5xx uses 64 bit memory address. 
pm4 commands that involve read/write @@ -707,8 +707,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (gpudev->preemption_post_ibsubmit && adreno_is_preemption_enabled(adreno_dev)) - ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, - rb, ringcmds, &drawctxt->base); + ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb, + ringcmds, &drawctxt->base); /* * If we have more ringbuffer commands than space reserved @@ -860,6 +860,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int *link; @@ -978,6 +979,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, dwords += 2; } + if (gpudev->preemption_yield_enable && + adreno_is_preemption_enabled(adreno_dev)) + dwords += 8; + link = kzalloc(sizeof(unsigned int) * dwords, GFP_KERNEL); if (!link) { ret = -ENOMEM; @@ -1028,6 +1033,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, } } + if (gpudev->preemption_yield_enable && + adreno_is_preemption_enabled(adreno_dev)) + cmds += gpudev->preemption_yield_enable(cmds); + if (cmdbatch_kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, adreno_dev->cmdbatch_profile_buffer.gpuaddr + -- cgit v1.2.3 From 6e3b3512ba5fdc6cfe73c743ac5d27a625f03682 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Mon, 21 Mar 2016 15:31:06 -0600 Subject: msm: kgsl: Zero the adreno ioctl command buffer The kernel command buffer is not zeroed in the adreno ioctls, and may contain garbage. The garbage value can lead to unexpected results. 
CRs-Fixed: 993518 Change-Id: I75033cdf4637881ecd6fa4dd31aea083b134e6d2 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno_ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c index 13d3353946ca..519087a77b83 100644 --- a/drivers/gpu/msm/adreno_ioctl.c +++ b/drivers/gpu/msm/adreno_ioctl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -117,7 +117,7 @@ long adreno_ioctl_helper(struct kgsl_device_private *dev_priv, unsigned int cmd, unsigned long arg, const struct kgsl_ioctl *cmds, int len) { - unsigned char data[128]; + unsigned char data[128] = { 0 }; long ret; int i; -- cgit v1.2.3 From 0917d68d0b6900a0544eaf5ffe648e5d7f0cede5 Mon Sep 17 00:00:00 2001 From: Oleg Perelet Date: Tue, 22 Mar 2016 16:46:55 -0700 Subject: msm: kgsl: Enable GPMU and SPTP/RAC power collapse on A540 Enable GPMU and SPTP/RAC power collapse on A540. 
CRs-Fixed: 973565 Change-Id: I73b40d264c4054a43c2776337b80af88adff077e Signed-off-by: Oleg Perelet --- drivers/gpu/msm/adreno-gpulist.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 65beccc5d7fe..778c76f52d0b 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -246,7 +246,8 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 0, .patchid = ANY_ID, .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION, + ADRENO_CONTENT_PROTECTION | + ADRENO_GPMU | ADRENO_SPTP_PC, .pm4fw_name = "a530_pm4.fw", .pfpfw_name = "a530_pfp.fw", .zap_name = "a530_zap", -- cgit v1.2.3 From 0e5fa912901a038d4180edbfe354c54b58b38e59 Mon Sep 17 00:00:00 2001 From: Tarun Karra Date: Thu, 17 Mar 2016 21:10:36 -0700 Subject: msm: kgsl: verify user memory permissions before mapping to GPU driver For user memory of type KGSL_USER_MEM_TYPE_ADDR mapped to GPU driver verify permissions and map GPU permissions same as CPU permissions. If elevated permissions are requested return an error to prevent privilege escalation. Without this check user could map readonly memory into GPU driver as readwrite and gain elevated privilege. Write permissions check is currently inverted causing readonly user pages to be mapped as readwrite in GPU driver. Fix this check to map readonly pages as readonly. 
CRs-Fixed: 988993 Change-Id: I0e097d7e4e4c414c0849e33bcc61a26fb94291ad Signed-off-by: Tarun Karra --- drivers/gpu/msm/kgsl.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index e3e0b0973410..ecbdd412d378 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1962,6 +1962,20 @@ static inline int _check_region(unsigned long start, unsigned long size, return (end > len); } +static int check_vma_flags(struct vm_area_struct *vma, + unsigned int flags) +{ + unsigned long flags_requested = (VM_READ | VM_WRITE); + + if (flags & KGSL_MEMFLAGS_GPUREADONLY) + flags_requested &= ~VM_WRITE; + + if ((vma->vm_flags & flags_requested) == flags_requested) + return 0; + + return -EFAULT; +} + static int check_vma(struct vm_area_struct *vma, struct file *vmfile, struct kgsl_memdesc *memdesc) { @@ -1975,7 +1989,7 @@ static int check_vma(struct vm_area_struct *vma, struct file *vmfile, if (vma->vm_start != memdesc->useraddr || (memdesc->useraddr + memdesc->size) != vma->vm_end) return -EINVAL; - return 0; + return check_vma_flags(vma, memdesc->flags); } static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) @@ -1984,7 +1998,7 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) long npages = 0, i; size_t sglen = (size_t) (memdesc->size / PAGE_SIZE); struct page **pages = NULL; - int write = (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) != 0; + int write = ((memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 
0 : 1); if (sglen == 0 || sglen >= LONG_MAX) return -EINVAL; @@ -2103,6 +2117,12 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device, if (vma && vma->vm_file) { int fd; + ret = check_vma_flags(vma, entry->memdesc.flags); + if (ret) { + up_read(¤t->mm->mmap_sem); + return ret; + } + /* * Check to see that this isn't our own memory that we have * already mapped -- cgit v1.2.3 From 4a6f12c7a0ea94183f58ce728617c446116fb58c Mon Sep 17 00:00:00 2001 From: Hareesh Gundu Date: Wed, 23 Mar 2016 23:58:11 +0530 Subject: msm: kgsl: Fix gpudev NULL dereference in adreno_remove In adreno_remove() there is possibility of dereference of gpudev without NULL check. Fix this by getting gpudev after adreno_dev NULL check. CRs-Fixed: 993267 Change-Id: I17d8b4ba2c74a787a065dbdb0ac88d065605fcb1 Signed-off-by: Hareesh Gundu --- drivers/gpu/msm/adreno.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 79e95ebb8363..32c83ab76f09 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1012,13 +1012,14 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev) static int adreno_remove(struct platform_device *pdev) { struct adreno_device *adreno_dev = adreno_get_dev(pdev); - struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct adreno_gpudev *gpudev; struct kgsl_device *device; if (adreno_dev == NULL) return 0; device = KGSL_DEVICE(adreno_dev); + gpudev = ADRENO_GPU_DEVICE(adreno_dev); if (gpudev->remove != NULL) gpudev->remove(adreno_dev); -- cgit v1.2.3 From a65f379129d06a926bbd31232059d3867198610c Mon Sep 17 00:00:00 2001 From: Tarun Karra Date: Wed, 16 Mar 2016 10:23:16 -0700 Subject: msm: kgsl: Pass correct buffer size for mapping gpuobj user memory Current code incorrectly specifies buffer size as 0 for mapping gpuobj user memory. This causes the map to fail because buffer size is expected to be a non zero value. 
Fix this by passing the correct size of the buffer to be mapped. CRs-Fixed: 995378 Change-Id: I1a9aeb3f1dd67f014847322e5b14cba8775a82a4 Signed-off-by: Tarun Karra --- drivers/gpu/msm/kgsl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index ecbdd412d378..77494ebb2c92 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -2211,7 +2211,7 @@ static long _gpuobj_map_useraddr(struct kgsl_device *device, return -EINVAL; return kgsl_setup_useraddr(device, pagetable, entry, - (unsigned long) useraddr.virtaddr, 0, 0); + (unsigned long) useraddr.virtaddr, 0, param->priv_len); } #ifdef CONFIG_DMA_SHARED_BUFFER -- cgit v1.2.3 From f8856af38c24716058eef2931c7bf70dc6ba46ac Mon Sep 17 00:00:00 2001 From: Hareesh Gundu Date: Fri, 18 Mar 2016 18:49:47 +0530 Subject: msm: kgsl: Return EOPNOTSUPP for A3XX command batch profiling A3XX doesn't have support for command batch profiling. Return EOPNOTSUPP for a command batch profiling request on A3XX, so that userspace code knows that this feature is not supported. 
CRs-Fixed: 986169 Change-Id: I6dfcab462a933ef31e3bba6bef07f17016ae50b9 Signed-off-by: Hareesh Gundu --- drivers/gpu/msm/adreno_ringbuffer.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index dc1fbdb64317..dceb8fb93461 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -801,6 +801,11 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC)) device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; + /* A3XX does not have support for command batch profiling */ + if (adreno_is_a3xx(adreno_dev) && + (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)) + return -EOPNOTSUPP; + /* Queue the command in the ringbuffer */ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch, timestamp); -- cgit v1.2.3 From 00c02258558347d5b451b6c784682102a438ee33 Mon Sep 17 00:00:00 2001 From: Oleg Perelet Date: Mon, 28 Mar 2016 11:15:46 -0700 Subject: msm: kgsl: Invoke DCVS callbacks on A540 As long as GPMU is enabled, DCVS has to handshake with firmware. It is a new requirement of A540 power management. 
CRs-Fixed: 973565 Change-Id: Ie6480fc3ba0e1b95aab40e31b09ff2bd798ff30f Signed-off-by: Oleg Perelet --- drivers/gpu/msm/adreno.c | 4 ++-- drivers/gpu/msm/adreno_a5xx.c | 20 ++++++++++++++++---- drivers/gpu/msm/adreno_a5xx.h | 5 ++++- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 32c83ab76f09..24c5186340e5 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1222,7 +1222,7 @@ static void _setup_throttling_counters(struct adreno_device *adreno_dev) if (!adreno_is_a540(adreno_dev)) return; - if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM)) + if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) return; for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) { @@ -1260,7 +1260,7 @@ static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev) if (!adreno_is_a540(adreno_dev)) return 0; - if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM)) + if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) return 0; for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) { diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 42cbb07c4b30..ef3d5d8fc552 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1845,7 +1845,7 @@ static void a540_lm_init(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); uint32_t agc_lm_config = - ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) | 0x3) + ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3) << AGC_GPU_VERSION_SHIFT); unsigned int r, i; @@ -1855,8 +1855,8 @@ static void a540_lm_init(struct adreno_device *adreno_dev) AGC_THROTTLE_SEL_DCS; kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r); - if (r & GPMU_BCL_ENABLED) - agc_lm_config |= AGC_BCL_ENABLED; + if (!(r & GPMU_BCL_ENABLED)) + agc_lm_config |= AGC_BCL_DISABLED; if (r & GPMU_LLM_ENABLED) agc_lm_config |= AGC_LLM_ENABLED; @@ -1905,6 +1905,9 @@ start_agc: kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD, PWR_THRESHOLD_VALID | 
lm_limit(adreno_dev)); + kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, + VOLTAGE_INTR_EN); + if (lm_on(adreno_dev)) wake_llm(adreno_dev); } @@ -1953,7 +1956,10 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev, { int on = 0; - /* Only call through if PPD or LM is supported and enabled */ + /* + * On pre A540 HW only call through if PPD or LMx + * is supported and enabled + */ if (ADRENO_FEATURE(adreno_dev, ADRENO_PPD) && test_bit(ADRENO_PPD_CTRL, &adreno_dev->pwrctrl_flag)) on = ADRENO_PPD; @@ -1962,6 +1968,12 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev, test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) on = ADRENO_LM; + /* On 540+ HW call through unconditionally as long as GPMU is enabled */ + if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) { + if (adreno_is_a540(adreno_dev)) + on = ADRENO_GPMU; + } + if (!on) return; diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h index 6c1b8d141671..e41b5b9cce0c 100644 --- a/drivers/gpu/msm/adreno_a5xx.h +++ b/drivers/gpu/msm/adreno_a5xx.h @@ -132,6 +132,9 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on); #define AMP_CALIBRATION_RETRY_CNT 3 #define AMP_CALIBRATION_TIMEOUT 6 +/* A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK */ +#define VOLTAGE_INTR_EN BIT(0) + /* A5XX_GPMU_GPMU_PWR_THRESHOLD */ #define PWR_THRESHOLD_VALID 0x80000000 /* AGC */ @@ -170,7 +173,7 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on); #define AGC_LLM_ENABLED (1 << 16) #define AGC_GPU_VERSION_MASK GENMASK(18, 17) #define AGC_GPU_VERSION_SHIFT 17 -#define AGC_BCL_ENABLED (1 << 24) +#define AGC_BCL_DISABLED (1 << 24) #define AGC_LEVEL_CONFIG (140/4) -- cgit v1.2.3 From 37e734900a2a705af019f7017db34e6e27599e51 Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Fri, 8 Apr 2016 10:04:42 -0700 Subject: ARM: dts: msm: Fix USB3_PHY_SW_RESET register's offset on msmcobalt Update USB3_PHY_SW_RESET register's offset on msmcobalt otherwise USB QMP 
PHY is not released out of reset. CRs-Fixed: 1001463 Change-Id: Idc71b0abb24cf8c103dfde893ba8c40d342a7fb8 Signed-off-by: Mayank Rana --- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index f04291d41c95..fda8d11e1c52 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -1417,7 +1417,7 @@ 0xcd8 /* USB3_PHY_AUTONOMOUS_MODE_CTRL */ 0xcdc /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */ 0xc04 /* USB3_PHY_POWER_DOWN_CONTROL */ - 0x000 /* USB3_PHY_SW_RESET */ + 0xc00 /* USB3_PHY_SW_RESET */ 0xc08>; /* USB3_PHY_START */ clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>, -- cgit v1.2.3 From 9c2a287cae68ab97f9236aa13b5546a963fe9e0f Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Fri, 8 Apr 2016 10:08:48 -0700 Subject: ARM: dts: msm: Select CML clock with USB QMP PHY on msmcobalt USB QMP PHY requires CML based refclock, otherwise USB QMP PHY PLL may not lock. Hence select CML based refclock by programming SYSCLK_EN_SEL register. CRs-Fixed: 1001463 Change-Id: I4cc68a447d0cf3571a50b18d7eec5415430f9423 Signed-off-by: Mayank Rana --- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index fda8d11e1c52..ba58d0639f1a 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -1302,6 +1302,7 @@ /* */ <0x138 0x30 0x00 /* Common block */ 0x3c 0x06 0x00 + 0x80 0x14 0x00 /* SYSCLK_EN_SEL */ 0x8c 0x08 0x00 0x15c 0x06 0x00 0x164 0x01 0x00 -- cgit v1.2.3 From 78602620ed3268a9e3ec19c662340522f822b51c Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Sun, 10 Apr 2016 12:19:31 -0700 Subject: usb: phy: qmp: Add support to use different voltage with core supply On newer platform USB QMP PHY needs different voltage supply as core voltage. 
This change adds required support for the same. CRs-Fixed: 1001463 Change-Id: If100d36bade241dedf28e3cea9e07be192bdfdc2 Signed-off-by: Mayank Rana --- Documentation/devicetree/bindings/usb/msm-phy.txt | 5 +- drivers/usb/phy/phy-msm-ssusb-qmp.c | 99 +++++++++++++++-------- 2 files changed, 71 insertions(+), 33 deletions(-) diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt index cccdb281a31d..929fdee7157b 100644 --- a/Documentation/devicetree/bindings/usb/msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/msm-phy.txt @@ -100,7 +100,7 @@ Required properties: - -supply: phandle to the regulator device tree node Required "supply-name" examples are: "vdd" : vdd supply for SSPHY digital circuit operation - "vdda18" : 1.8v high-voltage analog supply for SSPHY + "core" : high-voltage analog supply for SSPHY - qcom,vdd-voltage-level: This property must be a list of three integer values (no, min, max) where each value represents either a voltage in microvolts or a value corresponding to voltage corner @@ -121,6 +121,9 @@ Optional properties: the USB PHY and the controller must rely on external VBUS notification in order to manually relay the notification to the SSPHY. - qcom,emulation: Indicates that we are running on emulation platform. + - qcom,core-voltage-level: This property must be a list of three integer + values (no, min, max) where each value represents either a voltage in + microvolts or a value corresponding to voltage corner. 
Example: ssphy0: ssphy@f9b38000 { diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index cacb46378c94..8b4fd8c0436a 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -26,12 +26,18 @@ #include #include -#define INIT_MAX_TIME_USEC 1000 +enum core_ldo_levels { + CORE_LEVEL_NONE = 0, + CORE_LEVEL_MIN, + CORE_LEVEL_MAX, +}; -#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */ -#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */ -#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */ +#define INIT_MAX_TIME_USEC 1000 +/* default CORE votlage and load values */ +#define USB_SSPHY_1P2_VOL_MIN 1200000 /* uV */ +#define USB_SSPHY_1P2_VOL_MAX 1200000 /* uV */ +#define USB_SSPHY_HPM_LOAD 23000 /* uA */ /* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */ #define PHYSTATUS BIT(6) @@ -64,8 +70,9 @@ struct msm_ssphy_qmp { void __iomem *vls_clamp_reg; struct regulator *vdd; - struct regulator *vdda18; int vdd_levels[3]; /* none, low, high */ + struct regulator *core_ldo; + int core_voltage_levels[3]; struct clk *ref_clk_src; struct clk *ref_clk; struct clk *aux_clk; @@ -171,41 +178,44 @@ static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on) goto disable_regulators; - rc = regulator_set_load(phy->vdda18, USB_SSPHY_1P8_HPM_LOAD); + rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD); if (rc < 0) { - dev_err(phy->phy.dev, "Unable to set HPM of vdda18\n"); + dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n"); return rc; } - rc = regulator_set_voltage(phy->vdda18, USB_SSPHY_1P8_VOL_MIN, - USB_SSPHY_1P8_VOL_MAX); + rc = regulator_set_voltage(phy->core_ldo, + phy->core_voltage_levels[CORE_LEVEL_MIN], + phy->core_voltage_levels[CORE_LEVEL_MAX]); if (rc) { - dev_err(phy->phy.dev, "unable to set voltage for vdda18\n"); - goto put_vdda18_lpm; + dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n"); + goto put_core_ldo_lpm; } - rc = regulator_enable(phy->vdda18); + rc = regulator_enable(phy->core_ldo); 
if (rc) { - dev_err(phy->phy.dev, "Unable to enable vdda18\n"); - goto unset_vdda18; + dev_err(phy->phy.dev, "Unable to enable core_ldo\n"); + goto unset_core_ldo; } return 0; disable_regulators: - rc = regulator_disable(phy->vdda18); + rc = regulator_disable(phy->core_ldo); if (rc) - dev_err(phy->phy.dev, "Unable to disable vdda18\n"); + dev_err(phy->phy.dev, "Unable to disable core_ldo\n"); -unset_vdda18: - rc = regulator_set_voltage(phy->vdda18, 0, USB_SSPHY_1P8_VOL_MAX); +unset_core_ldo: + rc = regulator_set_voltage(phy->core_ldo, + phy->core_voltage_levels[CORE_LEVEL_NONE], + phy->core_voltage_levels[CORE_LEVEL_MAX]); if (rc) - dev_err(phy->phy.dev, "unable to set voltage for vdda18\n"); + dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n"); -put_vdda18_lpm: - rc = regulator_set_load(phy->vdda18, 0); +put_core_ldo_lpm: + rc = regulator_set_load(phy->core_ldo, 0); if (rc < 0) - dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n"); + dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n"); return rc < 0 ? 
rc : 0; } @@ -495,7 +505,7 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) struct msm_ssphy_qmp *phy; struct device *dev = &pdev->dev; struct resource *res; - int ret = 0, size = 0; + int ret = 0, size = 0, len; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) @@ -631,11 +641,36 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) } } - ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", - (u32 *) phy->vdd_levels, - ARRAY_SIZE(phy->vdd_levels)); - if (ret) { - dev_err(dev, "error reading qcom,vdd-voltage-level property\n"); + /* Set default core voltage values */ + phy->core_voltage_levels[CORE_LEVEL_NONE] = 0; + phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN; + phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX; + + if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) && + len == sizeof(phy->core_voltage_levels)) { + ret = of_property_read_u32_array(dev->of_node, + "qcom,core-voltage-level", + (u32 *)phy->core_voltage_levels, + len / sizeof(u32)); + if (ret) { + dev_err(dev, "err qcom,core-voltage-level property\n"); + goto err; + } + } + + if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) && + len == sizeof(phy->vdd_levels)) { + ret = of_property_read_u32_array(dev->of_node, + "qcom,vdd-voltage-level", + (u32 *) phy->vdd_levels, + len / sizeof(u32)); + if (ret) { + dev_err(dev, "err qcom,vdd-voltage-level property\n"); + goto err; + } + } else { + ret = -EINVAL; + dev_err(dev, "error invalid inputs for vdd-voltage-level\n"); goto err; } @@ -646,10 +681,10 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) goto err; } - phy->vdda18 = devm_regulator_get(dev, "vdda18"); - if (IS_ERR(phy->vdda18)) { - dev_err(dev, "unable to get vdda18 supply\n"); - ret = PTR_ERR(phy->vdda18); + phy->core_ldo = devm_regulator_get(dev, "core"); + if (IS_ERR(phy->core_ldo)) { + dev_err(dev, "unable to get core ldo supply\n"); + ret = 
PTR_ERR(phy->core_ldo); goto err; } -- cgit v1.2.3 From c71663e596d3ec8a13c985041a655f464d671b93 Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Sun, 10 Apr 2016 12:21:09 -0700 Subject: ARM: dts: msm: Update QMP PHY supply name on msm8996 and msmcobalt Rename vdda18-supply as core-supply with USB QMP PHY on msm8996 and msmcobalt. Also provides required voltage value with this core ldo for msm8996. CRs-Fixed: 1001463 Change-Id: Ia826e361d8259126a8168c07539ba4b4f6053f65 Signed-off-by: Mayank Rana --- arch/arm/boot/dts/qcom/msm8996.dtsi | 3 ++- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index 3a5b55848d53..e0bd005f8b76 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -2140,7 +2140,8 @@ reg-names = "qmp_phy_base", "vls_clamp_reg"; vdd-supply = <&pm8994_l28>; - vdda18-supply = <&pm8994_l12>; + core-supply = <&pm8994_l12>; + qcom,core-voltage-level = <0 1800000 1800000>; qcom,vdd-voltage-level = <0 925000 925000>; qcom,vbus-valid-override; qcom,qmp-phy-init-seq = diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index ba58d0639f1a..fb5a743ca330 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -1295,7 +1295,7 @@ reg-names = "qmp_phy_base", "vls_clamp_reg"; vdd-supply = <&pmcobalt_l1>; - vdda18-supply = <&pmcobalt_l12>; + core-supply = <&pmcobalt_l2>; qcom,vdd-voltage-level = <0 880000 880000>; qcom,vbus-valid-override; qcom,qmp-phy-init-seq = -- cgit v1.2.3 From 0228837248d41a1077f1cd3cdd22c00598337aa6 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Mon, 1 Feb 2016 11:08:14 -0700 Subject: defconfig: enable MSM_PM for msmcortex MSM_PM enables sleep modes for CPU subsystem. It is not currently enabled for msmcortex target. Enable MSM_PM for cortex target. 
Change-Id: I67244ff55690c164634e9233e2d0cec3388c5be8 Signed-off-by: Mahesh Sivasubramanian --- arch/arm64/configs/msmcortex-perf_defconfig | 1 + arch/arm64/configs/msmcortex_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 5913b9491e57..7dd4af53b108 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -293,6 +293,7 @@ CONFIG_POWER_RESET_QCOM=y CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y +CONFIG_MSM_PM=y CONFIG_MSM_APM=y # CONFIG_HWMON is not set CONFIG_THERMAL_TSENS8974=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 3bde1a0038c4..eb46d4cab424 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -306,6 +306,7 @@ CONFIG_POWER_RESET_QCOM=y CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y +CONFIG_MSM_PM=y CONFIG_MSM_APM=y # CONFIG_HWMON is not set CONFIG_THERMAL_TSENS8974=y -- cgit v1.2.3 From 15ecda6ff973e3425244b58296f94084b0835364 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Mon, 25 Jan 2016 15:24:11 -0700 Subject: defconfig: Enable MPM for msmcortex defconfig MPM driver is required to configure the hardware to wakeup from SoC sleep. It is currently not enabled for cortex targets. Enable MPM_OF config for cortex targets. 
Change-Id: I09313d7809ec939a9d0440d0ab30a5992f512b96 Signed-off-by: Mahesh Sivasubramanian --- arch/arm64/configs/msmcortex-perf_defconfig | 1 + arch/arm64/configs/msmcortex_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 7dd4af53b108..c27ea1cb1ebd 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -431,6 +431,7 @@ CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_TRACER_PKT=y +CONFIG_MSM_MPM_OF=y CONFIG_MSM_CORE_CTL_HELPER=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_EXTCON=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index eb46d4cab424..ef63d4dd4a37 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -456,6 +456,7 @@ CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_TRACER_PKT=y +CONFIG_MSM_MPM_OF=y CONFIG_MSM_CORE_CTL_HELPER=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_EXTCON=y -- cgit v1.2.3 From fa8b06a184f779d9c4f69043ce0a91574eb490e1 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Mon, 28 Mar 2016 17:36:41 -0600 Subject: soc: qcom: event_timer: Fix irq_desc structure usage Some of the common irq data is moved into a irq_common_data structure within irq_desc structure. Change irq_data references to irq_common_data structure to fix compilation issues. Change-Id: I59c99348a44d364d74af6b67ccabdd2d8c5008b0 Signed-off-by: Mahesh Sivasubramanian --- drivers/soc/qcom/event_timer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/qcom/event_timer.c b/drivers/soc/qcom/event_timer.c index 374fa56b0b28..5ae42ee749b3 100644 --- a/drivers/soc/qcom/event_timer.c +++ b/drivers/soc/qcom/event_timer.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -97,7 +97,7 @@ struct event_timer_info *add_event_timer(uint32_t irq, if (irq) { struct irq_desc *desc = irq_to_desc(irq); - struct cpumask *mask = desc->irq_data.affinity; + struct cpumask *mask = desc->irq_common_data.affinity; get_online_cpus(); event_info->cpu = cpumask_any_and(mask, cpu_online_mask); -- cgit v1.2.3 From e341ce495b28df12780ca5c66dcbda1608fdd1c5 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Mon, 25 Jan 2016 15:24:11 -0700 Subject: defconfig: Enable Event timers for msmcortex defconfig Drivers use event timer framework to programs timers with zero wakeup latency Change-Id: I991dfdc22936dba667110de338d0109c58e68bd5 Signed-off-by: Mahesh Sivasubramanian --- arch/arm64/configs/msmcortex-perf_defconfig | 1 + arch/arm64/configs/msmcortex_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index c27ea1cb1ebd..005f9afd7379 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -432,6 +432,7 @@ CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_TRACER_PKT=y CONFIG_MSM_MPM_OF=y +CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_CORE_CTL_HELPER=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_EXTCON=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index ef63d4dd4a37..185c6383d397 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -457,6 +457,7 @@ CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_TRACER_PKT=y CONFIG_MSM_MPM_OF=y +CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_CORE_CTL_HELPER=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_EXTCON=y -- cgit v1.2.3 From 
135904445efef77492556dd05e29abab68400a9b Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Fri, 8 Jan 2016 16:46:22 -0700 Subject: ARM: dts: msm: Add device node for MPM for MSMCobalt Add device tree node for MPM to enable RPM PC. Change-Id: I88c579189287d655b10b48496be39ed9f20d9bfc Signed-off-by: Mahesh Sivasubramanian --- arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi | 256 +++++++++++++++++++++++++++++++ 1 file changed, 256 insertions(+) diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi index 31b1c9486226..b6ae1f0c6ca5 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi @@ -9,6 +9,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ +#include &soc { qcom,spm@178120000 { @@ -277,4 +278,259 @@ qcom,sleep-stats-version = <2>; }; + qcom,mpm@7781b8 { + compatible = "qcom,mpm-v2"; + reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */ + <0x17911008 0x4>; /* MSM_APCS_GCC_BASE 4K */ + reg-names = "vmpm", "ipc"; + interrupts = ; + clocks = <&clock_gcc clk_cxo_lpm_clk>; + clock-names = "xo"; + qcom,num-mpm-irqs = <96>; + + qcom,ipc-bit-offset = <1>; + + qcom,gic-parent = <&intc>; + qcom,gic-map = <2 216>, /* tsens_upper_lower_int */ + <79 379>, /* qusb2phy_dmse_hv_prim */ + <80 384>, /* qusb2phy_dmse_hv_sec */ + <52 275>, /* qmp_usb3_lfps_rxterm_irq */ + <87 358>, /* ee0_krait_hlos_spmi_periph_irq */ + <0xff 16>, /* APCj_qgicdrCpu0HwFaultIrptReq */ + <0xff 23>, /* APCj_qgicdrCpu0PerfMonIrptReq */ + <0xff 27>, /* APCj_qgicdrCpu0QTmrVirtIrptReq */ + <0xff 32>, /* APCj_qgicdrL2PerfMonIrptReq */ + <0xff 33>, /* APCC_qgicL2PerfMonIrptReq */ + <0xff 34>, /* APCC_qgicL2ErrorIrptReq */ + <0xff 35>, /* WDT_barkInt */ + <0xff 40>, /* qtimer_phy_irq */ + <0xff 41>, /* APCj_qgicdrL2HwFaultNonFatalIrptReq */ + <0xff 42>, /* APCj_qgicdrL2HwFaultFatalIrptReq */ + <0xff 49>, /* L3UX_qgicL3ErrorIrptReq */ + <0xff 54>, /* 
M4M_sysErrorInterrupt */ + <0xff 55>, /* M4M_sysDlmInterrupt */ + <0xff 57>, /* mss_to_apps_irq(0) */ + <0xff 58>, /* mss_to_apps_irq(1) */ + <0xff 59>, /* mss_to_apps_irq(2) */ + <0xff 60>, /* mss_to_apps_irq(3) */ + <0xff 61>, /* mss_a2_bam_irq */ + <0xff 62>, /* QTMR_qgicFrm0VirtIrq */ + <0xff 63>, /* QTMR_qgicFrm1PhysIrq */ + <0xff 64>, /* QTMR_qgicFrm2PhysIrq */ + <0xff 65>, /* QTMR_qgicFrm3PhysIrq */ + <0xff 66>, /* QTMR_qgicFrm4PhysIrq */ + <0xff 67>, /* QTMR_qgicFrm5PhysIrq */ + <0xff 68>, /* QTMR_qgicFrm6PhysIrq */ + <0xff 69>, /* QTMR_qgicFrm7PhysIrq */ + <0xff 70>, /* iommu_pmon_nonsecure_irq */ + <0xff 74>, /* osmmu_CIrpt[1] */ + <0xff 75>, /* osmmu_CIrpt[0] */ + <0xff 77>, /* osmmu_CIrpt[0] */ + <0xff 78>, /* osmmu_CIrpt[0] */ + <0xff 79>, /* osmmu_CIrpt[0] */ + <0xff 80>, /* CPR3_irq */ + <0xff 94>, /* osmmu_CIrpt[0] */ + <0xff 97>, /* iommu_nonsecure_irq */ + <0xff 99>, /* msm_iommu_pmon_nonsecure_irq */ + <0xff 102>, /* osmmu_CIrpt[1] */ + <0xff 105>, /* iommu_pmon_nonsecure_irq */ + <0xff 108>, /* osmmu_PMIrpt */ + <0xff 109>, /* ocmem_dm_nonsec_irq */ + <0xff 110>, /* csiphy_0_irq */ + <0xff 111>, /* csiphy_1_irq */ + <0xff 112>, /* csiphy_2_irq */ + <0xff 115>, /* mdss_irq */ + <0xff 126>, /* bam_irq[0] */ + <0xff 127>, /* blsp1_qup_irq(0) */ + <0xff 132>, /* blsp1_qup_irq(5) */ + <0xff 133>, /* blsp2_qup_irq(0) */ + <0xff 134>, /* blsp2_qup_irq(1) */ + <0xff 138>, /* blsp2_qup_irq(5) */ + <0xff 140>, /* blsp1_uart_irq(1) */ + <0xff 146>, /* blsp2_uart_irq(1) */ + <0xff 155>, /* sdcc_irq[0] */ + <0xff 157>, /* sdc2_irq[0] */ + <0xff 163>, /* usb30_ee1_irq */ + <0xff 164>, /* usb30_bam_irq(0) */ + <0xff 165>, /* usb30_hs_phy_irq */ + <0xff 166>, /* sdc1_pwr_cmd_irq */ + <0xff 170>, /* sdcc_pwr_cmd_irq */ + <0xff 173>, /* sdc1_irq[0] */ + <0xff 174>, /* o_wcss_apss_smd_med */ + <0xff 175>, /* o_wcss_apss_smd_low */ + <0xff 176>, /* o_wcss_apss_smsm_irq */ + <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */ + <0xff 178>, /* 
o_wcss_apss_wlan_rx_data_avail */ + <0xff 179>, /* o_wcss_apss_asic_intr */ + <0xff 180>, /* pcie20_2_int_pls_err */ + <0xff 181>, /* wcnss watchdog */ + <0xff 188>, /* lpass_irq_out_apcs(0) */ + <0xff 189>, /* lpass_irq_out_apcs(1) */ + <0xff 190>, /* lpass_irq_out_apcs(2) */ + <0xff 191>, /* lpass_irq_out_apcs(3) */ + <0xff 192>, /* lpass_irq_out_apcs(4) */ + <0xff 193>, /* lpass_irq_out_apcs(5) */ + <0xff 194>, /* lpass_irq_out_apcs(6) */ + <0xff 195>, /* lpass_irq_out_apcs(7) */ + <0xff 196>, /* lpass_irq_out_apcs(8) */ + <0xff 197>, /* lpass_irq_out_apcs(9) */ + <0xff 198>, /* coresight-tmc-etr interrupt */ + <0xff 200>, /* rpm_ipc(4) */ + <0xff 201>, /* rpm_ipc(5) */ + <0xff 202>, /* rpm_ipc(6) */ + <0xff 203>, /* rpm_ipc(7) */ + <0xff 204>, /* rpm_ipc(24) */ + <0xff 205>, /* rpm_ipc(25) */ + <0xff 206>, /* rpm_ipc(26) */ + <0xff 207>, /* rpm_ipc(27) */ + <0xff 208>, + <0xff 210>, + <0xff 211>, /* usb_dwc3_otg */ + <0xff 215>, /* o_bimc_intr(0) */ + <0xff 224>, /* spdm_realtime_irq[1] */ + <0xff 238>, /* crypto_bam_irq[0] */ + <0xff 240>, /* summary_irq_kpss */ + <0xff 253>, /* sdc2_pwr_cmd_irq */ + <0xff 258>, /* lpass_irq_out_apcs[21] */ + <0xff 268>, /* bam_irq[1] */ + <0xff 270>, /* bam_irq[0] */ + <0xff 271>, /* bam_irq[0] */ + <0xff 276>, /* wlan_pci */ + <0xff 283>, /* pcie20_0_int_pls_err */ + <0xff 284>, /* pcie20_0_int_aer_legacy */ + <0xff 286>, /* pcie20_0_int_pls_link_down */ + <0xff 290>, /* ufs_ice_nonsec_level_irq */ + <0xff 293>, /* pcie20_2_int_pls_link_down */ + <0xff 295>, /* camss_cpp_mmu_cirpt[0] */ + <0xff 296>, /* camss_cpp_mmu_pmirpt */ + <0xff 297>, /* ufs_intrq */ + <0xff 302>, /* qdss_etrbytecnt_irq */ + <0xff 310>, /* pcie20_1_int_pls_err */ + <0xff 311>, /* pcie20_1_int_aer_legacy */ + <0xff 313>, /* pcie20_1_int_pls_link_down */ + <0xff 318>, /* venus0_mmu_pmirpt */ + <0xff 319>, /* venus0_irq */ + <0xff 325>, /* camss_irq18 */ + <0xff 326>, /* camss_irq0 */ + <0xff 327>, /* camss_irq1 */ + <0xff 328>, /* camss_irq2 */ + <0xff 
329>, /* camss_irq3 */ + <0xff 330>, /* camss_irq4 */ + <0xff 331>, /* camss_irq5 */ + <0xff 332>, /* sps */ + <0xff 346>, /* camss_irq8 */ + <0xff 347>, /* camss_irq9 */ + <0xff 352>, /* mdss_mmu_cirpt[0] */ + <0xff 353>, /* mdss_mmu_cirpt[1] */ + <0xff 361>, /* ogpu_mmu_cirpt[0] */ + <0xff 362>, /* ogpu_mmu_cirpt[1] */ + <0xff 365>, /* ipa_irq[0] */ + <0xff 366>, /* ogpu_mmu_pmirpt */ + <0xff 367>, /* venus0_mmu_cirpt[0] */ + <0xff 368>, /* venus0_mmu_cirpt[1] */ + <0xff 369>, /* venus0_mmu_cirpt[2] */ + <0xff 370>, /* venus0_mmu_cirpt[3] */ + <0xff 375>, /* camss_vfe_mmu_cirpt[0] */ + <0xff 376>, /* camss_vfe_mmu_cirpt[1] */ + <0xff 380>, /* mdss_dma_mmu_cirpt[0] */ + <0xff 381>, /* mdss_dma_mmu_cirpt[1] */ + <0xff 385>, /* mdss_dma_mmu_pmirpt */ + <0xff 387>, /* osmmu_CIrpt[0] */ + <0xff 394>, /* osmmu_PMIrpt */ + <0xff 403>, /* osmmu_PMIrpt */ + <0xff 405>, /* osmmu_CIrpt[0] */ + <0xff 413>, /* osmmu_PMIrpt */ + <0xff 422>, /* ssc_irq_out_apcs[5] */ + <0xff 424>, /* ipa_irq[2] */ + <0xff 425>, /* lpass_irq_out_apcs[22] */ + <0xff 426>, /* lpass_irq_out_apcs[23] */ + <0xff 427>, /* lpass_irq_out_apcs[24] */ + <0xff 428>, /* lpass_irq_out_apcs[25] */ + <0xff 429>, /* lpass_irq_out_apcs[26] */ + <0xff 430>, /* lpass_irq_out_apcs[27] */ + <0xff 431>, /* lpass_irq_out_apcs[28] */ + <0xff 432>, /* lpass_irq_out_apcs[29] */ + <0xff 436>, /* lpass_irq_out_apcs[37] */ + <0xff 437>, /* pcie20_0_int_msi_dev0 */ + <0xff 445>, /* pcie20_1_int_msi_dev0 */ + <0xff 453>, /* pcie20_2_int_msi_dev0 */ + <0xff 461>, /* o_vmem_nonsec_irq */ + <0xff 462>, /* tsens1_tsens_critical_int */ + <0xff 464>, /* ipa_bam_irq[0] */ + <0xff 465>, /* ipa_bam_irq[2] */ + <0xff 477>, /* tsens0_tsens_critical_int */ + <0xff 480>, /* q6_wdog_expired_irq */ + <0xff 481>, /* mss_ipc_out_irq(4) */ + <0xff 483>, /* mss_ipc_out_irq(6) */ + <0xff 484>, /* mss_ipc_out_irq(7) */ + <0xff 487>, /* mss_ipc_out_irq(30) */ + <0xff 490>, /* tsens0_tsens_upper_lower_int */ + <0xff 493>; /* 
sdc1_ice_nonsec_level_irq */ + + qcom,gpio-parent = <&tlmm>; + qcom,gpio-map = <3 1>, + <4 5>, + <5 9>, + <6 11>, + <7 66>, + <8 22>, + <9 24>, + <10 26>, + <11 34>, + <12 36>, + <13 37>, /* PCIe0 */ + <14 38>, + <15 40>, + <16 42>, + <17 46>, + <18 50>, + <19 53>, + <20 54>, + <21 56>, + <22 57>, + <23 58>, + <24 59>, + <25 60>, + <26 61>, + <27 62>, + <28 63>, + <29 64>, + <30 71>, + <31 73>, + <32 77>, + <33 78>, + <34 79>, + <35 80>, + <36 82>, + <37 86>, + <38 91>, + <39 92>, + <40 95>, + <41 97>, + <42 101>, + <43 104>, + <44 106>, + <45 108>, + <46 112>, + <47 113>, + <48 110>, + <50 127>, + <51 115>, + <54 116>, /* PCIe2 */ + <55 117>, + <56 118>, + <57 119>, + <58 120>, + <59 121>, + <60 122>, + <61 123>, + <62 124>, + <63 125>, + <64 126>, + <65 129>, + <66 131>, + <67 132>, /* PCIe1 */ + <68 133>, + <69 145>; + }; }; -- cgit v1.2.3 From c38f4e3fd53ec75618431363323433650d5653bc Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Wed, 10 Feb 2016 16:26:57 -0700 Subject: ARM: dts: msm: Add RPM handshake with System sleep for MSMCobalt Allowing RPM to handshake allows RPM to switch to sleep set vote for resources allowing the SOC to enter a lower power state. 
Change-Id: I9d835cddc85c007bcc6b918b9dc4335b82b97306 Signed-off-by: Mahesh Sivasubramanian --- arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi index b6ae1f0c6ca5..0c3dac376c55 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi @@ -78,6 +78,7 @@ qcom,time-overhead = <550>; qcom,min-child-idx = <3>; qcom,is-reset; + qcom,notify-rpm; }; qcom,pm-cluster@0{ -- cgit v1.2.3 From dd45821bd98430329f45d1b149df152ebb9015a7 Mon Sep 17 00:00:00 2001 From: Satya Durga Srinivasu Prabhala Date: Fri, 1 Apr 2016 16:59:53 -0700 Subject: defconfig: arm64: set SELINUX as default security for msmcortex To support Android need to set SELINUX as default security. CRs-Fixed: 998858 Change-Id: Id6d18a67329abe5a89b4284fc5a3d1cf6a042dc4 Signed-off-by: Satya Durga Srinivasu Prabhala --- arch/arm64/configs/msmcortex-perf_defconfig | 1 - arch/arm64/configs/msmcortex_defconfig | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 005f9afd7379..f20fff11bf93 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -483,7 +483,6 @@ CONFIG_DEBUG_RODATA=y CONFIG_SECURITY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y -CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 185c6383d397..46268d6644ef 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -536,7 +536,6 @@ CONFIG_FREE_PAGES_RDONLY=y CONFIG_SECURITY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y -CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y -- cgit v1.2.3 From 
5576240badcdda37af31fd4e8944a8a08d661c6d Mon Sep 17 00:00:00 2001 From: Pushkar Joshi Date: Mon, 30 Mar 2015 17:36:50 -0700 Subject: soc: qcom: Service notification driver for remote services Add a library for a kernel client to register and be notified of any state changes regarding a local or remote service which runs on a remote processor on the SoC. CRs-Fixed: 999530 Change-Id: Idd56140e11f4fdc48fd999a1e808f3263024f34d Signed-off-by: Pushkar Joshi Signed-off-by: Deepak Katragadda Signed-off-by: Puja Gupta --- Documentation/arm/msm/service_notifier.txt | 43 ++ drivers/soc/qcom/Kconfig | 10 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/service-notifier.c | 660 +++++++++++++++++++++++++++++ drivers/soc/qcom/service-notifier.h | 303 +++++++++++++ 5 files changed, 1017 insertions(+) create mode 100644 Documentation/arm/msm/service_notifier.txt create mode 100644 drivers/soc/qcom/service-notifier.c create mode 100644 drivers/soc/qcom/service-notifier.h diff --git a/Documentation/arm/msm/service_notifier.txt b/Documentation/arm/msm/service_notifier.txt new file mode 100644 index 000000000000..cfa64256d93a --- /dev/null +++ b/Documentation/arm/msm/service_notifier.txt @@ -0,0 +1,43 @@ +Introduction +============= + +The service notifier driver facilitates a mechanism for a client +to register for state notifications regarding a particular remote service. +A remote service here refers to a process providing certain services like audio, +the identifier for which is provided by the service locator. The process +domain will typically run on a remote processor within the same SoC. + +Software Description +===================== + +The driver provides the following two APIs: +* service_notif_register_notifier() - Register a notifier for a service + On success, it returns back a handle. It takes the following arguments: + service_path: Individual service identifier path for which a client + registers for notifications. + instance_id: Instance id specific to a subsystem. 
+ current_state: Current state of service returned by the registration + process. + notifier block: notifier callback for service events. + +* service_notif_unregister_notifier() - Unregister a notifier for a service. + This takes the handle returned during registration and the notifier block + previously registered as the arguments. + +Types of notifications: +======================= + +A client can get either a SERVICE_DOWN notification or a SERVICE_UP +notification. A SERVICE_UP notification will be sent out when the SERVICE comes +up and is functional while a SERVICE_DOWN notification is sent after a +service ceases to exist. At the point a SERVICE_DOWN notification is sent out, +all the clients should assume that the service is already dead. + +Interaction with SSR +===================== +In general, it is recommended that clients register for either service +notifications using the service notifier or SSR notifications, but not both. +In case it is necessary to register for both, the client can expect to get +the SERVICE_DOWN notification before the SUBSYS_AFTER_SHUTDOWN notification. +However, the client may receive the SUBSYS_BEFORE_SHUTDOWN notification +either before or after the SERVICE_DOWN notification. diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index a14feed47dcb..b6e2a55d5a9e 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -636,4 +636,14 @@ config QCOM_REMOTEQDSS enable/disable these events. Interface located in /sys/class/remoteqdss. +config MSM_SERVICE_NOTIFIER + bool "Service Notifier" + depends on MSM_SERVICE_LOCATOR && MSM_SUBSYSTEM_RESTART + help + The Service Notifier provides a library for a kernel client to + register for state change notifications regarding a remote service. + A remote service here refers to a process providing certain services + like audio, the identifier for which is provided by the service + locator. 
+ source "drivers/soc/qcom/memshare/Kconfig" diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 13c63d6e59bf..1a4757f16e77 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o obj-$(CONFIG_SOC_BUS) += socinfo.o obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/ +obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o obj-$(CONFIG_MSM_SECURE_BUFFER) += secure_buffer.o obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c new file mode 100644 index 000000000000..7355c2af8f61 --- /dev/null +++ b/drivers/soc/qcom/service-notifier.c @@ -0,0 +1,660 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "service-notifier: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "service-notifier.h" + +#define QMI_RESP_BIT_SHIFT(x) (x << 16) +#define SERVREG_NOTIF_NAME_LENGTH QMI_SERVREG_NOTIF_NAME_LENGTH_V01 +#define SERVREG_NOTIF_SERVICE_ID SERVREG_NOTIF_SERVICE_ID_V01 +#define SERVREG_NOTIF_SERVICE_VERS SERVREG_NOTIF_SERVICE_VERS_V01 + +#define SERVREG_NOTIF_SET_ACK_REQ \ + QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01 +#define SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN \ + QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN +#define SERVREG_NOTIF_SET_ACK_RESP \ + QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01 +#define SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN \ + QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN +#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG \ + QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01 +#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN \ + QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN + +#define SERVREG_NOTIF_REGISTER_LISTENER_REQ \ + QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01 +#define SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN \ + QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN +#define SERVREG_NOTIF_REGISTER_LISTENER_RESP \ + QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01 +#define SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN \ + QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN + +#define QMI_STATE_MIN_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01 +#define QMI_STATE_MAX_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01 + +#define SERVER_TIMEOUT 500 + +/* + * Per user service data structure + * struct service_notif_info - notifier struct for each unique service path + * service_path - service provider path/location + * instance_id - service instance id specific to a subsystem + * service_notif_rcvr_list - list of clients interested in this service + * providers notifications + * curr_state: 
Current state of the service + */ +struct service_notif_info { + char service_path[SERVREG_NOTIF_NAME_LENGTH]; + int instance_id; + struct srcu_notifier_head service_notif_rcvr_list; + struct list_head list; + int curr_state; +}; +static LIST_HEAD(service_list); +static DEFINE_MUTEX(service_list_lock); + +struct ind_req_resp { + char service_path[SERVREG_NOTIF_NAME_LENGTH]; + int transaction_id; +}; + +/* + * Per Root Process Domain (Root service) data structure + * struct qmi_client_info - QMI client info for each subsystem/instance id + * instance_id - service instance id specific to a subsystem (Root PD) + * clnt_handle - unique QMI client handle + * service_connected - indicates if QMI service is up on the subsystem + * ind_recv - completion variable to record receiving an indication + * ssr_handle - The SSR handle provided by the SSR driver for the subsystem + * on which the remote root PD runs. + */ +struct qmi_client_info { + int instance_id; + struct work_struct svc_arrive; + struct work_struct svc_exit; + struct work_struct svc_rcv_msg; + struct work_struct ind_ack; + struct workqueue_struct *svc_event_wq; + struct qmi_handle *clnt_handle; + struct notifier_block notifier; + void *ssr_handle; + struct notifier_block ssr_notifier; + bool service_connected; + struct completion ind_recv; + struct list_head list; + struct ind_req_resp ind_msg; +}; +static LIST_HEAD(qmi_client_list); +static DEFINE_MUTEX(qmi_list_lock); + +static DEFINE_MUTEX(notif_add_lock); + +static void root_service_clnt_recv_msg(struct work_struct *work); +static void root_service_service_arrive(struct work_struct *work); +static void root_service_exit_work(struct work_struct *work); + +static struct service_notif_info *_find_service_info(const char *service_path) +{ + struct service_notif_info *service_notif; + + mutex_lock(&service_list_lock); + list_for_each_entry(service_notif, &service_list, list) + if (!strcmp(service_notif->service_path, service_path)) { + 
mutex_unlock(&service_list_lock); + return service_notif; + } + mutex_unlock(&service_list_lock); + return NULL; +} + +static int service_notif_queue_notification(struct service_notif_info + *service_notif, + enum qmi_servreg_notif_service_state_enum_type_v01 notif_type, + void *info) +{ + int ret = 0; + + if (!service_notif) + return -EINVAL; + + if ((int) notif_type < QMI_STATE_MIN_VAL || + (int) notif_type > QMI_STATE_MAX_VAL) + return -EINVAL; + + if (service_notif->curr_state == notif_type) + return 0; + + if (!service_notif->service_notif_rcvr_list.head) + return 0; + + ret = srcu_notifier_call_chain(&service_notif->service_notif_rcvr_list, + notif_type, info); + return ret; +} + +static void root_service_clnt_recv_msg(struct work_struct *work) +{ + int ret; + struct qmi_client_info *data = container_of(work, + struct qmi_client_info, svc_rcv_msg); + + do { + pr_debug("Notified about a Receive event (instance-id: %d)\n", + data->instance_id); + } while ((ret = qmi_recv_msg(data->clnt_handle)) == 0); + + if (ret != -ENOMSG) + pr_err("Error receiving message (instance-id: %d)\n", + data->instance_id); +} + +static void root_service_clnt_notify(struct qmi_handle *handle, + enum qmi_event_type event, void *notify_priv) +{ + struct qmi_client_info *data = container_of(notify_priv, + struct qmi_client_info, svc_arrive); + + switch (event) { + case QMI_RECV_MSG: + schedule_work(&data->svc_rcv_msg); + break; + default: + break; + } +} + +static void send_ind_ack(struct work_struct *work) +{ + struct qmi_client_info *data = container_of(work, + struct qmi_client_info, ind_ack); + struct qmi_servreg_notif_set_ack_req_msg_v01 req; + struct msg_desc req_desc, resp_desc; + struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } }; + int rc; + + req.transaction_id = data->ind_msg.transaction_id; + snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s", + data->ind_msg.service_path); + + req_desc.msg_id = SERVREG_NOTIF_SET_ACK_REQ; + req_desc.max_msg_len = 
SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN; + req_desc.ei_array = qmi_servreg_notif_set_ack_req_msg_v01_ei; + + resp_desc.msg_id = SERVREG_NOTIF_SET_ACK_RESP; + resp_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN; + resp_desc.ei_array = qmi_servreg_notif_set_ack_resp_msg_v01_ei; + + rc = qmi_send_req_wait(data->clnt_handle, &req_desc, + &req, sizeof(req), &resp_desc, &resp, + sizeof(resp), SERVER_TIMEOUT); + if (rc < 0) { + pr_err("%s: Sending Ack failed/server timeout, ret - %d\n", + data->ind_msg.service_path, rc); + goto exit; + } + + /* Check the response */ + if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) + pr_err("QMI request failed 0x%x\n", + QMI_RESP_BIT_SHIFT(resp.resp.error)); + pr_debug("Indication ACKed for transid %d, service %s, instance %d!\n", + data->ind_msg.transaction_id, data->ind_msg.service_path, + data->instance_id); +exit: + complete(&data->ind_recv); +} + +static void root_service_service_ind_cb(struct qmi_handle *handle, + unsigned int msg_id, void *msg, + unsigned int msg_len, void *ind_cb_priv) +{ + struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv; + struct service_notif_info *service_notif; + struct msg_desc ind_desc; + struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg; + int rc; + + ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG; + ind_desc.max_msg_len = SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN; + ind_desc.ei_array = qmi_servreg_notif_state_updated_ind_msg_v01_ei; + rc = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + if (rc < 0) { + pr_err("Failed to decode message!\n"); + goto send_ind_resp; + } + + pr_debug("Indication received from %s, state: 0x%x, trans-id: %d\n", + ind_msg.service_name, ind_msg.curr_state, + ind_msg.transaction_id); + + service_notif = _find_service_info(ind_msg.service_name); + if (!service_notif) + return; + + if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL || + (int)ind_msg.curr_state > QMI_STATE_MAX_VAL) + pr_err("Unexpected indication notification 
state %d\n", + ind_msg.curr_state); + else { + mutex_lock(¬if_add_lock); + mutex_lock(&service_list_lock); + if (service_notif_queue_notification(service_notif, + ind_msg.curr_state, NULL)) + pr_err("Nnotification failed for %s\n", + ind_msg.service_name); + service_notif->curr_state = ind_msg.curr_state; + mutex_unlock(&service_list_lock); + mutex_unlock(¬if_add_lock); + } +send_ind_resp: + data->ind_msg.transaction_id = ind_msg.transaction_id; + snprintf(data->ind_msg.service_path, + ARRAY_SIZE(data->ind_msg.service_path), "%s", + ind_msg.service_name); + schedule_work(&data->ind_ack); + rc = wait_for_completion_timeout(&data->ind_recv, SERVER_TIMEOUT); + if (rc < 0) { + pr_err("Timeout waiting for sending indication ACK!"); + return; + } + +} + +static int send_notif_listener_msg_req(struct service_notif_info *service_notif, + struct qmi_client_info *data, + bool register_notif, int *curr_state) +{ + struct qmi_servreg_notif_register_listener_req_msg_v01 req; + struct qmi_servreg_notif_register_listener_resp_msg_v01 + resp = { { 0, 0 } }; + struct msg_desc req_desc, resp_desc; + int rc; + + snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s", + service_notif->service_path); + req.enable = register_notif; + + req_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_REQ; + req_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN; + req_desc.ei_array = qmi_servreg_notif_register_listener_req_msg_v01_ei; + + resp_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_RESP; + resp_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN; + resp_desc.ei_array = + qmi_servreg_notif_register_listener_resp_msg_v01_ei; + + rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + SERVER_TIMEOUT); + if (rc < 0) { + pr_err("%s: Message sending failed/server timeout, ret - %d\n", + service_notif->service_path, rc); + return rc; + } + + /* Check the response */ + if (QMI_RESP_BIT_SHIFT(resp.resp.result) != 
QMI_RESULT_SUCCESS_V01) { + pr_err("QMI request failed 0x%x\n", + QMI_RESP_BIT_SHIFT(resp.resp.error)); + return -EREMOTEIO; + } + + if ((int) resp.curr_state < QMI_STATE_MIN_VAL || + (int) resp.curr_state > QMI_STATE_MAX_VAL) { + pr_err("Invalid notif info 0x%x\n", resp.curr_state); + rc = -EINVAL; + } + service_notif->curr_state = resp.curr_state; + *curr_state = resp.curr_state; + return rc; +} + +static int register_notif_listener(struct service_notif_info *service_notif, + struct qmi_client_info *data, + int *curr_state) +{ + return send_notif_listener_msg_req(service_notif, data, true, + curr_state); +} + +static void root_service_service_arrive(struct work_struct *work) +{ + struct service_notif_info *service_notif = NULL; + struct qmi_client_info *data = container_of(work, + struct qmi_client_info, svc_arrive); + int rc; + int curr_state; + + /* Create a Local client port for QMI communication */ + data->clnt_handle = qmi_handle_create(root_service_clnt_notify, work); + if (!data->clnt_handle) { + pr_err("QMI client handle alloc failed (instance-id: %d)\n", + data->instance_id); + return; + } + + /* Connect to the service on the root PD service */ + rc = qmi_connect_to_service(data->clnt_handle, + SERVREG_NOTIF_SERVICE_ID, SERVREG_NOTIF_SERVICE_VERS, + data->instance_id); + if (rc < 0) { + pr_err("Could not connect handle to service(instance-id: %d)\n", + data->instance_id); + qmi_handle_destroy(data->clnt_handle); + data->clnt_handle = NULL; + return; + } + data->service_connected = true; + pr_info("Connection established between QMI handle and %d service\n", + data->instance_id); + /* Register for indication messages about service */ + rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb, + (void *)data); + if (rc < 0) + pr_err("Indication callback register failed(instance-id: %d)\n", + data->instance_id); + + mutex_lock(¬if_add_lock); + mutex_lock(&service_list_lock); + list_for_each_entry(service_notif, &service_list, list) { + if 
(service_notif->instance_id == data->instance_id) { + rc = register_notif_listener(service_notif, data, + &curr_state); + if (rc) { + pr_err("Notifier registration failed for %s\n", + service_notif->service_path); + } else { + rc = service_notif_queue_notification( + service_notif, + curr_state, NULL); + if (rc) + pr_err("Notifier failed for %s\n", + service_notif->service_path); + service_notif->curr_state = curr_state; + } + } + } + mutex_unlock(&service_list_lock); + mutex_unlock(¬if_add_lock); +} + +static void root_service_service_exit(struct qmi_client_info *data) +{ + struct service_notif_info *service_notif = NULL; + int rc; + + /* + * Send service down notifications to all clients + * of registered for notifications for that service. + */ + mutex_lock(¬if_add_lock); + mutex_lock(&service_list_lock); + list_for_each_entry(service_notif, &service_list, list) { + if (service_notif->instance_id == data->instance_id) { + rc = service_notif_queue_notification(service_notif, + SERVREG_NOTIF_SERVICE_STATE_DOWN_V01, + NULL); + if (rc) + pr_err("Notification failed for %s\n", + service_notif->service_path); + service_notif->curr_state = + SERVREG_NOTIF_SERVICE_STATE_DOWN_V01; + } + } + mutex_unlock(&service_list_lock); + mutex_unlock(¬if_add_lock); + + /* + * Destroy client handle and try connecting when + * service comes up again. 
+ */ + data->service_connected = false; + qmi_handle_destroy(data->clnt_handle); + data->clnt_handle = NULL; +} + +static void root_service_exit_work(struct work_struct *work) +{ + struct qmi_client_info *data = container_of(work, + struct qmi_client_info, svc_exit); + root_service_service_exit(data); +} + +static int service_event_notify(struct notifier_block *this, + unsigned long code, + void *_cmd) +{ + struct qmi_client_info *data = container_of(this, + struct qmi_client_info, notifier); + + switch (code) { + case QMI_SERVER_ARRIVE: + pr_debug("Root PD service UP\n"); + queue_work(data->svc_event_wq, &data->svc_arrive); + break; + case QMI_SERVER_EXIT: + pr_debug("Root PD service DOWN\n"); + queue_work(data->svc_event_wq, &data->svc_exit); + break; + default: + break; + } + return 0; +} + +static int ssr_event_notify(struct notifier_block *this, + unsigned long code, + void *data) +{ + struct qmi_client_info *info = container_of(this, + struct qmi_client_info, ssr_notifier); + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + pr_debug("Root PD service Down (SSR notification)\n"); + root_service_service_exit(info); + break; + default: + break; + } + return NOTIFY_DONE; +} + +static void *add_service_notif(const char *service_path, int instance_id, + int *curr_state) +{ + struct service_notif_info *service_notif; + struct qmi_client_info *tmp, *qmi_data; + long int rc; + char subsys[SERVREG_NOTIF_NAME_LENGTH]; + + rc = find_subsys(service_path, subsys); + if (rc < 0) { + pr_err("Could not find subsys for %s\n", service_path); + return ERR_PTR(rc); + } + + service_notif = kzalloc(sizeof(struct service_notif_info), GFP_KERNEL); + if (!service_notif) + return ERR_PTR(-ENOMEM); + + strlcpy(service_notif->service_path, service_path, + ARRAY_SIZE(service_notif->service_path)); + service_notif->instance_id = instance_id; + + /* If we already have a connection to the root PD on which the remote + * service we are interested in notifications about runs, then use + * the 
existing QMI connection. + */ + mutex_lock(&qmi_list_lock); + list_for_each_entry(tmp, &qmi_client_list, list) { + if (tmp->instance_id == instance_id) { + if (tmp->service_connected) { + rc = register_notif_listener(service_notif, tmp, + curr_state); + if (rc) { + mutex_unlock(&qmi_list_lock); + pr_err("Register notifier failed: %s", + service_path); + kfree(service_notif); + return ERR_PTR(rc); + } + } + mutex_unlock(&qmi_list_lock); + goto add_service_list; + } + } + mutex_unlock(&qmi_list_lock); + + qmi_data = kzalloc(sizeof(struct qmi_client_info), GFP_KERNEL); + if (!qmi_data) { + kfree(service_notif); + return ERR_PTR(-ENOMEM); + } + + qmi_data->instance_id = instance_id; + qmi_data->clnt_handle = NULL; + qmi_data->notifier.notifier_call = service_event_notify; + init_completion(&qmi_data->ind_recv); + + qmi_data->svc_event_wq = create_singlethread_workqueue(subsys); + if (!qmi_data->svc_event_wq) { + rc = -ENOMEM; + goto exit; + } + + INIT_WORK(&qmi_data->svc_arrive, root_service_service_arrive); + INIT_WORK(&qmi_data->svc_exit, root_service_exit_work); + INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg); + INIT_WORK(&qmi_data->ind_ack, send_ind_ack); + + *curr_state = service_notif->curr_state = + SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01; + + rc = qmi_svc_event_notifier_register(SERVREG_NOTIF_SERVICE_ID, + SERVREG_NOTIF_SERVICE_VERS, qmi_data->instance_id, + &qmi_data->notifier); + if (rc < 0) { + pr_err("Notifier register failed (instance-id: %d)\n", + qmi_data->instance_id); + goto exit; + } + qmi_data->ssr_notifier.notifier_call = ssr_event_notify; + qmi_data->ssr_handle = subsys_notif_register_notifier(subsys, + &qmi_data->ssr_notifier); + if (IS_ERR(qmi_data->ssr_handle)) { + pr_err("SSR notif register for %s failed(instance-id: %d)\n", + subsys, qmi_data->instance_id); + rc = PTR_ERR(qmi_data->ssr_handle); + goto exit; + } + + mutex_lock(&qmi_list_lock); + INIT_LIST_HEAD(&qmi_data->list); + list_add_tail(&qmi_data->list, &qmi_client_list); 
+ mutex_unlock(&qmi_list_lock); + +add_service_list: + srcu_init_notifier_head(&service_notif->service_notif_rcvr_list); + + mutex_lock(&service_list_lock); + INIT_LIST_HEAD(&service_notif->list); + list_add_tail(&service_notif->list, &service_list); + mutex_unlock(&service_list_lock); + + return service_notif; +exit: + if (qmi_data->svc_event_wq) + destroy_workqueue(qmi_data->svc_event_wq); + kfree(qmi_data); + kfree(service_notif); + return ERR_PTR(rc); +} + +/* service_notif_register_notifier() - Register a notifier for a service + * On success, it returns back a handle. It takes the following arguments: + * service_path: Individual service identifier path for which a client + * registers for notifications. + * instance_id: Instance id specific to a subsystem. + * current_state: Current state of service returned by the registration + * process. + * notifier block: notifier callback for service events. + */ +void *service_notif_register_notifier(const char *service_path, int instance_id, + struct notifier_block *nb, int *curr_state) +{ + struct service_notif_info *service_notif; + int ret = 0; + + if (!service_path || !instance_id || !nb) + return ERR_PTR(-EINVAL); + + service_notif = _find_service_info(service_path); + mutex_lock(¬if_add_lock); + if (!service_notif) { + service_notif = (struct service_notif_info *)add_service_notif( + service_path, + instance_id, + curr_state); + if (IS_ERR(service_notif)) + goto exit; + } + + ret = srcu_notifier_chain_register( + &service_notif->service_notif_rcvr_list, nb); + *curr_state = service_notif->curr_state; + if (ret < 0) + service_notif = ERR_PTR(ret); +exit: + mutex_unlock(¬if_add_lock); + return service_notif; +} +EXPORT_SYMBOL(service_notif_register_notifier); + +/* service_notif_unregister_notifier() - Unregister a notifier for a service. + * service_notif_handle - The notifier handler that was provided by the + * service_notif_register_notifier function when the + * client registered for notifications. 
+ * nb - The notifier block that was previously used during the registration. + */ +int service_notif_unregister_notifier(void *service_notif_handle, + struct notifier_block *nb) +{ + struct service_notif_info *service_notif; + + if (!service_notif_handle || !nb) + return -EINVAL; + + service_notif = (struct service_notif_info *)service_notif_handle; + if (service_notif < 0) + return -EINVAL; + + return srcu_notifier_chain_unregister( + &service_notif->service_notif_rcvr_list, nb); +} +EXPORT_SYMBOL(service_notif_unregister_notifier); diff --git a/drivers/soc/qcom/service-notifier.h b/drivers/soc/qcom/service-notifier.h new file mode 100644 index 000000000000..2fa44b8181f6 --- /dev/null +++ b/drivers/soc/qcom/service-notifier.h @@ -0,0 +1,303 @@ + /* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef SERVICE_REGISTRY_NOTIFIER_H +#define SERVICE_REGISTRY_NOTIFIER_H + +#include + +#include + +#define SERVREG_NOTIF_SERVICE_ID_V01 0x42 +#define SERVREG_NOTIF_SERVICE_VERS_V01 0x01 + +#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01 0x0020 +#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_V01 0x0021 +#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01 0x0020 +#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_V01 0x0021 +#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01 0x0022 +#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01 0x0023 +#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01 0x0023 + +#define QMI_SERVREG_NOTIF_NAME_LENGTH_V01 64 + +enum qmi_servreg_notif_service_state_enum_type_v01 { + QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN, + QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX, + SERVREG_NOTIF_SERVICE_STATE_DOWN_V01 = 0x0FFFFFFF, + SERVREG_NOTIF_SERVICE_STATE_UP_V01 = 0x1FFFFFFF, + SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01 = 0x7FFFFFFF, +}; + +struct qmi_servreg_notif_register_listener_req_msg_v01 { + uint8_t enable; + char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1]; +}; +#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN 71 +struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[]; + +struct qmi_servreg_notif_register_listener_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t curr_state_valid; + enum qmi_servreg_notif_service_state_enum_type_v01 curr_state; +}; +#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN 14 +struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[]; + +struct qmi_servreg_notif_query_state_req_msg_v01 { + char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1]; +}; +#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_MSG_V01_MAX_MSG_LEN 67 +struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[]; + +struct qmi_servreg_notif_query_state_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t 
curr_state_valid; + enum qmi_servreg_notif_service_state_enum_type_v01 curr_state; +}; +#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_MSG_V01_MAX_MSG_LEN 14 +struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[]; + +struct qmi_servreg_notif_state_updated_ind_msg_v01 { + enum qmi_servreg_notif_service_state_enum_type_v01 curr_state; + char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1]; + uint16_t transaction_id; +}; +#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 79 +struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[]; + +struct qmi_servreg_notif_set_ack_req_msg_v01 { + char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1]; + uint16_t transaction_id; +}; +#define QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN 72 +struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[]; + +struct qmi_servreg_notif_set_ack_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; +#define QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN 7 +struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[]; + +struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + qmi_servreg_notif_register_listener_req_msg_v01, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + qmi_servreg_notif_register_listener_req_msg_v01, + service_name), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + 
qmi_servreg_notif_register_listener_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + qmi_servreg_notif_register_listener_resp_msg_v01, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof( + enum qmi_servreg_notif_service_state_enum_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + qmi_servreg_notif_register_listener_resp_msg_v01, + curr_state), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + qmi_servreg_notif_query_state_req_msg_v01, + service_name), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + qmi_servreg_notif_query_state_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + qmi_servreg_notif_query_state_resp_msg_v01, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum + qmi_servreg_notif_service_state_enum_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + qmi_servreg_notif_query_state_resp_msg_v01, + curr_state), + }, + { + 
.data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum + qmi_servreg_notif_service_state_enum_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + qmi_servreg_notif_state_updated_ind_msg_v01, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + qmi_servreg_notif_state_updated_ind_msg_v01, + service_name), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct + qmi_servreg_notif_state_updated_ind_msg_v01, + transaction_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + qmi_servreg_notif_set_ack_req_msg_v01, + service_name), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + qmi_servreg_notif_set_ack_req_msg_v01, + transaction_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + qmi_servreg_notif_set_ack_resp_msg_v01, + resp), + .ei_array = 
get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +#endif -- cgit v1.2.3 From 2668ff4e5436d5c72dad607d3f278d73cf1e6573 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Mon, 28 Mar 2016 15:51:20 -0700 Subject: msm: ipa: add common internal header Add a common internal header for ipa_v2 and ipa_v3. Common definitions should go to this header. CRs-Fixed: 995821 Change-Id: I39539cf661d9e0e0bb59236c92b169d3054485a9 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_api.c | 202 ++++++++++++++++++++++ drivers/platform/msm/ipa/ipa_api.h | 21 +++ drivers/platform/msm/ipa/ipa_common_i.h | 128 ++++++++++++++ drivers/platform/msm/ipa/ipa_v2/ipa.c | 124 +++---------- drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 18 +- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 24 +-- drivers/platform/msm/ipa/ipa_v2/ipa_dma.c | 4 +- drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 40 ++--- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 122 ++----------- drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c | 6 +- drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 18 +- drivers/platform/msm/ipa/ipa_v2/ipa_rm.c | 6 +- drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c | 6 +- drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 24 +-- drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c | 30 ++-- drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 30 ++-- drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 108 ++++++------ drivers/platform/msm/ipa/ipa_v3/ipa.c | 92 ++-------- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 113 +----------- drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c | 4 +- drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 2 +- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 12 +- 22 files changed, 581 insertions(+), 553 deletions(-) create mode 100644 drivers/platform/msm/ipa/ipa_common_i.h diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 04054fe1211f..a8ccba973776 100644 --- 
a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -76,6 +76,78 @@ static enum ipa_hw_type ipa_api_hw_type; static struct ipa_api_controller *ipa_api_ctrl; +const char *ipa_clients_strings[IPA_CLIENT_MAX] = { + __stringify(IPA_CLIENT_HSIC1_PROD), + __stringify(IPA_CLIENT_WLAN1_PROD), + __stringify(IPA_CLIENT_HSIC2_PROD), + __stringify(IPA_CLIENT_USB2_PROD), + __stringify(IPA_CLIENT_HSIC3_PROD), + __stringify(IPA_CLIENT_USB3_PROD), + __stringify(IPA_CLIENT_HSIC4_PROD), + __stringify(IPA_CLIENT_USB4_PROD), + __stringify(IPA_CLIENT_HSIC5_PROD), + __stringify(IPA_CLIENT_USB_PROD), + __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD), + __stringify(IPA_CLIENT_A2_EMBEDDED_PROD), + __stringify(IPA_CLIENT_A2_TETHERED_PROD), + __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD), + __stringify(IPA_CLIENT_APPS_CMD_PROD), + __stringify(IPA_CLIENT_ODU_PROD), + __stringify(IPA_CLIENT_MHI_PROD), + __stringify(IPA_CLIENT_Q6_LAN_PROD), + __stringify(IPA_CLIENT_Q6_WAN_PROD), + __stringify(IPA_CLIENT_Q6_CMD_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP2_PROD), + __stringify(IPA_CLIENT_UC_USB_PROD), + + /* Below PROD client type is only for test purpose */ + __stringify(IPA_CLIENT_TEST_PROD), + __stringify(IPA_CLIENT_TEST1_PROD), + __stringify(IPA_CLIENT_TEST2_PROD), + __stringify(IPA_CLIENT_TEST3_PROD), + __stringify(IPA_CLIENT_TEST4_PROD), + + __stringify(IPA_CLIENT_HSIC1_CONS), + __stringify(IPA_CLIENT_WLAN1_CONS), + __stringify(IPA_CLIENT_HSIC2_CONS), + __stringify(IPA_CLIENT_USB2_CONS), + __stringify(IPA_CLIENT_WLAN2_CONS), + __stringify(IPA_CLIENT_HSIC3_CONS), + __stringify(IPA_CLIENT_USB3_CONS), + __stringify(IPA_CLIENT_WLAN3_CONS), + __stringify(IPA_CLIENT_HSIC4_CONS), + __stringify(IPA_CLIENT_USB4_CONS), + __stringify(IPA_CLIENT_WLAN4_CONS), + __stringify(IPA_CLIENT_HSIC5_CONS), + __stringify(IPA_CLIENT_USB_CONS), + 
__stringify(IPA_CLIENT_USB_DPL_CONS), + __stringify(IPA_CLIENT_A2_EMBEDDED_CONS), + __stringify(IPA_CLIENT_A2_TETHERED_CONS), + __stringify(IPA_CLIENT_A5_LAN_WAN_CONS), + __stringify(IPA_CLIENT_APPS_LAN_CONS), + __stringify(IPA_CLIENT_APPS_WAN_CONS), + __stringify(IPA_CLIENT_ODU_EMB_CONS), + __stringify(IPA_CLIENT_ODU_TETH_CONS), + __stringify(IPA_CLIENT_MHI_CONS), + __stringify(IPA_CLIENT_Q6_LAN_CONS), + __stringify(IPA_CLIENT_Q6_WAN_CONS), + __stringify(IPA_CLIENT_Q6_DUN_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP2_CONS), + __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS), + /* Below CONS client type is only for test purpose */ + __stringify(IPA_CLIENT_TEST_CONS), + __stringify(IPA_CLIENT_TEST1_CONS), + __stringify(IPA_CLIENT_TEST2_CONS), + __stringify(IPA_CLIENT_TEST3_CONS), + __stringify(IPA_CLIENT_TEST4_CONS), +}; + /** * ipa_connect() - low-level IPA client connect @@ -2634,6 +2706,136 @@ int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data), } EXPORT_SYMBOL(ipa_register_ipa_ready_cb); +/** + * ipa_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA_ACTIVE_CLIENTS_INC_XXX(); + * + * Return codes: + * None +*/ +void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id) +{ + IPA_API_DISPATCH(ipa_inc_client_enable_clks, id); +} +EXPORT_SYMBOL(ipa_inc_client_enable_clks); + +/** + * ipa_dec_client_disable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA_ACTIVE_CLIENTS_DEC_XXX(); + * + * Return codes: + * None +*/ +void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + IPA_API_DISPATCH(ipa_dec_client_disable_clks, id); +} 
+EXPORT_SYMBOL(ipa_dec_client_disable_clks); + +/** + * ipa_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done.Asynchronous actions are + * locking a mutex and waking up IPA HW. + * + * Please do not use this API, use the wrapper macros instead(ipa_i.h) + * + * + * Return codes : 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa_inc_client_enable_clks_no_block( + struct ipa_active_client_logging_info *id) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id); + + return ret; +} +EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block); + +/** +* ipa_suspend_resource_no_block() - suspend client endpoints related to the +* IPA_RM resource and decrement active clients counter. This function is +* guaranteed to avoid sleeping. +* +* @resource: [IN] IPA Resource Manager resource +* +* Return codes: 0 on success, negative on failure. +*/ +int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_resource_no_block); +/** + * ipa_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_resume_resource(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_resume_resource); + +/** + * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_resource_sync); + +/** + * ipa_set_required_perf_profile() - set IPA to the specified performance + * profile based on the bandwidth, unless minimum voltage required is + * higher. In this case the floor_voltage specified will be used. + * @floor_voltage: minimum voltage to operate + * @bandwidth_mbps: needed bandwidth from IPA + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage, + bandwidth_mbps); + + return ret; +} +EXPORT_SYMBOL(ipa_set_required_perf_profile); + static const struct dev_pm_ops ipa_pm_ops = { .suspend_noirq = ipa_ap_suspend, .resume_noirq = ipa_ap_resume, diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 04b7ba64a6aa..6cab1273e7e9 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -10,6 +10,8 @@ * GNU General Public License for more details. 
*/ +#include "ipa_common_i.h" + #ifndef _IPA_API_H_ #define _IPA_API_H_ @@ -325,6 +327,25 @@ struct ipa_api_controller { int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data), void *user_data); + void (*ipa_inc_client_enable_clks)( + struct ipa_active_client_logging_info *id); + + void (*ipa_dec_client_disable_clks)( + struct ipa_active_client_logging_info *id); + + int (*ipa_inc_client_enable_clks_no_block)( + struct ipa_active_client_logging_info *id); + + int (*ipa_suspend_resource_no_block)( + enum ipa_rm_resource_name resource); + + int (*ipa_resume_resource)(enum ipa_rm_resource_name name); + + int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource); + + int (*ipa_set_required_perf_profile)( + enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps); + }; #ifdef CONFIG_IPA diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h new file mode 100644 index 000000000000..8149837b2de0 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -0,0 +1,128 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_COMMON_I_H_ +#define _IPA_COMMON_I_H_ + +#define __FILENAME__ \ + (strrchr(__FILE__, '/') ? 
strrchr(__FILE__, '/') + 1 : __FILE__) + +#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = EP; \ + log_info.id_string = ipa_clients_strings[client] + +#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SIMPLE; \ + log_info.id_string = __func__ + +#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = RESOURCE; \ + log_info.id_string = resource_name + +#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SPECIAL; \ + log_info.id_string = id_str + +#define IPA_ACTIVE_CLIENTS_INC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \ + do { \ + struct 
ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + + +enum ipa_active_client_log_type { + EP, + SIMPLE, + RESOURCE, + SPECIAL, + INVALID +}; + +struct ipa_active_client_logging_info { + const char *id_string; + char *file; + int line; + enum ipa_active_client_log_type type; +}; + +extern const char *ipa_clients_strings[]; + +void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +int ipa_inc_client_enable_clks_no_block( + struct ipa_active_client_logging_info *id); +int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource); +int ipa_resume_resource(enum ipa_rm_resource_name name); +int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource); +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); + + +#endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 8c825d1f4749..2e82d04f56ec 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -206,78 +206,6 @@ static bool smmu_disable_htw; static char *active_clients_table_buf; -const char *ipa2_clients_strings[IPA_CLIENT_MAX] = { - __stringify(IPA_CLIENT_HSIC1_PROD), - __stringify(IPA_CLIENT_WLAN1_PROD), - __stringify(IPA_CLIENT_USB2_PROD), - __stringify(IPA_CLIENT_HSIC3_PROD), - 
__stringify(IPA_CLIENT_HSIC2_PROD), - __stringify(IPA_CLIENT_USB3_PROD), - __stringify(IPA_CLIENT_HSIC4_PROD), - __stringify(IPA_CLIENT_USB4_PROD), - __stringify(IPA_CLIENT_HSIC5_PROD), - __stringify(IPA_CLIENT_USB_PROD), - __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD), - __stringify(IPA_CLIENT_A2_EMBEDDED_PROD), - __stringify(IPA_CLIENT_A2_TETHERED_PROD), - __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD), - __stringify(IPA_CLIENT_APPS_CMD_PROD), - __stringify(IPA_CLIENT_ODU_PROD), - __stringify(IPA_CLIENT_MHI_PROD), - __stringify(IPA_CLIENT_Q6_LAN_PROD), - __stringify(IPA_CLIENT_Q6_WAN_PROD), - __stringify(IPA_CLIENT_Q6_CMD_PROD), - __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD), - __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD), - __stringify(IPA_CLIENT_Q6_DECOMP_PROD), - __stringify(IPA_CLIENT_Q6_DECOMP2_PROD), - __stringify(IPA_CLIENT_UC_USB_PROD), - - /* Below PROD client type is only for test purpose */ - __stringify(IPA_CLIENT_TEST_PROD), - __stringify(IPA_CLIENT_TEST1_PROD), - __stringify(IPA_CLIENT_TEST2_PROD), - __stringify(IPA_CLIENT_TEST3_PROD), - __stringify(IPA_CLIENT_TEST4_PROD), - - __stringify(IPA_CLIENT_HSIC1_CONS), - __stringify(IPA_CLIENT_WLAN1_CONS), - __stringify(IPA_CLIENT_HSIC2_CONS), - __stringify(IPA_CLIENT_USB2_CONS), - __stringify(IPA_CLIENT_WLAN2_CONS), - __stringify(IPA_CLIENT_HSIC3_CONS), - __stringify(IPA_CLIENT_USB3_CONS), - __stringify(IPA_CLIENT_WLAN3_CONS), - __stringify(IPA_CLIENT_HSIC4_CONS), - __stringify(IPA_CLIENT_USB4_CONS), - __stringify(IPA_CLIENT_WLAN4_CONS), - __stringify(IPA_CLIENT_HSIC5_CONS), - __stringify(IPA_CLIENT_USB_CONS), - __stringify(IPA_CLIENT_USB_DPL_CONS), - __stringify(IPA_CLIENT_A2_EMBEDDED_CONS), - __stringify(IPA_CLIENT_A2_TETHERED_CONS), - __stringify(IPA_CLIENT_A5_LAN_WAN_CONS), - __stringify(IPA_CLIENT_APPS_LAN_CONS), - __stringify(IPA_CLIENT_APPS_WAN_CONS), - __stringify(IPA_CLIENT_ODU_EMB_CONS), - __stringify(IPA_CLIENT_ODU_TETH_CONS), - __stringify(IPA_CLIENT_MHI_CONS), - __stringify(IPA_CLIENT_Q6_LAN_CONS), 
- __stringify(IPA_CLIENT_Q6_WAN_CONS), - __stringify(IPA_CLIENT_Q6_DUN_CONS), - __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS), - __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS), - __stringify(IPA_CLIENT_Q6_DECOMP_CONS), - __stringify(IPA_CLIENT_Q6_DECOMP2_CONS), - __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS), - /* Below CONS client type is only for test purpose */ - __stringify(IPA_CLIENT_TEST_CONS), - __stringify(IPA_CLIENT_TEST1_CONS), - __stringify(IPA_CLIENT_TEST2_CONS), - __stringify(IPA_CLIENT_TEST3_CONS), - __stringify(IPA_CLIENT_TEST4_CONS), -}; - int ipa2_active_clients_log_print_buffer(char *buf, int size) { int i; @@ -626,7 +554,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (_IOC_NR(cmd) >= IPA_IOCTL_MAX) return -ENOTTY; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); switch (cmd) { case IPA_IOC_ALLOC_NAT_MEM: @@ -1329,12 +1257,12 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; default: /* redundant, as cmd was checked against MAXNR */ - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; } kfree(param); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return retval; } @@ -1521,7 +1449,7 @@ int ipa_init_q6_smem(void) { int rc; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) - @@ -1533,7 +1461,7 @@ int ipa_init_q6_smem(void) if (rc) { IPAERR("failed to initialize Modem RAM memory\n"); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return rc; } @@ -1541,7 +1469,7 @@ int ipa_init_q6_smem(void) IPA_MEM_PART(modem_hdr_ofst)); if (rc) { IPAERR("failed to initialize Modem HDRs RAM memory\n"); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return rc; } @@ -1549,7 +1477,7 @@ int ipa_init_q6_smem(void) IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); if (rc) { 
IPAERR("failed to initialize Modem proc ctx RAM memory\n"); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return rc; } @@ -1557,11 +1485,11 @@ int ipa_init_q6_smem(void) IPA_MEM_PART(modem_comp_decomp_ofst)); if (rc) { IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n"); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return rc; } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return rc; } @@ -1609,7 +1537,7 @@ int ipa_q6_monitor_holb_mitigation(bool enable) int ep_idx; int client_idx; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) { ep_idx = ipa2_get_ep_mapping(client_idx); @@ -1621,7 +1549,7 @@ int ipa_q6_monitor_holb_mitigation(bool enable) ipa_uc_monitor_holb(client_idx, enable); } } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -1957,7 +1885,7 @@ int ipa_q6_pre_shutdown_cleanup(void) if (ipa_ctx->uc_ctx.uc_zip_error) BUG(); - IPA2_ACTIVE_CLIENTS_INC_SPECIAL("Q6"); + IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6"); /* * pipe delay and holb discard for ZIP pipes are handled * in post shutdown callback. 
@@ -3010,7 +2938,7 @@ static void ipa_start_tag_process(struct work_struct *work) if (res) IPAERR("ipa_tag_aggr_force_close failed %d\n", res); - IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); IPADBG("TAG process done\n"); } @@ -3038,7 +2966,7 @@ static void ipa_start_tag_process(struct work_struct *work) * - Remove and deallocate unneeded data structure * - Log the call in the circular history buffer (unless it is a simple call) */ -void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id, +void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id, bool inc, bool int_ctx) { char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]; @@ -3094,13 +3022,13 @@ void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id, } } -void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id, +void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id, bool int_ctx) { ipa2_active_clients_log_mod(id, false, int_ctx); } -void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id, +void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id, bool int_ctx) { ipa2_active_clients_log_mod(id, true, int_ctx); @@ -3116,7 +3044,7 @@ void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id, * Return codes: * None */ -void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id) +void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id) { ipa_active_clients_lock(); ipa2_active_clients_log_inc(id, false); @@ -3138,7 +3066,7 @@ void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id) * Return codes: 0 for success * -EPERM if an asynchronous action should have been done */ -int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info *id) { int res = 0; @@ -3176,9 
+3104,9 @@ bail: * Return codes: * None */ -void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id) +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id) { - struct ipa2_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; ipa_active_clients_lock(); ipa2_active_clients_log_dec(id, false); @@ -3186,7 +3114,7 @@ void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id) IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); if (ipa_ctx->ipa_active_clients.cnt == 0) { if (ipa_ctx->tag_process_before_gating) { - IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "TAG_PROCESS"); ipa2_active_clients_log_inc(&log_info, false); ipa_ctx->tag_process_before_gating = false; @@ -3282,7 +3210,7 @@ fail: return retval; } -int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, +int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps) { enum ipa_voltage_level needed_voltage; @@ -3474,7 +3402,7 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt, if (!atomic_read( &ipa_ctx->sps_pm.dec_clients) ) { - IPA2_ACTIVE_CLIENTS_INC_EP( + IPA_ACTIVE_CLIENTS_INC_EP( ipa_ctx->ep[i].client); IPADBG("Pipes un-suspended.\n"); IPADBG("Enter poll mode.\n"); @@ -3551,7 +3479,7 @@ static void ipa_sps_release_resource(struct work_struct *work) ipa_sps_process_irq_schedule_rel(); } else { atomic_set(&ipa_ctx->sps_pm.dec_clients, 0); - IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE"); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE"); } } atomic_set(&ipa_ctx->sps_pm.eot_activity, 0); @@ -3622,7 +3550,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, struct sps_bam_props bam_props = { 0 }; struct ipa_flt_tbl *flt_tbl; struct ipa_rt_tbl_set *rset; - struct ipa2_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPADBG("IPA Driver initialization 
started\n"); @@ -3746,7 +3674,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, mutex_init(&ipa_ctx->ipa_active_clients.mutex); spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock); - IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); ipa2_active_clients_log_inc(&log_info, false); ipa_ctx->ipa_active_clients.cnt = 1; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c index b3f50dd52528..f1742e05c598 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c @@ -289,7 +289,7 @@ int ipa2_connect(const struct ipa_connect_params *in, } memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); - IPA2_ACTIVE_CLIENTS_INC_EP(in->client); + IPA_ACTIVE_CLIENTS_INC_EP(in->client); ep->skip_ep_cfg = in->skip_ep_cfg; @@ -432,7 +432,7 @@ int ipa2_connect(const struct ipa_connect_params *in, ipa_install_dflt_flt_rules(ipa_ep_idx); if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->client); IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx); @@ -486,7 +486,7 @@ desc_mem_alloc_fail: sps_free_endpoint(ep->ep_hdl); ipa_cfg_ep_fail: memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); - IPA2_ACTIVE_CLIENTS_DEC_EP(in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->client); fail: return result; } @@ -556,7 +556,7 @@ int ipa2_disconnect(u32 clnt_hdl) ep = &ipa_ctx->ep[clnt_hdl]; client_type = ipa2_get_client_mapping(clnt_hdl); if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_INC_EP(client_type); + IPA_ACTIVE_CLIENTS_INC_EP(client_type); /* Set Disconnect in Progress flag. 
*/ spin_lock(&ipa_ctx->disconnect_lock); @@ -663,7 +663,7 @@ int ipa2_disconnect(u32 clnt_hdl) memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); spin_unlock(&ipa_ctx->disconnect_lock); - IPA2_ACTIVE_CLIENTS_DEC_EP(client_type); + IPA_ACTIVE_CLIENTS_DEC_EP(client_type); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); @@ -694,7 +694,7 @@ int ipa2_reset_endpoint(u32 clnt_hdl) } ep = &ipa_ctx->ep[clnt_hdl]; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); res = sps_disconnect(ep->ep_hdl); if (res) { IPAERR("sps_disconnect() failed, res=%d.\n", res); @@ -709,7 +709,7 @@ int ipa2_reset_endpoint(u32 clnt_hdl) } bail: - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return res; } @@ -761,7 +761,7 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl) ep->qmi_request_sent = true; } - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); /* Set disconnect in progress flag so further flow control events are * not honored. */ @@ -774,7 +774,7 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl) ep_ctrl.ipa_ep_suspend = false; ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index daf6091aad67..756bd7be9bb9 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -158,9 +158,9 @@ static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf, { int nbytes; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); } @@ -328,7 +328,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf, end_idx = start_idx + 1; } pos = *ppos; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); for (i = start_idx; i < end_idx; i++) { nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff, @@ -338,7 +338,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf, ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); if (ret < 0) { - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return ret; } @@ -346,7 +346,7 @@ static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf, ubuf += nbytes; count -= nbytes; } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); *ppos = pos + size; return size; @@ -370,9 +370,9 @@ static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf, return -EFAULT; if (option == 1) - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); else if (option == 0) - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); else return -EFAULT; @@ -1230,9 +1230,9 @@ static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf, if (kstrtou32(dbg_buff, 0, &option)) return -EFAULT; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); ipa_ctx->ctrl->ipa_write_dbg_cnt(option); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return count; } @@ -1264,9 +1264,9 @@ static 
ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf, { int nbytes; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c index 90d3bb4c5e95..e08f281b1864 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c @@ -274,7 +274,7 @@ int ipa2_dma_enable(void) mutex_unlock(&ipa_dma_ctx->enable_lock); return -EPERM; } - IPA2_ACTIVE_CLIENTS_INC_SPECIAL("DMA"); + IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA"); ipa_dma_ctx->is_enabled = true; mutex_unlock(&ipa_dma_ctx->enable_lock); @@ -337,7 +337,7 @@ int ipa2_dma_disable(void) } ipa_dma_ctx->is_enabled = false; spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); - IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("DMA"); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA"); mutex_unlock(&ipa_dma_ctx->enable_lock); IPADMA_FUNC_EXIT(); return 0; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index 5929bfc8f96e..25b29cdb3e32 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -254,7 +254,7 @@ static void ipa_handle_tx(struct ipa_sys_context *sys) int inactive_cycles = 0; int cnt; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); do { cnt = ipa_handle_tx_core(sys, true, true); if (cnt == 0) { @@ -267,7 +267,7 @@ static void ipa_handle_tx(struct ipa_sys_context *sys) } while (inactive_cycles <= POLLING_INACTIVITY_TX); ipa_tx_switch_to_intr_mode(sys); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } static void ipa_wq_handle_tx(struct work_struct *work) @@ -653,7 +653,7 @@ int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr) } sys = 
ipa_ctx->ep[ep_idx].sys; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); if (num_desc == 1) { init_completion(&descr->xfer_done); @@ -687,7 +687,7 @@ int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr) } bail: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return result; } @@ -1002,7 +1002,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys) int inactive_cycles = 0; int cnt; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); do { cnt = ipa_handle_rx_core(sys, true, true); if (cnt == 0) { @@ -1026,7 +1026,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys) trace_poll_to_intr(sys->ep->client); ipa_rx_switch_to_intr_mode(sys); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } static void switch_to_intr_rx_work_func(struct work_struct *work) @@ -1118,7 +1118,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) ep = &ipa_ctx->ep[ipa_ep_idx]; - IPA2_ACTIVE_CLIENTS_INC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); if (ep->valid == 1) { if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { @@ -1143,7 +1143,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) ep->priv = sys_in->priv; *clnt_hdl = ipa_ep_idx; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); return 0; } @@ -1357,7 +1357,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) } if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, ipa_ep_idx, ep->sys); @@ -1380,7 +1380,7 @@ fail_wq: kfree(ep->sys); memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); fail_and_disable_clocks: - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); fail_gen: return result; } @@ 
-1410,7 +1410,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl) ep = &ipa_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_disable_data_path(clnt_hdl); ep->valid = 0; @@ -1456,7 +1456,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl) if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt)) ipa_cleanup_wlan_rx_common_cache(); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); @@ -2065,9 +2065,9 @@ static void replenish_rx_work_func(struct work_struct *work) dwork = container_of(work, struct delayed_work, work); sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); sys->repl_hdlr(sys); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } /** @@ -3295,7 +3295,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, ep = &ipa_ctx->ep[ipa_ep_idx]; - IPA2_ACTIVE_CLIENTS_INC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); if (ep->valid == 1) { if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { @@ -3322,7 +3322,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, ep->priv = sys_in->priv; *clnt_hdl = ipa_ep_idx; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); return 0; } @@ -3363,7 +3363,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, *ipa_bam_hdl = ipa_ctx->bam_handle; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, @@ -3373,7 +3373,7 @@ int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, fail_gen2: 
fail_and_disable_clocks: - IPA2_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); fail_gen: return result; } @@ -3391,12 +3391,12 @@ int ipa2_sys_teardown(u32 clnt_hdl) ep = &ipa_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_disable_data_path(clnt_hdl); ep->valid = 0; - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 3f538a3ed8cf..8d7b300d0aef 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -31,6 +31,7 @@ #include "ipa_reg.h" #include "ipa_qmi_service.h" #include "../ipa_api.h" +#include "../ipa_common_i.h" #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" @@ -150,117 +151,16 @@ #define IPA_SMMU_UC_VA_SIZE 0x20000000 #define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START + IPA_SMMU_UC_VA_SIZE) -#define __FILENAME__ \ - (strrchr(__FILE__, '/') ? 
strrchr(__FILE__, '/') + 1 : __FILE__) - - -#define IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = EP; \ - log_info.id_string = ipa2_clients_strings[client] - -#define IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = SIMPLE; \ - log_info.id_string = __func__ - -#define IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = RESOURCE; \ - log_info.id_string = resource_name - -#define IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = SPECIAL; \ - log_info.id_string = id_str - -#define IPA2_ACTIVE_CLIENTS_INC_EP(client) \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ - ipa2_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_DEC_EP(client) \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ - ipa2_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_INC_SIMPLE() \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ - ipa2_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_DEC_SIMPLE() \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ - ipa2_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ - ipa2_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \ - do { \ - struct 
ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ - ipa2_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ - ipa2_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA2_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \ - do { \ - struct ipa2_active_client_logging_info log_info; \ - IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ - ipa2_dec_client_disable_clks(&log_info); \ - } while (0) - #define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 #define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96 #define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 #define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40 -extern const char *ipa2_clients_strings[]; - -enum ipa2_active_client_log_type { - EP, - SIMPLE, - RESOURCE, - SPECIAL, - INVALID -}; - -struct ipa2_active_client_logging_info { - const char *id_string; - char *file; - int line; - enum ipa2_active_client_log_type type; -}; - struct ipa2_active_client_htable_entry { struct hlist_node list; char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN]; int count; - enum ipa2_active_client_log_type type; + enum ipa_active_client_log_type type; }; struct ipa2_active_clients_log_ctx { @@ -1952,13 +1852,13 @@ int ipa_straddle_boundary(u32 start, u32 end, u32 boundary); struct ipa_context *ipa_get_ctx(void); void ipa_enable_clks(void); void ipa_disable_clks(void); -void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id); -int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info +void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info *id); -void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id); -void ipa2_active_clients_log_dec(struct 
ipa2_active_client_logging_info *id, +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id, bool int_ctx); -void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id, +void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id, bool int_ctx); int ipa2_active_clients_log_print_buffer(char *buf, int size); int ipa2_active_clients_log_print_table(char *buf, int size); @@ -2052,7 +1952,7 @@ int ipa_id_alloc(void *ptr); void *ipa_id_find(u32 id); void ipa_id_remove(u32 id); -int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, +int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps); int ipa2_cfg_ep_status(u32 clnt_hdl, @@ -2060,9 +1960,9 @@ int ipa2_cfg_ep_status(u32 clnt_hdl, int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity); int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity); -int ipa_suspend_resource_no_block(enum ipa_rm_resource_name name); -int ipa_suspend_resource_sync(enum ipa_rm_resource_name name); -int ipa_resume_resource(enum ipa_rm_resource_name name); +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name); +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name); +int ipa2_resume_resource(enum ipa_rm_resource_name name); bool ipa_should_pipe_be_suspended(enum ipa_client_type client); int ipa_tag_aggr_force_close(int pipe_num); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c index e3797a48c010..f30fd4c60171 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -184,9 +184,9 @@ static void ipa_process_interrupts(bool isr_context) static void ipa_interrupt_defer(struct work_struct *work) { IPADBG("processing interrupts in wq\n"); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); ipa_process_interrupts(false); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPADBG("Done\n"); } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c index a389802de33f..e7032f339405 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1240,7 +1240,7 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl) IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n", channel->client, channel->hdl, channel->id); - IPA2_ACTIVE_CLIENTS_INC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); if (ep->valid == 1) { IPA_MHI_ERR("EP already allocated.\n"); @@ -1310,7 +1310,7 @@ int ipa2_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl) ipa_install_dflt_flt_rules(ipa_ep_idx); if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client, @@ -1328,7 +1328,7 @@ fail_enable_dp: fail_init_channel: memset(ep, 0, offsetof(struct ipa_ep_context, sys)); fail_ep_exists: - IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); return 
-EPERM; } @@ -1379,7 +1379,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl) ep = &ipa_ctx->ep[clnt_hdl]; if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); res = ipa_mhi_reset_channel(channel); if (res) { @@ -1390,7 +1390,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl) ep->valid = 0; ipa_delete_dflt_flt_rules(clnt_hdl); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); IPA_MHI_FUNC_EXIT(); @@ -1398,7 +1398,7 @@ int ipa2_mhi_disconnect_pipe(u32 clnt_hdl) fail_reset_channel: if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return res; } @@ -1653,7 +1653,7 @@ int ipa2_mhi_suspend(bool force) * IPA RM resource are released to make sure tag process will not start */ if (!bam_empty) - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPA_MHI_DBG("release prod\n"); res = ipa_mhi_release_prod(); @@ -1696,7 +1696,7 @@ int ipa2_mhi_suspend(bool force) if (!bam_empty) { ipa_ctx->tag_process_before_gating = false; - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c index ff93394a9363..3e47d1d5e150 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c @@ -707,7 +707,7 @@ static void ipa_rm_wq_resume_handler(struct work_struct *work) IPA_RM_ERR("resource is not CONS\n"); return; } - IPA2_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( + IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( ipa_rm_work->resource_name)); spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); if 
(ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, @@ -715,7 +715,7 @@ static void ipa_rm_wq_resume_handler(struct work_struct *work) &resource) != 0){ IPA_RM_ERR("resource does not exists\n"); spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( ipa_rm_work->resource_name)); goto bail; } @@ -1000,7 +1000,7 @@ void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[ resource_name - IPA_RM_RESOURCE_PROD_MAX]; } else { - IPAERR("Invalid resource_name\n"); + IPA_RM_ERR("Invalid resource_name\n"); return; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c index c22bd3b670bd..66e086768294 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -147,7 +147,7 @@ int ipa_rm_resource_consumer_request( { int result = 0; enum ipa_rm_resource_state prev_state; - struct ipa2_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(consumer->resource.name), @@ -160,7 +160,7 @@ int ipa_rm_resource_consumer_request( case IPA_RM_RELEASE_IN_PROGRESS: reinit_completion(&consumer->request_consumer_in_progress); consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; - IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, ipa_rm_resource_str(consumer->resource.name)); if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || ipa2_inc_client_enable_clks_no_block(&log_info) != 0) { diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c index 0142dace47d8..b49815b24bc2 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c @@ -329,7 +329,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt, WARN_ON(private_data != ipa_ctx); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPADBG("uC evt opcode=%u\n", ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); @@ -340,7 +340,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt, if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { IPAERR("Invalid feature %u for event %u\n", feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return; } /* Feature specific handling */ @@ -370,7 +370,7 @@ static void ipa_uc_event_handler(enum ipa_irq_type interrupt, IPADBG("unsupported uC evt opcode=%u\n", ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } @@ -378,14 +378,14 @@ static int 
ipa_uc_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { int result = 0; - struct ipa2_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr); result = ipa_uc_state_check(); if (result) goto fail; - IPA2_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); if (ipa2_inc_client_enable_clks_no_block(&log_info)) goto fail; @@ -397,7 +397,7 @@ static int ipa_uc_panic_notifier(struct notifier_block *this, /* give uc enough time to save state */ udelay(IPA_PKT_FLUSH_TO_US); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPADBG("err_fatal issued\n"); fail: @@ -425,7 +425,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt, WARN_ON(private_data != ipa_ctx); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPADBG("uC rsp opcode=%u\n", ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); @@ -434,7 +434,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt, if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { IPAERR("Invalid feature %u for event %u\n", feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return; } @@ -447,7 +447,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt, IPADBG("feature %d specific response handler\n", feature); complete_all(&ipa_ctx->uc_ctx.uc_completion); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return; } } @@ -490,7 +490,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt, IPAERR("Unsupported uC rsp opcode = %u\n", ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } /** @@ -816,9 +816,9 @@ EXPORT_SYMBOL(ipa_uc_monitor_holb); static void ipa_start_monitor_holb(struct work_struct *work) { IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n"); - 
IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c index 1588fea23ddc..ec3814b4e747 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -614,7 +614,7 @@ int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); res = ipa_uc_update_hw_flags(0); if (res) { @@ -677,7 +677,7 @@ int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -700,7 +700,7 @@ int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, return -EINVAL; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; @@ -725,7 +725,7 @@ int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -741,7 +741,7 @@ int ipa_uc_mhi_reset_channel(int channelHandle) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; @@ -763,7 +763,7 @@ int ipa_uc_mhi_reset_channel(int channelHandle) res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + 
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -778,7 +778,7 @@ int ipa_uc_mhi_suspend_channel(int channelHandle) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; @@ -800,7 +800,7 @@ int ipa_uc_mhi_suspend_channel(int channelHandle) res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -815,7 +815,7 @@ int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); memset(&uc_rsp, 0, sizeof(uc_rsp)); uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; @@ -838,7 +838,7 @@ int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -852,7 +852,7 @@ int ipa_uc_mhi_stop_event_update_channel(int channelHandle) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); memset(&cmd, 0, sizeof(cmd)); cmd.params.channelHandle = channelHandle; @@ -870,7 +870,7 @@ int ipa_uc_mhi_stop_event_update_channel(int channelHandle) res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } @@ -888,7 +888,7 @@ int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd) IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); res = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); @@ -899,7 +899,7 @@ int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd) res = 0; disable_clks: - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return res; } diff --git 
a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c index 496e77bceb59..df52018f6193 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c @@ -407,7 +407,7 @@ int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) return -EINVAL; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); TX_STATS(num_pkts_processed); TX_STATS(copy_engine_doorbell_value); @@ -449,7 +449,7 @@ int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) RX_STATS(reserved1); RX_STATS(reserved2); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -756,7 +756,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, } memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); - IPA2_ACTIVE_CLIENTS_INC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); if (IPA_CLIENT_IS_CONS(in->sys.client)) { @@ -960,7 +960,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, ipa_install_dflt_flt_rules(ipa_ep_idx); if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); ep->wdi_state |= IPA_WDI_CONNECTED; @@ -974,7 +974,7 @@ uc_timeout: ipa_release_uc_smmu_mappings(in->sys.client); dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); dma_alloc_fail: - IPA2_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); fail: return result; } @@ -1019,7 +1019,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl) } if (!ep->keep_ipa_awake) - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); tear.params.ipa_pipe_number = clnt_hdl; @@ -1037,7 +1037,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl) 
ipa_release_uc_smmu_mappings(ep->client); memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); @@ -1084,7 +1084,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); enable.params.ipa_pipe_number = clnt_hdl; result = ipa_uc_send_cmd(enable.raw32b, @@ -1104,7 +1104,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl) result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); } - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); ep->wdi_state |= IPA_WDI_ENABLED; IPADBG("client (ep: %d) enabled\n", clnt_hdl); @@ -1152,7 +1152,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); result = ipa_disable_data_path(clnt_hdl); if (result) { @@ -1205,7 +1205,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl) ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); } - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); ep->wdi_state &= ~IPA_WDI_ENABLED; IPADBG("client (ep: %d) disabled\n", clnt_hdl); @@ -1252,7 +1252,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); resume.params.ipa_pipe_number = clnt_hdl; result = ipa_uc_send_cmd(resume.raw32b, @@ -1368,7 +1368,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl) } ipa_ctx->tag_process_before_gating = true; - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + 
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); ep->wdi_state &= ~IPA_WDI_RESUMED; IPADBG("client (ep: %d) suspended\n", clnt_hdl); @@ -1401,7 +1401,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) return -EFAULT; } - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); qmap.params.ipa_pipe_number = clnt_hdl; qmap.params.qmap_id = qmap_id; @@ -1415,7 +1415,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) goto uc_timeout; } - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index 888345a23ba5..421b737ddfeb 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -495,7 +495,7 @@ bool ipa_should_pipe_be_suspended(enum ipa_client_type client) } /** - * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM * resource and decrement active clients counter, which may result in clock * gating of IPA clocks. * @@ -503,7 +503,7 @@ bool ipa_should_pipe_be_suspended(enum ipa_client_type client) * * Return codes: 0 on success, negative on failure. 
*/ -int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource) { struct ipa_client_names clients; int res; @@ -546,13 +546,13 @@ int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) /* before gating IPA clocks do TAG process */ ipa_ctx->tag_process_before_gating = true; - IPA2_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); return 0; } /** - * ipa_suspend_resource_no_block() - suspend client endpoints related to the + * ipa2_suspend_resource_no_block() - suspend client endpoints related to the * IPA_RM resource and decrement active clients counter. This function is * guaranteed to avoid sleeping. * @@ -560,7 +560,7 @@ int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) * * Return codes: 0 on success, negative on failure. */ -int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource) { int res; struct ipa_client_names clients; @@ -569,7 +569,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) struct ipa_ep_cfg_ctrl suspend; int ipa_ep_idx; unsigned long flags; - struct ipa2_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; if (ipa_active_clients_trylock(&flags) == 0) return -EPERM; @@ -607,7 +607,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) } if (res == 0) { - IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, ipa_rm_resource_str(resource)); ipa2_active_clients_log_dec(&log_info, true); ipa_ctx->ipa_active_clients.cnt--; @@ -621,14 +621,14 @@ bail: } /** - * ipa_resume_resource() - resume client endpoints related to the IPA_RM + * ipa2_resume_resource() - resume client endpoints related to the IPA_RM * resource. 
* * @resource: [IN] IPA Resource Manager resource * * Return codes: 0 on success, negative on failure. */ -int ipa_resume_resource(enum ipa_rm_resource_name resource) +int ipa2_resume_resource(enum ipa_rm_resource_name resource) { struct ipa_client_names clients; @@ -821,11 +821,11 @@ int ipa_cfg_route(struct ipa_route *route) route->route_def_hdr_ofst, route->route_frag_def_pipe); - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); ipa_ctx->ctrl->ipa_cfg_route(route); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -841,12 +841,12 @@ int ipa_cfg_filter(u32 disable) { u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst, IPA_SETFIELD(!disable, IPA_FILTER_FILTER_EN_SHFT, IPA_FILTER_FILTER_EN_BMSK)); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -2496,11 +2496,11 @@ int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -2556,11 +2556,11 @@ int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].status = *ep_status; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -2618,11 +2618,11 @@ int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct 
ipa_ep_cfg_cfg *cfg) /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -2674,11 +2674,11 @@ int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -2797,11 +2797,11 @@ int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) /* copy over EP cfg */ ep->cfg.hdr = *ep_hdr; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -2923,11 +2923,11 @@ int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, /* copy over EP cfg */ ep->cfg.hdr_ext = *ep_hdr_ext; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3138,13 +3138,13 @@ int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep; - 
IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl, ipa_ctx->ep[clnt_hdl].dst_pipe_index, ep_mode); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3270,11 +3270,11 @@ int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3353,12 +3353,12 @@ int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) else ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl, ipa_ctx->ep[clnt_hdl].rt_tbl_idx); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3447,11 +3447,11 @@ int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) ipa_ctx->ep[clnt_hdl].holb = *ep_holb; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, ep_holb->tmr_val); @@ -3548,11 +3548,11 @@ int ipa2_cfg_ep_deaggr(u32 clnt_hdl, /* copy over EP cfg */ ep->cfg.deaggr = *ep_deaggr; - 
IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3600,13 +3600,13 @@ int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md; - IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md); ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr); - IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); return 0; } @@ -3788,11 +3788,11 @@ int ipa2_set_aggr_mode(enum ipa_aggr_mode mode) { u32 reg_val; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) | (reg_val & 0xfffffffe)); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -3816,12 +3816,12 @@ int ipa2_set_qcncm_ndp_sig(char sig[3]) IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n"); return -EINVAL; } - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 | (sig[1] << 12) | (sig[2] << 4) | (reg_val & 0xf000000f)); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -3837,11 +3837,11 @@ int ipa2_set_single_ndp_per_mbim(bool enable) { u32 reg_val; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); reg_val = ipa_read_reg(ipa_ctx->mmio, 
IPA_SINGLE_NDP_MODE_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST, (enable & 0x1) | (reg_val & 0xfffffffe)); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } @@ -3857,12 +3857,12 @@ int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable) { u32 reg_val; - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) | (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK)); - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; } EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr); @@ -3905,7 +3905,7 @@ void ipa2_bam_reg_dump(void) { static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1); if (__ratelimit(&_rs)) { - IPA2_ACTIVE_CLIENTS_INC_SIMPLE(); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); pr_err("IPA BAM START\n"); if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) { sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, @@ -3919,7 +3919,7 @@ void ipa2_bam_reg_dump(void) SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))), 0, 2); } - IPA2_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); } } @@ -4821,7 +4821,7 @@ bool ipa2_is_client_handle_valid(u32 clnt_hdl) void ipa2_proxy_clk_unvote(void) { if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) { - IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); ipa_ctx->q6_proxy_clk_vote_valid = false; } } @@ -4834,7 +4834,7 @@ void ipa2_proxy_clk_unvote(void) void ipa2_proxy_clk_vote(void) { if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) { - IPA2_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); ipa_ctx->q6_proxy_clk_vote_valid = true; } } @@ -5077,6 +5077,16 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev; 
api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info; api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel; + api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa2_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa2_suspend_resource_no_block; + api_ctrl->ipa_resume_resource = ipa2_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa2_set_required_perf_profile; return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index d09363b725de..dcec51d4f3c7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -219,78 +219,6 @@ static bool smmu_disable_htw; static char *active_clients_table_buf; -const char *ipa3_clients_strings[IPA_CLIENT_MAX] = { - __stringify(IPA_CLIENT_HSIC1_PROD), - __stringify(IPA_CLIENT_WLAN1_PROD), - __stringify(IPA_CLIENT_USB2_PROD), - __stringify(IPA_CLIENT_HSIC3_PROD), - __stringify(IPA_CLIENT_HSIC2_PROD), - __stringify(IPA_CLIENT_USB3_PROD), - __stringify(IPA_CLIENT_HSIC4_PROD), - __stringify(IPA_CLIENT_USB4_PROD), - __stringify(IPA_CLIENT_HSIC5_PROD), - __stringify(IPA_CLIENT_USB_PROD), - __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD), - __stringify(IPA_CLIENT_A2_EMBEDDED_PROD), - __stringify(IPA_CLIENT_A2_TETHERED_PROD), - __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD), - __stringify(IPA_CLIENT_APPS_CMD_PROD), - __stringify(IPA_CLIENT_ODU_PROD), - __stringify(IPA_CLIENT_MHI_PROD), - __stringify(IPA_CLIENT_Q6_LAN_PROD), - __stringify(IPA_CLIENT_Q6_WAN_PROD), - __stringify(IPA_CLIENT_Q6_CMD_PROD), - __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD), - __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD), - __stringify(IPA_CLIENT_Q6_DECOMP_PROD), - __stringify(IPA_CLIENT_Q6_DECOMP2_PROD), - 
__stringify(IPA_CLIENT_UC_USB_PROD), - - /* Below PROD client type is only for test purpose */ - __stringify(IPA_CLIENT_TEST_PROD), - __stringify(IPA_CLIENT_TEST1_PROD), - __stringify(IPA_CLIENT_TEST2_PROD), - __stringify(IPA_CLIENT_TEST3_PROD), - __stringify(IPA_CLIENT_TEST4_PROD), - - __stringify(IPA_CLIENT_HSIC1_CONS), - __stringify(IPA_CLIENT_WLAN1_CONS), - __stringify(IPA_CLIENT_HSIC2_CONS), - __stringify(IPA_CLIENT_USB2_CONS), - __stringify(IPA_CLIENT_WLAN2_CONS), - __stringify(IPA_CLIENT_HSIC3_CONS), - __stringify(IPA_CLIENT_USB3_CONS), - __stringify(IPA_CLIENT_WLAN3_CONS), - __stringify(IPA_CLIENT_HSIC4_CONS), - __stringify(IPA_CLIENT_USB4_CONS), - __stringify(IPA_CLIENT_WLAN4_CONS), - __stringify(IPA_CLIENT_HSIC5_CONS), - __stringify(IPA_CLIENT_USB_CONS), - __stringify(IPA_CLIENT_USB_DPL_CONS), - __stringify(IPA_CLIENT_A2_EMBEDDED_CONS), - __stringify(IPA_CLIENT_A2_TETHERED_CONS), - __stringify(IPA_CLIENT_A5_LAN_WAN_CONS), - __stringify(IPA_CLIENT_APPS_LAN_CONS), - __stringify(IPA_CLIENT_APPS_WAN_CONS), - __stringify(IPA_CLIENT_ODU_EMB_CONS), - __stringify(IPA_CLIENT_ODU_TETH_CONS), - __stringify(IPA_CLIENT_MHI_CONS), - __stringify(IPA_CLIENT_Q6_LAN_CONS), - __stringify(IPA_CLIENT_Q6_WAN_CONS), - __stringify(IPA_CLIENT_Q6_DUN_CONS), - __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS), - __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS), - __stringify(IPA_CLIENT_Q6_DECOMP_CONS), - __stringify(IPA_CLIENT_Q6_DECOMP2_CONS), - __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS), - /* Below CONS client type is only for test purpose */ - __stringify(IPA_CLIENT_TEST_CONS), - __stringify(IPA_CLIENT_TEST1_CONS), - __stringify(IPA_CLIENT_TEST2_CONS), - __stringify(IPA_CLIENT_TEST3_CONS), - __stringify(IPA_CLIENT_TEST4_CONS), -}; - int ipa3_active_clients_log_print_buffer(char *buf, int size) { int i; @@ -3149,7 +3077,7 @@ static void ipa3_start_tag_process(struct work_struct *work) * - Remove and deallocate unneeded data structure * - Log the call in the circular history buffer 
(unless it is a simple call) */ -void ipa3_active_clients_log_mod(struct ipa3_active_client_logging_info *id, +void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id, bool inc, bool int_ctx) { char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]; @@ -3205,13 +3133,13 @@ void ipa3_active_clients_log_mod(struct ipa3_active_client_logging_info *id, } } -void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id, +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, bool int_ctx) { ipa3_active_clients_log_mod(id, false, int_ctx); } -void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id, +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, bool int_ctx) { ipa3_active_clients_log_mod(id, true, int_ctx); @@ -3224,7 +3152,7 @@ void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id, * Return codes: * None */ -void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id) +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id) { ipa3_active_clients_lock(); ipa3_active_clients_log_inc(id, false); @@ -3243,7 +3171,7 @@ void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id) * Return codes: 0 for success * -EPERM if an asynchronous action should have been done */ -int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info *id) { int res = 0; @@ -3276,9 +3204,9 @@ bail: * Return codes: * None */ -void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id) +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id) { - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; ipa3_active_clients_lock(); ipa3_active_clients_log_dec(id, false); @@ -3622,7 +3550,7 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void) { int 
res; u32 ipa_clk_state; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; if (ipa3_ctx->smp2p_info.res_sent) return; @@ -4012,7 +3940,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, int i; struct ipa3_flt_tbl *flt_tbl; struct ipa3_rt_tbl_set *rset; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPADBG("IPA Driver initialization started\n"); @@ -5111,7 +5039,7 @@ static void ipa_gsi_request_resource(struct work_struct *work) void ipa_gsi_req_res_cb(void *user_data, bool *granted) { unsigned long flags; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 3fa7d4121a1b..a7ba4ca49ecf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -35,6 +35,7 @@ #include "../ipa_api.h" #include "ipahal/ipahal_reg.h" #include "ipahal/ipahal.h" +#include "../ipa_common_i.h" #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" @@ -54,9 +55,6 @@ #define IPA_UC_WAII_MAX_SLEEP 1200 #define IPA_MAX_STATUS_STAT_NUM 30 -#define __FILENAME__ \ - (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) - #define IPA_IPC_LOGGING(buf, fmt, args...) 
\ ipc_log_string((buf), \ @@ -213,113 +211,16 @@ #define IPA_SLEEP_CLK_RATE_KHZ (32) -#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = EP; \ - log_info.id_string = ipa3_clients_strings[client] - -#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = SIMPLE; \ - log_info.id_string = __func__ - -#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = RESOURCE; \ - log_info.id_string = resource_name - -#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \ - log_info.file = __FILENAME__; \ - log_info.line = __LINE__; \ - log_info.type = SPECIAL; \ - log_info.id_string = id_str - -#define IPA_ACTIVE_CLIENTS_INC_EP(client) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ - ipa3_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ - ipa3_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ - ipa3_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ - ipa3_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ - ipa3_inc_client_enable_clks(&log_info); \ - } while (0) - -#define 
IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ - ipa3_dec_client_disable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ - ipa3_inc_client_enable_clks(&log_info); \ - } while (0) - -#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \ - do { \ - struct ipa3_active_client_logging_info log_info; \ - IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ - ipa3_dec_client_disable_clks(&log_info); \ - } while (0) - #define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 #define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96 #define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 #define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40 -extern const char *ipa3_clients_strings[]; - -enum ipa3_active_client_log_type { - EP, - SIMPLE, - RESOURCE, - SPECIAL, - INVALID -}; - -struct ipa3_active_client_logging_info { - const char *id_string; - char *file; - int line; - enum ipa3_active_client_log_type type; -}; - struct ipa3_active_client_htable_entry { struct hlist_node list; char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; int count; - enum ipa3_active_client_log_type type; + enum ipa_active_client_log_type type; }; struct ipa3_active_clients_log_ctx { @@ -2228,13 +2129,13 @@ int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary); struct ipa3_context *ipa3_get_ctx(void); void ipa3_enable_clks(void); void ipa3_disable_clks(void); -void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id); -int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info *id); -void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id); 
-void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id, +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, bool int_ctx); -void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id, +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, bool int_ctx); int ipa3_active_clients_log_print_buffer(char *buf, int size); int ipa3_active_clients_log_print_table(char *buf, int size); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c index 4566b8c4ea84..426836bb6363 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -151,7 +151,7 @@ int ipa3_rm_resource_consumer_request( { int result = 0; enum ipa3_rm_resource_state prev_state; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPA_RM_DBG("%s state: %d\n", ipa3_rm_resource_str(consumer->resource.name), diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c index 1e03e6497ad6..7bc11a339633 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -383,7 +383,7 @@ int ipa3_uc_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { int result = 0; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c 
b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 1c4f812bc40f..38837bbfe09d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -631,7 +631,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource) struct ipa_ep_cfg_ctrl suspend; int ipa_ep_idx; unsigned long flags; - struct ipa3_active_client_logging_info log_info; + struct ipa_active_client_logging_info log_info; if (ipa3_active_clients_trylock(&flags) == 0) return -EPERM; @@ -4584,6 +4584,16 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info; api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel; api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb; + api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa3_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa3_suspend_resource_no_block; + api_ctrl->ipa_resume_resource = ipa3_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa3_set_required_perf_profile; return 0; } -- cgit v1.2.3 From 2311947aca58e833a07a32f6fff25c5d11ae7ffc Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Tue, 29 Mar 2016 10:48:41 -0700 Subject: msm: ipa: unify IPA RM Unify IPA RM from ipa_v2 and ipa_v3 to common driver under ipa folder CRs-Fixed: 995821 Change-Id: I4a7d8c328af7cd5506b3fbbdc76b1bc5bb0de698 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/Makefile | 7 +- drivers/platform/msm/ipa/ipa_api.c | 333 ------ drivers/platform/msm/ipa/ipa_api.h | 43 - drivers/platform/msm/ipa/ipa_clients/ipa_usb.c | 39 +- drivers/platform/msm/ipa/ipa_rm.c | 1091 ++++++++++++++++++ drivers/platform/msm/ipa/ipa_rm_dependency_graph.c | 245 ++++ 
drivers/platform/msm/ipa/ipa_rm_dependency_graph.h | 47 + drivers/platform/msm/ipa/ipa_rm_i.h | 130 +++ drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c | 272 +++++ drivers/platform/msm/ipa/ipa_rm_peers_list.c | 246 ++++ drivers/platform/msm/ipa/ipa_rm_peers_list.h | 53 + drivers/platform/msm/ipa/ipa_rm_resource.c | 1171 +++++++++++++++++++ drivers/platform/msm/ipa/ipa_rm_resource.h | 163 +++ drivers/platform/msm/ipa/ipa_v2/Makefile | 3 +- drivers/platform/msm/ipa/ipa_v2/ipa.c | 14 +- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 43 - drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 24 +- drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_rm.c | 1077 ------------------ .../msm/ipa/ipa_v2/ipa_rm_dependency_graph.c | 245 ---- .../msm/ipa/ipa_v2/ipa_rm_dependency_graph.h | 47 - drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h | 128 --- .../msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c | 268 ----- .../platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c | 247 ---- .../platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h | 53 - drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c | 1164 ------------------- drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h | 162 --- drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 20 +- drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 62 +- drivers/platform/msm/ipa/ipa_v2/teth_bridge.c | 16 +- drivers/platform/msm/ipa/ipa_v3/Makefile | 3 +- drivers/platform/msm/ipa/ipa_v3/ipa.c | 24 +- drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 4 +- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 43 - drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 32 +- drivers/platform/msm/ipa/ipa_v3/ipa_rm.c | 1039 ----------------- .../msm/ipa/ipa_v3/ipa_rm_dependency_graph.c | 245 ---- .../msm/ipa/ipa_v3/ipa_rm_dependency_graph.h | 47 - drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h | 129 --- .../msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c | 268 ----- .../platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c | 247 ---- 
.../platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h | 53 - drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c | 1176 -------------------- drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h | 164 --- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 24 +- drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 62 +- drivers/platform/msm/ipa/ipa_v3/teth_bridge.c | 16 +- 48 files changed, 3576 insertions(+), 7417 deletions(-) create mode 100644 drivers/platform/msm/ipa/ipa_rm.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_i.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_resource.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_resource.h delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm.c delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c delete mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm.c delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c delete mode 100644 
drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c delete mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile index 8bc7d8a498f6..704dd0abfefa 100644 --- a/drivers/platform/msm/ipa/Makefile +++ b/drivers/platform/msm/ipa/Makefile @@ -1,5 +1,4 @@ -obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ -obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ +obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common +obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common -obj-$(CONFIG_IPA) += ipa_api.o -obj-$(CONFIG_IPA3) += ipa_api.o +ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index a8ccba973776..13ea3b2fb920 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -1530,314 +1530,6 @@ int ipa_uc_dereg_rdyCB(void) } EXPORT_SYMBOL(ipa_uc_dereg_rdyCB); -/** - * ipa_rm_create_resource() - create resource - * @create_params: [in] parameters needed - * for resource initialization - * - * Returns: 0 on success, negative on failure - * - * This function is called by IPA RM client to initialize client's resources. - * This API should be called before any other IPA RM API on a given resource - * name. - * - */ -int ipa_rm_create_resource(struct ipa_rm_create_params *create_params) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_create_resource, create_params); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_create_resource); - -/** - * ipa_rm_delete_resource() - delete resource - * @resource_name: name of resource to be deleted - * - * Returns: 0 on success, negative on failure - * - * This function is called by IPA RM client to delete client's resources. 
- * - */ -int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_delete_resource, resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_delete_resource); - -/** - * ipa_rm_add_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case client registered with IPA RM - */ -int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_add_dependency, resource_name, - depends_on_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_add_dependency); - -/** - * ipa_rm_delete_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case client registered with IPA RM - */ -int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_delete_dependency, resource_name, - depends_on_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_delete_dependency); - -/** - * ipa_rm_request_resource() - request resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED - * on successful completion of this operation. 
- */ -int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_request_resource, resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_request_resource); - -/** - * ipa_rm_release_resource() - release resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED - * on successful completion of this operation. - */ -int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_release_resource, resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_release_resource); - -/** - * ipa_rm_register() - register for event - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided later in ipa_rm_deregister() call. - */ -int ipa_rm_register(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_register, resource_name, reg_params); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_register); - -/** - * ipa_rm_deregister() - cancel the registration - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided in ipa_rm_register() call. - */ -int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_deregister, resource_name, reg_params); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_deregister); - -/** - * ipa_rm_set_perf_profile() - set performance profile - * @resource_name: resource name - * @profile: [in] profile information. 
- * - * Returns: 0 on success, negative on failure - * - * Set resource performance profile. - * Updates IPA driver if performance level changed. - */ -int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile) -{ - int ret; - - IPA_API_DISPATCH_RETURN( - ipa_rm_set_perf_profile, - resource_name, - profile); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_set_perf_profile); - -/** - * ipa_rm_notify_completion() - - * consumer driver notification for - * request_resource / release_resource operations - * completion - * @event: notified event - * @resource_name: resource name - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_notify_completion(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_notify_completion, event, resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_notify_completion); - -/** -* ipa_rm_inactivity_timer_init() - Init function for IPA RM -* inactivity timer. This function shall be called prior calling -* any other API of IPA RM inactivity timer. -* -* @resource_name: Resource name. @see ipa_rm.h -* @msecs: time in miliseccond, that IPA RM inactivity timer -* shall wait prior calling to ipa_rm_release_resource(). -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, - unsigned long msecs) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_init, resource_name, - msecs); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_inactivity_timer_init); - -/** -* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA -* RM inactivity timer. -* -* @resource_name: Resource name. 
@see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_destroy, resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy); - -/** -* ipa_rm_inactivity_timer_request_resource() - Same as -* ipa_rm_request_resource(), with a difference that calling to -* this function will also cancel the inactivity timer, if -* ipa_rm_inactivity_timer_release_resource() was called earlier. -* -* @resource_name: Resource name. @see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa_rm_inactivity_timer_request_resource( - enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_request_resource, - resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource); - -/** -* ipa_rm_inactivity_timer_release_resource() - Sets the -* inactivity timer to the timeout set by -* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA -* RM inactivity timer will call to ipa_rm_release_resource(). -* If a call to ipa_rm_inactivity_timer_request_resource() was -* made BEFORE the timout has expired, rge timer will be -* cancelled. -* -* @resource_name: Resource name. 
@see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa_rm_inactivity_timer_release_resource( - enum ipa_rm_resource_name resource_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_inactivity_timer_release_resource, - resource_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource); - /** * teth_bridge_init() - Initialize the Tethering bridge driver * @params - in/out params for USB initialization API (please look at struct @@ -2502,31 +2194,6 @@ int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) } EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr); -/** - * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources - * in a synchronized fashion. In case a producer resource is in GRANTED state - * and the newly added consumer resource is in RELEASED state, the consumer - * entity will be requested and the function will block until the consumer - * is granted. - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: May block. See documentation above. 
- */ -int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - int ret; - - IPA_API_DISPATCH_RETURN(ipa_rm_add_dependency_sync, resource_name, - depends_on_name); - - return ret; -} -EXPORT_SYMBOL(ipa_rm_add_dependency_sync); - /** * ipa_get_dma_dev()- Returns ipa_ctx dma dev pointer * diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 6cab1273e7e9..7edbf4e5b1d9 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -185,45 +185,6 @@ struct ipa_api_controller { int (*ipa_uc_dereg_rdyCB)(void); - int (*ipa_rm_create_resource)( - struct ipa_rm_create_params *create_params); - - int (*ipa_rm_delete_resource)(enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_register)(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - - int (*ipa_rm_deregister)(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - - int (*ipa_rm_set_perf_profile)(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile); - - int (*ipa_rm_add_dependency)(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - - int (*ipa_rm_delete_dependency)(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - - int (*ipa_rm_request_resource)(enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_release_resource)(enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_notify_completion)(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_inactivity_timer_init)(enum ipa_rm_resource_name - resource_name, unsigned long msecs); - - int (*ipa_rm_inactivity_timer_destroy)( - enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_inactivity_timer_request_resource)( - enum ipa_rm_resource_name resource_name); - - int (*ipa_rm_inactivity_timer_release_resource)( - enum 
ipa_rm_resource_name resource_name); - int (*teth_bridge_init)(struct teth_bridge_init_params *params); int (*teth_bridge_disconnect)(enum ipa_client_type client); @@ -310,10 +271,6 @@ struct ipa_api_controller { int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size, uint32_t agg_count); - int (*ipa_rm_add_dependency_sync)( - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - struct device *(*ipa_get_dma_dev)(void); int (*ipa_release_wdi_mapping)(u32 num_buffers, diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index a0c94c8e37ec..8004fa9e42ae 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -19,6 +19,7 @@ #include #include #include "../ipa_v3/ipa_i.h" +#include "../ipa_rm_i.h" #define IPA_USB_RM_TIMEOUT_MSEC 10000 #define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000 @@ -394,7 +395,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, } /* Notify RM that consumer is granted */ if (rm_ctx->cons_requested) { - ipa3_rm_notify_completion( + ipa_rm_notify_completion( IPA_RM_RESOURCE_GRANTED, rm_ctx->cons_params.name); rm_ctx->cons_state = IPA_USB_CONS_GRANTED; @@ -529,12 +530,12 @@ static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event, switch (event) { case IPA_RM_RESOURCE_GRANTED: IPA_USB_DBG(":%s granted\n", - ipa3_rm_resource_str(rm_ctx->prod_params.name)); + ipa_rm_resource_str(rm_ctx->prod_params.name)); complete_all(&rm_ctx->prod_comp); break; case IPA_RM_RESOURCE_RELEASED: IPA_USB_DBG(":%s released\n", - ipa3_rm_resource_str(rm_ctx->prod_params.name)); + ipa_rm_resource_str(rm_ctx->prod_params.name)); complete_all(&rm_ctx->prod_comp); break; } @@ -824,13 +825,13 @@ static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype) result = ipa_rm_create_resource(&rm_ctx->prod_params); if (result) { IPA_USB_ERR("Failed to create %s RM resource\n", - 
ipa3_rm_resource_str(rm_ctx->prod_params.name)); + ipa_rm_resource_str(rm_ctx->prod_params.name)); return result; } rm_ctx->prod_valid = true; created = true; IPA_USB_DBG("Created %s RM resource\n", - ipa3_rm_resource_str(rm_ctx->prod_params.name)); + ipa_rm_resource_str(rm_ctx->prod_params.name)); } /* Create CONS */ @@ -852,12 +853,12 @@ static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype) result = ipa_rm_create_resource(&rm_ctx->cons_params); if (result) { IPA_USB_ERR("Failed to create %s RM resource\n", - ipa3_rm_resource_str(rm_ctx->cons_params.name)); + ipa_rm_resource_str(rm_ctx->cons_params.name)); goto create_cons_rsc_fail; } rm_ctx->cons_valid = true; IPA_USB_DBG("Created %s RM resource\n", - ipa3_rm_resource_str(rm_ctx->cons_params.name)); + ipa_rm_resource_str(rm_ctx->cons_params.name)); } return 0; @@ -1298,11 +1299,11 @@ static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype) const char *rsrc_str; rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; - rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name); + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); IPA_USB_DBG_LOW("requesting %s\n", rsrc_str); init_completion(&rm_ctx->prod_comp); - result = ipa3_rm_request_resource(rm_ctx->prod_params.name); + result = ipa_rm_request_resource(rm_ctx->prod_params.name); if (result) { if (result != -EINPROGRESS) { IPA_USB_ERR("failed to request %s: %d\n", @@ -1328,7 +1329,7 @@ static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype) const char *rsrc_str; rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; - rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name); + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); IPA_USB_DBG_LOW("releasing %s\n", rsrc_str); @@ -1408,10 +1409,10 @@ static int ipa3_usb_connect_dpl(void) * is sync in order to make sure the IPA clocks are up before we * continue and notify the USB driver it may continue. 
*/ - res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res < 0) { - IPA_USB_ERR("ipa3_rm_add_dependency_sync() failed.\n"); + IPA_USB_ERR("ipa_rm_add_dependency_sync() failed.\n"); return res; } @@ -1420,11 +1421,11 @@ static int ipa3_usb_connect_dpl(void) * status is connected (which can happen only later in the flow), * the clocks are already up so the call doesn't need to block. */ - res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_DPL_CONS); if (res < 0 && res != -EINPROGRESS) { - IPA_USB_ERR("ipa3_rm_add_dependency() failed.\n"); - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_USB_ERR("ipa_rm_add_dependency() failed.\n"); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, IPA_RM_RESOURCE_Q6_CONS); return res; } @@ -1590,12 +1591,12 @@ static int ipa3_usb_disconnect_dpl(void) int res; /* Remove DPL RM dependency */ - res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res) IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n"); - res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_DPL_CONS); if (res) IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n"); @@ -1716,7 +1717,7 @@ static int ipa3_usb_xdci_connect_internal( &profile); if (result) { IPA_USB_ERR("failed to set %s perf profile\n", - ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype]. + ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype]. rm_ctx.prod_params.name)); return result; } @@ -1725,7 +1726,7 @@ static int ipa3_usb_xdci_connect_internal( &profile); if (result) { IPA_USB_ERR("failed to set %s perf profile\n", - ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype]. 
+ ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype]. rm_ctx.cons_params.name)); return result; } diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c new file mode 100644 index 000000000000..53c72b154096 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm.c @@ -0,0 +1,1091 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" + +static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = { + __stringify(IPA_RM_RESOURCE_Q6_PROD), + __stringify(IPA_RM_RESOURCE_USB_PROD), + __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD), + __stringify(IPA_RM_RESOURCE_HSIC_PROD), + __stringify(IPA_RM_RESOURCE_STD_ECM_PROD), + __stringify(IPA_RM_RESOURCE_RNDIS_PROD), + __stringify(IPA_RM_RESOURCE_WWAN_0_PROD), + __stringify(IPA_RM_RESOURCE_WLAN_PROD), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD), + __stringify(IPA_RM_RESOURCE_MHI_PROD), + __stringify(IPA_RM_RESOURCE_Q6_CONS), + __stringify(IPA_RM_RESOURCE_USB_CONS), + __stringify(IPA_RM_RESOURCE_USB_DPL_CONS), + __stringify(IPA_RM_RESOURCE_HSIC_CONS), + __stringify(IPA_RM_RESOURCE_WLAN_CONS), + __stringify(IPA_RM_RESOURCE_APPS_CONS), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS), + __stringify(IPA_RM_RESOURCE_MHI_CONS), +}; + +struct ipa_rm_profile_vote_type { + enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX]; + enum ipa_voltage_level curr_volt; + u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX]; + u32 
bw_cons[IPA_RM_RESOURCE_CONS_MAX]; + u32 curr_bw; +}; + +struct ipa_rm_context_type { + struct ipa_rm_dep_graph *dep_graph; + struct workqueue_struct *ipa_rm_wq; + spinlock_t ipa_rm_lock; + struct ipa_rm_profile_vote_type prof_vote; +}; +static struct ipa_rm_context_type *ipa_rm_ctx; + +struct ipa_rm_notify_ipa_work_type { + struct work_struct work; + enum ipa_voltage_level volt; + u32 bandwidth_mbps; +}; + +/** + * ipa_rm_create_resource() - create resource + * @create_params: [in] parameters needed + * for resource initialization + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to initialize client's resources. + * This API should be called before any other IPA RM API on a given resource + * name. + * + */ +int ipa_rm_create_resource(struct ipa_rm_create_params *create_params) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!create_params) { + IPA_RM_ERR("invalid args\n"); + return -EINVAL; + } + IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name)); + + if (create_params->floor_voltage < 0 || + create_params->floor_voltage >= IPA_VOLTAGE_MAX) { + IPA_RM_ERR("invalid voltage %d\n", + create_params->floor_voltage); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + create_params->name, + &resource) == 0) { + IPA_RM_ERR("resource already exists\n"); + result = -EEXIST; + goto bail; + } + result = ipa_rm_resource_create(create_params, + &resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_create() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n"); + ipa_rm_resource_delete(resource); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + 
IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_create_resource); + +/** + * ipa_rm_delete_resource() - delete resource + * @resource_name: name of resource to be deleted + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to delete client's resources. + * + */ +int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exist\n"); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_delete(resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_delete() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph, + resource_name); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n"); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_delete_resource); + +/** + * ipa_rm_add_dependency() - create dependency + * between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + 
ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_add_dependency); + +/** + * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources + * in a synchronized fashion. In case a producer resource is in GRANTED state + * and the newly added consumer resource is in RELEASED state, the consumer + * entity will be requested and the function will block until the consumer + * is granted. + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: May block. See documentation above. + */ +int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + int result; + struct ipa_rm_resource *consumer; + unsigned long time; + unsigned long flags; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + if (result == -EINPROGRESS) { + ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + depends_on_name, + &consumer); + IPA_RM_DBG("%s waits for GRANT of %s.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + time = wait_for_completion_timeout( + &((struct ipa_rm_resource_cons *)consumer)-> + request_consumer_in_progress, + HZ); + result = 0; + if (!time) { + IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.", + 
ipa_rm_resource_str(depends_on_name)); + result = -ETIME; + } + IPA_RM_DBG("%s waited for %s GRANT %lu time.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name), + time); + } + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_add_dependency_sync); + +/** + * ipa_rm_delete_dependency() - create dependency + * between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_delete_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_delete_dependency); + +/** + * ipa_rm_request_resource() - request resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED + * on successful completion of this operation. 
+ */ +int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_request( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_request_resource); + +void delayed_release_work_func(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *rwork = container_of( + to_delayed_work(work), + struct ipa_rm_delayed_release_work_type, + work); + + if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + kfree(rwork); + return; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + rwork->resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + goto bail; + } + + ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)resource, rwork->needed_bw, + rwork->dec_usage_count); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + kfree(rwork); + +} + +/** + * ipa_rm_request_resource_with_timer() - requests the specified consumer + * resource and releases it after 1 second + * @resource_name: name of the requested resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + 
struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *release_work; + int result; + + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)resource, 0, false, true); + if (result != 0 && result != -EINPROGRESS) { + IPA_RM_ERR("consumer request returned error %d\n", result); + result = -EPERM; + goto bail; + } + + release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC); + if (!release_work) { + result = -ENOMEM; + goto bail; + } + release_work->resource_name = resource->name; + release_work->needed_bw = 0; + release_work->dec_usage_count = false; + INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func); + schedule_delayed_work(&release_work->work, + msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC)); + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_release_resource() - release resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED + * on successful completion of this operation. 
+ */ +int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_release( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_release_resource); + +/** + * ipa_rm_register() - register for event + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided later in ipa_rm_deregister() call. 
+ */ +int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_register( + (struct ipa_rm_resource_prod *)resource, + reg_params, + true); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_register); + +/** + * ipa_rm_deregister() - cancel the registration + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided in ipa_rm_register() call. 
+ */ +int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_deregister( + (struct ipa_rm_resource_prod *)resource, + reg_params); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_deregister); + +/** + * ipa_rm_set_perf_profile() - set performance profile + * @resource_name: resource name + * @profile: [in] profile information. + * + * Returns: 0 on success, negative on failure + * + * Set resource performance profile. + * Updates IPA driver if performance level changed. 
+ */ +int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + if (profile) + IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps); + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_set_perf_profile(resource, profile); + if (result) { + IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n", + result); + goto bail; + } + + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_set_perf_profile); + +/** + * ipa_rm_notify_completion() - + * consumer driver notification for + * request_resource / release_resource operations + * completion + * @event: notified event + * @resource_name: resource name + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("event %d on %s\n", event, + ipa_rm_resource_str(resource_name)); + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + result = -EINVAL; + goto bail; + } + ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB, + resource_name, + event, + false); + result = 0; +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_notify_completion); + +static void ipa_rm_wq_handler(struct work_struct *work) +{ + unsigned long flags; + 
struct ipa_rm_resource *resource; + struct ipa_rm_wq_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_work_type, + work); + IPA_RM_DBG("%s cmd=%d event=%d notify_registered_only=%d\n", + ipa_rm_resource_str(ipa_rm_work->resource_name), + ipa_rm_work->wq_cmd, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + switch (ipa_rm_work->wq_cmd) { + case IPA_RM_WQ_NOTIFY_PROD: + if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not PROD\n"); + goto free_work; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_producer_notify_clients( + (struct ipa_rm_resource_prod *)resource, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + break; + case IPA_RM_WQ_NOTIFY_CONS: + break; + case IPA_RM_WQ_RESOURCE_CB: + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_consumer_handle_cb( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->event); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + break; + default: + break; + } + +free_work: + kfree((void *) work); +} + +static void ipa_rm_wq_resume_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG("resume work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if 
(!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + goto bail; + } + ipa_rm_resource_consumer_request_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); +bail: + kfree(ipa_rm_work); +} + + +static void ipa_rm_wq_suspend_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG("suspend work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + ipa_suspend_resource_sync(ipa_rm_work->resource_name); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + return; + } + ipa_rm_resource_consumer_release_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, + true); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + kfree(ipa_rm_work); +} + +/** + * ipa_rm_wq_send_cmd() - send a command for deferred work + * @wq_cmd: command that should be executed + * @resource_name: resource on which command should be executed + * 
@notify_registered_only: notify only clients registered by + * ipa_rm_register() + * + * Returns: 0 on success, negative otherwise + */ +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only) +{ + int result = -ENOMEM; + struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC); + + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler); + work->wq_cmd = wq_cmd; + work->resource_name = resource_name; + work->event = event; + work->notify_registered_only = notify_registered_only; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } else { + IPA_RM_ERR("no mem\n"); + } + + return result; +} + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, + ipa_rm_wq_suspend_handler); + work->resource_name = resource_name; + work->prev_state = prev_state; + work->needed_bw = needed_bw; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } else { + IPA_RM_ERR("no mem\n"); + } + + return result; +} + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler); + work->resource_name = resource_name; + work->prev_state = prev_state; + work->needed_bw = needed_bw; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } else { + IPA_RM_ERR("no mem\n"); + } + + return result; +} +/** + * ipa_rm_initialize() - initialize IPA RM component + * + * Returns: 0 on success, negative 
otherwise + */ +int ipa_rm_initialize(void) +{ + int result; + + ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL); + if (!ipa_rm_ctx) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq"); + if (!ipa_rm_ctx->ipa_rm_wq) { + IPA_RM_ERR("create workqueue failed\n"); + result = -ENOMEM; + goto create_wq_fail; + } + result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph)); + if (result) { + IPA_RM_ERR("create dependency graph failed\n"); + goto graph_alloc_fail; + } + spin_lock_init(&ipa_rm_ctx->ipa_rm_lock); + IPA_RM_DBG("SUCCESS\n"); + + return 0; +graph_alloc_fail: + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); +create_wq_fail: + kfree(ipa_rm_ctx); +bail: + return result; +} + +/** + * ipa_rm_stat() - print RM stat + * @buf: [in] The user buff used to print + * @size: [in] The size of buf + * Returns: number of bytes used on success, negative on failure + * + * This function is called by ipa_debugfs in order to receive + * a full picture of the current state of the RM + */ + +int ipa_rm_stat(char *buf, int size) +{ + unsigned long flags; + int i, cnt = 0, result = EINVAL; + struct ipa_rm_resource *resource = NULL; + + if (!buf || size < 0) + return result; + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) { + result = ipa_rm_dep_graph_get_resource( + ipa_rm_ctx->dep_graph, + i, + &resource); + if (!result) { + result = ipa_rm_resource_producer_print_stat( + resource, buf + cnt, + size-cnt); + if (result < 0) + goto bail; + cnt += result; + } + } + result = cnt; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_resource_str() - returns string that represent the resource + * @resource_name: [in] resource name + */ +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name) +{ + if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX) + return "INVALID 
RESOURCE"; + + return resource_name_to_str[resource_name]; +}; + +static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work) +{ + struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work, + struct ipa_rm_notify_ipa_work_type, + work); + int res; + + IPA_RM_DBG("calling to IPA driver. voltage %d bandwidth %d\n", + notify_work->volt, notify_work->bandwidth_mbps); + + res = ipa_set_required_perf_profile(notify_work->volt, + notify_work->bandwidth_mbps); + if (res) { + IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res); + goto bail; + } + + IPA_RM_DBG("IPA driver notified\n"); +bail: + kfree(notify_work); +} + +static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt, + u32 bandwidth) +{ + struct ipa_rm_notify_ipa_work_type *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + IPA_RM_ERR("no mem\n"); + return; + } + + INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work); + work->volt = volt; + work->bandwidth_mbps = bandwidth; + queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work); +} + +/** + * ipa_rm_perf_profile_change() - change performance profile vote for resource + * @resource_name: [in] resource name + * + * change bandwidth and voltage vote based on resource state. 
+ */ +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) +{ + enum ipa_voltage_level old_volt; + u32 *bw_ptr; + u32 old_bw; + struct ipa_rm_resource *resource; + int i; + u32 sum_bw_prod = 0; + u32 sum_bw_cons = 0; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + WARN_ON(1); + return; + } + + old_volt = ipa_rm_ctx->prof_vote.curr_volt; + old_bw = ipa_rm_ctx->prof_vote.curr_bw; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) { + bw_ptr = &ipa_rm_ctx->prof_vote.bw_prods[resource_name]; + } else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { + bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[ + resource_name - IPA_RM_RESOURCE_PROD_MAX]; + } else { + IPA_RM_ERR("Invalid resource_name\n"); + return; + } + + switch (resource->state) { + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + IPA_RM_DBG("max_bw = %d, needed_bw = %d\n", + resource->max_bw, resource->needed_bw); + *bw_ptr = min(resource->max_bw, resource->needed_bw); + ipa_rm_ctx->prof_vote.volt[resource_name] = + resource->floor_voltage; + break; + + case IPA_RM_RELEASE_IN_PROGRESS: + case IPA_RM_RELEASED: + *bw_ptr = 0; + ipa_rm_ctx->prof_vote.volt[resource_name] = 0; + break; + + default: + IPA_RM_ERR("unknown state %d\n", resource->state); + WARN_ON(1); + return; + } + IPA_RM_DBG("resource bandwidth: %d voltage: %d\n", *bw_ptr, + resource->floor_voltage); + + ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED; + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (ipa_rm_ctx->prof_vote.volt[i] > + ipa_rm_ctx->prof_vote.curr_volt) { + ipa_rm_ctx->prof_vote.curr_volt = + ipa_rm_ctx->prof_vote.volt[i]; + } + } + + for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++) + sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i]; + + for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++) + sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i]; + + IPA_RM_DBG("all prod 
bandwidth: %d all cons bandwidth: %d\n", + sum_bw_prod, sum_bw_cons); + ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons); + + if (ipa_rm_ctx->prof_vote.curr_volt == old_volt && + ipa_rm_ctx->prof_vote.curr_bw == old_bw) { + IPA_RM_DBG("same voting\n"); + return; + } + + IPA_RM_DBG("new voting: voltage %d bandwidth %d\n", + ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + return; +}; + +/** + * ipa_rm_exit() - free all IPA RM resources + */ +void ipa_rm_exit(void) +{ + IPA_RM_DBG("ENTER\n"); + ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph); + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); + kfree(ipa_rm_ctx); + ipa_rm_ctx = NULL; + IPA_RM_DBG("EXIT\n"); +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c new file mode 100644 index 000000000000..fd437b0c8775 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c @@ -0,0 +1,245 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" + +static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) + resource_index = ipa_rm_cons_index(resource_name); + + return resource_index; +} + +/** + * ipa_rm_dep_graph_create() - creates graph + * @dep_graph: [out] created dependency graph + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph) +{ + int result = 0; + + *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL); + if (!*dep_graph) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } +bail: + return result; +} + +/** + * ipa_rm_dep_graph_delete() - destroys the graph + * @graph: [in] dependency graph + * + * Frees all resources. + */ +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph) +{ + int resource_index; + + if (!graph) { + IPA_RM_ERR("invalid params\n"); + return; + } + for (resource_index = 0; + resource_index < IPA_RM_RESOURCE_MAX; + resource_index++) + kfree(graph->resource_table[resource_index]); + memset(graph->resource_table, 0, sizeof(graph->resource_table)); +} + +/** + * ipa_rm_dep_graph_get_resource() - provides a resource by name + * @graph: [in] dependency graph + * @name: [in] name of the resource + * @resource: [out] resource in case of success + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_resource **resource) +{ + int result; + int resource_index; + + if (!graph) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource_name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + *resource = 
graph->resource_table[resource_index]; + if (!*resource) { + result = -EINVAL; + goto bail; + } + result = 0; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_add() - adds resource to graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource) +{ + int result = 0; + int resource_index; + + if (!graph || !resource) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource->name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + graph->resource_table[resource_index] = resource; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_remove() - removes resource from graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name) +{ + if (!graph) + return -EINVAL; + graph->resource_table[resource_name] = NULL; + + return 0; +} + +/** + * ipa_rm_dep_graph_add_dependency() - adds dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to add + * @depends_on_name: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_ERR("%s does not exist\n", + 
ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_add_dependency(dependent, dependency); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_dep_graph_delete_dependency() - deleted dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to delete + * @depends_on_name: [in] resource to delete + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + + result = ipa_rm_resource_delete_dependency(dependent, dependency); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h new file mode 100644 index 000000000000..b76c6636f873 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_ +#define _IPA_RM_DEPENDENCY_GRAPH_H_ + +#include +#include +#include "ipa_rm_resource.h" + +struct ipa_rm_dep_graph { + struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX]; +}; + +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name name, + struct ipa_rm_resource **resource); + +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph); + +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph); + +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource); + +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name); + +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h new file mode 100644 index 000000000000..65dbff66a6dd --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_i.h @@ -0,0 +1,130 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_RM_I_H_ +#define _IPA_RM_I_H_ + +#include +#include +#include "ipa_rm_resource.h" + +#define IPA_RM_DRV_NAME "ipa_rm" + +#define IPA_RM_DBG_LOW(fmt, args...) \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) +#define IPA_RM_DBG(fmt, args...) \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) +#define IPA_RM_ERR(fmt, args...) \ + pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +#define IPA_RM_RESOURCE_CONS_MAX \ + (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX) +#define IPA_RM_RESORCE_IS_PROD(x) \ + (x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX) +#define IPA_RM_RESORCE_IS_CONS(x) \ + (x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX) +#define IPA_RM_INDEX_INVALID (-1) +#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000 + +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name); +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name); + +/** + * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release + * work type + * @delayed_work: work struct + * @ipa_rm_resource_name: name of the resource on which this work should be done + * @needed_bw: bandwidth required for resource in Mbps + * @dec_usage_count: decrease usage count on release ? 
+ */ +struct ipa_rm_delayed_release_work_type { + struct delayed_work work; + enum ipa_rm_resource_name resource_name; + u32 needed_bw; + bool dec_usage_count; + +}; + +/** + * enum ipa_rm_wq_cmd - workqueue commands + */ +enum ipa_rm_wq_cmd { + IPA_RM_WQ_NOTIFY_PROD, + IPA_RM_WQ_NOTIFY_CONS, + IPA_RM_WQ_RESOURCE_CB +}; + +/** + * struct ipa_rm_wq_work_type - IPA RM worqueue specific + * work type + * @work: work struct + * @wq_cmd: command that should be processed in workqueue context + * @resource_name: name of the resource on which this work + * should be done + * @dep_graph: data structure to search for resource if exists + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +struct ipa_rm_wq_work_type { + struct work_struct work; + enum ipa_rm_wq_cmd wq_cmd; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_event event; + bool notify_registered_only; +}; + +/** + * struct ipa_rm_wq_suspend_resume_work_type - IPA RM worqueue resume or + * suspend work type + * @work: work struct + * @resource_name: name of the resource on which this work + * should be done + * @prev_state: + * @needed_bw: + */ +struct ipa_rm_wq_suspend_resume_work_type { + struct work_struct work; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_resource_state prev_state; + u32 needed_bw; + +}; + +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw); + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw); + +int ipa_rm_initialize(void); + +int ipa_rm_stat(char *buf, int size); + +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name); + +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name 
resource_name); + +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name); + +void delayed_release_work_func(struct work_struct *work); + +void ipa_rm_exit(void); + +#endif /* _IPA_RM_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c new file mode 100644 index 000000000000..2f2cef05441d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c @@ -0,0 +1,272 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_rm_i.h" + +/** + * struct ipa_rm_it_private - IPA RM Inactivity Timer private + * data + * @initied: indicates if instance was initialized + * @lock - spinlock for mutual exclusion + * @resource_name - resource name + * @work: delayed work object for running delayed releas + * function + * @resource_requested: boolean flag indicates if resource was requested + * @reschedule_work: boolean flag indicates to not release and to + * reschedule the release work. + * @work_in_progress: boolean flag indicates is release work was scheduled. 
+ * @jiffies: number of jiffies for timeout + * + * WWAN private - holds all relevant info about WWAN driver + */ +struct ipa_rm_it_private { + bool initied; + enum ipa_rm_resource_name resource_name; + spinlock_t lock; + struct delayed_work work; + bool resource_requested; + bool reschedule_work; + bool work_in_progress; + unsigned long jiffies; +}; + +static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX]; + +/** + * ipa_rm_inactivity_timer_func() - called when timer expired in + * the context of the shared workqueue. Checks internally if + * reschedule_work flag is set. In case it is not set this function calls to + * ipa_rm_release_resource(). In case reschedule_work is set this function + * reschedules the work. This flag is cleared when + * calling to ipa_rm_inactivity_timer_release_resource(). + * + * @work: work object provided by the work queue + * + * Return codes: + * None + */ +static void ipa_rm_inactivity_timer_func(struct work_struct *work) +{ + + struct ipa_rm_it_private *me = container_of(to_delayed_work(work), + struct ipa_rm_it_private, + work); + unsigned long flags; + + IPA_RM_DBG_LOW("%s: timer expired for resource %d!\n", __func__, + me->resource_name); + + spin_lock_irqsave( + &ipa_rm_it_handles[me->resource_name].lock, flags); + if (ipa_rm_it_handles[me->resource_name].reschedule_work) { + IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__); + ipa_rm_it_handles[me->resource_name].reschedule_work = false; + schedule_delayed_work( + &ipa_rm_it_handles[me->resource_name].work, + ipa_rm_it_handles[me->resource_name].jiffies); + } else if (ipa_rm_it_handles[me->resource_name].resource_requested) { + IPA_RM_DBG_LOW("%s: not calling release\n", __func__); + ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } else { + IPA_RM_DBG_LOW("%s: calling release_resource on resource %d!\n", + __func__, me->resource_name); + ipa_rm_release_resource(me->resource_name); + 
ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } + spin_unlock_irqrestore( + &ipa_rm_it_handles[me->resource_name].lock, flags); +} + +/** +* ipa_rm_inactivity_timer_init() - Init function for IPA RM +* inactivity timer. This function shall be called prior calling +* any other API of IPA RM inactivity timer. +* +* @resource_name: Resource name. @see ipa_rm.h +* @msecs: time in milliseconds, that IPA RM inactivity timer +* shall wait prior calling to ipa_rm_release_resource(). +* +* Return codes: +* 0: success +* -EINVAL: invalid parameters +*/ +int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("%s: Invalid parameter\n", __func__); + return -EINVAL; + } + + if (ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("%s: resource %d already inited\n", + __func__, resource_name); + return -EINVAL; + } + + spin_lock_init(&ipa_rm_it_handles[resource_name].lock); + ipa_rm_it_handles[resource_name].resource_name = resource_name; + ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs); + ipa_rm_it_handles[resource_name].resource_requested = false; + ipa_rm_it_handles[resource_name].reschedule_work = false; + ipa_rm_it_handles[resource_name].work_in_progress = false; + + INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work, + ipa_rm_inactivity_timer_func); + ipa_rm_it_handles[resource_name].initied = 1; + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_init); + +/** +* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA +* RM inactivity timer. +* +* @resource_name: Resource name. 
@see ipa_rm.h +* +* Return codes: +* 0: success +* -EINVAL: invalid parameters +*/ +int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) +{ + IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("%s: Invalid parameter\n", __func__); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("%s: resource %d already inited\n", + __func__, resource_name); + return -EINVAL; + } + + cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work); + + memset(&ipa_rm_it_handles[resource_name], 0, + sizeof(struct ipa_rm_it_private)); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy); + +/** +* ipa_rm_inactivity_timer_request_resource() - Same as +* ipa_rm_request_resource(), with a difference that calling to +* this function will also cancel the inactivity timer, if +* ipa_rm_inactivity_timer_release_resource() was called earlier. +* +* @resource_name: Resource name. 
@see ipa_rm.h +* +* Return codes: +* 0: success +* -EINVAL: invalid parameters +*/ +int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + int ret; + unsigned long flags; + + IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("%s: Invalid parameter\n", __func__); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("%s: Not initialized\n", __func__); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = true; + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + ret = ipa_rm_request_resource(resource_name); + IPA_RM_DBG_LOW("%s: resource %d: returning %d\n", __func__, + resource_name, ret); + + return ret; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource); + +/** +* ipa_rm_inactivity_timer_release_resource() - Sets the +* inactivity timer to the timeout set by +* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA +* RM inactivity timer will call to ipa_rm_release_resource(). +* If a call to ipa_rm_inactivity_timer_request_resource() was +* made BEFORE the timeout has expired, the timer will be +* cancelled. +* +* @resource_name: Resource name. 
@see ipa_rm.h +* +* Return codes: +* 0: success +* -EINVAL: invalid parameters +*/ +int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + + IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("%s: Invalid parameter\n", __func__); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("%s: Not initialized\n", __func__); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = false; + if (ipa_rm_it_handles[resource_name].work_in_progress) { + IPA_RM_DBG_LOW("%s: Timer already set, no sched again %d\n", + __func__, resource_name); + ipa_rm_it_handles[resource_name].reschedule_work = true; + spin_unlock_irqrestore( + &ipa_rm_it_handles[resource_name].lock, flags); + return 0; + } + ipa_rm_it_handles[resource_name].work_in_progress = true; + ipa_rm_it_handles[resource_name].reschedule_work = false; + IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__); + schedule_delayed_work(&ipa_rm_it_handles[resource_name].work, + ipa_rm_it_handles[resource_name].jiffies); + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource); + diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c new file mode 100644 index 000000000000..51ad9530f6ee --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c @@ -0,0 +1,246 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_rm_i.h" + +/** + * ipa_rm_peers_list_get_resource_index() - resource name to index + * of this resource in corresponding peers list + * @resource_name: [in] resource name + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained in enum + * ipa_rm_resource_name. + * + */ +static int ipa_rm_peers_list_get_resource_index( + enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { + resource_index = ipa_rm_cons_index(resource_name); + if (resource_index != IPA_RM_INDEX_INVALID) + resource_index = + resource_index - IPA_RM_RESOURCE_PROD_MAX; + } + + return resource_index; +} + +static bool ipa_rm_peers_list_check_index(int index, + struct ipa_rm_peers_list *peers_list) +{ + return !(index > peers_list->max_peers || index < 0); +} + +/** + * ipa_rm_peers_list_create() - creates the peers list + * + * @max_peers: maximum number of peers in new list + * @peers_list: [out] newly created peers list + * + * Returns: 0 in case of SUCCESS, negative otherwise + */ +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list) +{ + int result; + + *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC); + if (!*peers_list) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + (*peers_list)->max_peers = max_peers; + (*peers_list)->peers = kzalloc((*peers_list)->max_peers * + sizeof(struct ipa_rm_resource *), GFP_ATOMIC); + if (!((*peers_list)->peers)) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto list_alloc_fail; + } + 
+ return 0; + +list_alloc_fail: + kfree(*peers_list); +bail: + return result; +} + +/** + * ipa_rm_peers_list_delete() - deletes the peers list + * + * @peers_list: peers list + * + */ +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list) +{ + if (peers_list) { + kfree(peers_list->peers); + kfree(peers_list); + } +} + +/** + * ipa_rm_peers_list_remove_peer() - removes peer from the list + * + * @peers_list: peers list + * @resource_name: name of the resource to remove + * + */ +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name) +{ + if (!peers_list) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource_name)] = NULL; + peers_list->peers_count--; +} + +/** + * ipa_rm_peers_list_add_peer() - adds peer to the list + * + * @peers_list: peers list + * @resource: resource to add + * + */ +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource) +{ + if (!peers_list || !resource) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource->name)] = + resource; + peers_list->peers_count++; +} + +/** + * ipa_rm_peers_list_is_empty() - checks + * if resource peers list is empty + * + * @peers_list: peers list + * + * Returns: true if the list is empty, false otherwise + */ +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list) +{ + bool result = true; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count > 0) + result = false; +bail: + return result; +} + +/** + * ipa_rm_peers_list_has_last_peer() - checks + * if resource peers list has exactly one peer + * + * @peers_list: peers list + * + * Returns: true if the list has exactly one peer, false otherwise + */ +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list) +{ + bool result = false; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count == 1) + result = true; +bail: + return 
result; +} + +/** + * ipa_rm_peers_list_check_dependency() - check dependency + * between 2 peer lists + * @resource_peers: first peers list + * @resource_name: first peers list resource name + * @depends_on_peers: second peers list + * @depends_on_name: second peers list resource name + * + * Returns: true if there is dependency, false otherwise + * + */ +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name) +{ + bool result = false; + + if (!resource_peers || !depends_on_peers) + return result; + + if (resource_peers->peers[ipa_rm_peers_list_get_resource_index( + depends_on_name)] != NULL) + result = true; + + if (depends_on_peers->peers[ipa_rm_peers_list_get_resource_index( + resource_name)] != NULL) + result = true; + + return result; +} + +/** + * ipa_rm_peers_list_get_resource() - get resource by + * resource index + * @resource_index: resource index + * @resource_peers: peers list + * + * Returns: the resource if found, NULL otherwise + */ +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *resource_peers) +{ + struct ipa_rm_resource *result = NULL; + + if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) + goto bail; + + result = resource_peers->peers[resource_index]; +bail: + return result; +} + +/** + * ipa_rm_peers_list_get_size() - get peers list sise + * + * @peers_list: peers list + * + * Returns: the size of the peers list + */ +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list) +{ + return peers_list->max_peers; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h new file mode 100644 index 000000000000..b41de0aa3167 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_RM_PEERS_LIST_H_ +#define _IPA_RM_PEERS_LIST_H_ + +#include "ipa_rm_resource.h" + +/** + * struct ipa_rm_peers_list - IPA RM resource peers list + * @peers: the list of references to resources dependent on this resource + * in case of producer or list of dependencies in case of consumer + * @max_peers: maximum number of peers for this resource + * @peers_count: actual number of peers for this resource + */ +struct ipa_rm_peers_list { + struct ipa_rm_resource **peers; + int max_peers; + int peers_count; +}; + +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list); +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list); +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name); +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource); +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name); +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *peers_list); +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list); + + +#endif /* 
_IPA_RM_PEERS_LIST_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c new file mode 100644 index 000000000000..75424eb768f4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.c @@ -0,0 +1,1171 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_rm_resource.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" +/** + * ipa_rm_dep_prod_index() - producer name to producer index mapping + * @resource_name: [in] resource name (should be of producer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not of producers. 
+ * + */ +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_PROD: + case IPA_RM_RESOURCE_USB_PROD: + case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD: + case IPA_RM_RESOURCE_HSIC_PROD: + case IPA_RM_RESOURCE_STD_ECM_PROD: + case IPA_RM_RESOURCE_RNDIS_PROD: + case IPA_RM_RESOURCE_WWAN_0_PROD: + case IPA_RM_RESOURCE_WLAN_PROD: + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + case IPA_RM_RESOURCE_MHI_PROD: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +/** + * ipa_rm_cons_index() - consumer name to consumer index mapping + * @resource_name: [in] resource name (should be of consumer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not of consumers. + * + */ +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_CONS: + case IPA_RM_RESOURCE_USB_CONS: + case IPA_RM_RESOURCE_HSIC_CONS: + case IPA_RM_RESOURCE_WLAN_CONS: + case IPA_RM_RESOURCE_APPS_CONS: + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + case IPA_RM_RESOURCE_MHI_CONS: + case IPA_RM_RESOURCE_USB_DPL_CONS: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion) +{ + int driver_result; + + IPA_RM_DBG("calling driver CB\n"); + driver_result = consumer->release_resource(); + IPA_RM_DBG("driver CB returned with %d\n", driver_result); + /* + * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED + * for CONS which remains in RELEASE_IN_PROGRESS. 
+ */ + if (driver_result == -EINPROGRESS) + driver_result = 0; + if (driver_result != 0 && driver_result != -EINPROGRESS) { + IPA_RM_ERR("driver CB returned error %d\n", driver_result); + consumer->resource.state = prev_state; + goto bail; + } + if (driver_result == 0) { + if (notify_completion) + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_RELEASED); + else + consumer->resource.state = IPA_RM_RELEASED; + } + complete_all(&consumer->request_consumer_in_progress); + + ipa_rm_perf_profile_change(consumer->resource.name); +bail: + return driver_result; +} + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 prod_needed_bw, + bool notify_completion) +{ + int driver_result; + + IPA_RM_DBG("calling driver CB\n"); + driver_result = consumer->request_resource(); + IPA_RM_DBG("driver CB returned with %d\n", driver_result); + if (driver_result == 0) { + if (notify_completion) { + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_GRANTED); + } else { + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + } + } else if (driver_result != -EINPROGRESS) { + consumer->resource.state = prev_state; + consumer->resource.needed_bw -= prod_needed_bw; + consumer->usage_count--; + } + + return driver_result; +} + +int ipa_rm_resource_consumer_request( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool inc_usage_count, + bool wake_client) +{ + int result = 0; + enum ipa_rm_resource_state prev_state; + struct ipa_active_client_logging_info log_info; + + IPA_RM_DBG("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + + prev_state = consumer->resource.state; + consumer->resource.needed_bw += prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + 
reinit_completion(&consumer->request_consumer_in_progress); + consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(consumer->resource.name)); + if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || + ipa_inc_client_enable_clks_no_block(&log_info) != 0) { + IPA_RM_DBG("async resume work for %s\n", + ipa_rm_resource_str(consumer->resource.name)); + ipa_rm_wq_send_resume_cmd(consumer->resource.name, + prev_state, + prod_needed_bw); + result = -EINPROGRESS; + break; + } + result = ipa_rm_resource_consumer_request_work(consumer, + prev_state, + prod_needed_bw, + false); + break; + case IPA_RM_GRANTED: + if (wake_client) { + result = ipa_rm_resource_consumer_request_work( + consumer, prev_state, prod_needed_bw, false); + break; + } + ipa_rm_perf_profile_change(consumer->resource.name); + break; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + break; + default: + consumer->resource.needed_bw -= prod_needed_bw; + result = -EPERM; + goto bail; + } + if (inc_usage_count) + consumer->usage_count++; +bail: + IPA_RM_DBG("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +int ipa_rm_resource_consumer_release( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool dec_usage_count) +{ + int result = 0; + enum ipa_rm_resource_state save_state; + + IPA_RM_DBG("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + save_state = consumer->resource.state; + consumer->resource.needed_bw -= prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + if (consumer->usage_count == 0) { + consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + if (save_state == IPA_RM_REQUEST_IN_PROGRESS 
|| + ipa_suspend_resource_no_block( + consumer->resource.name) != 0) { + ipa_rm_wq_send_suspend_cmd( + consumer->resource.name, + save_state, + prod_needed_bw); + result = -EINPROGRESS; + goto bail; + } + result = ipa_rm_resource_consumer_release_work(consumer, + save_state, false); + goto bail; + } else if (consumer->resource.state == IPA_RM_GRANTED) { + ipa_rm_perf_profile_change(consumer->resource.name); + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + result = -EINPROGRESS; + break; + default: + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_notify_clients() - notify + * all registered clients of given producer + * @producer: producer + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only) +{ + struct ipa_rm_notification_info *reg_info; + + IPA_RM_DBG("%s event: %d notify_registered_only: %d\n", + ipa_rm_resource_str(producer->resource.name), + event, + notify_registered_only); + + list_for_each_entry(reg_info, &(producer->event_listeners), link) { + if (notify_registered_only && !reg_info->explicit) + continue; + + IPA_RM_DBG("Notifying %s event: %d\n", + ipa_rm_resource_str(producer->resource.name), event); + reg_info->reg_params.notify_cb(reg_info->reg_params.user_data, + event, + 0); + IPA_RM_DBG("back from client CB\n"); + } +} + +static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_prod **producer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *producer = 
kzalloc(sizeof(**producer), GFP_ATOMIC); + if (*producer == NULL) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + INIT_LIST_HEAD(&((*producer)->event_listeners)); + result = ipa_rm_resource_producer_register(*producer, + &(create_params->reg_params), + false); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n"); + goto register_fail; + } + + (*resource) = (struct ipa_rm_resource *) (*producer); + (*resource)->type = IPA_RM_PRODUCER; + *max_peers = IPA_RM_RESOURCE_CONS_MAX; + goto bail; +register_fail: + kfree(*producer); +bail: + return result; +} + +static void ipa_rm_resource_producer_delete( + struct ipa_rm_resource_prod *producer) +{ + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + ipa_rm_resource_producer_release(producer); + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + list_del(pos); + kfree(reg_info); + } +} + +static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_cons **consumer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC); + if (*consumer == NULL) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + (*consumer)->request_resource = create_params->request_resource; + (*consumer)->release_resource = create_params->release_resource; + (*resource) = (struct ipa_rm_resource *) (*consumer); + (*resource)->type = IPA_RM_CONSUMER; + init_completion(&((*consumer)->request_consumer_in_progress)); + *max_peers = IPA_RM_RESOURCE_PROD_MAX; +bail: + return result; +} + +/** + * ipa_rm_resource_create() - creates resource + * @create_params: [in] parameters needed + * for resource initialization with IPA RM + * @resource: [out] created resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_create( + struct 
ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource) +{ + struct ipa_rm_resource_cons *consumer; + struct ipa_rm_resource_prod *producer; + int max_peers; + int result = 0; + + if (!create_params) { + result = -EINVAL; + goto bail; + } + + if (IPA_RM_RESORCE_IS_PROD(create_params->name)) { + result = ipa_rm_resource_producer_create(resource, + &producer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); + goto bail; + } + } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) { + result = ipa_rm_resource_consumer_create(resource, + &consumer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); + goto bail; + } + } else { + IPA_RM_ERR("invalied resource\n"); + result = -EPERM; + goto bail; + } + + result = ipa_rm_peers_list_create(max_peers, + &((*resource)->peers_list)); + if (result) { + IPA_RM_ERR("ipa_rm_peers_list_create failed\n"); + goto peers_alloc_fail; + } + (*resource)->name = create_params->name; + (*resource)->floor_voltage = create_params->floor_voltage; + (*resource)->state = IPA_RM_RELEASED; + goto bail; + +peers_alloc_fail: + ipa_rm_resource_delete(*resource); +bail: + return result; +} + +/** + * ipa_rm_resource_delete() - deletes resource + * @resource: [in] resource + * for resource initialization with IPA RM + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_delete(struct ipa_rm_resource *resource) +{ + struct ipa_rm_resource *consumer; + struct ipa_rm_resource *producer; + int peers_index; + int result = 0; + int list_size; + + if (!resource) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + IPA_RM_DBG("ipa_rm_resource_delete ENTER with resource %d\n", + resource->name); + if (resource->type == IPA_RM_PRODUCER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++) { 
+ consumer = ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (consumer) + ipa_rm_resource_delete_dependency( + resource, + consumer); + } + } + + ipa_rm_resource_producer_delete( + (struct ipa_rm_resource_prod *) resource); + } else if (resource->type == IPA_RM_CONSUMER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++){ + producer = ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (producer) + ipa_rm_resource_delete_dependency( + producer, + resource); + } + } + } + ipa_rm_peers_list_delete(resource->peers_list); + kfree(resource); + return result; +} + +/** + * ipa_rm_resource_register() - register resource + * @resource: [in] resource + * @reg_params: [in] registration parameters + * @explicit: [in] registered explicitly by ipa_rm_register() + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. 
+ * + */ +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit) +{ + int result = 0; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + result = -EPERM; + goto bail; + } + + list_for_each(pos, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + IPA_RM_ERR("already registered\n"); + result = -EPERM; + goto bail; + } + + } + + reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); + if (reg_info == NULL) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + reg_info->reg_params.user_data = reg_params->user_data; + reg_info->reg_params.notify_cb = reg_params->notify_cb; + reg_info->explicit = explicit; + INIT_LIST_HEAD(®_info->link); + list_add(®_info->link, &producer->event_listeners); +bail: + return result; +} + +/** + * ipa_rm_resource_deregister() - register resource + * @resource: [in] resource + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. + * This function deleted only single instance of + * registration info. 
+ * + */ +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params) +{ + int result = -EINVAL; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + list_del(pos); + kfree(reg_info); + result = 0; + goto bail; + } + } +bail: + return result; +} + +/** + * ipa_rm_resource_add_dependency() - add dependency between two + * given resources + * @resource: [in] resource resource + * @depends_on: [in] depends_on resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on) +{ + int result = 0; + int consumer_result; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name)) { + IPA_RM_ERR("dependency already exists\n"); + return -EEXIST; + } + + ipa_rm_peers_list_add_peer(resource->peers_list, depends_on); + ipa_rm_peers_list_add_peer(depends_on->peers_list, resource); + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw += depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + { + enum ipa_rm_resource_state prev_state = resource->state; + + resource->state = IPA_RM_REQUEST_IN_PROGRESS; + ((struct ipa_rm_resource_prod *) + resource)->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)depends_on, 
+ resource->max_bw, + true, false); + if (consumer_result != -EINPROGRESS) { + resource->state = prev_state; + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + ipa_rm_perf_profile_change(resource->name); + } + result = consumer_result; + break; + } + default: + IPA_RM_ERR("invalid state\n"); + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_delete_dependency() - add dependency between two + * given resources + * @resource: [in] resource resource + * @depends_on: [in] depends_on resource + * + * Returns: 0 on success, negative on failure + * In case the resource state was changed, a notification + * will be sent to the RM client + */ +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on) +{ + int result = 0; + bool state_changed = false; + bool release_consumer = false; + enum ipa_rm_event evt; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (!ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name)) { + IPA_RM_ERR("dependency does not exist\n"); + return -EINVAL; + } + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw -= depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + ipa_rm_perf_profile_change(resource->name); + release_consumer = true; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (((struct ipa_rm_resource_prod *) + resource)->pending_release > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_release--; + if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_release == 0) { + resource->state = IPA_RM_RELEASED; + 
state_changed = true; + evt = IPA_RM_RESOURCE_RELEASED; + ipa_rm_perf_profile_change(resource->name); + } + break; + case IPA_RM_REQUEST_IN_PROGRESS: + release_consumer = true; + if (((struct ipa_rm_resource_prod *) + resource)->pending_request > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_request == 0) { + resource->state = IPA_RM_GRANTED; + state_changed = true; + evt = IPA_RM_RESOURCE_GRANTED; + ipa_rm_perf_profile_change(resource->name); + } + break; + default: + result = -EINVAL; + goto bail; + } + if (state_changed) { + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + resource->name, + evt, + false); + } + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + ipa_rm_peers_list_remove_peer(resource->peers_list, + depends_on->name); + ipa_rm_peers_list_remove_peer(depends_on->peers_list, + resource->name); + if (release_consumer) + (void) ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)depends_on, + resource->max_bw, + true); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_request() - producer resource request + * @producer: [in] producer + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + break; + case IPA_RM_GRANTED: + goto unlock_and_bail; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + goto unlock_and_bail; + default: + result = -EINVAL; + goto unlock_and_bail; + } + + 
producer->pending_request = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true, false); + if (consumer_result == -EINPROGRESS) { + result = -EINPROGRESS; + } else { + producer->pending_request--; + if (consumer_result != 0) { + result = consumer_result; + goto bail; + } + } + } + } + + if (producer->pending_request == 0) { + producer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_GRANTED, + true); + result = 0; + } +unlock_and_bail: + if (state != producer->resource.state) + IPA_RM_DBG("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); +bail: + return result; +} + +/** + * ipa_rm_resource_producer_release() - producer resource release + * producer: [in] producer resource + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + goto bail; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + result = -EINPROGRESS; + goto bail; + default: + result = -EPERM; + goto bail; + } + + producer->pending_release = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + 
peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_release++; + consumer_result = ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true); + producer->pending_release--; + } + } + + if (producer->pending_release == 0) { + producer->resource.state = IPA_RM_RELEASED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_RELEASED, + true); + } +bail: + if (state != producer->resource.state) + IPA_RM_DBG("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); + + return result; +} + +static void ipa_rm_resource_producer_handle_cb( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event) +{ + IPA_RM_DBG("%s state: %d event: %d pending_request: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state, + event, + producer->pending_request); + + switch (producer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_GRANTED) + goto unlock_and_bail; + if (producer->pending_request > 0) { + producer->pending_request--; + if (producer->pending_request == 0) { + producer->resource.state = + IPA_RM_GRANTED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_GRANTED, + false); + goto bail; + } + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_RELEASED) + goto unlock_and_bail; + if (producer->pending_release > 0) { + producer->pending_release--; + if (producer->pending_release == 0) { + producer->resource.state = + IPA_RM_RELEASED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_RELEASED, + false); + goto bail; + } + } + 
break; + case IPA_RM_GRANTED: + case IPA_RM_RELEASED: + default: + goto unlock_and_bail; + } +unlock_and_bail: + IPA_RM_DBG("%s new state: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state); +bail: + return; +} + +/** + * ipa_rm_resource_consumer_handle_cb() - propagates resource + * notification to all dependent producers + * @consumer: [in] notifying resource + * + */ +void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event) +{ + int peers_index; + struct ipa_rm_resource *producer; + + if (!consumer) { + IPA_RM_ERR("invalid params\n"); + return; + } + IPA_RM_DBG("%s state: %d event: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state, + event); + + switch (consumer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_RELEASED) + goto bail; + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + complete_all(&consumer->request_consumer_in_progress); + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_GRANTED) + goto bail; + consumer->resource.state = IPA_RM_RELEASED; + break; + case IPA_RM_GRANTED: + case IPA_RM_RELEASED: + default: + goto bail; + } + + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + consumer->resource.peers_list); + peers_index++) { + producer = ipa_rm_peers_list_get_resource(peers_index, + consumer->resource.peers_list); + if (producer) + ipa_rm_resource_producer_handle_cb( + (struct ipa_rm_resource_prod *) + producer, + event); + } + + return; +bail: + IPA_RM_DBG("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); +} + +/* + * ipa_rm_resource_set_perf_profile() - sets the performance profile to + * resource. 
+ * + * @resource: [in] resource + * @profile: [in] profile to be set + * + * sets the profile to the given resource, In case the resource is + * granted, update bandwidth vote of the resource + */ +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile) +{ + int peers_index; + struct ipa_rm_resource *peer; + + if (!resource || !profile) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (profile->max_supported_bandwidth_mbps == resource->max_bw) { + IPA_RM_DBG("same profile\n"); + return 0; + } + + if ((resource->type == IPA_RM_PRODUCER && + (resource->state == IPA_RM_GRANTED || + resource->state == IPA_RM_REQUEST_IN_PROGRESS)) || + resource->type == IPA_RM_CONSUMER) { + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + resource->peers_list); + peers_index++) { + peer = ipa_rm_peers_list_get_resource(peers_index, + resource->peers_list); + if (!peer) + continue; + peer->needed_bw -= resource->max_bw; + peer->needed_bw += + profile->max_supported_bandwidth_mbps; + if (peer->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(peer->name); + } + } + + resource->max_bw = profile->max_supported_bandwidth_mbps; + if (resource->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(resource->name); + + return 0; +} + + +/* + * ipa_rm_resource_producer_print_stat() - print the + * resource status and all his dependencies + * + * @resource: [in] Resource resource + * @buff: [in] The buf used to print + * @size: [in] Buf size + * + * Returns: number of bytes used on success, negative on failure + */ +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size){ + + int i; + int nbytes; + int cnt = 0; + struct ipa_rm_resource *consumer; + + if (!buf || size < 0) + return -EINVAL; + + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(resource->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "["); + cnt += nbytes; + 
+ switch (resource->state) { + case IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released] -> "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress] -> "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted] -> "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress] -> "); + cnt += nbytes; + break; + default: + return -EPERM; + } + + for (i = 0; i < resource->peers_list->max_peers; ++i) { + consumer = + ipa_rm_peers_list_get_resource( + i, + resource->peers_list); + if (consumer) { + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(consumer->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "["); + cnt += nbytes; + + switch (consumer->state) { + case IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released], "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress], "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted], "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress], "); + cnt += nbytes; + break; + default: + return -EPERM; + } + } + } + nbytes = scnprintf(buf + cnt, size - cnt, "\n"); + cnt += nbytes; + + return cnt; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h new file mode 100644 index 000000000000..26573e243b4e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.h @@ -0,0 +1,163 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_RM_RESOURCE_H_ +#define _IPA_RM_RESOURCE_H_ + +#include +#include +#include "ipa_rm_peers_list.h" + +/** + * enum ipa_rm_resource_state - resource state + */ +enum ipa_rm_resource_state { + IPA_RM_RELEASED, + IPA_RM_REQUEST_IN_PROGRESS, + IPA_RM_GRANTED, + IPA_RM_RELEASE_IN_PROGRESS +}; + +/** + * enum ipa_rm_resource_type - IPA resource manager resource type + */ +enum ipa_rm_resource_type { + IPA_RM_PRODUCER, + IPA_RM_CONSUMER +}; + +/** + * struct ipa_rm_notification_info - notification information + * of IPA RM client + * @reg_params: registration parameters + * @explicit: registered explicitly by ipa_rm_register() + * @link: link to the list of all registered clients information + */ +struct ipa_rm_notification_info { + struct ipa_rm_register_params reg_params; + bool explicit; + struct list_head link; +}; + +/** + * struct ipa_rm_resource - IPA RM resource + * @name: name identifying resource + * @type: type of resource (PRODUCER or CONSUMER) + * @floor_voltage: minimum voltage level for operation + * @max_bw: maximum bandwidth required for resource in Mbps + * @state: state of the resource + * @peers_list: list of the peers of the resource + */ +struct ipa_rm_resource { + enum ipa_rm_resource_name name; + enum ipa_rm_resource_type type; + enum ipa_voltage_level floor_voltage; + u32 max_bw; + u32 needed_bw; + enum ipa_rm_resource_state state; + struct ipa_rm_peers_list *peers_list; +}; + +/** + * struct ipa_rm_resource_cons - IPA RM consumer + * @resource: resource + * 
@usage_count: number of producers in GRANTED / REQUESTED state + * using this consumer + * @request_consumer_in_progress: when set, the consumer is during its request + * phase + * @request_resource: function which should be called to request resource + * from resource manager + * @release_resource: function which should be called to release resource + * from resource manager + * Add new fields after @resource only. + */ +struct ipa_rm_resource_cons { + struct ipa_rm_resource resource; + int usage_count; + struct completion request_consumer_in_progress; + int (*request_resource)(void); + int (*release_resource)(void); +}; + +/** + * struct ipa_rm_resource_prod - IPA RM producer + * @resource: resource + * @event_listeners: clients registered with this producer + * for notifications in resource state + * list Add new fields after @resource only. + */ +struct ipa_rm_resource_prod { + struct ipa_rm_resource resource; + struct list_head event_listeners; + int pending_request; + int pending_release; +}; + +int ipa_rm_resource_create( + struct ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource); + +int ipa_rm_resource_delete(struct ipa_rm_resource *resource); + +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit); + +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on); + +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on); + +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool inc_usage_count, + bool wake_client); + +int 
ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool dec_usage_count); + +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile); + +void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event); + +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size); + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool notify_completion); + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion); + +#endif /* _IPA_RM_RESOURCE_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile index 1bb9c91d3bd4..435acbf1cab8 100644 --- a/drivers/platform/msm/ipa/ipa_v2/Makefile +++ b/drivers/platform/msm/ipa/ipa_v2/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_IPA) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ - ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o ipa_rm.o \ - ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ + ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 2e82d04f56ec..ac6d729db595 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -35,7 +35,7 @@ #include #include #include "ipa_i.h" -#include 
"ipa_rm_i.h" +#include "../ipa_rm_i.h" #define CREATE_TRACE_POINTS #include "ipa_trace.h" @@ -1091,7 +1091,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - retval = ipa2_rm_add_dependency(rm_depend.resource_name, + retval = ipa_rm_add_dependency(rm_depend.resource_name, rm_depend.depends_on_name); break; case IPA_IOC_RM_DEL_DEPENDENCY: @@ -1100,7 +1100,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - retval = ipa2_rm_delete_dependency(rm_depend.resource_name, + retval = ipa_rm_delete_dependency(rm_depend.resource_name, rm_depend.depends_on_name); break; case IPA_IOC_GENERATE_FLT_EQ: @@ -3496,14 +3496,14 @@ int ipa_create_apps_resource(void) apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS; apps_cons_create_params.request_resource = apps_cons_request_resource; apps_cons_create_params.release_resource = apps_cons_release_resource; - result = ipa2_rm_create_resource(&apps_cons_create_params); + result = ipa_rm_create_resource(&apps_cons_create_params); if (result) { - IPAERR("ipa2_rm_create_resource failed\n"); + IPAERR("ipa_rm_create_resource failed\n"); return result; } profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; - ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); return result; } @@ -4011,7 +4011,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, fail_add_interrupt_handler: free_irq(resource_p->ipa_irq, master_dev); fail_ipa_interrupts_init: - ipa2_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); fail_create_apps_resource: ipa_rm_exit(); fail_ipa_rm_init: diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index 756bd7be9bb9..566cb4d03c51 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ 
b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -16,7 +16,7 @@ #include #include #include "ipa_i.h" -#include "ipa_rm_i.h" +#include "../ipa_rm_i.h" #define IPA_MAX_MSG_LEN 4096 #define IPA_DBG_CNTR_ON 127265 diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 8d7b300d0aef..c243eaef37cc 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -1679,46 +1679,6 @@ int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); */ int ipa2_uc_dereg_rdyCB(void); -/* - * Resource manager - */ -int ipa2_rm_create_resource(struct ipa_rm_create_params *create_params); - -int ipa2_rm_delete_resource(enum ipa_rm_resource_name resource_name); - -int ipa2_rm_register(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - -int ipa2_rm_deregister(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - -int ipa2_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile); - -int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa2_rm_request_resource(enum ipa_rm_resource_name resource_name); - -int ipa2_rm_release_resource(enum ipa_rm_resource_name resource_name); - -int ipa2_rm_notify_completion(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name); - -int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, - unsigned long msecs); - -int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name); - -int ipa2_rm_inactivity_timer_request_resource( - enum ipa_rm_resource_name resource_name); - -int ipa2_rm_inactivity_timer_release_resource( - enum ipa_rm_resource_name resource_name); - /* * Tethering bridge (Rmnet / MBIM) */ @@ -2023,8 +1983,6 @@ 
struct iommu_domain *ipa_get_uc_smmu_domain(void); int ipa2_ap_suspend(struct device *dev); int ipa2_ap_resume(struct device *dev); struct iommu_domain *ipa2_get_smmu_domain(void); -int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); struct device *ipa2_get_dma_dev(void); int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); @@ -2036,5 +1994,4 @@ int ipa2_restore_suspend_handler(void); void ipa_sps_irq_control_all(bool enable); void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client); void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client); -const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name); #endif /* _IPA_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c index e7032f339405..ab86bac63136 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c @@ -409,7 +409,7 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state) ipa_mhi_ctx->wakeup_notified = false; if (ipa_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) { - ipa2_rm_notify_completion( + ipa_rm_notify_completion( IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa_mhi_ctx->rm_cons_state = @@ -435,7 +435,7 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state) ipa_mhi_ctx->wakeup_notified = false; if (ipa_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) { - ipa2_rm_notify_completion( + ipa_rm_notify_completion( IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa_mhi_ctx->rm_cons_state = @@ -592,7 +592,7 @@ static int ipa_mhi_request_prod(void) reinit_completion(&ipa_mhi_ctx->rm_prod_granted_comp); IPA_MHI_DBG("requesting mhi prod\n"); - res = ipa2_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); + res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); if (res) 
{ if (res != -EINPROGRESS) { IPA_MHI_ERR("failed to request mhi prod %d\n", res); @@ -619,7 +619,7 @@ static int ipa_mhi_release_prod(void) IPA_MHI_FUNC_ENTRY(); - res = ipa2_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); + res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); IPA_MHI_FUNC_EXIT(); return res; @@ -1037,7 +1037,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params) mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD; mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS; mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify; - res = ipa2_rm_create_resource(&mhi_prod_params); + res = ipa_rm_create_resource(&mhi_prod_params); if (res) { IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n"); goto fail_create_rm_prod; @@ -1049,7 +1049,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params) mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS; mhi_cons_params.request_resource = ipa_mhi_rm_cons_request; mhi_cons_params.release_resource = ipa_mhi_rm_cons_release; - res = ipa2_rm_create_resource(&mhi_cons_params); + res = ipa_rm_create_resource(&mhi_cons_params); if (res) { IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n"); goto fail_create_rm_cons; @@ -1065,7 +1065,7 @@ int ipa2_mhi_init(struct ipa_mhi_init_params *params) return 0; fail_create_rm_cons: - ipa2_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); + ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); fail_create_rm_prod: destroy_workqueue(ipa_mhi_ctx->wq); fail_create_wq: @@ -1122,14 +1122,14 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params) ipa_mhi_ctx->host_data_addr = params->host_data_addr; /* Add MHI <-> Q6 dependencies to IPA RM */ - res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res && res != -EINPROGRESS) { IPA_MHI_ERR("failed to add dependency %d\n", res); goto fail_add_mhi_q6_dep; } - res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = 
ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS); if (res && res != -EINPROGRESS) { IPA_MHI_ERR("failed to add dependency %d\n", res); @@ -1164,10 +1164,10 @@ int ipa2_mhi_start(struct ipa_mhi_start_params *params) fail_init_engine: ipa_mhi_release_prod(); fail_request_prod: - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS); fail_add_q6_mhi_dep: - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS); fail_add_mhi_q6_dep: ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED); @@ -1762,7 +1762,7 @@ int ipa2_mhi_resume(void) } dl_channel_resumed = true; - ipa2_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c index 788d7f6c0f9d..70e0db98e948 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -1064,7 +1064,7 @@ int vote_for_bus_bw(uint32_t *bw_mbps) memset(&profile, 0, sizeof(profile)); profile.max_supported_bandwidth_mbps = *bw_mbps; - ret = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, &profile); if (ret) IPAWANERR("Failed to set perf profile to BW %u\n", diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c deleted file mode 100644 index 3e47d1d5e150..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include "ipa_i.h" -#include "ipa_rm_dependency_graph.h" -#include "ipa_rm_i.h" - -static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = { - __stringify(IPA_RM_RESOURCE_Q6_PROD), - __stringify(IPA_RM_RESOURCE_USB_PROD), - __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD), - __stringify(IPA_RM_RESOURCE_HSIC_PROD), - __stringify(IPA_RM_RESOURCE_STD_ECM_PROD), - __stringify(IPA_RM_RESOURCE_RNDIS_PROD), - __stringify(IPA_RM_RESOURCE_WWAN_0_PROD), - __stringify(IPA_RM_RESOURCE_WLAN_PROD), - __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD), - __stringify(IPA_RM_RESOURCE_MHI_PROD), - __stringify(IPA_RM_RESOURCE_Q6_CONS), - __stringify(IPA_RM_RESOURCE_USB_CONS), - __stringify(IPA_RM_RESOURCE_USB_DPL_CONS), - __stringify(IPA_RM_RESOURCE_HSIC_CONS), - __stringify(IPA_RM_RESOURCE_WLAN_CONS), - __stringify(IPA_RM_RESOURCE_APPS_CONS), - __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS), - __stringify(IPA_RM_RESOURCE_MHI_CONS), -}; - -struct ipa_rm_profile_vote_type { - enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX]; - enum ipa_voltage_level curr_volt; - u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX]; - u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX]; - u32 curr_bw; -}; - -struct ipa_rm_context_type { - struct ipa_rm_dep_graph *dep_graph; - struct workqueue_struct *ipa_rm_wq; - spinlock_t ipa_rm_lock; - struct ipa_rm_profile_vote_type prof_vote; -}; -static struct ipa_rm_context_type *ipa_rm_ctx; - -struct ipa_rm_notify_ipa_work_type { - struct work_struct work; - enum ipa_voltage_level volt; - u32 
bandwidth_mbps; -}; - -/** - * ipa2_rm_create_resource() - create resource - * @create_params: [in] parameters needed - * for resource initialization - * - * Returns: 0 on success, negative on failure - * - * This function is called by IPA RM client to initialize client's resources. - * This API should be called before any other IPA RM API on a given resource - * name. - * - */ -int ipa2_rm_create_resource(struct ipa_rm_create_params *create_params) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - if (!create_params) { - IPA_RM_ERR("invalid args\n"); - return -EINVAL; - } - IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name)); - - if (create_params->floor_voltage < 0 || - create_params->floor_voltage >= IPA_VOLTAGE_MAX) { - IPA_RM_ERR("invalid voltage %d\n", - create_params->floor_voltage); - return -EINVAL; - } - - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - create_params->name, - &resource) == 0) { - IPA_RM_ERR("resource already exists\n"); - result = -EEXIST; - goto bail; - } - result = ipa_rm_resource_create(create_params, - &resource); - if (result) { - IPA_RM_ERR("ipa_rm_resource_create() failed\n"); - goto bail; - } - result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource); - if (result) { - IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n"); - ipa_rm_resource_delete(resource); - goto bail; - } -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_delete_resource() - delete resource - * @resource_name: name of resource to be deleted - * - * Returns: 0 on success, negative on failure - * - * This function is called by IPA RM client to delete client's resources. 
- * - */ -int ipa2_rm_delete_resource(enum ipa_rm_resource_name resource_name) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exist\n"); - result = -EINVAL; - goto bail; - } - result = ipa_rm_resource_delete(resource); - if (result) { - IPA_RM_ERR("ipa_rm_resource_delete() failed\n"); - goto bail; - } - result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph, - resource_name); - if (result) { - IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n"); - goto bail; - } -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_add_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case client registered with IPA RM - */ -int ipa2_rm_add_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - unsigned long flags; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), - ipa_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - result = ipa_rm_dep_graph_add_dependency( - ipa_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_add_dependency_sync() - Create a 
dependency between 2 resources - * in a synchronized fashion. In case a producer resource is in GRANTED state - * and the newly added consumer resource is in RELEASED state, the consumer - * entity will be requested and the function will block until the consumer - * is granted. - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: May block. See documentation above. - */ -int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - int result; - struct ipa_rm_resource *consumer; - unsigned long time; - unsigned long flags; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), - ipa_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - result = ipa_rm_dep_graph_add_dependency( - ipa_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - if (result == -EINPROGRESS) { - ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - depends_on_name, - &consumer); - IPA_RM_DBG("%s waits for GRANT of %s.\n", - ipa_rm_resource_str(resource_name), - ipa_rm_resource_str(depends_on_name)); - time = wait_for_completion_timeout( - &((struct ipa_rm_resource_cons *)consumer)-> - request_consumer_in_progress, - HZ); - result = 0; - if (!time) { - IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.", - ipa_rm_resource_str(depends_on_name)); - result = -ETIME; - } - IPA_RM_DBG("%s waited for %s GRANT %lu time.\n", - ipa_rm_resource_str(resource_name), - ipa_rm_resource_str(depends_on_name), - time); - } - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_delete_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: 
name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case client registered with IPA RM - */ -int ipa2_rm_delete_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - unsigned long flags; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), - ipa_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - result = ipa_rm_dep_graph_delete_dependency( - ipa_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_request_resource() - request resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED - * on successful completion of this operation. 
- */ -int ipa2_rm_request_resource(enum ipa_rm_resource_name resource_name) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_producer_request( - (struct ipa_rm_resource_prod *)resource); - -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -void delayed_release_work_func(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa_rm_delayed_release_work_type *rwork = container_of( - to_delayed_work(work), - struct ipa_rm_delayed_release_work_type, - work); - - if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - kfree(rwork); - return; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - rwork->resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - goto bail; - } - - ipa_rm_resource_consumer_release( - (struct ipa_rm_resource_cons *)resource, rwork->needed_bw, - rwork->dec_usage_count); - -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - kfree(rwork); - -} - -/** - * ipa_rm_request_resource_with_timer() - requests the specified consumer - * resource and releases it after 1 second - * @resource_name: name of the requested resource - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct 
ipa_rm_delayed_release_work_type *release_work; - int result; - - if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - return -EINVAL; - } - - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_consumer_request( - (struct ipa_rm_resource_cons *)resource, 0, false); - if (result != 0 && result != -EINPROGRESS) { - IPA_RM_ERR("consumer request returned error %d\n", result); - result = -EPERM; - goto bail; - } - - release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC); - if (!release_work) { - result = -ENOMEM; - goto bail; - } - release_work->resource_name = resource->name; - release_work->needed_bw = 0; - release_work->dec_usage_count = false; - INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func); - schedule_delayed_work(&release_work->work, - msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC)); - result = 0; -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - - return result; -} -/** - * ipa2_rm_release_resource() - release resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED - * on successful completion of this operation. 
- */ -int ipa2_rm_release_resource(enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_producer_release( - (struct ipa_rm_resource_prod *)resource); - -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -/** - * ipa2_rm_register() - register for event - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided later in ipa2_rm_deregister() call. 
- */ -int ipa2_rm_register(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_producer_register( - (struct ipa_rm_resource_prod *)resource, - reg_params, - true); -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_deregister() - cancel the registration - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided in ipa2_rm_register() call. 
- */ -int ipa2_rm_deregister(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_producer_deregister( - (struct ipa_rm_resource_prod *)resource, - reg_params); -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_set_perf_profile() - set performance profile - * @resource_name: resource name - * @profile: [in] profile information. - * - * Returns: 0 on success, negative on failure - * - * Set resource performance profile. - * Updates IPA driver if performance level changed. 
- */ -int ipa2_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); - - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa_rm_resource_set_perf_profile(resource, profile); - if (result) { - IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n", - result); - goto bail; - } - - result = 0; -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa2_rm_notify_completion() - - * consumer driver notification for - * request_resource / release_resource operations - * completion - * @event: notified event - * @resource_name: resource name - * - * Returns: 0 on success, negative on failure - */ -int ipa2_rm_notify_completion(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name) -{ - int result; - - if (unlikely(!ipa_rm_ctx)) { - IPA_RM_ERR("IPA RM was not initialized\n"); - return -EINVAL; - } - - IPA_RM_DBG("event %d on %s\n", event, - ipa_rm_resource_str(resource_name)); - if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - result = -EINVAL; - goto bail; - } - ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB, - resource_name, - event, - false); - result = 0; -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -static void ipa_rm_wq_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa_rm_wq_work_type *ipa_rm_work = - container_of(work, - struct ipa_rm_wq_work_type, - work); - IPA_RM_DBG("%s cmd=%d 
event=%d notify_registered_only=%d\n", - ipa_rm_resource_str(ipa_rm_work->resource_name), - ipa_rm_work->wq_cmd, - ipa_rm_work->event, - ipa_rm_work->notify_registered_only); - switch (ipa_rm_work->wq_cmd) { - case IPA_RM_WQ_NOTIFY_PROD: - if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not PROD\n"); - goto free_work; - } - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - goto free_work; - } - ipa_rm_resource_producer_notify_clients( - (struct ipa_rm_resource_prod *)resource, - ipa_rm_work->event, - ipa_rm_work->notify_registered_only); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - break; - case IPA_RM_WQ_NOTIFY_CONS: - break; - case IPA_RM_WQ_RESOURCE_CB: - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - goto free_work; - } - ipa_rm_resource_consumer_handle_cb( - (struct ipa_rm_resource_cons *)resource, - ipa_rm_work->event); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - break; - default: - break; - } - -free_work: - kfree((void *) work); -} - -static void ipa_rm_wq_resume_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = - container_of(work, - struct ipa_rm_wq_suspend_resume_work_type, - work); - IPA_RM_DBG("resume work handler: %s", - ipa_rm_resource_str(ipa_rm_work->resource_name)); - - if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not CONS\n"); - return; - } - IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( - 
ipa_rm_work->resource_name)); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( - ipa_rm_work->resource_name)); - goto bail; - } - ipa_rm_resource_consumer_request_work( - (struct ipa_rm_resource_cons *)resource, - ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); -bail: - kfree(ipa_rm_work); -} - - -static void ipa_rm_wq_suspend_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = - container_of(work, - struct ipa_rm_wq_suspend_resume_work_type, - work); - IPA_RM_DBG("suspend work handler: %s", - ipa_rm_resource_str(ipa_rm_work->resource_name)); - - if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not CONS\n"); - return; - } - ipa_suspend_resource_sync(ipa_rm_work->resource_name); - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - return; - } - ipa_rm_resource_consumer_release_work( - (struct ipa_rm_resource_cons *)resource, - ipa_rm_work->prev_state, - true); - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - - kfree(ipa_rm_work); -} - -/** - * ipa_rm_wq_send_cmd() - send a command for deferred work - * @wq_cmd: command that should be executed - * @resource_name: resource on which command should be executed - * @notify_registered_only: notify only clients registered by - * ipa2_rm_register() - * - * Returns: 0 on success, negative otherwise - */ -int ipa_rm_wq_send_cmd(enum 
ipa_rm_wq_cmd wq_cmd, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_event event, - bool notify_registered_only) -{ - int result = -ENOMEM; - struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC); - - if (work) { - INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler); - work->wq_cmd = wq_cmd; - work->resource_name = resource_name; - work->event = event; - work->notify_registered_only = notify_registered_only; - result = queue_work(ipa_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} - -int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_state prev_state, - u32 needed_bw) -{ - int result = -ENOMEM; - struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), - GFP_ATOMIC); - if (work) { - INIT_WORK((struct work_struct *)work, - ipa_rm_wq_suspend_handler); - work->resource_name = resource_name; - work->prev_state = prev_state; - work->needed_bw = needed_bw; - result = queue_work(ipa_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} - -int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_state prev_state, - u32 needed_bw) -{ - int result = -ENOMEM; - struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), - GFP_ATOMIC); - if (work) { - INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler); - work->resource_name = resource_name; - work->prev_state = prev_state; - work->needed_bw = needed_bw; - result = queue_work(ipa_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} -/** - * ipa_rm_initialize() - initialize IPA RM component - * - * Returns: 0 on success, negative otherwise - */ -int ipa_rm_initialize(void) -{ - int result; - - ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL); - if (!ipa_rm_ctx) { - IPA_RM_ERR("no mem\n"); - 
result = -ENOMEM; - goto bail; - } - ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq"); - if (!ipa_rm_ctx->ipa_rm_wq) { - IPA_RM_ERR("create workqueue failed\n"); - result = -ENOMEM; - goto create_wq_fail; - } - result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph)); - if (result) { - IPA_RM_ERR("create dependency graph failed\n"); - goto graph_alloc_fail; - } - spin_lock_init(&ipa_rm_ctx->ipa_rm_lock); - IPA_RM_DBG("SUCCESS\n"); - - return 0; -graph_alloc_fail: - destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); -create_wq_fail: - kfree(ipa_rm_ctx); -bail: - return result; -} - -/** - * ipa_rm_stat() - print RM stat - * @buf: [in] The user buff used to print - * @size: [in] The size of buf - * Returns: number of bytes used on success, negative on failure - * - * This function is called by ipa_debugfs in order to receive - * a full picture of the current state of the RM - */ - -int ipa_rm_stat(char *buf, int size) -{ - unsigned long flags; - int i, cnt = 0, result = EINVAL; - struct ipa_rm_resource *resource = NULL; - - if (!buf || size < 0) - return result; - - spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); - for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) { - result = ipa_rm_dep_graph_get_resource( - ipa_rm_ctx->dep_graph, - i, - &resource); - if (!result) { - result = ipa_rm_resource_producer_print_stat( - resource, buf + cnt, - size-cnt); - if (result < 0) - goto bail; - cnt += result; - } - } - result = cnt; -bail: - spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -/** - * ipa_rm_resource_str() - returns string that represent the resource - * @resource_name: [in] resource name - */ -const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name) -{ - if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX) - return "INVALID RESOURCE"; - - return resource_name_to_str[resource_name]; -}; - -static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work) -{ - struct 
ipa_rm_notify_ipa_work_type *notify_work = container_of(work, - struct ipa_rm_notify_ipa_work_type, - work); - int res; - - IPA_RM_DBG("calling to IPA driver. voltage %d bandwidth %d\n", - notify_work->volt, notify_work->bandwidth_mbps); - - res = ipa_set_required_perf_profile(notify_work->volt, - notify_work->bandwidth_mbps); - if (res) { - IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res); - goto bail; - } - - IPA_RM_DBG("IPA driver notified\n"); -bail: - kfree(notify_work); -} - -static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt, - u32 bandwidth) -{ - struct ipa_rm_notify_ipa_work_type *work; - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - IPA_RM_ERR("no mem\n"); - return; - } - - INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work); - work->volt = volt; - work->bandwidth_mbps = bandwidth; - queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work); -} - -/** - * ipa_rm_perf_profile_change() - change performance profile vote for resource - * @resource_name: [in] resource name - * - * change bandwidth and voltage vote based on resource state. 
- */ -void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) -{ - enum ipa_voltage_level old_volt; - u32 *bw_ptr; - u32 old_bw; - struct ipa_rm_resource *resource; - int i; - u32 sum_bw_prod = 0; - u32 sum_bw_cons = 0; - - IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); - - if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - WARN_ON(1); - return; - } - - old_volt = ipa_rm_ctx->prof_vote.curr_volt; - old_bw = ipa_rm_ctx->prof_vote.curr_bw; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) { - bw_ptr = &ipa_rm_ctx->prof_vote.bw_prods[resource_name]; - } else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { - bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[ - resource_name - IPA_RM_RESOURCE_PROD_MAX]; - } else { - IPA_RM_ERR("Invalid resource_name\n"); - return; - } - - switch (resource->state) { - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - IPA_RM_DBG("max_bw = %d, needed_bw = %d\n", - resource->max_bw, resource->needed_bw); - *bw_ptr = min(resource->max_bw, resource->needed_bw); - ipa_rm_ctx->prof_vote.volt[resource_name] = - resource->floor_voltage; - break; - - case IPA_RM_RELEASE_IN_PROGRESS: - case IPA_RM_RELEASED: - *bw_ptr = 0; - ipa_rm_ctx->prof_vote.volt[resource_name] = 0; - break; - - default: - IPA_RM_ERR("unknown state %d\n", resource->state); - WARN_ON(1); - return; - } - IPA_RM_DBG("resource bandwidth: %d voltage: %d\n", *bw_ptr, - resource->floor_voltage); - - ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED; - for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { - if (ipa_rm_ctx->prof_vote.volt[i] > - ipa_rm_ctx->prof_vote.curr_volt) { - ipa_rm_ctx->prof_vote.curr_volt = - ipa_rm_ctx->prof_vote.volt[i]; - } - } - - for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++) - sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i]; - - for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++) - sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i]; - - IPA_RM_DBG("all prod 
bandwidth: %d all cons bandwidth: %d\n", - sum_bw_prod, sum_bw_cons); - ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons); - - if (ipa_rm_ctx->prof_vote.curr_volt == old_volt && - ipa_rm_ctx->prof_vote.curr_bw == old_bw) { - IPA_RM_DBG("same voting\n"); - return; - } - - IPA_RM_DBG("new voting: voltage %d bandwidth %d\n", - ipa_rm_ctx->prof_vote.curr_volt, - ipa_rm_ctx->prof_vote.curr_bw); - - ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt, - ipa_rm_ctx->prof_vote.curr_bw); - - return; -}; - -/** - * ipa_rm_exit() - free all IPA RM resources - */ -void ipa_rm_exit(void) -{ - IPA_RM_DBG("ENTER\n"); - ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph); - destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); - kfree(ipa_rm_ctx); - ipa_rm_ctx = NULL; - IPA_RM_DBG("EXIT\n"); -} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c deleted file mode 100644 index fd437b0c8775..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.c +++ /dev/null @@ -1,245 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include "ipa_rm_dependency_graph.h" -#include "ipa_rm_i.h" - -static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name) -{ - int resource_index = IPA_RM_INDEX_INVALID; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) - resource_index = ipa_rm_prod_index(resource_name); - else if (IPA_RM_RESORCE_IS_CONS(resource_name)) - resource_index = ipa_rm_cons_index(resource_name); - - return resource_index; -} - -/** - * ipa_rm_dep_graph_create() - creates graph - * @dep_graph: [out] created dependency graph - * - * Returns: dependency graph on success, NULL on failure - */ -int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph) -{ - int result = 0; - - *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL); - if (!*dep_graph) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } -bail: - return result; -} - -/** - * ipa_rm_dep_graph_delete() - destroyes the graph - * @graph: [in] dependency graph - * - * Frees all resources. - */ -void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph) -{ - int resource_index; - - if (!graph) { - IPA_RM_ERR("invalid params\n"); - return; - } - for (resource_index = 0; - resource_index < IPA_RM_RESOURCE_MAX; - resource_index++) - kfree(graph->resource_table[resource_index]); - memset(graph->resource_table, 0, sizeof(graph->resource_table)); -} - -/** - * ipa_rm_dep_graph_get_resource() - provides a resource by name - * @graph: [in] dependency graph - * @name: [in] name of the resource - * @resource: [out] resource in case of success - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_dep_graph_get_resource( - struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - struct ipa_rm_resource **resource) -{ - int result; - int resource_index; - - if (!graph) { - result = -EINVAL; - goto bail; - } - resource_index = ipa_rm_dep_get_index(resource_name); - if (resource_index == IPA_RM_INDEX_INVALID) { - result = -EINVAL; - goto bail; - } - *resource = 
graph->resource_table[resource_index]; - if (!*resource) { - result = -EINVAL; - goto bail; - } - result = 0; -bail: - return result; -} - -/** - * ipa_rm_dep_graph_add() - adds resource to graph - * @graph: [in] dependency graph - * @resource: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, - struct ipa_rm_resource *resource) -{ - int result = 0; - int resource_index; - - if (!graph || !resource) { - result = -EINVAL; - goto bail; - } - resource_index = ipa_rm_dep_get_index(resource->name); - if (resource_index == IPA_RM_INDEX_INVALID) { - result = -EINVAL; - goto bail; - } - graph->resource_table[resource_index] = resource; -bail: - return result; -} - -/** - * ipa_rm_dep_graph_remove() - removes resource from graph - * @graph: [in] dependency graph - * @resource: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name) -{ - if (!graph) - return -EINVAL; - graph->resource_table[resource_name] = NULL; - - return 0; -} - -/** - * ipa_rm_dep_graph_add_dependency() - adds dependency between - * two nodes in graph - * @graph: [in] dependency graph - * @resource_name: [in] resource to add - * @depends_on_name: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - struct ipa_rm_resource *dependent = NULL; - struct ipa_rm_resource *dependency = NULL; - int result; - - if (!graph || - !IPA_RM_RESORCE_IS_PROD(resource_name) || - !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { - IPA_RM_ERR("invalid params\n"); - result = -EINVAL; - goto bail; - } - if (ipa_rm_dep_graph_get_resource(graph, - resource_name, - &dependent)) { - IPA_RM_ERR("%s does not exist\n", - 
ipa_rm_resource_str(resource_name)); - result = -EINVAL; - goto bail; - } - if (ipa_rm_dep_graph_get_resource(graph, - depends_on_name, - &dependency)) { - IPA_RM_ERR("%s does not exist\n", - ipa_rm_resource_str(depends_on_name)); - result = -EINVAL; - goto bail; - } - result = ipa_rm_resource_add_dependency(dependent, dependency); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa_rm_dep_graph_delete_dependency() - deleted dependency between - * two nodes in graph - * @graph: [in] dependency graph - * @resource_name: [in] resource to delete - * @depends_on_name: [in] resource to delete - * - * Returns: 0 on success, negative on failure - * - */ -int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - struct ipa_rm_resource *dependent = NULL; - struct ipa_rm_resource *dependency = NULL; - int result; - - if (!graph || - !IPA_RM_RESORCE_IS_PROD(resource_name) || - !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { - IPA_RM_ERR("invalid params\n"); - result = -EINVAL; - goto bail; - } - - if (ipa_rm_dep_graph_get_resource(graph, - resource_name, - &dependent)) { - IPA_RM_ERR("%s does not exist\n", - ipa_rm_resource_str(resource_name)); - result = -EINVAL; - goto bail; - } - - if (ipa_rm_dep_graph_get_resource(graph, - depends_on_name, - &dependency)) { - IPA_RM_ERR("%s does not exist\n", - ipa_rm_resource_str(depends_on_name)); - result = -EINVAL; - goto bail; - } - - result = ipa_rm_resource_delete_dependency(dependent, dependency); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h deleted file mode 100644 index b76c6636f873..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_dependency_graph.h +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. 
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_ -#define _IPA_RM_DEPENDENCY_GRAPH_H_ - -#include -#include -#include "ipa_rm_resource.h" - -struct ipa_rm_dep_graph { - struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX]; -}; - -int ipa_rm_dep_graph_get_resource( - struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name name, - struct ipa_rm_resource **resource); - -int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph); - -void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph); - -int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, - struct ipa_rm_resource *resource); - -int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name); - -int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h deleted file mode 100644 index b286c198160c..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_i.h +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_I_H_ -#define _IPA_RM_I_H_ - -#include -#include -#include "ipa_rm_resource.h" - -#define IPA_RM_DRV_NAME "ipa_rm" - -#define IPA_RM_DBG(fmt, args...) \ - pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) -#define IPA_RM_ERR(fmt, args...) \ - pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) - -#define IPA_RM_RESOURCE_CONS_MAX \ - (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX) -#define IPA_RM_RESORCE_IS_PROD(x) \ - (x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX) -#define IPA_RM_RESORCE_IS_CONS(x) \ - (x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX) -#define IPA_RM_INDEX_INVALID (-1) -#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000 - -int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name); -int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name); - -/** - * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release - * work type - * @delayed_work: work struct - * @ipa_rm_resource_name: name of the resource on which this work should be done - * @needed_bw: bandwidth required for resource in Mbps - * @dec_usage_count: decrease usage count on release ? 
- */ -struct ipa_rm_delayed_release_work_type { - struct delayed_work work; - enum ipa_rm_resource_name resource_name; - u32 needed_bw; - bool dec_usage_count; - -}; - -/** - * enum ipa_rm_wq_cmd - workqueue commands - */ -enum ipa_rm_wq_cmd { - IPA_RM_WQ_NOTIFY_PROD, - IPA_RM_WQ_NOTIFY_CONS, - IPA_RM_WQ_RESOURCE_CB -}; - -/** - * struct ipa_rm_wq_work_type - IPA RM worqueue specific - * work type - * @work: work struct - * @wq_cmd: command that should be processed in workqueue context - * @resource_name: name of the resource on which this work - * should be done - * @dep_graph: data structure to search for resource if exists - * @event: event to notify - * @notify_registered_only: notify only clients registered by - * ipa2_rm_register() - */ -struct ipa_rm_wq_work_type { - struct work_struct work; - enum ipa_rm_wq_cmd wq_cmd; - enum ipa_rm_resource_name resource_name; - enum ipa_rm_event event; - bool notify_registered_only; -}; - -/** - * struct ipa_rm_wq_suspend_resume_work_type - IPA RM worqueue resume or - * suspend work type - * @work: work struct - * @resource_name: name of the resource on which this work - * should be done - * @prev_state: - * @needed_bw: - */ -struct ipa_rm_wq_suspend_resume_work_type { - struct work_struct work; - enum ipa_rm_resource_name resource_name; - enum ipa_rm_resource_state prev_state; - u32 needed_bw; - -}; - -int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_event event, - bool notify_registered_only); - -int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_state prev_state, - u32 needed_bw); - -int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_state prev_state, - u32 needed_bw); - -int ipa_rm_initialize(void); - -int ipa_rm_stat(char *buf, int size); - -const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name); - -void ipa_rm_perf_profile_change(enum ipa_rm_resource_name 
resource_name); - -int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name); - -void delayed_release_work_func(struct work_struct *work); - -void ipa_rm_exit(void); - -#endif /* _IPA_RM_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c deleted file mode 100644 index ee14e722b885..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_inactivity_timer.c +++ /dev/null @@ -1,268 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "ipa_i.h" - -/** - * struct ipa_rm_it_private - IPA RM Inactivity Timer private - * data - * @initied: indicates if instance was initialized - * @lock - spinlock for mutual exclusion - * @resource_name - resource name - * @work: delayed work object for running delayed releas - * function - * @resource_requested: boolean flag indicates if resource was requested - * @reschedule_work: boolean flag indicates to not release and to - * reschedule the release work. - * @work_in_progress: boolean flag indicates is release work was scheduled. 
- * @jiffies: number of jiffies for timeout - * - * WWAN private - holds all relevant info about WWAN driver - */ -struct ipa_rm_it_private { - bool initied; - enum ipa_rm_resource_name resource_name; - spinlock_t lock; - struct delayed_work work; - bool resource_requested; - bool reschedule_work; - bool work_in_progress; - unsigned long jiffies; -}; - -static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX]; - -/** - * ipa_rm_inactivity_timer_func() - called when timer expired in - * the context of the shared workqueue. Checks internally if - * reschedule_work flag is set. In case it is not set this function calls to - * ipa2_rm_release_resource(). In case reschedule_work is set this function - * reschedule the work. This flag is cleared cleared when - * calling to ipa2_rm_inactivity_timer_release_resource(). - * - * @work: work object provided by the work queue - * - * Return codes: - * None - */ -static void ipa_rm_inactivity_timer_func(struct work_struct *work) -{ - - struct ipa_rm_it_private *me = container_of(to_delayed_work(work), - struct ipa_rm_it_private, - work); - unsigned long flags; - - IPADBG("%s: timer expired for resource %d!\n", __func__, - me->resource_name); - - spin_lock_irqsave( - &ipa_rm_it_handles[me->resource_name].lock, flags); - if (ipa_rm_it_handles[me->resource_name].reschedule_work) { - IPADBG("%s: setting delayed work\n", __func__); - ipa_rm_it_handles[me->resource_name].reschedule_work = false; - schedule_delayed_work( - &ipa_rm_it_handles[me->resource_name].work, - ipa_rm_it_handles[me->resource_name].jiffies); - } else if (ipa_rm_it_handles[me->resource_name].resource_requested) { - IPADBG("%s: not calling release\n", __func__); - ipa_rm_it_handles[me->resource_name].work_in_progress = false; - } else { - IPADBG("%s: calling release_resource on resource %d!\n", - __func__, me->resource_name); - ipa2_rm_release_resource(me->resource_name); - ipa_rm_it_handles[me->resource_name].work_in_progress = false; - } - 
spin_unlock_irqrestore( - &ipa_rm_it_handles[me->resource_name].lock, flags); -} - -/** -* ipa2_rm_inactivity_timer_init() - Init function for IPA RM -* inactivity timer. This function shall be called prior calling -* any other API of IPA RM inactivity timer. -* -* @resource_name: Resource name. @see ipa_rm.h -* @msecs: time in miliseccond, that IPA RM inactivity timer -* shall wait prior calling to ipa2_rm_release_resource(). -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa2_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, - unsigned long msecs) -{ - IPADBG("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (ipa_rm_it_handles[resource_name].initied) { - IPAERR("%s: resource %d already inited\n", - __func__, resource_name); - return -EINVAL; - } - - spin_lock_init(&ipa_rm_it_handles[resource_name].lock); - ipa_rm_it_handles[resource_name].resource_name = resource_name; - ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs); - ipa_rm_it_handles[resource_name].resource_requested = false; - ipa_rm_it_handles[resource_name].reschedule_work = false; - ipa_rm_it_handles[resource_name].work_in_progress = false; - - INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work, - ipa_rm_inactivity_timer_func); - ipa_rm_it_handles[resource_name].initied = 1; - - return 0; -} - -/** -* ipa2_rm_inactivity_timer_destroy() - De-Init function for IPA -* RM inactivity timer. -* -* @resource_name: Resource name. 
@see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa2_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) -{ - IPADBG("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa_rm_it_handles[resource_name].initied) { - IPAERR("%s: resource %d already inited\n", - __func__, resource_name); - return -EINVAL; - } - - cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work); - - memset(&ipa_rm_it_handles[resource_name], 0, - sizeof(struct ipa_rm_it_private)); - - return 0; -} - - -/** -* ipa2_rm_inactivity_timer_request_resource() - Same as -* ipa2_rm_request_resource(), with a difference that calling to -* this function will also cancel the inactivity timer, if -* ipa2_rm_inactivity_timer_release_resource() was called earlier. -* -* @resource_name: Resource name. @see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa2_rm_inactivity_timer_request_resource( - enum ipa_rm_resource_name resource_name) -{ - int ret; - unsigned long flags; - - IPADBG("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa_rm_it_handles[resource_name].initied) { - IPAERR("%s: Not initialized\n", __func__); - return -EINVAL; - } - - spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); - ipa_rm_it_handles[resource_name].resource_requested = true; - spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); - ret = ipa2_rm_request_resource(resource_name); - IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret); - - return ret; -} - -/** -* ipa2_rm_inactivity_timer_release_resource() - Sets the -* inactivity timer to the timeout set by -* ipa2_rm_inactivity_timer_init(). 
When the timeout expires, IPA -* RM inactivity timer will call to ipa2_rm_release_resource(). -* If a call to ipa2_rm_inactivity_timer_request_resource() was -* made BEFORE the timout has expired, rge timer will be -* cancelled. -* -* @resource_name: Resource name. @see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa2_rm_inactivity_timer_release_resource( - enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - - IPADBG("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa_rm_it_handles[resource_name].initied) { - IPAERR("%s: Not initialized\n", __func__); - return -EINVAL; - } - - spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); - ipa_rm_it_handles[resource_name].resource_requested = false; - if (ipa_rm_it_handles[resource_name].work_in_progress) { - IPADBG("%s: Timer already set, not scheduling again %d\n", - __func__, resource_name); - ipa_rm_it_handles[resource_name].reschedule_work = true; - spin_unlock_irqrestore( - &ipa_rm_it_handles[resource_name].lock, flags); - return 0; - } - ipa_rm_it_handles[resource_name].work_in_progress = true; - ipa_rm_it_handles[resource_name].reschedule_work = false; - IPADBG("%s: setting delayed work\n", __func__); - schedule_delayed_work(&ipa_rm_it_handles[resource_name].work, - ipa_rm_it_handles[resource_name].jiffies); - spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); - - return 0; -} - diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c deleted file mode 100644 index 6f6f2a64b1fc..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.c +++ /dev/null @@ -1,247 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include "ipa_i.h" -#include "ipa_rm_i.h" - -/** - * ipa_rm_peers_list_get_resource_index() - resource name to index - * of this resource in corresponding peers list - * @resource_name: [in] resource name - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained in enum - * ipa_rm_resource_name. - * - */ -static int ipa_rm_peers_list_get_resource_index( - enum ipa_rm_resource_name resource_name) -{ - int resource_index = IPA_RM_INDEX_INVALID; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) - resource_index = ipa_rm_prod_index(resource_name); - else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { - resource_index = ipa_rm_cons_index(resource_name); - if (resource_index != IPA_RM_INDEX_INVALID) - resource_index = - resource_index - IPA_RM_RESOURCE_PROD_MAX; - } - - return resource_index; -} - -static bool ipa_rm_peers_list_check_index(int index, - struct ipa_rm_peers_list *peers_list) -{ - return !(index > peers_list->max_peers || index < 0); -} - -/** - * ipa_rm_peers_list_create() - creates the peers list - * - * @max_peers: maximum number of peers in new list - * @peers_list: [out] newly created peers list - * - * Returns: 0 in case of SUCCESS, negative otherwise - */ -int ipa_rm_peers_list_create(int max_peers, - struct ipa_rm_peers_list **peers_list) -{ - int result; - - *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC); - if (!*peers_list) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - 
(*peers_list)->max_peers = max_peers; - (*peers_list)->peers = kzalloc((*peers_list)->max_peers * - sizeof(struct ipa_rm_resource *), GFP_ATOMIC); - if (!((*peers_list)->peers)) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto list_alloc_fail; - } - - return 0; - -list_alloc_fail: - kfree(*peers_list); -bail: - return result; -} - -/** - * ipa_rm_peers_list_delete() - deletes the peers list - * - * @peers_list: peers list - * - */ -void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list) -{ - if (peers_list) { - kfree(peers_list->peers); - kfree(peers_list); - } -} - -/** - * ipa_rm_peers_list_remove_peer() - removes peer from the list - * - * @peers_list: peers list - * @resource_name: name of the resource to remove - * - */ -void ipa_rm_peers_list_remove_peer( - struct ipa_rm_peers_list *peers_list, - enum ipa_rm_resource_name resource_name) -{ - if (!peers_list) - return; - - peers_list->peers[ipa_rm_peers_list_get_resource_index( - resource_name)] = NULL; - peers_list->peers_count--; -} - -/** - * ipa_rm_peers_list_add_peer() - adds peer to the list - * - * @peers_list: peers list - * @resource: resource to add - * - */ -void ipa_rm_peers_list_add_peer( - struct ipa_rm_peers_list *peers_list, - struct ipa_rm_resource *resource) -{ - if (!peers_list || !resource) - return; - - peers_list->peers[ipa_rm_peers_list_get_resource_index( - resource->name)] = - resource; - peers_list->peers_count++; -} - -/** - * ipa_rm_peers_list_is_empty() - checks - * if resource peers list is empty - * - * @peers_list: peers list - * - * Returns: true if the list is empty, false otherwise - */ -bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list) -{ - bool result = true; - - if (!peers_list) - goto bail; - - if (peers_list->peers_count > 0) - result = false; -bail: - return result; -} - -/** - * ipa_rm_peers_list_has_last_peer() - checks - * if resource peers list has exactly one peer - * - * @peers_list: peers list - * - * Returns: true if the 
list has exactly one peer, false otherwise - */ -bool ipa_rm_peers_list_has_last_peer( - struct ipa_rm_peers_list *peers_list) -{ - bool result = false; - - if (!peers_list) - goto bail; - - if (peers_list->peers_count == 1) - result = true; -bail: - return result; -} - -/** - * ipa_rm_peers_list_check_dependency() - check dependency - * between 2 peer lists - * @resource_peers: first peers list - * @resource_name: first peers list resource name - * @depends_on_peers: second peers list - * @depends_on_name: second peers list resource name - * - * Returns: true if there is dependency, false otherwise - * - */ -bool ipa_rm_peers_list_check_dependency( - struct ipa_rm_peers_list *resource_peers, - enum ipa_rm_resource_name resource_name, - struct ipa_rm_peers_list *depends_on_peers, - enum ipa_rm_resource_name depends_on_name) -{ - bool result = false; - - if (!resource_peers || !depends_on_peers) - return result; - - if (resource_peers->peers[ipa_rm_peers_list_get_resource_index( - depends_on_name)] != NULL) - result = true; - - if (depends_on_peers->peers[ipa_rm_peers_list_get_resource_index( - resource_name)] != NULL) - result = true; - - return result; -} - -/** - * ipa_rm_peers_list_get_resource() - get resource by - * resource index - * @resource_index: resource index - * @resource_peers: peers list - * - * Returns: the resource if found, NULL otherwise - */ -struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, - struct ipa_rm_peers_list *resource_peers) -{ - struct ipa_rm_resource *result = NULL; - - if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) - goto bail; - - result = resource_peers->peers[resource_index]; -bail: - return result; -} - -/** - * ipa_rm_peers_list_get_size() - get peers list sise - * - * @peers_list: peers list - * - * Returns: the size of the peers list - */ -int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list) -{ - return peers_list->max_peers; -} diff --git 
a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h deleted file mode 100644 index b41de0aa3167..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_peers_list.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_PEERS_LIST_H_ -#define _IPA_RM_PEERS_LIST_H_ - -#include "ipa_rm_resource.h" - -/** - * struct ipa_rm_peers_list - IPA RM resource peers list - * @peers: the list of references to resources dependent on this resource - * in case of producer or list of dependencies in case of consumer - * @max_peers: maximum number of peers for this resource - * @peers_count: actual number of peers for this resource - */ -struct ipa_rm_peers_list { - struct ipa_rm_resource **peers; - int max_peers; - int peers_count; -}; - -int ipa_rm_peers_list_create(int max_peers, - struct ipa_rm_peers_list **peers_list); -void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list); -void ipa_rm_peers_list_remove_peer( - struct ipa_rm_peers_list *peers_list, - enum ipa_rm_resource_name resource_name); -void ipa_rm_peers_list_add_peer( - struct ipa_rm_peers_list *peers_list, - struct ipa_rm_resource *resource); -bool ipa_rm_peers_list_check_dependency( - struct ipa_rm_peers_list *resource_peers, - enum ipa_rm_resource_name resource_name, - struct ipa_rm_peers_list *depends_on_peers, - enum ipa_rm_resource_name depends_on_name); -struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int 
resource_index, - struct ipa_rm_peers_list *peers_list); -int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list); -bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list); -bool ipa_rm_peers_list_has_last_peer( - struct ipa_rm_peers_list *peers_list); - - -#endif /* _IPA_RM_PEERS_LIST_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c deleted file mode 100644 index 66e086768294..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.c +++ /dev/null @@ -1,1164 +0,0 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include "ipa_i.h" -#include "ipa_rm_resource.h" -#include "ipa_rm_i.h" - -/** - * ipa_rm_dep_prod_index() - producer name to producer index mapping - * @resource_name: [in] resource name (should be of producer) - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained - * in enum ipa_rm_resource_name or is not of producers. 
- * - */ -int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name) -{ - int result = resource_name; - - switch (resource_name) { - case IPA_RM_RESOURCE_Q6_PROD: - case IPA_RM_RESOURCE_USB_PROD: - case IPA_RM_RESOURCE_HSIC_PROD: - case IPA_RM_RESOURCE_STD_ECM_PROD: - case IPA_RM_RESOURCE_RNDIS_PROD: - case IPA_RM_RESOURCE_WWAN_0_PROD: - case IPA_RM_RESOURCE_WLAN_PROD: - case IPA_RM_RESOURCE_ODU_ADAPT_PROD: - case IPA_RM_RESOURCE_MHI_PROD: - break; - default: - result = IPA_RM_INDEX_INVALID; - break; - } - - return result; -} - -/** - * ipa_rm_cons_index() - consumer name to consumer index mapping - * @resource_name: [in] resource name (should be of consumer) - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained - * in enum ipa_rm_resource_name or is not of consumers. - * - */ -int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name) -{ - int result = resource_name; - - switch (resource_name) { - case IPA_RM_RESOURCE_Q6_CONS: - case IPA_RM_RESOURCE_USB_CONS: - case IPA_RM_RESOURCE_HSIC_CONS: - case IPA_RM_RESOURCE_WLAN_CONS: - case IPA_RM_RESOURCE_APPS_CONS: - case IPA_RM_RESOURCE_ODU_ADAPT_CONS: - case IPA_RM_RESOURCE_MHI_CONS: - break; - default: - result = IPA_RM_INDEX_INVALID; - break; - } - - return result; -} - -int ipa_rm_resource_consumer_release_work( - struct ipa_rm_resource_cons *consumer, - enum ipa_rm_resource_state prev_state, - bool notify_completion) -{ - int driver_result; - - IPA_RM_DBG("calling driver CB\n"); - driver_result = consumer->release_resource(); - IPA_RM_DBG("driver CB returned with %d\n", driver_result); - /* - * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED - * for CONS which remains in RELEASE_IN_PROGRESS. 
- */ - if (driver_result == -EINPROGRESS) - driver_result = 0; - if (driver_result != 0 && driver_result != -EINPROGRESS) { - IPA_RM_ERR("driver CB returned error %d\n", driver_result); - consumer->resource.state = prev_state; - goto bail; - } - if (driver_result == 0) { - if (notify_completion) - ipa_rm_resource_consumer_handle_cb(consumer, - IPA_RM_RESOURCE_RELEASED); - else - consumer->resource.state = IPA_RM_RELEASED; - } - complete_all(&consumer->request_consumer_in_progress); - - ipa_rm_perf_profile_change(consumer->resource.name); -bail: - return driver_result; -} - -int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, - enum ipa_rm_resource_state prev_state, - u32 prod_needed_bw, - bool notify_completion) -{ - int driver_result; - - IPA_RM_DBG("calling driver CB\n"); - driver_result = consumer->request_resource(); - IPA_RM_DBG("driver CB returned with %d\n", driver_result); - if (driver_result == 0) { - if (notify_completion) { - ipa_rm_resource_consumer_handle_cb(consumer, - IPA_RM_RESOURCE_GRANTED); - } else { - consumer->resource.state = IPA_RM_GRANTED; - ipa_rm_perf_profile_change(consumer->resource.name); - ipa_resume_resource(consumer->resource.name); - } - } else if (driver_result != -EINPROGRESS) { - consumer->resource.state = prev_state; - consumer->resource.needed_bw -= prod_needed_bw; - consumer->usage_count--; - } - - return driver_result; -} - -int ipa_rm_resource_consumer_request( - struct ipa_rm_resource_cons *consumer, - u32 prod_needed_bw, - bool inc_usage_count) -{ - int result = 0; - enum ipa_rm_resource_state prev_state; - struct ipa_active_client_logging_info log_info; - - IPA_RM_DBG("%s state: %d\n", - ipa_rm_resource_str(consumer->resource.name), - consumer->resource.state); - - prev_state = consumer->resource.state; - consumer->resource.needed_bw += prod_needed_bw; - switch (consumer->resource.state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - 
reinit_completion(&consumer->request_consumer_in_progress); - consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; - IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, - ipa_rm_resource_str(consumer->resource.name)); - if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || - ipa2_inc_client_enable_clks_no_block(&log_info) != 0) { - IPA_RM_DBG("async resume work for %s\n", - ipa_rm_resource_str(consumer->resource.name)); - ipa_rm_wq_send_resume_cmd(consumer->resource.name, - prev_state, - prod_needed_bw); - result = -EINPROGRESS; - break; - } - result = ipa_rm_resource_consumer_request_work(consumer, - prev_state, - prod_needed_bw, - false); - break; - case IPA_RM_GRANTED: - ipa_rm_perf_profile_change(consumer->resource.name); - break; - case IPA_RM_REQUEST_IN_PROGRESS: - result = -EINPROGRESS; - break; - default: - consumer->resource.needed_bw -= prod_needed_bw; - result = -EPERM; - goto bail; - } - if (inc_usage_count) - consumer->usage_count++; -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa_rm_resource_str(consumer->resource.name), - consumer->resource.state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -int ipa_rm_resource_consumer_release( - struct ipa_rm_resource_cons *consumer, - u32 prod_needed_bw, - bool dec_usage_count) -{ - int result = 0; - enum ipa_rm_resource_state save_state; - - IPA_RM_DBG("%s state: %d\n", - ipa_rm_resource_str(consumer->resource.name), - consumer->resource.state); - save_state = consumer->resource.state; - consumer->resource.needed_bw -= prod_needed_bw; - switch (consumer->resource.state) { - case IPA_RM_RELEASED: - break; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - if (dec_usage_count && consumer->usage_count > 0) - consumer->usage_count--; - if (consumer->usage_count == 0) { - consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; - if (save_state == IPA_RM_REQUEST_IN_PROGRESS || - ipa_suspend_resource_no_block( - consumer->resource.name) != 0) { - ipa_rm_wq_send_suspend_cmd( - consumer->resource.name, - 
save_state, - prod_needed_bw); - result = -EINPROGRESS; - goto bail; - } - result = ipa_rm_resource_consumer_release_work(consumer, - save_state, false); - goto bail; - } else if (consumer->resource.state == IPA_RM_GRANTED) { - ipa_rm_perf_profile_change(consumer->resource.name); - } - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (dec_usage_count && consumer->usage_count > 0) - consumer->usage_count--; - result = -EINPROGRESS; - break; - default: - result = -EPERM; - goto bail; - } -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa_rm_resource_str(consumer->resource.name), - consumer->resource.state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa_rm_resource_producer_notify_clients() - notify - * all registered clients of given producer - * @producer: producer - * @event: event to notify - * @notify_registered_only: notify only clients registered by - * ipa2_rm_register() - */ -void ipa_rm_resource_producer_notify_clients( - struct ipa_rm_resource_prod *producer, - enum ipa_rm_event event, - bool notify_registered_only) -{ - struct ipa_rm_notification_info *reg_info; - - IPA_RM_DBG("%s event: %d notify_registered_only: %d\n", - ipa_rm_resource_str(producer->resource.name), - event, - notify_registered_only); - - list_for_each_entry(reg_info, &(producer->event_listeners), link) { - if (notify_registered_only && !reg_info->explicit) - continue; - - IPA_RM_DBG("Notifying %s event: %d\n", - ipa_rm_resource_str(producer->resource.name), event); - reg_info->reg_params.notify_cb(reg_info->reg_params.user_data, - event, - 0); - IPA_RM_DBG("back from client CB\n"); - } -} - -static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource, - struct ipa_rm_resource_prod **producer, - struct ipa_rm_create_params *create_params, - int *max_peers) -{ - int result = 0; - - *producer = kzalloc(sizeof(**producer), GFP_ATOMIC); - if (*producer == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - 
INIT_LIST_HEAD(&((*producer)->event_listeners)); - result = ipa_rm_resource_producer_register(*producer, - &(create_params->reg_params), - false); - if (result) { - IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n"); - goto register_fail; - } - - (*resource) = (struct ipa_rm_resource *) (*producer); - (*resource)->type = IPA_RM_PRODUCER; - *max_peers = IPA_RM_RESOURCE_CONS_MAX; - goto bail; -register_fail: - kfree(*producer); -bail: - return result; -} - -static void ipa_rm_resource_producer_delete( - struct ipa_rm_resource_prod *producer) -{ - struct ipa_rm_notification_info *reg_info; - struct list_head *pos, *q; - - ipa_rm_resource_producer_release(producer); - list_for_each_safe(pos, q, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa_rm_notification_info, - link); - list_del(pos); - kfree(reg_info); - } -} - -static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource, - struct ipa_rm_resource_cons **consumer, - struct ipa_rm_create_params *create_params, - int *max_peers) -{ - int result = 0; - - *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC); - if (*consumer == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - (*consumer)->request_resource = create_params->request_resource; - (*consumer)->release_resource = create_params->release_resource; - (*resource) = (struct ipa_rm_resource *) (*consumer); - (*resource)->type = IPA_RM_CONSUMER; - init_completion(&((*consumer)->request_consumer_in_progress)); - *max_peers = IPA_RM_RESOURCE_PROD_MAX; -bail: - return result; -} - -/** - * ipa_rm_resource_create() - creates resource - * @create_params: [in] parameters needed - * for resource initialization with IPA RM - * @resource: [out] created resource - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_resource_create( - struct ipa_rm_create_params *create_params, - struct ipa_rm_resource **resource) -{ - struct ipa_rm_resource_cons *consumer; - struct ipa_rm_resource_prod 
*producer; - int max_peers; - int result = 0; - - if (!create_params) { - result = -EINVAL; - goto bail; - } - - if (IPA_RM_RESORCE_IS_PROD(create_params->name)) { - result = ipa_rm_resource_producer_create(resource, - &producer, - create_params, - &max_peers); - if (result) { - IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); - goto bail; - } - } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) { - result = ipa_rm_resource_consumer_create(resource, - &consumer, - create_params, - &max_peers); - if (result) { - IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); - goto bail; - } - } else { - IPA_RM_ERR("invalied resource\n"); - result = -EPERM; - goto bail; - } - - result = ipa_rm_peers_list_create(max_peers, - &((*resource)->peers_list)); - if (result) { - IPA_RM_ERR("ipa_rm_peers_list_create failed\n"); - goto peers_alloc_fail; - } - (*resource)->name = create_params->name; - (*resource)->floor_voltage = create_params->floor_voltage; - (*resource)->state = IPA_RM_RELEASED; - goto bail; - -peers_alloc_fail: - ipa_rm_resource_delete(*resource); -bail: - return result; -} - -/** - * ipa_rm_resource_delete() - deletes resource - * @resource: [in] resource - * for resource initialization with IPA RM - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_resource_delete(struct ipa_rm_resource *resource) -{ - struct ipa_rm_resource *consumer; - struct ipa_rm_resource *producer; - int peers_index; - int result = 0; - int list_size; - - if (!resource) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - IPA_RM_DBG("ipa_rm_resource_delete ENTER with resource %d\n", - resource->name); - if (resource->type == IPA_RM_PRODUCER) { - if (resource->peers_list) { - list_size = ipa_rm_peers_list_get_size( - resource->peers_list); - for (peers_index = 0; - peers_index < list_size; - peers_index++) { - consumer = ipa_rm_peers_list_get_resource( - peers_index, - resource->peers_list); - if (consumer) - ipa_rm_resource_delete_dependency( - 
resource, - consumer); - } - } - - ipa_rm_resource_producer_delete( - (struct ipa_rm_resource_prod *) resource); - } else if (resource->type == IPA_RM_CONSUMER) { - if (resource->peers_list) { - list_size = ipa_rm_peers_list_get_size( - resource->peers_list); - for (peers_index = 0; - peers_index < list_size; - peers_index++){ - producer = ipa_rm_peers_list_get_resource( - peers_index, - resource->peers_list); - if (producer) - ipa_rm_resource_delete_dependency( - producer, - resource); - } - } - } - ipa_rm_peers_list_delete(resource->peers_list); - kfree(resource); - return result; -} - -/** - * ipa_rm_resource_register() - register resource - * @resource: [in] resource - * @reg_params: [in] registration parameters - * @explicit: [in] registered explicitly by ipa2_rm_register() - * - * Returns: 0 on success, negative on failure - * - * Producer resource is expected for this call. - * - */ -int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params, - bool explicit) -{ - int result = 0; - struct ipa_rm_notification_info *reg_info; - struct list_head *pos; - - if (!producer || !reg_params) { - IPA_RM_ERR("invalid params\n"); - result = -EPERM; - goto bail; - } - - list_for_each(pos, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa_rm_notification_info, - link); - if (reg_info->reg_params.notify_cb == - reg_params->notify_cb) { - IPA_RM_ERR("already registered\n"); - result = -EPERM; - goto bail; - } - - } - - reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); - if (reg_info == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - reg_info->reg_params.user_data = reg_params->user_data; - reg_info->reg_params.notify_cb = reg_params->notify_cb; - reg_info->explicit = explicit; - INIT_LIST_HEAD(®_info->link); - list_add(®_info->link, &producer->event_listeners); -bail: - return result; -} - -/** - * ipa_rm_resource_deregister() - register resource - * @resource: 
[in] resource - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Producer resource is expected for this call. - * This function deleted only single instance of - * registration info. - * - */ -int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params) -{ - int result = -EINVAL; - struct ipa_rm_notification_info *reg_info; - struct list_head *pos, *q; - - if (!producer || !reg_params) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - list_for_each_safe(pos, q, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa_rm_notification_info, - link); - if (reg_info->reg_params.notify_cb == - reg_params->notify_cb) { - list_del(pos); - kfree(reg_info); - result = 0; - goto bail; - } - } -bail: - return result; -} - -/** - * ipa_rm_resource_add_dependency() - add dependency between two - * given resources - * @resource: [in] resource resource - * @depends_on: [in] depends_on resource - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on) -{ - int result = 0; - int consumer_result; - - if (!resource || !depends_on) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (ipa_rm_peers_list_check_dependency(resource->peers_list, - resource->name, - depends_on->peers_list, - depends_on->name)) { - IPA_RM_ERR("dependency already exists\n"); - return -EEXIST; - } - - ipa_rm_peers_list_add_peer(resource->peers_list, depends_on); - ipa_rm_peers_list_add_peer(depends_on->peers_list, resource); - IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), - resource->state); - - resource->needed_bw += depends_on->max_bw; - switch (resource->state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - break; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - { - enum ipa_rm_resource_state 
prev_state = resource->state; - - resource->state = IPA_RM_REQUEST_IN_PROGRESS; - ((struct ipa_rm_resource_prod *) - resource)->pending_request++; - consumer_result = ipa_rm_resource_consumer_request( - (struct ipa_rm_resource_cons *)depends_on, - resource->max_bw, - true); - if (consumer_result != -EINPROGRESS) { - resource->state = prev_state; - ((struct ipa_rm_resource_prod *) - resource)->pending_request--; - ipa_rm_perf_profile_change(resource->name); - } - result = consumer_result; - break; - } - default: - IPA_RM_ERR("invalid state\n"); - result = -EPERM; - goto bail; - } -bail: - IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), - resource->state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa_rm_resource_delete_dependency() - add dependency between two - * given resources - * @resource: [in] resource resource - * @depends_on: [in] depends_on resource - * - * Returns: 0 on success, negative on failure - * In case the resource state was changed, a notification - * will be sent to the RM client - */ -int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on) -{ - int result = 0; - bool state_changed = false; - bool release_consumer = false; - enum ipa_rm_event evt; - - if (!resource || !depends_on) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (!ipa_rm_peers_list_check_dependency(resource->peers_list, - resource->name, - depends_on->peers_list, - depends_on->name)) { - IPA_RM_ERR("dependency does not exist\n"); - return -EINVAL; - } - IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), - resource->state); - - resource->needed_bw -= depends_on->max_bw; - switch (resource->state) { - case IPA_RM_RELEASED: - break; - case IPA_RM_GRANTED: - ipa_rm_perf_profile_change(resource->name); - release_consumer = true; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (((struct ipa_rm_resource_prod *) - resource)->pending_release > 0) - 
((struct ipa_rm_resource_prod *) - resource)->pending_release--; - if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS && - ((struct ipa_rm_resource_prod *) - resource)->pending_release == 0) { - resource->state = IPA_RM_RELEASED; - state_changed = true; - evt = IPA_RM_RESOURCE_RELEASED; - ipa_rm_perf_profile_change(resource->name); - } - break; - case IPA_RM_REQUEST_IN_PROGRESS: - release_consumer = true; - if (((struct ipa_rm_resource_prod *) - resource)->pending_request > 0) - ((struct ipa_rm_resource_prod *) - resource)->pending_request--; - if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS && - ((struct ipa_rm_resource_prod *) - resource)->pending_request == 0) { - resource->state = IPA_RM_GRANTED; - state_changed = true; - evt = IPA_RM_RESOURCE_GRANTED; - ipa_rm_perf_profile_change(resource->name); - } - break; - default: - result = -EINVAL; - goto bail; - } - if (state_changed) { - (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - resource->name, - evt, - false); - } - IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), - resource->state); - ipa_rm_peers_list_remove_peer(resource->peers_list, - depends_on->name); - ipa_rm_peers_list_remove_peer(depends_on->peers_list, - resource->name); - if (release_consumer) - (void) ipa_rm_resource_consumer_release( - (struct ipa_rm_resource_cons *)depends_on, - resource->max_bw, - true); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa_rm_resource_producer_request() - producer resource request - * @producer: [in] producer - * - * Returns: 0 on success, negative on failure - */ -int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer) -{ - int peers_index; - int result = 0; - struct ipa_rm_resource *consumer; - int consumer_result; - enum ipa_rm_resource_state state; - - state = producer->resource.state; - switch (producer->resource.state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - producer->resource.state = 
IPA_RM_REQUEST_IN_PROGRESS; - break; - case IPA_RM_GRANTED: - goto unlock_and_bail; - case IPA_RM_REQUEST_IN_PROGRESS: - result = -EINPROGRESS; - goto unlock_and_bail; - default: - result = -EINVAL; - goto unlock_and_bail; - } - - producer->pending_request = 0; - for (peers_index = 0; - peers_index < ipa_rm_peers_list_get_size( - producer->resource.peers_list); - peers_index++) { - consumer = ipa_rm_peers_list_get_resource(peers_index, - producer->resource.peers_list); - if (consumer) { - producer->pending_request++; - consumer_result = ipa_rm_resource_consumer_request( - (struct ipa_rm_resource_cons *)consumer, - producer->resource.max_bw, - true); - if (consumer_result == -EINPROGRESS) { - result = -EINPROGRESS; - } else { - producer->pending_request--; - if (consumer_result != 0) { - result = consumer_result; - goto bail; - } - } - } - } - - if (producer->pending_request == 0) { - producer->resource.state = IPA_RM_GRANTED; - ipa_rm_perf_profile_change(producer->resource.name); - (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - producer->resource.name, - IPA_RM_RESOURCE_GRANTED, - true); - result = 0; - } -unlock_and_bail: - if (state != producer->resource.state) - IPA_RM_DBG("%s state changed %d->%d\n", - ipa_rm_resource_str(producer->resource.name), - state, - producer->resource.state); -bail: - return result; -} - -/** - * ipa_rm_resource_producer_release() - producer resource release - * producer: [in] producer resource - * - * Returns: 0 on success, negative on failure - * - */ -int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer) -{ - int peers_index; - int result = 0; - struct ipa_rm_resource *consumer; - int consumer_result; - enum ipa_rm_resource_state state; - - state = producer->resource.state; - switch (producer->resource.state) { - case IPA_RM_RELEASED: - goto bail; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - 
result = -EINPROGRESS; - goto bail; - default: - result = -EPERM; - goto bail; - } - - producer->pending_release = 0; - for (peers_index = 0; - peers_index < ipa_rm_peers_list_get_size( - producer->resource.peers_list); - peers_index++) { - consumer = ipa_rm_peers_list_get_resource(peers_index, - producer->resource.peers_list); - if (consumer) { - producer->pending_release++; - consumer_result = ipa_rm_resource_consumer_release( - (struct ipa_rm_resource_cons *)consumer, - producer->resource.max_bw, - true); - producer->pending_release--; - } - } - - if (producer->pending_release == 0) { - producer->resource.state = IPA_RM_RELEASED; - ipa_rm_perf_profile_change(producer->resource.name); - (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - producer->resource.name, - IPA_RM_RESOURCE_RELEASED, - true); - } -bail: - if (state != producer->resource.state) - IPA_RM_DBG("%s state changed %d->%d\n", - ipa_rm_resource_str(producer->resource.name), - state, - producer->resource.state); - - return result; -} - -static void ipa_rm_resource_producer_handle_cb( - struct ipa_rm_resource_prod *producer, - enum ipa_rm_event event) -{ - IPA_RM_DBG("%s state: %d event: %d pending_request: %d\n", - ipa_rm_resource_str(producer->resource.name), - producer->resource.state, - event, - producer->pending_request); - - switch (producer->resource.state) { - case IPA_RM_REQUEST_IN_PROGRESS: - if (event != IPA_RM_RESOURCE_GRANTED) - goto unlock_and_bail; - if (producer->pending_request > 0) { - producer->pending_request--; - if (producer->pending_request == 0) { - producer->resource.state = - IPA_RM_GRANTED; - ipa_rm_perf_profile_change( - producer->resource.name); - ipa_rm_resource_producer_notify_clients( - producer, - IPA_RM_RESOURCE_GRANTED, - false); - goto bail; - } - } - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (event != IPA_RM_RESOURCE_RELEASED) - goto unlock_and_bail; - if (producer->pending_release > 0) { - producer->pending_release--; - if (producer->pending_release == 0) { - 
producer->resource.state = - IPA_RM_RELEASED; - ipa_rm_perf_profile_change( - producer->resource.name); - ipa_rm_resource_producer_notify_clients( - producer, - IPA_RM_RESOURCE_RELEASED, - false); - goto bail; - } - } - break; - case IPA_RM_GRANTED: - case IPA_RM_RELEASED: - default: - goto unlock_and_bail; - } -unlock_and_bail: - IPA_RM_DBG("%s new state: %d\n", - ipa_rm_resource_str(producer->resource.name), - producer->resource.state); -bail: - return; -} - -/** - * ipa_rm_resource_consumer_handle_cb() - propagates resource - * notification to all dependent producers - * @consumer: [in] notifying resource - * - */ -void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, - enum ipa_rm_event event) -{ - int peers_index; - struct ipa_rm_resource *producer; - - if (!consumer) { - IPA_RM_ERR("invalid params\n"); - return; - } - IPA_RM_DBG("%s state: %d event: %d\n", - ipa_rm_resource_str(consumer->resource.name), - consumer->resource.state, - event); - - switch (consumer->resource.state) { - case IPA_RM_REQUEST_IN_PROGRESS: - if (event == IPA_RM_RESOURCE_RELEASED) - goto bail; - consumer->resource.state = IPA_RM_GRANTED; - ipa_rm_perf_profile_change(consumer->resource.name); - ipa_resume_resource(consumer->resource.name); - complete_all(&consumer->request_consumer_in_progress); - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (event == IPA_RM_RESOURCE_GRANTED) - goto bail; - consumer->resource.state = IPA_RM_RELEASED; - break; - case IPA_RM_GRANTED: - case IPA_RM_RELEASED: - default: - goto bail; - } - - for (peers_index = 0; - peers_index < ipa_rm_peers_list_get_size( - consumer->resource.peers_list); - peers_index++) { - producer = ipa_rm_peers_list_get_resource(peers_index, - consumer->resource.peers_list); - if (producer) - ipa_rm_resource_producer_handle_cb( - (struct ipa_rm_resource_prod *) - producer, - event); - } - - return; -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa_rm_resource_str(consumer->resource.name), - 
consumer->resource.state); -} - -/* - * ipa_rm_resource_set_perf_profile() - sets the performance profile to - * resource. - * - * @resource: [in] resource - * @profile: [in] profile to be set - * - * sets the profile to the given resource, In case the resource is - * granted, update bandwidth vote of the resource - */ -int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, - struct ipa_rm_perf_profile *profile) -{ - int peers_index; - struct ipa_rm_resource *peer; - - if (!resource || !profile) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (profile->max_supported_bandwidth_mbps == resource->max_bw) { - IPA_RM_DBG("same profile\n"); - return 0; - } - - if ((resource->type == IPA_RM_PRODUCER && - (resource->state == IPA_RM_GRANTED || - resource->state == IPA_RM_REQUEST_IN_PROGRESS)) || - resource->type == IPA_RM_CONSUMER) { - for (peers_index = 0; - peers_index < ipa_rm_peers_list_get_size( - resource->peers_list); - peers_index++) { - peer = ipa_rm_peers_list_get_resource(peers_index, - resource->peers_list); - if (!peer) - continue; - peer->needed_bw -= resource->max_bw; - peer->needed_bw += - profile->max_supported_bandwidth_mbps; - if (peer->state == IPA_RM_GRANTED) - ipa_rm_perf_profile_change(peer->name); - } - } - - resource->max_bw = profile->max_supported_bandwidth_mbps; - if (resource->state == IPA_RM_GRANTED) - ipa_rm_perf_profile_change(resource->name); - - return 0; -} - - -/* - * ipa_rm_resource_producer_print_stat() - print the - * resource status and all his dependencies - * - * @resource: [in] Resource resource - * @buff: [in] The buf used to print - * @size: [in] Buf size - * - * Returns: number of bytes used on success, negative on failure - */ -int ipa_rm_resource_producer_print_stat( - struct ipa_rm_resource *resource, - char *buf, - int size){ - - int i; - int nbytes; - int cnt = 0; - struct ipa_rm_resource *consumer; - - if (!buf || size < 0) - return -EINVAL; - - nbytes = scnprintf(buf + cnt, size - cnt, - 
ipa_rm_resource_str(resource->name)); - cnt += nbytes; - nbytes = scnprintf(buf + cnt, size - cnt, "["); - cnt += nbytes; - - switch (resource->state) { - case IPA_RM_RELEASED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Released] -> "); - cnt += nbytes; - break; - case IPA_RM_REQUEST_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Request In Progress] -> "); - cnt += nbytes; - break; - case IPA_RM_GRANTED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Granted] -> "); - cnt += nbytes; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Release In Progress] -> "); - cnt += nbytes; - break; - default: - return -EPERM; - } - - for (i = 0; i < resource->peers_list->max_peers; ++i) { - consumer = - ipa_rm_peers_list_get_resource( - i, - resource->peers_list); - if (consumer) { - nbytes = scnprintf(buf + cnt, size - cnt, - ipa_rm_resource_str(consumer->name)); - cnt += nbytes; - nbytes = scnprintf(buf + cnt, size - cnt, "["); - cnt += nbytes; - - switch (consumer->state) { - case IPA_RM_RELEASED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Released], "); - cnt += nbytes; - break; - case IPA_RM_REQUEST_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Request In Progress], "); - cnt += nbytes; - break; - case IPA_RM_GRANTED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Granted], "); - cnt += nbytes; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Release In Progress], "); - cnt += nbytes; - break; - default: - return -EPERM; - } - } - } - nbytes = scnprintf(buf + cnt, size - cnt, "\n"); - cnt += nbytes; - - return cnt; -} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h deleted file mode 100644 index 5b07cf9c837d..000000000000 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm_resource.h +++ /dev/null @@ -1,162 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_RESOURCE_H_ -#define _IPA_RM_RESOURCE_H_ - -#include -#include -#include "ipa_rm_peers_list.h" - -/** - * enum ipa_rm_resource_state - resource state - */ -enum ipa_rm_resource_state { - IPA_RM_RELEASED, - IPA_RM_REQUEST_IN_PROGRESS, - IPA_RM_GRANTED, - IPA_RM_RELEASE_IN_PROGRESS -}; - -/** - * enum ipa_rm_resource_type - IPA resource manager resource type - */ -enum ipa_rm_resource_type { - IPA_RM_PRODUCER, - IPA_RM_CONSUMER -}; - -/** - * struct ipa_rm_notification_info - notification information - * of IPA RM client - * @reg_params: registration parameters - * @explicit: registered explicitly by ipa2_rm_register() - * @link: link to the list of all registered clients information - */ -struct ipa_rm_notification_info { - struct ipa_rm_register_params reg_params; - bool explicit; - struct list_head link; -}; - -/** - * struct ipa_rm_resource - IPA RM resource - * @name: name identifying resource - * @type: type of resource (PRODUCER or CONSUMER) - * @floor_voltage: minimum voltage level for operation - * @max_bw: maximum bandwidth required for resource in Mbps - * @state: state of the resource - * @peers_list: list of the peers of the resource - */ -struct ipa_rm_resource { - enum ipa_rm_resource_name name; - enum ipa_rm_resource_type type; - enum ipa_voltage_level floor_voltage; - u32 max_bw; - u32 needed_bw; - enum ipa_rm_resource_state state; - struct ipa_rm_peers_list *peers_list; -}; - -/** - * struct ipa_rm_resource_cons - IPA RM consumer - * @resource: resource - * 
@usage_count: number of producers in GRANTED / REQUESTED state - * using this consumer - * @request_consumer_in_progress: when set, the consumer is during its request - * phase - * @request_resource: function which should be called to request resource - * from resource manager - * @release_resource: function which should be called to release resource - * from resource manager - * Add new fields after @resource only. - */ -struct ipa_rm_resource_cons { - struct ipa_rm_resource resource; - int usage_count; - struct completion request_consumer_in_progress; - int (*request_resource)(void); - int (*release_resource)(void); -}; - -/** - * struct ipa_rm_resource_prod - IPA RM producer - * @resource: resource - * @event_listeners: clients registered with this producer - * for notifications in resource state - * list Add new fields after @resource only. - */ -struct ipa_rm_resource_prod { - struct ipa_rm_resource resource; - struct list_head event_listeners; - int pending_request; - int pending_release; -}; - -int ipa_rm_resource_create( - struct ipa_rm_create_params *create_params, - struct ipa_rm_resource **resource); - -int ipa_rm_resource_delete(struct ipa_rm_resource *resource); - -int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params, - bool explicit); - -int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params); - -int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on); - -int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on); - -int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer); - -int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer); - -int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer, - u32 needed_bw, - bool inc_usage_count); - -int 
ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer, - u32 needed_bw, - bool dec_usage_count); - -int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, - struct ipa_rm_perf_profile *profile); - -void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, - enum ipa_rm_event event); - -void ipa_rm_resource_producer_notify_clients( - struct ipa_rm_resource_prod *producer, - enum ipa_rm_event event, - bool notify_registered_only); - -int ipa_rm_resource_producer_print_stat( - struct ipa_rm_resource *resource, - char *buf, - int size); - -int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, - enum ipa_rm_resource_state prev_state, - u32 needed_bw, - bool notify_completion); - -int ipa_rm_resource_consumer_release_work( - struct ipa_rm_resource_cons *consumer, - enum ipa_rm_resource_state prev_state, - bool notify_completion); - -#endif /* _IPA_RM_RESOURCE_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index 421b737ddfeb..794eaa32bf90 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -17,6 +17,7 @@ #include #include #include "ipa_i.h" +#include "../ipa_rm_i.h" #define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL) #define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL) @@ -5015,24 +5016,6 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB; api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping; api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping; - api_ctrl->ipa_rm_create_resource = ipa2_rm_create_resource; - api_ctrl->ipa_rm_delete_resource = ipa2_rm_delete_resource; - api_ctrl->ipa_rm_register = ipa2_rm_register; - api_ctrl->ipa_rm_deregister = ipa2_rm_deregister; - api_ctrl->ipa_rm_set_perf_profile = ipa2_rm_set_perf_profile; - api_ctrl->ipa_rm_add_dependency = ipa2_rm_add_dependency; - 
api_ctrl->ipa_rm_delete_dependency = ipa2_rm_delete_dependency; - api_ctrl->ipa_rm_request_resource = ipa2_rm_request_resource; - api_ctrl->ipa_rm_release_resource = ipa2_rm_release_resource; - api_ctrl->ipa_rm_notify_completion = ipa2_rm_notify_completion; - api_ctrl->ipa_rm_inactivity_timer_init = - ipa2_rm_inactivity_timer_init; - api_ctrl->ipa_rm_inactivity_timer_destroy = - ipa2_rm_inactivity_timer_destroy; - api_ctrl->ipa_rm_inactivity_timer_request_resource = - ipa2_rm_inactivity_timer_request_resource; - api_ctrl->ipa_rm_inactivity_timer_release_resource = - ipa2_rm_inactivity_timer_release_resource; api_ctrl->teth_bridge_init = ipa2_teth_bridge_init; api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect; api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect; @@ -5073,7 +5056,6 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain; api_ctrl->ipa_disable_apps_wan_cons_deaggr = ipa2_disable_apps_wan_cons_deaggr; - api_ctrl->ipa_rm_add_dependency_sync = ipa2_rm_add_dependency_sync; api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev; api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info; api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel; diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index dd5ca0a8463d..de703bf6b582 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -1066,7 +1066,7 @@ static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev) send: /* IPA_RM checking start */ - ret = ipa2_rm_inactivity_timer_request_resource( + ret = ipa_rm_inactivity_timer_request_resource( IPA_RM_RESOURCE_WWAN_0_PROD); if (ret == -EINPROGRESS) { netif_stop_queue(dev); @@ -1099,7 +1099,7 @@ send: dev->stats.tx_bytes += skb->len; ret = NETDEV_TX_OK; out: - ipa2_rm_inactivity_timer_release_resource( + ipa_rm_inactivity_timer_release_resource( IPA_RM_RESOURCE_WWAN_0_PROD); return ret; } 
@@ -1151,7 +1151,7 @@ static void apps_ipa_tx_complete_notify(void *priv, } __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); dev_kfree_skb_any(skb); - ipa2_rm_inactivity_timer_release_resource( + ipa_rm_inactivity_timer_release_resource( IPA_RM_RESOURCE_WWAN_0_PROD); } @@ -1679,9 +1679,9 @@ static void q6_prod_rm_request_resource(struct work_struct *work) { int ret = 0; - ret = ipa2_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0 && ret != -EINPROGRESS) { - IPAWANERR("%s: ipa2_rm_request_resource failed %d\n", __func__, + IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__, ret); return; } @@ -1698,9 +1698,9 @@ static void q6_prod_rm_release_resource(struct work_struct *work) { int ret = 0; - ret = ipa2_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0 && ret != -EINPROGRESS) { - IPAWANERR("%s: ipa2_rm_release_resource failed %d\n", __func__, + IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__, ret); return; } @@ -1744,44 +1744,44 @@ static int q6_initialize_rm(void) memset(&create_params, 0, sizeof(create_params)); create_params.name = IPA_RM_RESOURCE_Q6_PROD; create_params.reg_params.notify_cb = &q6_rm_notify_cb; - result = ipa2_rm_create_resource(&create_params); + result = ipa_rm_create_resource(&create_params); if (result) goto create_rsrc_err1; memset(&create_params, 0, sizeof(create_params)); create_params.name = IPA_RM_RESOURCE_Q6_CONS; create_params.release_resource = &q6_rm_release_resource; create_params.request_resource = &q6_rm_request_resource; - result = ipa2_rm_create_resource(&create_params); + result = ipa_rm_create_resource(&create_params); if (result) goto create_rsrc_err2; /* add dependency*/ - result = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); if (result) goto add_dpnd_err; /* setup Performance 
profile */ memset(&profile, 0, sizeof(profile)); profile.max_supported_bandwidth_mbps = 100; - result = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, &profile); if (result) goto set_perf_err; - result = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, &profile); if (result) goto set_perf_err; return result; set_perf_err: - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); add_dpnd_err: - result = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); if (result < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_CONS, result); create_rsrc_err2: - result = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); if (result < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, result); @@ -1794,17 +1794,17 @@ void q6_deinitialize_rm(void) { int ret; - ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); if (ret < 0) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, ret); - ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_CONS, ret); - ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, ret); @@ -2013,13 +2013,13 @@ static int ipa_wwan_probe(struct platform_device *pdev) ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; ipa_rm_params.reg_params.user_data = dev; 
ipa_rm_params.reg_params.notify_cb = ipa_rm_notify; - ret = ipa2_rm_create_resource(&ipa_rm_params); + ret = ipa_rm_create_resource(&ipa_rm_params); if (ret) { pr_err("%s: unable to create resourse %d in IPA RM\n", __func__, IPA_RM_RESOURCE_WWAN_0_PROD); goto create_rsrc_err; } - ret = ipa2_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_INACTIVITY_TIMER); if (ret) { pr_err("%s: ipa rm timer init failed %d on resourse %d\n", @@ -2027,14 +2027,14 @@ static int ipa_wwan_probe(struct platform_device *pdev) goto timer_init_err; } /* add dependency */ - ret = ipa2_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret) goto add_dpnd_err; /* setup Performance profile */ memset(&profile, 0, sizeof(profile)); profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; - ret = ipa2_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, &profile); if (ret) goto set_perf_err; @@ -2066,20 +2066,20 @@ static int ipa_wwan_probe(struct platform_device *pdev) config_err: unregister_netdev(ipa_netdevs[0]); set_perf_err: - ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, ret); add_dpnd_err: - ret = ipa2_rm_inactivity_timer_destroy( + ret = ipa_rm_inactivity_timer_destroy( IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ if (ret) - IPAWANERR("Error ipa2_rm_inactivity_timer_destroy %d, ret=%d\n", + IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); timer_init_err: - ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret) IPAWANERR("Error 
deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); @@ -2113,18 +2113,18 @@ static int ipa_wwan_remove(struct platform_device *pdev) ipa_to_apps_hdl = -1; mutex_unlock(&ipa_to_apps_pipe_handle_guard); unregister_netdev(ipa_netdevs[0]); - ret = ipa2_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret < 0) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, ret); - ret = ipa2_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret < 0) IPAWANERR( - "Error ipa2_rm_inactivity_timer_destroy resource %d, ret=%d\n", + "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); - ret = ipa2_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); @@ -2175,7 +2175,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev) /* Make sure that there is no Tx operation ongoing */ netif_tx_lock_bh(netdev); - ipa2_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); netif_tx_unlock_bh(netdev); IPAWANDBG("Exit\n"); diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c index a7459bc27df0..da68be2ed69b 100644 --- a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c +++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -113,18 +113,18 @@ int ipa2_teth_bridge_init(struct teth_bridge_init_params *params) params->skip_ep_cfg = true; /* Build dependency graph */ - res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res < 0 && res != -EINPROGRESS) { - TETH_ERR("ipa2_rm_add_dependency() failed.\n"); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); goto bail; } - res = ipa2_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_CONS); if (res < 0 && res != -EINPROGRESS) { - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); - TETH_ERR("ipa2_rm_add_dependency() failed.\n"); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); goto bail; } @@ -142,9 +142,9 @@ bail: int ipa2_teth_bridge_disconnect(enum ipa_client_type client) { TETH_DBG_FUNC_ENTRY(); - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); - ipa2_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_CONS); TETH_DBG_FUNC_EXIT(); diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile index 2ba86bbfd80c..9653dd6d27f2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/Makefile +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -2,8 +2,7 @@ obj-$(CONFIG_IPA3) += ipahal/ obj-$(CONFIG_IPA3) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ - ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o ipa_rm.o \ - ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ + ipa_utils.o ipa_nat.o ipa_intf.o 
teth_bridge.o ipa_interrupts.o \ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index dcec51d4f3c7..1747460bbd86 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -39,7 +39,7 @@ #include #define IPA_SUBSYSTEM_NAME "ipa_fws" #include "ipa_i.h" -#include "ipa_rm_i.h" +#include "../ipa_rm_i.h" #include "ipahal/ipahal.h" #define CREATE_TRACE_POINTS @@ -1172,7 +1172,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - retval = ipa3_rm_add_dependency(rm_depend.resource_name, + retval = ipa_rm_add_dependency(rm_depend.resource_name, rm_depend.depends_on_name); break; case IPA_IOC_RM_DEL_DEPENDENCY: @@ -1181,7 +1181,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - retval = ipa3_rm_delete_dependency(rm_depend.resource_name, + retval = ipa_rm_delete_dependency(rm_depend.resource_name, rm_depend.depends_on_name); break; case IPA_IOC_GENERATE_FLT_EQ: @@ -3394,7 +3394,7 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt, } else { resource = ipa3_get_rm_resource_from_ep(i); res = - ipa3_rm_request_resource_with_timer(resource); + ipa_rm_request_resource_with_timer(resource); if (res == -EPERM && IPA_CLIENT_IS_CONS( ipa3_ctx->ep[i].client)) { @@ -3479,14 +3479,14 @@ int ipa3_create_apps_resource(void) ipa3_apps_cons_request_resource; apps_cons_create_params.release_resource = ipa3_apps_cons_release_resource; - result = ipa3_rm_create_resource(&apps_cons_create_params); + result = ipa_rm_create_resource(&apps_cons_create_params); if (result) { - IPAERR("ipa3_rm_create_resource failed\n"); + IPAERR("ipa_rm_create_resource failed\n"); return result; } profile.max_supported_bandwidth_mbps = 
IPA_APPS_MAX_BW_IN_MBPS; - ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); return result; } @@ -3760,8 +3760,8 @@ fail_setup_apps_pipes: else sps_deregister_bam_device(ipa3_ctx->bam_handle); fail_register_device: - ipa3_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); - ipa3_rm_exit(); + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); + ipa_rm_exit(); cdev_del(&ipa3_ctx->cdev); device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num); unregister_chrdev_region(ipa3_ctx->dev_num, 1); @@ -4342,7 +4342,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock); /* Initialize IPA RM (resource manager) */ - result = ipa3_rm_initialize(); + result = ipa_rm_initialize(); if (result) { IPAERR("RM initialization failed (%d)\n", -result); result = -ENODEV; @@ -4397,9 +4397,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, return 0; fail_ipa_init_interrupts: - ipa3_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); fail_create_apps_resource: - ipa3_rm_exit(); + ipa_rm_exit(); fail_ipa_rm_init: fail_nat_dev_add: cdev_del(&ipa3_ctx->cdev); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 6c639e1c7a1a..96832869a496 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -16,7 +16,7 @@ #include #include #include "ipa_i.h" -#include "ipa_rm_i.h" +#include "../ipa_rm_i.h" #define IPA_MAX_MSG_LEN 4096 #define IPA_DBG_MAX_RULE_IN_TBL 128 @@ -1470,7 +1470,7 @@ static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf, { int result, nbytes, cnt = 0; - result = ipa3_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); + result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); if (result < 0) { nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, "Error in printing RM stat 
%d\n", result); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index a7ba4ca49ecf..c2e93adb2a17 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1944,46 +1944,6 @@ int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); */ int ipa3_uc_dereg_rdyCB(void); -/* - * Resource manager - */ -int ipa3_rm_create_resource(struct ipa_rm_create_params *create_params); - -int ipa3_rm_delete_resource(enum ipa_rm_resource_name resource_name); - -int ipa3_rm_register(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - -int ipa3_rm_deregister(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params); - -int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile); - -int ipa3_rm_add_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa3_rm_delete_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa3_rm_request_resource(enum ipa_rm_resource_name resource_name); - -int ipa3_rm_release_resource(enum ipa_rm_resource_name resource_name); - -int ipa3_rm_notify_completion(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name); - -int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, - unsigned long msecs); - -int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name); - -int ipa3_rm_inactivity_timer_request_resource( - enum ipa_rm_resource_name resource_name); - -int ipa3_rm_inactivity_timer_release_resource( - enum ipa_rm_resource_name resource_name); - /* * Tethering bridge (Rmnet / MBIM) */ @@ -2271,8 +2231,6 @@ int ipa3_ap_suspend(struct device *dev); int ipa3_ap_resume(struct device *dev); int ipa3_init_interrupts(void); struct iommu_domain *ipa3_get_smmu_domain(void); -int ipa3_rm_add_dependency_sync(enum 
ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple); @@ -2294,7 +2252,6 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, struct ipa3_debugfs_rt_entry entry[], int *num_entry); int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib); -const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name); int ipa3_restore_suspend_handler(void); int ipa3_inject_dma_task_for_gsi(void); int ipa3_uc_panic_notifier(struct notifier_block *this, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index a3058a010354..ea8e2b9ea38e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -664,7 +664,7 @@ static int ipa3_mhi_set_state(enum ipa3_mhi_state new_state) ipa3_mhi_ctx->trigger_wakeup = false; if (ipa3_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) { - ipa3_rm_notify_completion( + ipa_rm_notify_completion( IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa3_mhi_ctx->rm_cons_state = @@ -691,7 +691,7 @@ static int ipa3_mhi_set_state(enum ipa3_mhi_state new_state) ipa3_mhi_ctx->wakeup_notified = false; if (ipa3_mhi_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) { - ipa3_rm_notify_completion( + ipa_rm_notify_completion( IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa3_mhi_ctx->rm_cons_state = @@ -848,7 +848,7 @@ static int ipa3_mhi_request_prod(void) reinit_completion(&ipa3_mhi_ctx->rm_prod_granted_comp); IPA_MHI_DBG("requesting mhi prod\n"); - res = ipa3_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); + res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); if (res) { if (res != -EINPROGRESS) { IPA_MHI_ERR("failed to request mhi prod %d\n", res); @@ -875,7 +875,7 @@ static 
int ipa3_mhi_release_prod(void) IPA_MHI_FUNC_ENTRY(); - res = ipa3_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); + res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); IPA_MHI_FUNC_EXIT(); return res; @@ -1743,7 +1743,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params) mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD; mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS; mhi_prod_params.reg_params.notify_cb = ipa3_mhi_rm_prod_notify; - res = ipa3_rm_create_resource(&mhi_prod_params); + res = ipa_rm_create_resource(&mhi_prod_params); if (res) { IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n"); goto fail_create_rm_prod; @@ -1755,7 +1755,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params) mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS; mhi_cons_params.request_resource = ipa3_mhi_rm_cons_request; mhi_cons_params.release_resource = ipa3_mhi_rm_cons_release; - res = ipa3_rm_create_resource(&mhi_cons_params); + res = ipa_rm_create_resource(&mhi_cons_params); if (res) { IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n"); goto fail_create_rm_cons; @@ -1779,7 +1779,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params) return 0; fail_create_rm_cons: - ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); + ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); fail_create_rm_prod: destroy_workqueue(ipa3_mhi_ctx->wq); fail_create_wq: @@ -1845,14 +1845,14 @@ int ipa3_mhi_start(struct ipa_mhi_start_params *params) ipa3_mhi_ctx->event_context_array_addr); /* Add MHI <-> Q6 dependencies to IPA RM */ - res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res && res != -EINPROGRESS) { IPA_MHI_ERR("failed to add dependency %d\n", res); goto fail_add_mhi_q6_dep; } - res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS); if (res && res != -EINPROGRESS) { IPA_MHI_ERR("failed to add dependency 
%d\n", res); @@ -1910,10 +1910,10 @@ int ipa3_mhi_start(struct ipa_mhi_start_params *params) fail_init_engine: ipa3_mhi_release_prod(); fail_request_prod: - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS); fail_add_q6_mhi_dep: - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS); fail_add_mhi_q6_dep: ipa3_mhi_set_state(IPA_MHI_STATE_INITIALIZED); @@ -2712,7 +2712,7 @@ int ipa3_mhi_resume(void) } dl_channel_resumed = true; - ipa3_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_MHI_CONS); ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED; } @@ -2871,7 +2871,7 @@ void ipa3_mhi_destroy(void) IPA_MHI_SUSPEND_SLEEP_MAX); IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n"); - res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_MHI_CONS); if (res) { IPAERR("Error deleting dependency %d->%d, res=%d\n", @@ -2879,7 +2879,7 @@ void ipa3_mhi_destroy(void) goto fail; } IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n"); - res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res) { IPAERR("Error deleting dependency %d->%d, res=%d\n", @@ -2888,14 +2888,14 @@ void ipa3_mhi_destroy(void) } } - res = ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); + res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD); if (res) { IPAERR("Error deleting resource %d, res=%d\n", IPA_RM_RESOURCE_MHI_PROD, res); goto fail; } - res = ipa3_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS); + res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS); if (res) { IPAERR("Error deleting resource %d, res=%d\n", IPA_RM_RESOURCE_MHI_CONS, res); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c 
b/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c deleted file mode 100644 index 662d2699a39a..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c +++ /dev/null @@ -1,1039 +0,0 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include "ipa_i.h" -#include "ipa_rm_dependency_graph.h" -#include "ipa_rm_i.h" - -static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = { - __stringify(IPA_RM_RESOURCE_Q6_PROD), - __stringify(IPA_RM_RESOURCE_USB_PROD), - __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD), - __stringify(IPA_RM_RESOURCE_HSIC_PROD), - __stringify(IPA_RM_RESOURCE_STD_ECM_PROD), - __stringify(IPA_RM_RESOURCE_RNDIS_PROD), - __stringify(IPA_RM_RESOURCE_WWAN_0_PROD), - __stringify(IPA_RM_RESOURCE_WLAN_PROD), - __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD), - __stringify(IPA_RM_RESOURCE_MHI_PROD), - __stringify(IPA_RM_RESOURCE_Q6_CONS), - __stringify(IPA_RM_RESOURCE_USB_CONS), - __stringify(IPA_RM_RESOURCE_USB_DPL_CONS), - __stringify(IPA_RM_RESOURCE_HSIC_CONS), - __stringify(IPA_RM_RESOURCE_WLAN_CONS), - __stringify(IPA_RM_RESOURCE_APPS_CONS), - __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS), - __stringify(IPA_RM_RESOURCE_MHI_CONS), -}; - -struct ipa3_rm_profile_vote_type { - enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX]; - enum ipa_voltage_level curr_volt; - u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX]; - u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX]; - u32 curr_bw; -}; - -struct ipa3_rm_context_type { - struct ipa3_rm_dep_graph *dep_graph; - struct 
workqueue_struct *ipa_rm_wq; - spinlock_t ipa_rm_lock; - struct ipa3_rm_profile_vote_type prof_vote; -}; -static struct ipa3_rm_context_type *ipa3_rm_ctx; - -struct ipa3_rm_notify_ipa_work_type { - struct work_struct work; - enum ipa_voltage_level volt; - u32 bandwidth_mbps; -}; - -/** - * ipa3_rm_create_resource() - create resource - * @create_params: [in] parameters needed - * for resource initialization - * - * Returns: 0 on success, negative on failure - * - * This function is called by IPA RM client to initialize client's resources. - * This API should be called before any other IPA RM API on a given resource - * name. - * - */ -int ipa3_rm_create_resource(struct ipa_rm_create_params *create_params) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - if (!create_params) { - IPA_RM_ERR("invalid args\n"); - return -EINVAL; - } - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(create_params->name)); - - if (create_params->floor_voltage < 0 || - create_params->floor_voltage >= IPA_VOLTAGE_MAX) { - IPA_RM_ERR("invalid voltage %d\n", - create_params->floor_voltage); - return -EINVAL; - } - - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - create_params->name, - &resource) == 0) { - IPA_RM_ERR("resource already exists\n"); - result = -EEXIST; - goto bail; - } - result = ipa3_rm_resource_create(create_params, - &resource); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_create() failed\n"); - goto bail; - } - result = ipa3_rm_dep_graph_add(ipa3_rm_ctx->dep_graph, resource); - if (result) { - IPA_RM_ERR("ipa3_rm_dep_graph_add() failed\n"); - ipa3_rm_resource_delete(resource); - goto bail; - } -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_delete_resource() - delete resource - * @resource_name: name of resource to be deleted - * - * Returns: 0 on success, negative on failure - * - * 
This function is called by IPA RM client to delete client's resources. - * - */ -int ipa3_rm_delete_resource(enum ipa_rm_resource_name resource_name) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name)); - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exist\n"); - result = -EINVAL; - goto bail; - } - result = ipa3_rm_resource_delete(resource); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_delete() failed\n"); - goto bail; - } - result = ipa3_rm_dep_graph_remove(ipa3_rm_ctx->dep_graph, - resource_name); - if (result) { - IPA_RM_ERR("ipa3_rm_dep_graph_remove() failed\n"); - goto bail; - } -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_add_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case client registered with IPA RM - */ -int ipa3_rm_add_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - unsigned long flags; - int result; - - IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name), - ipa3_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - result = ipa3_rm_dep_graph_add_dependency( - ipa3_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources - * in a synchronized fashion. 
In case a producer resource is in GRANTED state - * and the newly added consumer resource is in RELEASED state, the consumer - * entity will be requested and the function will block until the consumer - * is granted. - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: May block. See documentation above. - */ -int ipa3_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - int result; - struct ipa_rm_resource *consumer; - unsigned long time; - unsigned long flags; - - IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name), - ipa3_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - result = ipa3_rm_dep_graph_add_dependency( - ipa3_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (result == -EINPROGRESS) { - ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - depends_on_name, - &consumer); - IPA_RM_DBG("%s waits for GRANT of %s.\n", - ipa3_rm_resource_str(resource_name), - ipa3_rm_resource_str(depends_on_name)); - time = wait_for_completion_timeout( - &((struct ipa3_rm_resource_cons *)consumer)-> - request_consumer_in_progress, - HZ); - result = 0; - if (!time) { - IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.", - ipa3_rm_resource_str(depends_on_name)); - result = -ETIME; - } - IPA_RM_DBG("%s waited for %s GRANT %lu time.\n", - ipa3_rm_resource_str(resource_name), - ipa3_rm_resource_str(depends_on_name), - time); - } - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_delete_dependency() - create dependency - * between 2 resources - * @resource_name: name of dependent resource - * @depends_on_name: name of its dependency - * - * Returns: 0 on success, negative on failure - * - * Side effects: IPA_RM_RESORCE_GRANTED could be generated - * in case 
client registered with IPA RM - */ -int ipa3_rm_delete_dependency(enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - unsigned long flags; - int result; - - IPA_RM_DBG("%s -> %s\n", ipa3_rm_resource_str(resource_name), - ipa3_rm_resource_str(depends_on_name)); - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - result = ipa3_rm_dep_graph_delete_dependency( - ipa3_rm_ctx->dep_graph, - resource_name, - depends_on_name); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_request_resource() - request resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED - * on successful completion of this operation. - */ -int ipa3_rm_request_resource(enum ipa_rm_resource_name resource_name) -{ - struct ipa_rm_resource *resource; - unsigned long flags; - int result; - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_producer_request( - (struct ipa3_rm_resource_prod *)resource); - -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -void ipa3_delayed_release_work_func(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa3_rm_delayed_release_work_type *rwork = container_of( - to_delayed_work(work), - struct ipa3_rm_delayed_release_work_type, - work); - - if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - kfree(rwork); - return; - } - 
spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - rwork->resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - goto bail; - } - - ipa3_rm_resource_consumer_release( - (struct ipa3_rm_resource_cons *)resource, rwork->needed_bw, - rwork->dec_usage_count); - -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - kfree(rwork); - -} - -/** - * ipa3_rm_request_resource_with_timer() - requests the specified consumer - * resource and releases it after 1 second - * @resource_name: name of the requested resource - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa3_rm_delayed_release_work_type *release_work; - int result; - - if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - return -EINVAL; - } - - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_consumer_request( - (struct ipa3_rm_resource_cons *)resource, 0, false, true); - if (result != 0 && result != -EINPROGRESS) { - IPA_RM_ERR("consumer request returned error %d\n", result); - result = -EPERM; - goto bail; - } - - release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC); - if (!release_work) { - result = -ENOMEM; - goto bail; - } - release_work->resource_name = resource->name; - release_work->needed_bw = 0; - release_work->dec_usage_count = false; - INIT_DELAYED_WORK(&release_work->work, ipa3_delayed_release_work_func); - schedule_delayed_work(&release_work->work, - msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC)); - result = 0; -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - - return 
result; -} - -/** - * ipa3_rm_release_resource() - release resource - * @resource_name: [in] name of the requested resource - * - * Returns: 0 on success, negative on failure - * - * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED - * on successful completion of this operation. - */ -int ipa3_rm_release_resource(enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - int result; - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_producer_release( - (struct ipa3_rm_resource_prod *)resource); - -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -/** - * ipa3_rm_register() - register for event - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided later in ipa3_rm_deregister() call. 
- */ -int ipa3_rm_register(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name)); - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_producer_register( - (struct ipa3_rm_resource_prod *)resource, - reg_params, - true); -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_deregister() - cancel the registration - * @resource_name: resource name - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Registration parameters provided here should be the same - * as provided in ipa3_rm_register() call. 
- */ -int ipa3_rm_deregister(enum ipa_rm_resource_name resource_name, - struct ipa_rm_register_params *reg_params) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name)); - - if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { - IPA_RM_ERR("can be called on PROD only\n"); - return -EINVAL; - } - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_producer_deregister( - (struct ipa3_rm_resource_prod *)resource, - reg_params); -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_set_perf_profile() - set performance profile - * @resource_name: resource name - * @profile: [in] profile information. - * - * Returns: 0 on success, negative on failure - * - * Set resource performance profile. - * Updates IPA driver if performance level changed. 
- */ -int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, - struct ipa_rm_perf_profile *profile) -{ - int result; - unsigned long flags; - struct ipa_rm_resource *resource; - - IPADBG("resource: %s ", ipa3_rm_resource_str(resource_name)); - if (profile) - IPADBG("BW: %d\n", profile->max_supported_bandwidth_mbps); - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name)); - - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - result = -EPERM; - goto bail; - } - result = ipa3_rm_resource_set_perf_profile(resource, profile); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_set_perf_profile failed %d\n", - result); - goto bail; - } - - result = 0; -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_notify_completion() - - * consumer driver notification for - * request_resource / release_resource operations - * completion - * @event: notified event - * @resource_name: resource name - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_notify_completion(enum ipa_rm_event event, - enum ipa_rm_resource_name resource_name) -{ - int result; - - IPA_RM_DBG("event %d on %s\n", event, - ipa3_rm_resource_str(resource_name)); - if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { - IPA_RM_ERR("can be called on CONS only\n"); - result = -EINVAL; - goto bail; - } - ipa3_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB, - resource_name, - event, - false); - result = 0; -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -static void ipa3_rm_wq_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa3_rm_wq_work_type *ipa_rm_work = - container_of(work, - struct ipa3_rm_wq_work_type, - work); - IPA_RM_DBG("%s cmd=%d event=%d notify_registered_only=%d\n", - 
ipa3_rm_resource_str(ipa_rm_work->resource_name), - ipa_rm_work->wq_cmd, - ipa_rm_work->event, - ipa_rm_work->notify_registered_only); - switch (ipa_rm_work->wq_cmd) { - case IPA_RM_WQ_NOTIFY_PROD: - if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not PROD\n"); - goto free_work; - } - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, - flags); - goto free_work; - } - ipa3_rm_resource_producer_notify_clients( - (struct ipa3_rm_resource_prod *)resource, - ipa_rm_work->event, - ipa_rm_work->notify_registered_only); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - break; - case IPA_RM_WQ_NOTIFY_CONS: - break; - case IPA_RM_WQ_RESOURCE_CB: - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, - flags); - goto free_work; - } - ipa3_rm_resource_consumer_handle_cb( - (struct ipa3_rm_resource_cons *)resource, - ipa_rm_work->event); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - break; - default: - break; - } - -free_work: - kfree((void *) work); -} - -static void ipa3_rm_wq_resume_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa3_rm_wq_suspend_resume_work_type *ipa_rm_work = - container_of(work, - struct ipa3_rm_wq_suspend_resume_work_type, - work); - IPA_RM_DBG("resume work handler: %s", - ipa3_rm_resource_str(ipa_rm_work->resource_name)); - - if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not CONS\n"); - return; - } - IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa3_rm_resource_str( - ipa_rm_work->resource_name)); 
- spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str( - ipa_rm_work->resource_name)); - goto bail; - } - ipa3_rm_resource_consumer_request_work( - (struct ipa3_rm_resource_cons *)resource, - ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); -bail: - kfree(ipa_rm_work); -} - - -static void ipa3_rm_wq_suspend_handler(struct work_struct *work) -{ - unsigned long flags; - struct ipa_rm_resource *resource; - struct ipa3_rm_wq_suspend_resume_work_type *ipa_rm_work = - container_of(work, - struct ipa3_rm_wq_suspend_resume_work_type, - work); - IPA_RM_DBG("suspend work handler: %s", - ipa3_rm_resource_str(ipa_rm_work->resource_name)); - - if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { - IPA_RM_ERR("resource is not CONS\n"); - return; - } - ipa3_suspend_resource_sync(ipa_rm_work->resource_name); - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - ipa_rm_work->resource_name, - &resource) != 0){ - IPA_RM_ERR("resource does not exists\n"); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - return; - } - ipa3_rm_resource_consumer_release_work( - (struct ipa3_rm_resource_cons *)resource, - ipa_rm_work->prev_state, - true); - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - - kfree(ipa_rm_work); -} - -/** - * ipa3_rm_wq_send_cmd() - send a command for deferred work - * @wq_cmd: command that should be executed - * @resource_name: resource on which command should be executed - * @notify_registered_only: notify only clients registered by - * ipa3_rm_register() - * - * Returns: 0 on success, negative otherwise - */ -int ipa3_rm_wq_send_cmd(enum 
ipa3_rm_wq_cmd wq_cmd, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_event event, - bool notify_registered_only) -{ - int result = -ENOMEM; - struct ipa3_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC); - - if (work) { - INIT_WORK((struct work_struct *)work, ipa3_rm_wq_handler); - work->wq_cmd = wq_cmd; - work->resource_name = resource_name; - work->event = event; - work->notify_registered_only = notify_registered_only; - result = queue_work(ipa3_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} - -int ipa3_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, - enum ipa3_rm_resource_state prev_state, - u32 needed_bw) -{ - int result = -ENOMEM; - struct ipa3_rm_wq_suspend_resume_work_type *work = - kzalloc(sizeof(*work), GFP_ATOMIC); - if (work) { - INIT_WORK((struct work_struct *)work, - ipa3_rm_wq_suspend_handler); - work->resource_name = resource_name; - work->prev_state = prev_state; - work->needed_bw = needed_bw; - result = queue_work(ipa3_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} - -int ipa3_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, - enum ipa3_rm_resource_state prev_state, - u32 needed_bw) -{ - int result = -ENOMEM; - struct ipa3_rm_wq_suspend_resume_work_type *work = - kzalloc(sizeof(*work), GFP_ATOMIC); - if (work) { - INIT_WORK((struct work_struct *)work, - ipa3_rm_wq_resume_handler); - work->resource_name = resource_name; - work->prev_state = prev_state; - work->needed_bw = needed_bw; - result = queue_work(ipa3_rm_ctx->ipa_rm_wq, - (struct work_struct *)work); - } else { - IPA_RM_ERR("no mem\n"); - } - - return result; -} -/** - * ipa3_rm_initialize() - initialize IPA RM component - * - * Returns: 0 on success, negative otherwise - */ -int ipa3_rm_initialize(void) -{ - int result; - - ipa3_rm_ctx = kzalloc(sizeof(*ipa3_rm_ctx), GFP_KERNEL); - if (!ipa3_rm_ctx) { - 
IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - ipa3_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq"); - if (!ipa3_rm_ctx->ipa_rm_wq) { - IPA_RM_ERR("create workqueue failed\n"); - result = -ENOMEM; - goto create_wq_fail; - } - result = ipa3_rm_dep_graph_create(&(ipa3_rm_ctx->dep_graph)); - if (result) { - IPA_RM_ERR("create dependency graph failed\n"); - goto graph_alloc_fail; - } - spin_lock_init(&ipa3_rm_ctx->ipa_rm_lock); - IPA_RM_DBG("SUCCESS\n"); - - return 0; -graph_alloc_fail: - destroy_workqueue(ipa3_rm_ctx->ipa_rm_wq); -create_wq_fail: - kfree(ipa3_rm_ctx); -bail: - return result; -} - -/** - * ipa3_rm_stat() - print RM stat - * @buf: [in] The user buff used to print - * @size: [in] The size of buf - * Returns: number of bytes used on success, negative on failure - * - * This function is called by ipa_debugfs in order to receive - * a full picture of the current state of the RM - */ - -int ipa3_rm_stat(char *buf, int size) -{ - unsigned long flags; - int i, cnt = 0, result = EINVAL; - struct ipa_rm_resource *resource = NULL; - - if (!buf || size < 0) - return result; - - spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags); - for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) { - result = ipa3_rm_dep_graph_get_resource( - ipa3_rm_ctx->dep_graph, - i, - &resource); - if (!result) { - result = ipa3_rm_resource_producer_print_stat( - resource, buf + cnt, - size-cnt); - if (result < 0) - goto bail; - cnt += result; - } - } - result = cnt; -bail: - spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags); - - return result; -} - -/** - * ipa3_rm_resource_str() - returns string that represent the resource - * @resource_name: [in] resource name - */ -const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name) -{ - if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX) - return "INVALID RESOURCE"; - - return ipa3_resource_name_to_str[resource_name]; -}; - -static void ipa3_rm_perf_profile_notify_to_ipa_work(struct 
work_struct *work) -{ - struct ipa3_rm_notify_ipa_work_type *notify_work = container_of(work, - struct ipa3_rm_notify_ipa_work_type, - work); - int res; - - IPA_RM_DBG("calling to IPA driver. voltage %d bandwidth %d\n", - notify_work->volt, notify_work->bandwidth_mbps); - - res = ipa3_set_required_perf_profile(notify_work->volt, - notify_work->bandwidth_mbps); - if (res) { - IPA_RM_ERR("ipa3_set_required_perf_profile failed %d\n", res); - goto bail; - } - - IPA_RM_DBG("IPA driver notified\n"); -bail: - kfree(notify_work); -} - -static void ipa3_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt, - u32 bandwidth) -{ - struct ipa3_rm_notify_ipa_work_type *work; - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - IPA_RM_ERR("no mem\n"); - return; - } - - INIT_WORK(&work->work, ipa3_rm_perf_profile_notify_to_ipa_work); - work->volt = volt; - work->bandwidth_mbps = bandwidth; - queue_work(ipa3_rm_ctx->ipa_rm_wq, &work->work); -} - -/** - * ipa3_rm_perf_profile_change() - change performance profile vote for resource - * @resource_name: [in] resource name - * - * change bandwidth and voltage vote based on resource state. 
- */ -void ipa3_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) -{ - enum ipa_voltage_level old_volt; - u32 *bw_ptr; - u32 old_bw; - struct ipa_rm_resource *resource; - int i; - u32 sum_bw_prod = 0; - u32 sum_bw_cons = 0; - - IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name)); - - if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph, - resource_name, - &resource) != 0) { - IPA_RM_ERR("resource does not exists\n"); - WARN_ON(1); - return; - } - - old_volt = ipa3_rm_ctx->prof_vote.curr_volt; - old_bw = ipa3_rm_ctx->prof_vote.curr_bw; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) { - bw_ptr = &ipa3_rm_ctx->prof_vote.bw_prods[resource_name]; - } else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { - bw_ptr = &ipa3_rm_ctx->prof_vote.bw_cons[ - resource_name - IPA_RM_RESOURCE_PROD_MAX]; - } else { - IPAERR("Invalid resource_name\n"); - return; - } - - switch (resource->state) { - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - IPA_RM_DBG("max_bw = %d, needed_bw = %d\n", - resource->max_bw, resource->needed_bw); - *bw_ptr = min(resource->max_bw, resource->needed_bw); - ipa3_rm_ctx->prof_vote.volt[resource_name] = - resource->floor_voltage; - break; - - case IPA_RM_RELEASE_IN_PROGRESS: - case IPA_RM_RELEASED: - *bw_ptr = 0; - ipa3_rm_ctx->prof_vote.volt[resource_name] = 0; - break; - - default: - IPA_RM_ERR("unknown state %d\n", resource->state); - WARN_ON(1); - return; - } - IPA_RM_DBG("resource bandwidth: %d voltage: %d\n", *bw_ptr, - resource->floor_voltage); - - ipa3_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED; - for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { - if (ipa3_rm_ctx->prof_vote.volt[i] > - ipa3_rm_ctx->prof_vote.curr_volt) { - ipa3_rm_ctx->prof_vote.curr_volt = - ipa3_rm_ctx->prof_vote.volt[i]; - } - } - - for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++) - sum_bw_prod += ipa3_rm_ctx->prof_vote.bw_prods[i]; - - for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++) - sum_bw_cons += ipa3_rm_ctx->prof_vote.bw_cons[i]; - - 
IPA_RM_DBG("all prod bandwidth: %d all cons bandwidth: %d\n", - sum_bw_prod, sum_bw_cons); - ipa3_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons); - - if (ipa3_rm_ctx->prof_vote.curr_volt == old_volt && - ipa3_rm_ctx->prof_vote.curr_bw == old_bw) { - IPA_RM_DBG("same voting\n"); - return; - } - - IPA_RM_DBG("new voting: voltage %d bandwidth %d\n", - ipa3_rm_ctx->prof_vote.curr_volt, - ipa3_rm_ctx->prof_vote.curr_bw); - - ipa3_rm_perf_profile_notify_to_ipa(ipa3_rm_ctx->prof_vote.curr_volt, - ipa3_rm_ctx->prof_vote.curr_bw); - - return; -}; - -/** - * ipa3_rm_exit() - free all IPA RM resources - */ -void ipa3_rm_exit(void) -{ - IPA_RM_DBG("ENTER\n"); - ipa3_rm_dep_graph_delete(ipa3_rm_ctx->dep_graph); - destroy_workqueue(ipa3_rm_ctx->ipa_rm_wq); - kfree(ipa3_rm_ctx); - ipa3_rm_ctx = NULL; - IPA_RM_DBG("EXIT\n"); -} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c deleted file mode 100644 index dabb9a63b39f..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.c +++ /dev/null @@ -1,245 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include "ipa_rm_dependency_graph.h" -#include "ipa_rm_i.h" - -static int ipa3_rm_dep_get_index(enum ipa_rm_resource_name resource_name) -{ - int resource_index = IPA_RM_INDEX_INVALID; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) - resource_index = ipa3_rm_prod_index(resource_name); - else if (IPA_RM_RESORCE_IS_CONS(resource_name)) - resource_index = ipa3_rm_cons_index(resource_name); - - return resource_index; -} - -/** - * ipa3_rm_dep_graph_create() - creates graph - * @dep_graph: [out] created dependency graph - * - * Returns: dependency graph on success, NULL on failure - */ -int ipa3_rm_dep_graph_create(struct ipa3_rm_dep_graph **dep_graph) -{ - int result = 0; - - *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL); - if (!*dep_graph) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } -bail: - return result; -} - -/** - * ipa3_rm_dep_graph_delete() - destroyes the graph - * @graph: [in] dependency graph - * - * Frees all resources. - */ -void ipa3_rm_dep_graph_delete(struct ipa3_rm_dep_graph *graph) -{ - int resource_index; - - if (!graph) { - IPA_RM_ERR("invalid params\n"); - return; - } - for (resource_index = 0; - resource_index < IPA_RM_RESOURCE_MAX; - resource_index++) - kfree(graph->resource_table[resource_index]); - memset(graph->resource_table, 0, sizeof(graph->resource_table)); -} - -/** - * ipa3_rm_dep_graph_get_resource() - provides a resource by name - * @graph: [in] dependency graph - * @name: [in] name of the resource - * @resource: [out] resource in case of success - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_dep_graph_get_resource( - struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - struct ipa_rm_resource **resource) -{ - int result; - int resource_index; - - if (!graph) { - result = -EINVAL; - goto bail; - } - resource_index = ipa3_rm_dep_get_index(resource_name); - if (resource_index == IPA_RM_INDEX_INVALID) { - result = -EINVAL; - goto bail; - } - 
*resource = graph->resource_table[resource_index]; - if (!*resource) { - result = -EINVAL; - goto bail; - } - result = 0; -bail: - return result; -} - -/** - * ipa3_rm_dep_graph_add() - adds resource to graph - * @graph: [in] dependency graph - * @resource: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_dep_graph_add(struct ipa3_rm_dep_graph *graph, - struct ipa_rm_resource *resource) -{ - int result = 0; - int resource_index; - - if (!graph || !resource) { - result = -EINVAL; - goto bail; - } - resource_index = ipa3_rm_dep_get_index(resource->name); - if (resource_index == IPA_RM_INDEX_INVALID) { - result = -EINVAL; - goto bail; - } - graph->resource_table[resource_index] = resource; -bail: - return result; -} - -/** - * ipa3_rm_dep_graph_remove() - removes resource from graph - * @graph: [in] dependency graph - * @resource: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_dep_graph_remove(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name) -{ - if (!graph) - return -EINVAL; - graph->resource_table[resource_name] = NULL; - - return 0; -} - -/** - * ipa3_rm_dep_graph_add_dependency() - adds dependency between - * two nodes in graph - * @graph: [in] dependency graph - * @resource_name: [in] resource to add - * @depends_on_name: [in] resource to add - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_dep_graph_add_dependency(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - struct ipa_rm_resource *dependent = NULL; - struct ipa_rm_resource *dependency = NULL; - int result; - - if (!graph || - !IPA_RM_RESORCE_IS_PROD(resource_name) || - !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { - IPA_RM_ERR("invalid params\n"); - result = -EINVAL; - goto bail; - } - if (ipa3_rm_dep_graph_get_resource(graph, - resource_name, - &dependent)) { - IPA_RM_ERR("%s does not exist\n", - 
ipa3_rm_resource_str(resource_name)); - result = -EINVAL; - goto bail; - } - if (ipa3_rm_dep_graph_get_resource(graph, - depends_on_name, - &dependency)) { - IPA_RM_ERR("%s does not exist\n", - ipa3_rm_resource_str(depends_on_name)); - result = -EINVAL; - goto bail; - } - result = ipa3_rm_resource_add_dependency(dependent, dependency); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_dep_graph_delete_dependency() - deleted dependency between - * two nodes in graph - * @graph: [in] dependency graph - * @resource_name: [in] resource to delete - * @depends_on_name: [in] resource to delete - * - * Returns: 0 on success, negative on failure - * - */ -int ipa3_rm_dep_graph_delete_dependency(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name) -{ - struct ipa_rm_resource *dependent = NULL; - struct ipa_rm_resource *dependency = NULL; - int result; - - if (!graph || - !IPA_RM_RESORCE_IS_PROD(resource_name) || - !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { - IPA_RM_ERR("invalid params\n"); - result = -EINVAL; - goto bail; - } - - if (ipa3_rm_dep_graph_get_resource(graph, - resource_name, - &dependent)) { - IPA_RM_ERR("%s does not exist\n", - ipa3_rm_resource_str(resource_name)); - result = -EINVAL; - goto bail; - } - - if (ipa3_rm_dep_graph_get_resource(graph, - depends_on_name, - &dependency)) { - IPA_RM_ERR("%s does not exist\n", - ipa3_rm_resource_str(depends_on_name)); - result = -EINVAL; - goto bail; - } - - result = ipa3_rm_resource_delete_dependency(dependent, dependency); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h deleted file mode 100644 index 2a68ce91814f..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_dependency_graph.h +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux 
Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_ -#define _IPA_RM_DEPENDENCY_GRAPH_H_ - -#include -#include -#include "ipa_rm_resource.h" - -struct ipa3_rm_dep_graph { - struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX]; -}; - -int ipa3_rm_dep_graph_get_resource( - struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name name, - struct ipa_rm_resource **resource); - -int ipa3_rm_dep_graph_create(struct ipa3_rm_dep_graph **dep_graph); - -void ipa3_rm_dep_graph_delete(struct ipa3_rm_dep_graph *graph); - -int ipa3_rm_dep_graph_add(struct ipa3_rm_dep_graph *graph, - struct ipa_rm_resource *resource); - -int ipa3_rm_dep_graph_remove(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name); - -int ipa3_rm_dep_graph_add_dependency(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -int ipa3_rm_dep_graph_delete_dependency(struct ipa3_rm_dep_graph *graph, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_resource_name depends_on_name); - -#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h deleted file mode 100644 index 4650babddaaa..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_i.h +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_I_H_ -#define _IPA_RM_I_H_ - -#include -#include -#include "ipa_rm_resource.h" - -#define IPA_RM_DRV_NAME "ipa_rm" - -#define IPA_RM_DBG(fmt, args...) \ - pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) -#define IPA_RM_ERR(fmt, args...) \ - pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) - -#define IPA_RM_RESOURCE_CONS_MAX \ - (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX) -#define IPA_RM_RESORCE_IS_PROD(x) \ - (x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX) -#define IPA_RM_RESORCE_IS_CONS(x) \ - (x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX) -#define IPA_RM_INDEX_INVALID (-1) -#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000 - -int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name); -int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name); - -/** - * struct ipa3_rm_delayed_release_work_type - IPA RM delayed resource release - * work type - * @delayed_work: work struct - * @ipa_rm_resource_name: name of the resource on which this work should be done - * @needed_bw: bandwidth required for resource in Mbps - * @dec_usage_count: decrease usage count on release ? 
- */ -struct ipa3_rm_delayed_release_work_type { - struct delayed_work work; - enum ipa_rm_resource_name resource_name; - u32 needed_bw; - bool dec_usage_count; - -}; - -/** - * enum ipa3_rm_wq_cmd - workqueue commands - */ -enum ipa3_rm_wq_cmd { - IPA_RM_WQ_NOTIFY_PROD, - IPA_RM_WQ_NOTIFY_CONS, - IPA_RM_WQ_RESOURCE_CB -}; - -/** - * struct ipa3_rm_wq_work_type - IPA RM worqueue specific - * work type - * @work: work struct - * @wq_cmd: command that should be processed in workqueue context - * @resource_name: name of the resource on which this work - * should be done - * @dep_graph: data structure to search for resource if exists - * @event: event to notify - * @notify_registered_only: notify only clients registered by - * ipa3_rm_register() - */ -struct ipa3_rm_wq_work_type { - struct work_struct work; - enum ipa3_rm_wq_cmd wq_cmd; - enum ipa_rm_resource_name resource_name; - enum ipa_rm_event event; - bool notify_registered_only; -}; - -/** - * struct ipa3_rm_wq_suspend_resume_work_type - IPA RM worqueue resume or - * suspend work type - * @work: work struct - * @resource_name: name of the resource on which this work - * should be done - * @prev_state: - * @needed_bw: - */ -struct ipa3_rm_wq_suspend_resume_work_type { - struct work_struct work; - enum ipa_rm_resource_name resource_name; - enum ipa3_rm_resource_state prev_state; - u32 needed_bw; - -}; - -int ipa3_rm_wq_send_cmd(enum ipa3_rm_wq_cmd wq_cmd, - enum ipa_rm_resource_name resource_name, - enum ipa_rm_event event, - bool notify_registered_only); - -int ipa3_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, - enum ipa3_rm_resource_state prev_state, - u32 needed_bw); - -int ipa3_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, - enum ipa3_rm_resource_state prev_state, - u32 needed_bw); - -int ipa3_rm_initialize(void); - -int ipa3_rm_stat(char *buf, int size); - -const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name); - -void ipa3_rm_perf_profile_change(enum 
ipa_rm_resource_name resource_name); - -int ipa3_rm_request_resource_with_timer(enum ipa_rm_resource_name - resource_name); - -void ipa3_delayed_release_work_func(struct work_struct *work); - -void ipa3_rm_exit(void); - -#endif /* _IPA_RM_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c deleted file mode 100644 index cd72b058b00d..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c +++ /dev/null @@ -1,268 +0,0 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "ipa_i.h" - -/** - * struct ipa3_rm_it_private - IPA RM Inactivity Timer private - * data - * @initied: indicates if instance was initialized - * @lock - spinlock for mutual exclusion - * @resource_name - resource name - * @work: delayed work object for running delayed releas - * function - * @resource_requested: boolean flag indicates if resource was requested - * @reschedule_work: boolean flag indicates to not release and to - * reschedule the release work. - * @work_in_progress: boolean flag indicates is release work was scheduled. 
- * @jiffies: number of jiffies for timeout - * - * WWAN private - holds all relevant info about WWAN driver - */ -struct ipa3_rm_it_private { - bool initied; - enum ipa_rm_resource_name resource_name; - spinlock_t lock; - struct delayed_work work; - bool resource_requested; - bool reschedule_work; - bool work_in_progress; - unsigned long jiffies; -}; - -static struct ipa3_rm_it_private ipa3_rm_it_handles[IPA_RM_RESOURCE_MAX]; - -/** - * ipa3_rm_inactivity_timer_func() - called when timer expired in - * the context of the shared workqueue. Checks internally if - * reschedule_work flag is set. In case it is not set this function calls to - * ipa_rm_release_resource(). In case reschedule_work is set this function - * reschedule the work. This flag is cleared cleared when - * calling to ipa_rm_inactivity_timer_release_resource(). - * - * @work: work object provided by the work queue - * - * Return codes: - * None - */ -static void ipa3_rm_inactivity_timer_func(struct work_struct *work) -{ - - struct ipa3_rm_it_private *me = container_of(to_delayed_work(work), - struct ipa3_rm_it_private, - work); - unsigned long flags; - - IPADBG_LOW("%s: timer expired for resource %d!\n", __func__, - me->resource_name); - - spin_lock_irqsave( - &ipa3_rm_it_handles[me->resource_name].lock, flags); - if (ipa3_rm_it_handles[me->resource_name].reschedule_work) { - IPADBG_LOW("%s: setting delayed work\n", __func__); - ipa3_rm_it_handles[me->resource_name].reschedule_work = false; - schedule_delayed_work( - &ipa3_rm_it_handles[me->resource_name].work, - ipa3_rm_it_handles[me->resource_name].jiffies); - } else if (ipa3_rm_it_handles[me->resource_name].resource_requested) { - IPADBG_LOW("%s: not calling release\n", __func__); - ipa3_rm_it_handles[me->resource_name].work_in_progress = false; - } else { - IPADBG_LOW("%s: calling release_resource on resource %d!\n", - __func__, me->resource_name); - ipa3_rm_release_resource(me->resource_name); - 
ipa3_rm_it_handles[me->resource_name].work_in_progress = false; - } - spin_unlock_irqrestore( - &ipa3_rm_it_handles[me->resource_name].lock, flags); -} - -/** -* ipa3_rm_inactivity_timer_init() - Init function for IPA RM -* inactivity timer. This function shall be called prior calling -* any other API of IPA RM inactivity timer. -* -* @resource_name: Resource name. @see ipa_rm.h -* @msecs: time in miliseccond, that IPA RM inactivity timer -* shall wait prior calling to ipa3_rm_release_resource(). -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, - unsigned long msecs) -{ - IPADBG_LOW("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (ipa3_rm_it_handles[resource_name].initied) { - IPAERR("%s: resource %d already inited\n", - __func__, resource_name); - return -EINVAL; - } - - spin_lock_init(&ipa3_rm_it_handles[resource_name].lock); - ipa3_rm_it_handles[resource_name].resource_name = resource_name; - ipa3_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs); - ipa3_rm_it_handles[resource_name].resource_requested = false; - ipa3_rm_it_handles[resource_name].reschedule_work = false; - ipa3_rm_it_handles[resource_name].work_in_progress = false; - - INIT_DELAYED_WORK(&ipa3_rm_it_handles[resource_name].work, - ipa3_rm_inactivity_timer_func); - ipa3_rm_it_handles[resource_name].initied = 1; - - return 0; -} - -/** -* ipa3_rm_inactivity_timer_destroy() - De-Init function for IPA -* RM inactivity timer. -* -* @resource_name: Resource name. 
@see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) -{ - IPADBG_LOW("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa3_rm_it_handles[resource_name].initied) { - IPAERR("%s: resource %d already inited\n", - __func__, resource_name); - return -EINVAL; - } - - cancel_delayed_work_sync(&ipa3_rm_it_handles[resource_name].work); - - memset(&ipa3_rm_it_handles[resource_name], 0, - sizeof(struct ipa3_rm_it_private)); - - return 0; -} - -/** -* ipa3_rm_inactivity_timer_request_resource() - Same as -* ipa3_rm_request_resource(), with a difference that calling to -* this function will also cancel the inactivity timer, if -* ipa3_rm_inactivity_timer_release_resource() was called earlier. -* -* @resource_name: Resource name. @see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa3_rm_inactivity_timer_request_resource( - enum ipa_rm_resource_name resource_name) -{ - int ret; - unsigned long flags; - - IPADBG_LOW("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa3_rm_it_handles[resource_name].initied) { - IPAERR("%s: Not initialized\n", __func__); - return -EINVAL; - } - - spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags); - ipa3_rm_it_handles[resource_name].resource_requested = true; - spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags); - ret = ipa3_rm_request_resource(resource_name); - IPADBG_LOW("%s: resource %d: returning %d\n", __func__, - resource_name, ret); - - return ret; -} - -/** -* ipa3_rm_inactivity_timer_release_resource() - Sets the -* inactivity timer to the timeout set by -* 
ipa3_rm_inactivity_timer_init(). When the timeout expires, IPA -* RM inactivity timer will call to ipa3_rm_release_resource(). -* If a call to ipa3_rm_inactivity_timer_request_resource() was -* made BEFORE the timout has expired, rge timer will be -* cancelled. -* -* @resource_name: Resource name. @see ipa_rm.h -* -* Return codes: -* 0: success -* -EINVAL: invalid parameters -*/ -int ipa3_rm_inactivity_timer_release_resource( - enum ipa_rm_resource_name resource_name) -{ - unsigned long flags; - - IPADBG_LOW("%s: resource %d\n", __func__, resource_name); - - if (resource_name < 0 || - resource_name >= IPA_RM_RESOURCE_MAX) { - IPAERR("%s: Invalid parameter\n", __func__); - return -EINVAL; - } - - if (!ipa3_rm_it_handles[resource_name].initied) { - IPAERR("%s: Not initialized\n", __func__); - return -EINVAL; - } - - spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags); - ipa3_rm_it_handles[resource_name].resource_requested = false; - if (ipa3_rm_it_handles[resource_name].work_in_progress) { - IPADBG_LOW("%s: Timer already set, not scheduling again %d\n", - __func__, resource_name); - ipa3_rm_it_handles[resource_name].reschedule_work = true; - spin_unlock_irqrestore( - &ipa3_rm_it_handles[resource_name].lock, flags); - return 0; - } - ipa3_rm_it_handles[resource_name].work_in_progress = true; - ipa3_rm_it_handles[resource_name].reschedule_work = false; - IPADBG_LOW("%s: setting delayed work\n", __func__); - schedule_delayed_work(&ipa3_rm_it_handles[resource_name].work, - ipa3_rm_it_handles[resource_name].jiffies); - spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags); - - return 0; -} - diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c deleted file mode 100644 index 7386135d59ff..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.c +++ /dev/null @@ -1,247 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include "ipa_i.h" -#include "ipa_rm_i.h" - -/** - * ipa3_rm_peers_list_get_resource_index() - resource name to index - * of this resource in corresponding peers list - * @resource_name: [in] resource name - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained in enum - * ipa_rm_resource_name. - * - */ -static int ipa3_rm_peers_list_get_resource_index( - enum ipa_rm_resource_name resource_name) -{ - int resource_index = IPA_RM_INDEX_INVALID; - - if (IPA_RM_RESORCE_IS_PROD(resource_name)) - resource_index = ipa3_rm_prod_index(resource_name); - else if (IPA_RM_RESORCE_IS_CONS(resource_name)) { - resource_index = ipa3_rm_cons_index(resource_name); - if (resource_index != IPA_RM_INDEX_INVALID) - resource_index = - resource_index - IPA_RM_RESOURCE_PROD_MAX; - } - - return resource_index; -} - -static bool ipa3_rm_peers_list_check_index(int index, - struct ipa3_rm_peers_list *peers_list) -{ - return !(index > peers_list->max_peers || index < 0); -} - -/** - * ipa3_rm_peers_list_create() - creates the peers list - * - * @max_peers: maximum number of peers in new list - * @peers_list: [out] newly created peers list - * - * Returns: 0 in case of SUCCESS, negative otherwise - */ -int ipa3_rm_peers_list_create(int max_peers, - struct ipa3_rm_peers_list **peers_list) -{ - int result; - - *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC); - if (!*peers_list) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - 
(*peers_list)->max_peers = max_peers; - (*peers_list)->peers = kzalloc((*peers_list)->max_peers * - sizeof(struct ipa_rm_resource *), GFP_ATOMIC); - if (!((*peers_list)->peers)) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto list_alloc_fail; - } - - return 0; - -list_alloc_fail: - kfree(*peers_list); -bail: - return result; -} - -/** - * ipa3_rm_peers_list_delete() - deletes the peers list - * - * @peers_list: peers list - * - */ -void ipa3_rm_peers_list_delete(struct ipa3_rm_peers_list *peers_list) -{ - if (peers_list) { - kfree(peers_list->peers); - kfree(peers_list); - } -} - -/** - * ipa3_rm_peers_list_remove_peer() - removes peer from the list - * - * @peers_list: peers list - * @resource_name: name of the resource to remove - * - */ -void ipa3_rm_peers_list_remove_peer( - struct ipa3_rm_peers_list *peers_list, - enum ipa_rm_resource_name resource_name) -{ - if (!peers_list) - return; - - peers_list->peers[ipa3_rm_peers_list_get_resource_index( - resource_name)] = NULL; - peers_list->peers_count--; -} - -/** - * ipa3_rm_peers_list_add_peer() - adds peer to the list - * - * @peers_list: peers list - * @resource: resource to add - * - */ -void ipa3_rm_peers_list_add_peer( - struct ipa3_rm_peers_list *peers_list, - struct ipa_rm_resource *resource) -{ - if (!peers_list || !resource) - return; - - peers_list->peers[ipa3_rm_peers_list_get_resource_index( - resource->name)] = - resource; - peers_list->peers_count++; -} - -/** - * ipa3_rm_peers_list_is_empty() - checks - * if resource peers list is empty - * - * @peers_list: peers list - * - * Returns: true if the list is empty, false otherwise - */ -bool ipa3_rm_peers_list_is_empty(struct ipa3_rm_peers_list *peers_list) -{ - bool result = true; - - if (!peers_list) - goto bail; - - if (peers_list->peers_count > 0) - result = false; -bail: - return result; -} - -/** - * ipa3_rm_peers_list_has_last_peer() - checks - * if resource peers list has exactly one peer - * - * @peers_list: peers list - * - * Returns: 
true if the list has exactly one peer, false otherwise - */ -bool ipa3_rm_peers_list_has_last_peer( - struct ipa3_rm_peers_list *peers_list) -{ - bool result = false; - - if (!peers_list) - goto bail; - - if (peers_list->peers_count == 1) - result = true; -bail: - return result; -} - -/** - * ipa3_rm_peers_list_check_dependency() - check dependency - * between 2 peer lists - * @resource_peers: first peers list - * @resource_name: first peers list resource name - * @depends_on_peers: second peers list - * @depends_on_name: second peers list resource name - * - * Returns: true if there is dependency, false otherwise - * - */ -bool ipa3_rm_peers_list_check_dependency( - struct ipa3_rm_peers_list *resource_peers, - enum ipa_rm_resource_name resource_name, - struct ipa3_rm_peers_list *depends_on_peers, - enum ipa_rm_resource_name depends_on_name) -{ - bool result = false; - - if (!resource_peers || !depends_on_peers) - return result; - - if (resource_peers->peers[ipa3_rm_peers_list_get_resource_index( - depends_on_name)] != NULL) - result = true; - - if (depends_on_peers->peers[ipa3_rm_peers_list_get_resource_index( - resource_name)] != NULL) - result = true; - - return result; -} - -/** - * ipa3_rm_peers_list_get_resource() - get resource by - * resource index - * @resource_index: resource index - * @resource_peers: peers list - * - * Returns: the resource if found, NULL otherwise - */ -struct ipa_rm_resource *ipa3_rm_peers_list_get_resource(int resource_index, - struct ipa3_rm_peers_list *resource_peers) -{ - struct ipa_rm_resource *result = NULL; - - if (!ipa3_rm_peers_list_check_index(resource_index, resource_peers)) - goto bail; - - result = resource_peers->peers[resource_index]; -bail: - return result; -} - -/** - * ipa3_rm_peers_list_get_size() - get peers list sise - * - * @peers_list: peers list - * - * Returns: the size of the peers list - */ -int ipa3_rm_peers_list_get_size(struct ipa3_rm_peers_list *peers_list) -{ - return peers_list->max_peers; -} diff 
--git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h deleted file mode 100644 index dc58e5d23fa6..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_peers_list.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_PEERS_LIST_H_ -#define _IPA_RM_PEERS_LIST_H_ - -#include "ipa_rm_resource.h" - -/** - * struct ipa3_rm_peers_list - IPA RM resource peers list - * @peers: the list of references to resources dependent on this resource - * in case of producer or list of dependencies in case of consumer - * @max_peers: maximum number of peers for this resource - * @peers_count: actual number of peers for this resource - */ -struct ipa3_rm_peers_list { - struct ipa_rm_resource **peers; - int max_peers; - int peers_count; -}; - -int ipa3_rm_peers_list_create(int max_peers, - struct ipa3_rm_peers_list **peers_list); -void ipa3_rm_peers_list_delete(struct ipa3_rm_peers_list *peers_list); -void ipa3_rm_peers_list_remove_peer( - struct ipa3_rm_peers_list *peers_list, - enum ipa_rm_resource_name resource_name); -void ipa3_rm_peers_list_add_peer( - struct ipa3_rm_peers_list *peers_list, - struct ipa_rm_resource *resource); -bool ipa3_rm_peers_list_check_dependency( - struct ipa3_rm_peers_list *resource_peers, - enum ipa_rm_resource_name resource_name, - struct ipa3_rm_peers_list *depends_on_peers, - enum ipa_rm_resource_name depends_on_name); -struct ipa_rm_resource 
*ipa3_rm_peers_list_get_resource(int resource_index, - struct ipa3_rm_peers_list *peers_list); -int ipa3_rm_peers_list_get_size(struct ipa3_rm_peers_list *peers_list); -bool ipa3_rm_peers_list_is_empty(struct ipa3_rm_peers_list *peers_list); -bool ipa3_rm_peers_list_has_last_peer( - struct ipa3_rm_peers_list *peers_list); - - -#endif /* _IPA_RM_PEERS_LIST_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c deleted file mode 100644 index 426836bb6363..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c +++ /dev/null @@ -1,1176 +0,0 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include "ipa_i.h" -#include "ipa_rm_resource.h" -#include "ipa_rm_i.h" - -/** - * ipa_rm_dep_prod_index() - producer name to producer index mapping - * @resource_name: [in] resource name (should be of producer) - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained - * in enum ipa_rm_resource_name or is not of producers. 
- * - */ -int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name) -{ - int result = resource_name; - - switch (resource_name) { - case IPA_RM_RESOURCE_Q6_PROD: - case IPA_RM_RESOURCE_USB_PROD: - case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD: - case IPA_RM_RESOURCE_HSIC_PROD: - case IPA_RM_RESOURCE_STD_ECM_PROD: - case IPA_RM_RESOURCE_RNDIS_PROD: - case IPA_RM_RESOURCE_WWAN_0_PROD: - case IPA_RM_RESOURCE_WLAN_PROD: - case IPA_RM_RESOURCE_ODU_ADAPT_PROD: - case IPA_RM_RESOURCE_MHI_PROD: - break; - default: - result = IPA_RM_INDEX_INVALID; - break; - } - - return result; -} - -/** - * ipa3_rm_cons_index() - consumer name to consumer index mapping - * @resource_name: [in] resource name (should be of consumer) - * - * Returns: resource index mapping, IPA_RM_INDEX_INVALID - * in case provided resource name isn't contained - * in enum ipa_rm_resource_name or is not of consumers. - * - */ -int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name) -{ - int result = resource_name; - - switch (resource_name) { - case IPA_RM_RESOURCE_Q6_CONS: - case IPA_RM_RESOURCE_USB_CONS: - case IPA_RM_RESOURCE_HSIC_CONS: - case IPA_RM_RESOURCE_WLAN_CONS: - case IPA_RM_RESOURCE_APPS_CONS: - case IPA_RM_RESOURCE_ODU_ADAPT_CONS: - case IPA_RM_RESOURCE_MHI_CONS: - case IPA_RM_RESOURCE_USB_DPL_CONS: - break; - default: - result = IPA_RM_INDEX_INVALID; - break; - } - - return result; -} - -int ipa3_rm_resource_consumer_release_work( - struct ipa3_rm_resource_cons *consumer, - enum ipa3_rm_resource_state prev_state, - bool notify_completion) -{ - int driver_result; - - IPA_RM_DBG("calling driver CB\n"); - driver_result = consumer->release_resource(); - IPA_RM_DBG("driver CB returned with %d\n", driver_result); - /* - * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED - * for CONS which remains in RELEASE_IN_PROGRESS. 
- */ - if (driver_result == -EINPROGRESS) - driver_result = 0; - if (driver_result != 0 && driver_result != -EINPROGRESS) { - IPA_RM_ERR("driver CB returned error %d\n", driver_result); - consumer->resource.state = prev_state; - goto bail; - } - if (driver_result == 0) { - if (notify_completion) - ipa3_rm_resource_consumer_handle_cb(consumer, - IPA_RM_RESOURCE_RELEASED); - else - consumer->resource.state = IPA_RM_RELEASED; - } - complete_all(&consumer->request_consumer_in_progress); - - ipa3_rm_perf_profile_change(consumer->resource.name); -bail: - return driver_result; -} - -int ipa3_rm_resource_consumer_request_work(struct ipa3_rm_resource_cons - *consumer, - enum ipa3_rm_resource_state prev_state, - u32 prod_needed_bw, - bool notify_completion) -{ - int driver_result; - - IPA_RM_DBG("calling driver CB\n"); - driver_result = consumer->request_resource(); - IPA_RM_DBG("driver CB returned with %d\n", driver_result); - if (driver_result == 0) { - if (notify_completion) { - ipa3_rm_resource_consumer_handle_cb(consumer, - IPA_RM_RESOURCE_GRANTED); - } else { - consumer->resource.state = IPA_RM_GRANTED; - ipa3_rm_perf_profile_change(consumer->resource.name); - ipa3_resume_resource(consumer->resource.name); - } - } else if (driver_result != -EINPROGRESS) { - consumer->resource.state = prev_state; - consumer->resource.needed_bw -= prod_needed_bw; - consumer->usage_count--; - } - - return driver_result; -} - -int ipa3_rm_resource_consumer_request( - struct ipa3_rm_resource_cons *consumer, - u32 prod_needed_bw, - bool inc_usage_count, - bool wake_client) -{ - int result = 0; - enum ipa3_rm_resource_state prev_state; - struct ipa_active_client_logging_info log_info; - - IPA_RM_DBG("%s state: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state); - - prev_state = consumer->resource.state; - consumer->resource.needed_bw += prod_needed_bw; - switch (consumer->resource.state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - 
reinit_completion(&consumer->request_consumer_in_progress); - consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; - IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, - ipa3_rm_resource_str(consumer->resource.name)); - if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || - ipa3_inc_client_enable_clks_no_block(&log_info) != 0) { - IPA_RM_DBG("async resume work for %s\n", - ipa3_rm_resource_str(consumer->resource.name)); - ipa3_rm_wq_send_resume_cmd(consumer->resource.name, - prev_state, - prod_needed_bw); - result = -EINPROGRESS; - break; - } - result = ipa3_rm_resource_consumer_request_work(consumer, - prev_state, - prod_needed_bw, - false); - break; - case IPA_RM_GRANTED: - if (wake_client) { - result = ipa3_rm_resource_consumer_request_work( - consumer, prev_state, prod_needed_bw, false); - break; - } - ipa3_rm_perf_profile_change(consumer->resource.name); - break; - case IPA_RM_REQUEST_IN_PROGRESS: - result = -EINPROGRESS; - break; - default: - consumer->resource.needed_bw -= prod_needed_bw; - result = -EPERM; - goto bail; - } - if (inc_usage_count) - consumer->usage_count++; -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -int ipa3_rm_resource_consumer_release( - struct ipa3_rm_resource_cons *consumer, - u32 prod_needed_bw, - bool dec_usage_count) -{ - int result = 0; - enum ipa3_rm_resource_state save_state; - - IPA_RM_DBG("%s state: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state); - save_state = consumer->resource.state; - consumer->resource.needed_bw -= prod_needed_bw; - switch (consumer->resource.state) { - case IPA_RM_RELEASED: - break; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - if (dec_usage_count && consumer->usage_count > 0) - consumer->usage_count--; - if (consumer->usage_count == 0) { - consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; - if (save_state == 
IPA_RM_REQUEST_IN_PROGRESS || - ipa3_suspend_resource_no_block( - consumer->resource.name) != 0) { - ipa3_rm_wq_send_suspend_cmd( - consumer->resource.name, - save_state, - prod_needed_bw); - result = -EINPROGRESS; - goto bail; - } - result = ipa3_rm_resource_consumer_release_work( - consumer, - save_state, - false); - goto bail; - } else if (consumer->resource.state == IPA_RM_GRANTED) { - ipa3_rm_perf_profile_change(consumer->resource.name); - } - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (dec_usage_count && consumer->usage_count > 0) - consumer->usage_count--; - result = -EINPROGRESS; - break; - default: - result = -EPERM; - goto bail; - } -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_resource_producer_notify_clients() - notify - * all registered clients of given producer - * @producer: producer - * @event: event to notify - * @notify_registered_only: notify only clients registered by - * ipa3_rm_register() - */ -void ipa3_rm_resource_producer_notify_clients( - struct ipa3_rm_resource_prod *producer, - enum ipa_rm_event event, - bool notify_registered_only) -{ - struct ipa3_rm_notification_info *reg_info; - - IPA_RM_DBG("%s event: %d notify_registered_only: %d\n", - ipa3_rm_resource_str(producer->resource.name), - event, - notify_registered_only); - - list_for_each_entry(reg_info, &(producer->event_listeners), link) { - if (notify_registered_only && !reg_info->explicit) - continue; - - IPA_RM_DBG("Notifying %s event: %d\n", - ipa3_rm_resource_str(producer->resource.name), - event); - reg_info->reg_params.notify_cb(reg_info->reg_params.user_data, - event, - 0); - IPA_RM_DBG("back from client CB\n"); - } -} - -static int ipa3_rm_resource_producer_create(struct ipa_rm_resource **resource, - struct ipa3_rm_resource_prod **producer, - struct ipa_rm_create_params *create_params, - int *max_peers) -{ - int result 
= 0; - - *producer = kzalloc(sizeof(**producer), GFP_ATOMIC); - if (*producer == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - INIT_LIST_HEAD(&((*producer)->event_listeners)); - result = ipa3_rm_resource_producer_register(*producer, - &(create_params->reg_params), - false); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_producer_register() failed\n"); - goto register_fail; - } - - (*resource) = (struct ipa_rm_resource *) (*producer); - (*resource)->type = IPA_RM_PRODUCER; - *max_peers = IPA_RM_RESOURCE_CONS_MAX; - goto bail; -register_fail: - kfree(*producer); -bail: - return result; -} - -static void ipa3_rm_resource_producer_delete( - struct ipa3_rm_resource_prod *producer) -{ - struct ipa3_rm_notification_info *reg_info; - struct list_head *pos, *q; - - ipa3_rm_resource_producer_release(producer); - list_for_each_safe(pos, q, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa3_rm_notification_info, - link); - list_del(pos); - kfree(reg_info); - } -} - -static int ipa3_rm_resource_consumer_create(struct ipa_rm_resource **resource, - struct ipa3_rm_resource_cons **consumer, - struct ipa_rm_create_params *create_params, - int *max_peers) -{ - int result = 0; - - *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC); - if (*consumer == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - (*consumer)->request_resource = create_params->request_resource; - (*consumer)->release_resource = create_params->release_resource; - (*resource) = (struct ipa_rm_resource *) (*consumer); - (*resource)->type = IPA_RM_CONSUMER; - init_completion(&((*consumer)->request_consumer_in_progress)); - *max_peers = IPA_RM_RESOURCE_PROD_MAX; -bail: - return result; -} - -/** - * ipa3_rm_resource_create() - creates resource - * @create_params: [in] parameters needed - * for resource initialization with IPA RM - * @resource: [out] created resource - * - * Returns: 0 on success, negative on failure - */ -int 
ipa3_rm_resource_create( - struct ipa_rm_create_params *create_params, - struct ipa_rm_resource **resource) -{ - struct ipa3_rm_resource_cons *consumer; - struct ipa3_rm_resource_prod *producer; - int max_peers; - int result = 0; - - if (!create_params) { - result = -EINVAL; - goto bail; - } - - if (IPA_RM_RESORCE_IS_PROD(create_params->name)) { - result = ipa3_rm_resource_producer_create(resource, - &producer, - create_params, - &max_peers); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_producer_create failed\n"); - goto bail; - } - } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) { - result = ipa3_rm_resource_consumer_create(resource, - &consumer, - create_params, - &max_peers); - if (result) { - IPA_RM_ERR("ipa3_rm_resource_producer_create failed\n"); - goto bail; - } - } else { - IPA_RM_ERR("invalied resource\n"); - result = -EPERM; - goto bail; - } - - result = ipa3_rm_peers_list_create(max_peers, - &((*resource)->peers_list)); - if (result) { - IPA_RM_ERR("ipa3_rm_peers_list_create failed\n"); - goto peers_alloc_fail; - } - (*resource)->name = create_params->name; - (*resource)->floor_voltage = create_params->floor_voltage; - (*resource)->state = IPA_RM_RELEASED; - goto bail; - -peers_alloc_fail: - ipa3_rm_resource_delete(*resource); -bail: - return result; -} - -/** - * ipa3_rm_resource_delete() - deletes resource - * @resource: [in] resource - * for resource initialization with IPA RM - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_resource_delete(struct ipa_rm_resource *resource) -{ - struct ipa_rm_resource *consumer; - struct ipa_rm_resource *producer; - int peers_index; - int result = 0; - int list_size; - - if (!resource) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - IPA_RM_DBG("ipa3_rm_resource_delete ENTER with resource %d\n", - resource->name); - if (resource->type == IPA_RM_PRODUCER) { - if (resource->peers_list) { - list_size = ipa3_rm_peers_list_get_size( - resource->peers_list); - for (peers_index = 0; 
- peers_index < list_size; - peers_index++) { - consumer = ipa3_rm_peers_list_get_resource( - peers_index, - resource->peers_list); - if (consumer) - ipa3_rm_resource_delete_dependency( - resource, - consumer); - } - } - - ipa3_rm_resource_producer_delete( - (struct ipa3_rm_resource_prod *) resource); - } else if (resource->type == IPA_RM_CONSUMER) { - if (resource->peers_list) { - list_size = ipa3_rm_peers_list_get_size( - resource->peers_list); - for (peers_index = 0; - peers_index < list_size; - peers_index++){ - producer = ipa3_rm_peers_list_get_resource( - peers_index, - resource->peers_list); - if (producer) - ipa3_rm_resource_delete_dependency( - producer, - resource); - } - } - } - ipa3_rm_peers_list_delete(resource->peers_list); - kfree(resource); - return result; -} - -/** - * ipa_rm_resource_register() - register resource - * @resource: [in] resource - * @reg_params: [in] registration parameters - * @explicit: [in] registered explicitly by ipa3_rm_register() - * - * Returns: 0 on success, negative on failure - * - * Producer resource is expected for this call. 
- * - */ -int ipa3_rm_resource_producer_register(struct ipa3_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params, - bool explicit) -{ - int result = 0; - struct ipa3_rm_notification_info *reg_info; - struct list_head *pos; - - if (!producer || !reg_params) { - IPA_RM_ERR("invalid params\n"); - result = -EPERM; - goto bail; - } - - list_for_each(pos, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa3_rm_notification_info, - link); - if (reg_info->reg_params.notify_cb == - reg_params->notify_cb) { - IPA_RM_ERR("already registered\n"); - result = -EPERM; - goto bail; - } - - } - - reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); - if (reg_info == NULL) { - IPA_RM_ERR("no mem\n"); - result = -ENOMEM; - goto bail; - } - - reg_info->reg_params.user_data = reg_params->user_data; - reg_info->reg_params.notify_cb = reg_params->notify_cb; - reg_info->explicit = explicit; - INIT_LIST_HEAD(®_info->link); - list_add(®_info->link, &producer->event_listeners); -bail: - return result; -} - -/** - * ipa_rm_resource_deregister() - register resource - * @resource: [in] resource - * @reg_params: [in] registration parameters - * - * Returns: 0 on success, negative on failure - * - * Producer resource is expected for this call. - * This function deleted only single instance of - * registration info. 
- * - */ -int ipa3_rm_resource_producer_deregister(struct ipa3_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params) -{ - int result = -EINVAL; - struct ipa3_rm_notification_info *reg_info; - struct list_head *pos, *q; - - if (!producer || !reg_params) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - list_for_each_safe(pos, q, &(producer->event_listeners)) { - reg_info = list_entry(pos, - struct ipa3_rm_notification_info, - link); - if (reg_info->reg_params.notify_cb == - reg_params->notify_cb) { - list_del(pos); - kfree(reg_info); - result = 0; - goto bail; - } - } -bail: - return result; -} - -/** - * ipa3_rm_resource_add_dependency() - add dependency between two - * given resources - * @resource: [in] resource resource - * @depends_on: [in] depends_on resource - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_resource_add_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on) -{ - int result = 0; - int consumer_result; - - if (!resource || !depends_on) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (ipa3_rm_peers_list_check_dependency(resource->peers_list, - resource->name, - depends_on->peers_list, - depends_on->name)) { - IPA_RM_ERR("dependency already exists\n"); - return -EEXIST; - } - - ipa3_rm_peers_list_add_peer(resource->peers_list, depends_on); - ipa3_rm_peers_list_add_peer(depends_on->peers_list, resource); - IPA_RM_DBG("%s state: %d\n", ipa3_rm_resource_str(resource->name), - resource->state); - - resource->needed_bw += depends_on->max_bw; - switch (resource->state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - break; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - { - enum ipa3_rm_resource_state prev_state = resource->state; - - resource->state = IPA_RM_REQUEST_IN_PROGRESS; - ((struct ipa3_rm_resource_prod *) - resource)->pending_request++; - consumer_result = ipa3_rm_resource_consumer_request( - (struct ipa3_rm_resource_cons 
*)depends_on, - resource->max_bw, - true, false); - if (consumer_result != -EINPROGRESS) { - resource->state = prev_state; - ((struct ipa3_rm_resource_prod *) - resource)->pending_request--; - ipa3_rm_perf_profile_change(resource->name); - } - result = consumer_result; - break; - } - default: - IPA_RM_ERR("invalid state\n"); - result = -EPERM; - goto bail; - } -bail: - IPA_RM_DBG("%s new state: %d\n", ipa3_rm_resource_str(resource->name), - resource->state); - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_resource_delete_dependency() - add dependency between two - * given resources - * @resource: [in] resource resource - * @depends_on: [in] depends_on resource - * - * Returns: 0 on success, negative on failure - * In case the resource state was changed, a notification - * will be sent to the RM client - */ -int ipa3_rm_resource_delete_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on) -{ - int result = 0; - bool state_changed = false; - bool release_consumer = false; - enum ipa_rm_event evt; - - if (!resource || !depends_on) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (!ipa3_rm_peers_list_check_dependency(resource->peers_list, - resource->name, - depends_on->peers_list, - depends_on->name)) { - IPA_RM_ERR("dependency does not exist\n"); - return -EINVAL; - } - IPA_RM_DBG("%s state: %d\n", ipa3_rm_resource_str(resource->name), - resource->state); - - resource->needed_bw -= depends_on->max_bw; - switch (resource->state) { - case IPA_RM_RELEASED: - break; - case IPA_RM_GRANTED: - ipa3_rm_perf_profile_change(resource->name); - release_consumer = true; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (((struct ipa3_rm_resource_prod *) - resource)->pending_release > 0) - ((struct ipa3_rm_resource_prod *) - resource)->pending_release--; - if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS && - ((struct ipa3_rm_resource_prod *) - resource)->pending_release == 0) { - resource->state = 
IPA_RM_RELEASED; - state_changed = true; - evt = IPA_RM_RESOURCE_RELEASED; - ipa3_rm_perf_profile_change(resource->name); - } - break; - case IPA_RM_REQUEST_IN_PROGRESS: - release_consumer = true; - if (((struct ipa3_rm_resource_prod *) - resource)->pending_request > 0) - ((struct ipa3_rm_resource_prod *) - resource)->pending_request--; - if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS && - ((struct ipa3_rm_resource_prod *) - resource)->pending_request == 0) { - resource->state = IPA_RM_GRANTED; - state_changed = true; - evt = IPA_RM_RESOURCE_GRANTED; - ipa3_rm_perf_profile_change(resource->name); - } - break; - default: - result = -EINVAL; - goto bail; - } - if (state_changed) { - (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - resource->name, - evt, - false); - } - IPA_RM_DBG("%s new state: %d\n", ipa3_rm_resource_str(resource->name), - resource->state); - ipa3_rm_peers_list_remove_peer(resource->peers_list, - depends_on->name); - ipa3_rm_peers_list_remove_peer(depends_on->peers_list, - resource->name); - if (release_consumer) - (void) ipa3_rm_resource_consumer_release( - (struct ipa3_rm_resource_cons *)depends_on, - resource->max_bw, - true); -bail: - IPA_RM_DBG("EXIT with %d\n", result); - - return result; -} - -/** - * ipa3_rm_resource_producer_request() - producer resource request - * @producer: [in] producer - * - * Returns: 0 on success, negative on failure - */ -int ipa3_rm_resource_producer_request(struct ipa3_rm_resource_prod *producer) -{ - int peers_index; - int result = 0; - struct ipa_rm_resource *consumer; - int consumer_result; - enum ipa3_rm_resource_state state; - - state = producer->resource.state; - switch (producer->resource.state) { - case IPA_RM_RELEASED: - case IPA_RM_RELEASE_IN_PROGRESS: - producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; - break; - case IPA_RM_GRANTED: - goto unlock_and_bail; - case IPA_RM_REQUEST_IN_PROGRESS: - result = -EINPROGRESS; - goto unlock_and_bail; - default: - result = -EINVAL; - goto 
unlock_and_bail; - } - - producer->pending_request = 0; - for (peers_index = 0; - peers_index < ipa3_rm_peers_list_get_size( - producer->resource.peers_list); - peers_index++) { - consumer = ipa3_rm_peers_list_get_resource(peers_index, - producer->resource.peers_list); - if (consumer) { - producer->pending_request++; - consumer_result = ipa3_rm_resource_consumer_request( - (struct ipa3_rm_resource_cons *)consumer, - producer->resource.max_bw, - true, false); - if (consumer_result == -EINPROGRESS) { - result = -EINPROGRESS; - } else { - producer->pending_request--; - if (consumer_result != 0) { - result = consumer_result; - goto bail; - } - } - } - } - - if (producer->pending_request == 0) { - producer->resource.state = IPA_RM_GRANTED; - ipa3_rm_perf_profile_change(producer->resource.name); - (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - producer->resource.name, - IPA_RM_RESOURCE_GRANTED, - true); - result = 0; - } -unlock_and_bail: - if (state != producer->resource.state) - IPA_RM_DBG("%s state changed %d->%d\n", - ipa3_rm_resource_str(producer->resource.name), - state, - producer->resource.state); -bail: - return result; -} - -/** - * ipa3_rm_resource_producer_release() - producer resource release - * producer: [in] producer resource - * - * Returns: 0 on success, negative on failure - * - */ -int ipa3_rm_resource_producer_release(struct ipa3_rm_resource_prod *producer) -{ - int peers_index; - int result = 0; - struct ipa_rm_resource *consumer; - int consumer_result; - enum ipa3_rm_resource_state state; - - state = producer->resource.state; - switch (producer->resource.state) { - case IPA_RM_RELEASED: - goto bail; - case IPA_RM_GRANTED: - case IPA_RM_REQUEST_IN_PROGRESS: - producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - result = -EINPROGRESS; - goto bail; - default: - result = -EPERM; - goto bail; - } - - producer->pending_release = 0; - for (peers_index = 0; - peers_index < ipa3_rm_peers_list_get_size( - 
producer->resource.peers_list); - peers_index++) { - consumer = ipa3_rm_peers_list_get_resource(peers_index, - producer->resource.peers_list); - if (consumer) { - producer->pending_release++; - consumer_result = ipa3_rm_resource_consumer_release( - (struct ipa3_rm_resource_cons *)consumer, - producer->resource.max_bw, - true); - producer->pending_release--; - } - } - - if (producer->pending_release == 0) { - producer->resource.state = IPA_RM_RELEASED; - ipa3_rm_perf_profile_change(producer->resource.name); - (void) ipa3_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, - producer->resource.name, - IPA_RM_RESOURCE_RELEASED, - true); - } -bail: - if (state != producer->resource.state) - IPA_RM_DBG("%s state changed %d->%d\n", - ipa3_rm_resource_str(producer->resource.name), - state, - producer->resource.state); - - return result; -} - -static void ipa3_rm_resource_producer_handle_cb( - struct ipa3_rm_resource_prod *producer, - enum ipa_rm_event event) -{ - IPA_RM_DBG("%s state: %d event: %d pending_request: %d\n", - ipa3_rm_resource_str(producer->resource.name), - producer->resource.state, - event, - producer->pending_request); - - switch (producer->resource.state) { - case IPA_RM_REQUEST_IN_PROGRESS: - if (event != IPA_RM_RESOURCE_GRANTED) - goto unlock_and_bail; - if (producer->pending_request > 0) { - producer->pending_request--; - if (producer->pending_request == 0) { - producer->resource.state = - IPA_RM_GRANTED; - ipa3_rm_perf_profile_change( - producer->resource.name); - ipa3_rm_resource_producer_notify_clients( - producer, - IPA_RM_RESOURCE_GRANTED, - false); - goto bail; - } - } - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (event != IPA_RM_RESOURCE_RELEASED) - goto unlock_and_bail; - if (producer->pending_release > 0) { - producer->pending_release--; - if (producer->pending_release == 0) { - producer->resource.state = - IPA_RM_RELEASED; - ipa3_rm_perf_profile_change( - producer->resource.name); - ipa3_rm_resource_producer_notify_clients( - producer, - 
IPA_RM_RESOURCE_RELEASED, - false); - goto bail; - } - } - break; - case IPA_RM_GRANTED: - case IPA_RM_RELEASED: - default: - goto unlock_and_bail; - } -unlock_and_bail: - IPA_RM_DBG("%s new state: %d\n", - ipa3_rm_resource_str(producer->resource.name), - producer->resource.state); -bail: - return; -} - -/** - * ipa3_rm_resource_consumer_handle_cb() - propagates resource - * notification to all dependent producers - * @consumer: [in] notifying resource - * - */ -void ipa3_rm_resource_consumer_handle_cb(struct ipa3_rm_resource_cons *consumer, - enum ipa_rm_event event) -{ - int peers_index; - struct ipa_rm_resource *producer; - - if (!consumer) { - IPA_RM_ERR("invalid params\n"); - return; - } - IPA_RM_DBG("%s state: %d event: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state, - event); - - switch (consumer->resource.state) { - case IPA_RM_REQUEST_IN_PROGRESS: - if (event == IPA_RM_RESOURCE_RELEASED) - goto bail; - consumer->resource.state = IPA_RM_GRANTED; - ipa3_rm_perf_profile_change(consumer->resource.name); - ipa3_resume_resource(consumer->resource.name); - complete_all(&consumer->request_consumer_in_progress); - break; - case IPA_RM_RELEASE_IN_PROGRESS: - if (event == IPA_RM_RESOURCE_GRANTED) - goto bail; - consumer->resource.state = IPA_RM_RELEASED; - break; - case IPA_RM_GRANTED: - case IPA_RM_RELEASED: - default: - goto bail; - } - - for (peers_index = 0; - peers_index < ipa3_rm_peers_list_get_size( - consumer->resource.peers_list); - peers_index++) { - producer = ipa3_rm_peers_list_get_resource(peers_index, - consumer->resource.peers_list); - if (producer) - ipa3_rm_resource_producer_handle_cb( - (struct ipa3_rm_resource_prod *) - producer, - event); - } - - return; -bail: - IPA_RM_DBG("%s new state: %d\n", - ipa3_rm_resource_str(consumer->resource.name), - consumer->resource.state); -} - -/* - * ipa3_rm_resource_set_perf_profile() - sets the performance profile to - * resource. 
- * - * @resource: [in] resource - * @profile: [in] profile to be set - * - * sets the profile to the given resource, In case the resource is - * granted, update bandwidth vote of the resource - */ -int ipa3_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, - struct ipa_rm_perf_profile *profile) -{ - int peers_index; - struct ipa_rm_resource *peer; - - if (!resource || !profile) { - IPA_RM_ERR("invalid params\n"); - return -EINVAL; - } - - if (profile->max_supported_bandwidth_mbps == resource->max_bw) { - IPA_RM_DBG("same profile\n"); - return 0; - } - - if ((resource->type == IPA_RM_PRODUCER && - (resource->state == IPA_RM_GRANTED || - resource->state == IPA_RM_REQUEST_IN_PROGRESS)) || - resource->type == IPA_RM_CONSUMER) { - for (peers_index = 0; - peers_index < ipa3_rm_peers_list_get_size( - resource->peers_list); - peers_index++) { - peer = ipa3_rm_peers_list_get_resource(peers_index, - resource->peers_list); - if (!peer) - continue; - peer->needed_bw -= resource->max_bw; - peer->needed_bw += - profile->max_supported_bandwidth_mbps; - if (peer->state == IPA_RM_GRANTED) - ipa3_rm_perf_profile_change(peer->name); - } - } - - resource->max_bw = profile->max_supported_bandwidth_mbps; - if (resource->state == IPA_RM_GRANTED) - ipa3_rm_perf_profile_change(resource->name); - - return 0; -} - - -/* - * ipa3_rm_resource_producer_print_stat() - print the - * resource status and all his dependencies - * - * @resource: [in] Resource resource - * @buff: [in] The buf used to print - * @size: [in] Buf size - * - * Returns: number of bytes used on success, negative on failure - */ -int ipa3_rm_resource_producer_print_stat( - struct ipa_rm_resource *resource, - char *buf, - int size){ - - int i; - int nbytes; - int cnt = 0; - struct ipa_rm_resource *consumer; - - if (!buf || size < 0) - return -EINVAL; - - nbytes = scnprintf(buf + cnt, size - cnt, - ipa3_rm_resource_str(resource->name)); - cnt += nbytes; - nbytes = scnprintf(buf + cnt, size - cnt, "["); - cnt += 
nbytes; - - switch (resource->state) { - case IPA_RM_RELEASED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Released] -> "); - cnt += nbytes; - break; - case IPA_RM_REQUEST_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Request In Progress] -> "); - cnt += nbytes; - break; - case IPA_RM_GRANTED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Granted] -> "); - cnt += nbytes; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Release In Progress] -> "); - cnt += nbytes; - break; - default: - return -EPERM; - } - - for (i = 0; i < resource->peers_list->max_peers; ++i) { - consumer = - ipa3_rm_peers_list_get_resource( - i, - resource->peers_list); - if (consumer) { - nbytes = scnprintf(buf + cnt, size - cnt, - ipa3_rm_resource_str(consumer->name)); - cnt += nbytes; - nbytes = scnprintf(buf + cnt, size - cnt, "["); - cnt += nbytes; - - switch (consumer->state) { - case IPA_RM_RELEASED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Released], "); - cnt += nbytes; - break; - case IPA_RM_REQUEST_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Request In Progress], "); - cnt += nbytes; - break; - case IPA_RM_GRANTED: - nbytes = scnprintf(buf + cnt, size - cnt, - "Granted], "); - cnt += nbytes; - break; - case IPA_RM_RELEASE_IN_PROGRESS: - nbytes = scnprintf(buf + cnt, size - cnt, - "Release In Progress], "); - cnt += nbytes; - break; - default: - return -EPERM; - } - } - } - nbytes = scnprintf(buf + cnt, size - cnt, "\n"); - cnt += nbytes; - - return cnt; -} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h deleted file mode 100644 index 34d228ea3666..000000000000 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.h +++ /dev/null @@ -1,164 +0,0 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _IPA_RM_RESOURCE_H_ -#define _IPA_RM_RESOURCE_H_ - -#include -#include -#include "ipa_rm_peers_list.h" - -/** - * enum ipa3_rm_resource_state - resource state - */ -enum ipa3_rm_resource_state { - IPA_RM_RELEASED, - IPA_RM_REQUEST_IN_PROGRESS, - IPA_RM_GRANTED, - IPA_RM_RELEASE_IN_PROGRESS -}; - -/** - * enum ipa3_rm_resource_type - IPA resource manager resource type - */ -enum ipa3_rm_resource_type { - IPA_RM_PRODUCER, - IPA_RM_CONSUMER -}; - -/** - * struct ipa3_rm_notification_info - notification information - * of IPA RM client - * @reg_params: registration parameters - * @explicit: registered explicitly by ipa3_rm_register() - * @link: link to the list of all registered clients information - */ -struct ipa3_rm_notification_info { - struct ipa_rm_register_params reg_params; - bool explicit; - struct list_head link; -}; - -/** - * struct ipa_rm_resource - IPA RM resource - * @name: name identifying resource - * @type: type of resource (PRODUCER or CONSUMER) - * @floor_voltage: minimum voltage level for operation - * @max_bw: maximum bandwidth required for resource in Mbps - * @state: state of the resource - * @peers_list: list of the peers of the resource - */ -struct ipa_rm_resource { - enum ipa_rm_resource_name name; - enum ipa3_rm_resource_type type; - enum ipa_voltage_level floor_voltage; - u32 max_bw; - u32 needed_bw; - enum ipa3_rm_resource_state state; - struct ipa3_rm_peers_list *peers_list; -}; - -/** - * struct ipa3_rm_resource_cons - IPA RM consumer - * @resource: resource - * 
@usage_count: number of producers in GRANTED / REQUESTED state - * using this consumer - * @request_consumer_in_progress: when set, the consumer is during its request - * phase - * @request_resource: function which should be called to request resource - * from resource manager - * @release_resource: function which should be called to release resource - * from resource manager - * Add new fields after @resource only. - */ -struct ipa3_rm_resource_cons { - struct ipa_rm_resource resource; - int usage_count; - struct completion request_consumer_in_progress; - int (*request_resource)(void); - int (*release_resource)(void); -}; - -/** - * struct ipa3_rm_resource_prod - IPA RM producer - * @resource: resource - * @event_listeners: clients registered with this producer - * for notifications in resource state - * list Add new fields after @resource only. - */ -struct ipa3_rm_resource_prod { - struct ipa_rm_resource resource; - struct list_head event_listeners; - int pending_request; - int pending_release; -}; - -int ipa3_rm_resource_create( - struct ipa_rm_create_params *create_params, - struct ipa_rm_resource **resource); - -int ipa3_rm_resource_delete(struct ipa_rm_resource *resource); - -int ipa3_rm_resource_producer_register(struct ipa3_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params, - bool explicit); - -int ipa3_rm_resource_producer_deregister(struct ipa3_rm_resource_prod *producer, - struct ipa_rm_register_params *reg_params); - -int ipa3_rm_resource_add_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on); - -int ipa3_rm_resource_delete_dependency(struct ipa_rm_resource *resource, - struct ipa_rm_resource *depends_on); - -int ipa3_rm_resource_producer_request(struct ipa3_rm_resource_prod *producer); - -int ipa3_rm_resource_producer_release(struct ipa3_rm_resource_prod *producer); - -int ipa3_rm_resource_consumer_request(struct ipa3_rm_resource_cons *consumer, - u32 needed_bw, - bool inc_usage_count, - bool 
wake_client); - -int ipa3_rm_resource_consumer_release(struct ipa3_rm_resource_cons *consumer, - u32 needed_bw, - bool dec_usage_count); - -int ipa3_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, - struct ipa_rm_perf_profile *profile); - -void ipa3_rm_resource_consumer_handle_cb(struct ipa3_rm_resource_cons *consumer, - enum ipa_rm_event event); - -void ipa3_rm_resource_producer_notify_clients( - struct ipa3_rm_resource_prod *producer, - enum ipa_rm_event event, - bool notify_registered_only); - -int ipa3_rm_resource_producer_print_stat( - struct ipa_rm_resource *resource, - char *buf, - int size); - -int ipa3_rm_resource_consumer_request_work(struct ipa3_rm_resource_cons - *consumer, - enum ipa3_rm_resource_state prev_state, - u32 needed_bw, - bool notify_completion); - -int ipa3_rm_resource_consumer_release_work( - struct ipa3_rm_resource_cons *consumer, - enum ipa3_rm_resource_state prev_state, - bool notify_completion); - -#endif /* _IPA_RM_RESOURCE_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 38837bbfe09d..5ea39b732ee6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -20,6 +20,7 @@ #include #include "ipa_i.h" #include "ipahal/ipahal.h" +#include "../ipa_rm_i.h" #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL) #define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) @@ -608,7 +609,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource) /* before gating IPA clocks do TAG process */ ipa3_ctx->tag_process_before_gating = true; - IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(resource)); + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); return 0; } @@ -671,7 +672,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource) if (res == 0) { IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, - ipa3_rm_resource_str(resource)); + ipa_rm_resource_str(resource)); 
ipa3_active_clients_log_dec(&log_info, true); ipa3_ctx->ipa3_active_clients.cnt--; IPADBG("active clients = %d\n", @@ -4519,24 +4520,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa; api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB; api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB; - api_ctrl->ipa_rm_create_resource = ipa3_rm_create_resource; - api_ctrl->ipa_rm_delete_resource = ipa3_rm_delete_resource; - api_ctrl->ipa_rm_register = ipa3_rm_register; - api_ctrl->ipa_rm_deregister = ipa3_rm_deregister; - api_ctrl->ipa_rm_set_perf_profile = ipa3_rm_set_perf_profile; - api_ctrl->ipa_rm_add_dependency = ipa3_rm_add_dependency; - api_ctrl->ipa_rm_delete_dependency = ipa3_rm_delete_dependency; - api_ctrl->ipa_rm_request_resource = ipa3_rm_request_resource; - api_ctrl->ipa_rm_release_resource = ipa3_rm_release_resource; - api_ctrl->ipa_rm_notify_completion = ipa3_rm_notify_completion; - api_ctrl->ipa_rm_inactivity_timer_init = - ipa3_rm_inactivity_timer_init; - api_ctrl->ipa_rm_inactivity_timer_destroy = - ipa3_rm_inactivity_timer_destroy; - api_ctrl->ipa_rm_inactivity_timer_request_resource = - ipa3_rm_inactivity_timer_request_resource; - api_ctrl->ipa_rm_inactivity_timer_release_resource = - ipa3_rm_inactivity_timer_release_resource; api_ctrl->teth_bridge_init = ipa3_teth_bridge_init; api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect; api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect; @@ -4577,7 +4560,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain; api_ctrl->ipa_disable_apps_wan_cons_deaggr = ipa3_disable_apps_wan_cons_deaggr; - api_ctrl->ipa_rm_add_dependency_sync = ipa3_rm_add_dependency_sync; api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev; api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping; api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping; diff --git 
a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 7e7848dea52c..8a34f006d3ee 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1086,7 +1086,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) send: /* IPA_RM checking start */ - ret = ipa3_rm_inactivity_timer_request_resource( + ret = ipa_rm_inactivity_timer_request_resource( IPA_RM_RESOURCE_WWAN_0_PROD); if (ret == -EINPROGRESS) { netif_stop_queue(dev); @@ -1119,7 +1119,7 @@ send: dev->stats.tx_bytes += skb->len; ret = NETDEV_TX_OK; out: - ipa3_rm_inactivity_timer_release_resource( + ipa_rm_inactivity_timer_release_resource( IPA_RM_RESOURCE_WWAN_0_PROD); return ret; } @@ -1171,7 +1171,7 @@ static void apps_ipa_tx_complete_notify(void *priv, } __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); dev_kfree_skb_any(skb); - ipa3_rm_inactivity_timer_release_resource( + ipa_rm_inactivity_timer_release_resource( IPA_RM_RESOURCE_WWAN_0_PROD); } @@ -1691,9 +1691,9 @@ static void ipa3_q6_prod_rm_request_resource(struct work_struct *work) { int ret = 0; - ret = ipa3_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0 && ret != -EINPROGRESS) { - IPAWANERR("%s: ipa3_rm_request_resource failed %d\n", __func__, + IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__, ret); return; } @@ -1710,9 +1710,9 @@ static void ipa3_q6_prod_rm_release_resource(struct work_struct *work) { int ret = 0; - ret = ipa3_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0 && ret != -EINPROGRESS) { - IPAWANERR("%s: ipa3_rm_release_resource failed %d\n", __func__, + IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__, ret); return; } @@ -1756,44 +1756,44 @@ static int ipa3_q6_initialize_rm(void) memset(&create_params, 0, sizeof(create_params)); create_params.name = 
IPA_RM_RESOURCE_Q6_PROD; create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb; - result = ipa3_rm_create_resource(&create_params); + result = ipa_rm_create_resource(&create_params); if (result) goto create_rsrc_err1; memset(&create_params, 0, sizeof(create_params)); create_params.name = IPA_RM_RESOURCE_Q6_CONS; create_params.release_resource = &ipa3_q6_rm_release_resource; create_params.request_resource = &ipa3_q6_rm_request_resource; - result = ipa3_rm_create_resource(&create_params); + result = ipa_rm_create_resource(&create_params); if (result) goto create_rsrc_err2; /* add dependency*/ - result = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); if (result) goto add_dpnd_err; /* setup Performance profile */ memset(&profile, 0, sizeof(profile)); profile.max_supported_bandwidth_mbps = 100; - result = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, &profile); if (result) goto set_perf_err; - result = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, &profile); if (result) goto set_perf_err; return result; set_perf_err: - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); add_dpnd_err: - result = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); if (result < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_CONS, result); create_rsrc_err2: - result = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); if (result < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, result); @@ -1806,17 +1806,17 @@ void ipa3_q6_deinitialize_rm(void) { int ret; - ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ret = 
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS); if (ret < 0) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, ret); - ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_CONS, ret); - ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, ret); @@ -2056,13 +2056,13 @@ static int ipa3_wwan_probe(struct platform_device *pdev) ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; ipa_rm_params.reg_params.user_data = dev; ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify; - ret = ipa3_rm_create_resource(&ipa_rm_params); + ret = ipa_rm_create_resource(&ipa_rm_params); if (ret) { pr_err("%s: unable to create resourse %d in IPA RM\n", __func__, IPA_RM_RESOURCE_WWAN_0_PROD); goto create_rsrc_err; } - ret = ipa3_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_INACTIVITY_TIMER); if (ret) { pr_err("%s: ipa rm timer init failed %d on resourse %d\n", @@ -2070,14 +2070,14 @@ static int ipa3_wwan_probe(struct platform_device *pdev) goto timer_init_err; } /* add dependency */ - ret = ipa3_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret) goto add_dpnd_err; /* setup Performance profile */ memset(&profile, 0, sizeof(profile)); profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; - ret = ipa3_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, &profile); if (ret) goto set_perf_err; @@ -2108,20 +2108,20 @@ static int ipa3_wwan_probe(struct platform_device *pdev) config_err: 
unregister_netdev(dev); set_perf_err: - ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, ret); add_dpnd_err: - ret = ipa3_rm_inactivity_timer_destroy( + ret = ipa_rm_inactivity_timer_destroy( IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ if (ret) - IPAWANERR("Error ipa3_rm_inactivity_timer_destroy %d, ret=%d\n", + IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); timer_init_err: - ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); @@ -2155,18 +2155,18 @@ static int ipa3_wwan_remove(struct platform_device *pdev) rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard); unregister_netdev(IPA_NETDEV()); - ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS); if (ret < 0) IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, ret); - ret = ipa3_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret < 0) IPAWANERR( - "Error ipa3_rm_inactivity_timer_destroy resource %d, ret=%d\n", + "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); - ret = ipa3_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); @@ -2229,7 +2229,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev) /* Make sure 
that there is no Tx operation ongoing */ netif_tx_lock_bh(netdev); - ipa3_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); netif_tx_unlock_bh(netdev); IPAWANDBG("Exit\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c index b629ec740b1e..9aa0ff3d4445 100644 --- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c +++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -120,9 +120,9 @@ int ipa3_teth_bridge_init(struct teth_bridge_init_params *params) int ipa3_teth_bridge_disconnect(enum ipa_client_type client) { TETH_DBG_FUNC_ENTRY(); - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_CONS); TETH_DBG_FUNC_EXIT(); @@ -148,10 +148,10 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) * in order to make sure the IPA clocks are up before we continue * and notify the USB driver it may continue. */ - res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD, + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); if (res < 0) { - TETH_ERR("ipa3_rm_add_dependency() failed.\n"); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); goto bail; } @@ -160,12 +160,12 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) * bridge is connected), the clocks are already up so the call doesn't * need to block. 
*/ - res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_USB_CONS); if (res < 0 && res != -EINPROGRESS) { - ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, IPA_RM_RESOURCE_Q6_CONS); - TETH_ERR("ipa3_rm_add_dependency() failed.\n"); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); goto bail; } -- cgit v1.2.3 From 902992e2ff431aa5e4905b7e8a5b3af5fe18dfb1 Mon Sep 17 00:00:00 2001 From: Ghanim Fodi Date: Sun, 20 Mar 2016 16:04:16 +0200 Subject: msm: ipa3: add support for MHI burst mode MHI channel burst mode is used to reduce link activity by reducing the frequency of host initiated doorbells and device initiated link accesses. This change adds support for mhi burst mode in IPA MHI driver, according to MHI v0.45 spec. CRs-Fixed: 990856 Change-Id: Iae170042b70c6eaf5bc05ea2b4a1ccdb7dd6f946 Acked-by: Nadine Toledano Signed-off-by: Ghanim Fodi --- drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 194 +++++++++++++++++++++++------- 1 file changed, 148 insertions(+), 46 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index ea8e2b9ea38e..82f63d3cf5a5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -36,7 +36,7 @@ #define IPA_MHI_RM_TIMEOUT_MSEC 10000 -#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 5 +#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10 #define IPA_MHI_MAX_UL_CHANNELS 1 #define IPA_MHI_MAX_DL_CHANNELS 1 @@ -82,9 +82,34 @@ enum ipa_mhi_dma_dir { IPA_MHI_DMA_FROM_HOST, }; +/** + * enum ipa3_mhi_burst_mode - MHI channel burst mode state + * + * Values are according to MHI specification + * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels, + * disabled for SW channels + * @IPA_MHI_BURST_MODE_RESERVED: + * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel + * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for 
this channel + * + */ +enum ipa3_mhi_burst_mode { + IPA_MHI_BURST_MODE_DEFAULT, + IPA_MHI_BURST_MODE_RESERVED, + IPA_MHI_BURST_MODE_DISABLE, + IPA_MHI_BURST_MODE_ENABLE, +}; + +enum ipa3_mhi_polling_mode { + IPA_MHI_POLLING_MODE_DB_MODE, + IPA_MHI_POLLING_MODE_POLL_MODE, +}; struct ipa3_mhi_ch_ctx { - u32 chstate; + u8 chstate;/*0-7*/ + u8 brstmode:2;/*8-9*/ + u8 pollcfg:6;/*10-15*/ + u16 rsvd;/*16-31*/ u32 chtype; u32 erindex; u64 rbase; @@ -118,6 +143,8 @@ struct ipa3_mhi_ev_ctx { * @event_context_addr: the event context address in host address space * @ev_ctx_host: MHI event context * @cached_gsi_evt_ring_hdl: GSI channel event ring handle + * @brstmode_enabled: is burst mode enabled for this channel? + * @ch_scratch: the channel scratch configuration */ struct ipa3_mhi_channel_ctx { bool valid; @@ -132,6 +159,8 @@ struct ipa3_mhi_channel_ctx { u64 event_context_addr; struct ipa3_mhi_ev_ctx ev_ctx_host; unsigned long cached_gsi_evt_ring_hdl; + bool brstmode_enabled; + union __packed gsi_channel_scratch ch_scratch; }; enum ipa3_mhi_rm_state { @@ -349,6 +378,8 @@ static int ipa3_mhi_print_host_channel_ctx_info( "ch_id: %d\n", channel->id); nbytes += scnprintf(&buff[nbytes], len - nbytes, "chstate: 0x%x\n", ch_ctx_host.chstate); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "brstmode: 0x%x\n", ch_ctx_host.brstmode); nbytes += scnprintf(&buff[nbytes], len - nbytes, "chtype: 0x%x\n", ch_ctx_host.chtype); nbytes += scnprintf(&buff[nbytes], len - nbytes, @@ -1037,7 +1068,6 @@ static bool ipa3_mhi_sps_channel_empty(struct ipa3_mhi_channel_ctx *channel) static bool ipa3_mhi_gsi_channel_empty(struct ipa3_mhi_channel_ctx *channel) { int res; - IPA_MHI_FUNC_ENTRY(); if (!channel->stop_in_proc) { @@ -1102,7 +1132,13 @@ static bool ipa3_mhi_wait_for_ul_empty_timeout(unsigned int msecs) IPA_MHI_DBG("timeout waiting for UL empty\n"); break; } + + if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI && + IPA_MHI_MAX_UL_CHANNELS == 1) + 
usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); } + IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty"); IPA_MHI_FUNC_EXIT(); @@ -1354,7 +1390,7 @@ static int ipa3_mhi_reset_channel(struct ipa3_mhi_channel_ctx *channel) res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, &channel->state, channel->channel_context_addr + offsetof(struct ipa3_mhi_ch_ctx, chstate), - sizeof(channel->state)); + sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate)); if (res) { IPAERR("ipa_mhi_read_write_host failed %d\n", res); return res; @@ -1407,6 +1443,8 @@ static void ipa_mhi_dump_ch_ctx(struct ipa3_mhi_channel_ctx *channel) { IPA_MHI_DBG("ch_id %d\n", channel->id); IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate); + IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode); + IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg); IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype); IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex); IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase); @@ -1518,6 +1556,22 @@ static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify) IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); } +static int ipa3_mhi_get_ch_poll_cfg(struct ipa3_mhi_channel_ctx *channel, + int ring_size) +{ + switch (channel->ch_ctx_host.pollcfg) { + case 0: + /*set default polling configuration according to MHI spec*/ + if (IPA_CLIENT_IS_PROD(channel->ep->client)) + return 7; + else + return (ring_size/2)/8; + break; + default: + return channel->ch_ctx_host.pollcfg; + } +} + static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, int ipa_ep_idx) { @@ -1525,7 +1579,6 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, struct ipa3_ep_context *ep; struct gsi_evt_ring_props ev_props; struct ipa_mhi_msi_info msi; - union __packed gsi_evt_scratch ev_scratch; struct gsi_chan_props ch_props; union __packed gsi_channel_scratch ch_scratch; struct 
ipa_gsi_ep_config *ep_cfg; @@ -1588,14 +1641,6 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, channel->cached_gsi_evt_ring_hdl = channel->ep->gsi_evt_ring_hdl; - memset(&ev_scratch, 0, sizeof(ev_scratch)); - res = gsi_write_evt_ring_scratch(channel->ep->gsi_evt_ring_hdl, - ev_scratch); - if (res) { - IPA_MHI_ERR("gsi_write_evt_ring_scratch failed %d\n", - res); - goto fail_evt_scratch; - } } memset(&ch_props, 0, sizeof(ch_props)); @@ -1608,7 +1653,7 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, ch_props.ring_len = channel->ch_ctx_host.rlen; ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND( channel->ch_ctx_host.rbase); - ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + ch_props.use_db_eng = GSI_CHAN_DB_MODE; ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG; ch_props.low_weight = 1; ch_props.err_cb = ipa_mhi_gsi_ch_err_cb; @@ -1626,10 +1671,20 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, channel->channel_context_addr + offsetof(struct ipa3_mhi_ch_ctx, wp)); ch_scratch.mhi.assert_bit40 = ipa3_mhi_ctx->assert_bit40; - ch_scratch.mhi.max_outstanding_tre = ep_cfg->ipa_if_aos * - GSI_CHAN_RE_SIZE_16B; + ch_scratch.mhi.max_outstanding_tre = 0; ch_scratch.mhi.outstanding_threshold = 4 * GSI_CHAN_RE_SIZE_16B; + ch_scratch.mhi.oob_mod_threshold = 4; + if (channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_DEFAULT || + channel->ch_ctx_host.brstmode == IPA_MHI_BURST_MODE_ENABLE) { + ch_scratch.mhi.burst_mode_enabled = true; + ch_scratch.mhi.polling_configuration = + ipa3_mhi_get_ch_poll_cfg(channel, + (ch_props.ring_len / ch_props.re_size)); + ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE; + } else { + ch_scratch.mhi.burst_mode_enabled = false; + } res = gsi_write_channel_scratch(channel->ep->gsi_chan_hdl, ch_scratch); if (res) { @@ -1637,6 +1692,8 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel, res); goto fail_ch_scratch; } + 
channel->brstmode_enabled = ch_scratch.mhi.burst_mode_enabled; + channel->ch_scratch.mhi = ch_scratch.mhi; IPA_MHI_DBG("Starting channel\n"); res = gsi_start_channel(channel->ep->gsi_chan_hdl); @@ -1652,7 +1709,6 @@ fail_ch_start: fail_ch_scratch: gsi_dealloc_channel(channel->ep->gsi_chan_hdl); fail_alloc_ch: -fail_evt_scratch: gsi_dealloc_evt_ring(channel->ep->gsi_evt_ring_hdl); channel->ep->gsi_evt_ring_hdl = ~0; fail_alloc_evt: @@ -1724,7 +1780,7 @@ int ipa3_mhi_init(struct ipa_mhi_init_params *params) ipa3_mhi_ctx->cb_priv = params->priv; ipa3_mhi_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED; ipa3_mhi_ctx->qmi_req_id = 0; - ipa3_mhi_ctx->use_ipadma = 1; + ipa3_mhi_ctx->use_ipadma = true; ipa3_mhi_ctx->assert_bit40 = !!params->assert_bit40; ipa3_mhi_ctx->test_mode = params->test_mode; init_completion(&ipa3_mhi_ctx->rm_prod_granted_comp); @@ -2021,7 +2077,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl) res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, &channel->state, channel->channel_context_addr + offsetof(struct ipa3_mhi_ch_ctx, chstate), - sizeof(channel->state)); + sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate)); if (res) { IPAERR("ipa_mhi_read_write_host failed\n"); return res; @@ -2200,11 +2256,11 @@ static int ipa3_mhi_suspend_ul_channels(void) IPA_MHI_FUNC_EXIT(); return 0; } - static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected) { int i; int res; + struct ipa3_mhi_channel_ctx *channel; IPA_MHI_FUNC_ENTRY(); for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { @@ -2213,16 +2269,30 @@ static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected) if (ipa3_mhi_ctx->ul_channels[i].state != IPA_HW_MHI_CHANNEL_STATE_SUSPEND) continue; - IPA_MHI_DBG("resuming channel %d\n", - ipa3_mhi_ctx->ul_channels[i].id); + channel = &ipa3_mhi_ctx->ul_channels[i]; + IPA_MHI_DBG("resuming channel %d\n", channel->id); - if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) - res = gsi_start_channel( - 
ipa3_mhi_ctx->ul_channels[i].ep->gsi_chan_hdl); - else - res = ipa3_uc_mhi_resume_channel( - ipa3_mhi_ctx->ul_channels[i].index, + if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) { + if (channel->brstmode_enabled && + !LPTransitionRejected) { + /* + * set polling mode bit to DB mode before + * resuming the channel + */ + res = gsi_write_channel_scratch( + channel->ep->gsi_chan_hdl, + channel->ch_scratch); + if (res) { + IPA_MHI_ERR("write ch scratch fail %d\n" + , res); + return res; + } + } + res = gsi_start_channel(channel->ep->gsi_chan_hdl); + } else { + res = ipa3_uc_mhi_resume_channel(channel->index, LPTransitionRejected); + } if (res) { IPA_MHI_ERR("failed to resume channel %d error %d\n", @@ -2230,9 +2300,8 @@ static int ipa3_mhi_resume_ul_channels(bool LPTransitionRejected) return res; } - ipa3_mhi_ctx->ul_channels[i].stop_in_proc = false; - ipa3_mhi_ctx->ul_channels[i].state = - IPA_HW_MHI_CHANNEL_STATE_RUN; + channel->stop_in_proc = false; + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; } IPA_MHI_FUNC_EXIT(); @@ -2306,6 +2375,7 @@ static int ipa3_mhi_resume_dl_channels(bool LPTransitionRejected) { int i; int res; + struct ipa3_mhi_channel_ctx *channel; IPA_MHI_FUNC_ENTRY(); for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { @@ -2314,23 +2384,37 @@ static int ipa3_mhi_resume_dl_channels(bool LPTransitionRejected) if (ipa3_mhi_ctx->dl_channels[i].state != IPA_HW_MHI_CHANNEL_STATE_SUSPEND) continue; - IPA_MHI_DBG("resuming channel %d\n", - ipa3_mhi_ctx->dl_channels[i].id); - if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) - res = gsi_start_channel( - ipa3_mhi_ctx->dl_channels[i].ep->gsi_chan_hdl); - else - res = ipa3_uc_mhi_resume_channel( - ipa3_mhi_ctx->dl_channels[i].index, + channel = &ipa3_mhi_ctx->dl_channels[i]; + IPA_MHI_DBG("resuming channel %d\n", channel->id); + + if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) { + if (channel->brstmode_enabled && + !LPTransitionRejected) { + /* + * set polling mode bit to DB 
mode before + * resuming the channel + */ + res = gsi_write_channel_scratch( + channel->ep->gsi_chan_hdl, + channel->ch_scratch); + if (res) { + IPA_MHI_ERR("write ch scratch fail %d\n" + , res); + return res; + } + } + res = gsi_start_channel(channel->ep->gsi_chan_hdl); + } else { + res = ipa3_uc_mhi_resume_channel(channel->index, LPTransitionRejected); + } if (res) { - IPA_MHI_ERR("failed to suspend channel %d error %d\n", + IPA_MHI_ERR("failed to resume channel %d error %d\n", i, res); return res; } - ipa3_mhi_ctx->dl_channels[i].stop_in_proc = false; - ipa3_mhi_ctx->dl_channels[i].state = - IPA_HW_MHI_CHANNEL_STATE_RUN; + channel->stop_in_proc = false; + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; } IPA_MHI_FUNC_EXIT(); @@ -2439,7 +2523,7 @@ static void ipa3_mhi_update_host_ch_state(bool update_rp) res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, &channel->state, channel->channel_context_addr + offsetof(struct ipa3_mhi_ch_ctx, chstate), - sizeof(channel->state)); + sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate)); if (res) { IPAERR("ipa_mhi_read_write_host failed\n"); BUG(); @@ -2476,7 +2560,7 @@ static void ipa3_mhi_update_host_ch_state(bool update_rp) res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, &channel->state, channel->channel_context_addr + offsetof(struct ipa3_mhi_ch_ctx, chstate), - sizeof(channel->state)); + sizeof(((struct ipa3_mhi_ch_ctx *)0)->chstate)); if (res) { IPAERR("ipa_mhi_read_write_host failed\n"); BUG(); @@ -2567,6 +2651,16 @@ int ipa3_mhi_suspend(bool force) empty = ipa3_mhi_wait_for_ul_empty_timeout( IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); IPADBG("empty=%d\n", empty); + if (!empty && ipa3_ctx->transport_prototype + == IPA_TRANSPORT_TYPE_GSI) { + IPA_MHI_ERR("Failed to suspend UL channels\n"); + if (ipa3_mhi_ctx->test_mode) { + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + + BUG(); + } } else { IPA_MHI_DBG("IPA not empty\n"); res = -EAGAIN; @@ -2672,6 +2766,14 @@ fail_release_prod: fail_suspend_ul_channel: 
ipa3_mhi_resume_ul_channels(true); ipa3_mhi_set_state(IPA_MHI_STATE_STARTED); + if (force_clear) { + if (ipa3_mhi_disable_force_clear(ipa3_mhi_ctx->qmi_req_id)) { + IPA_MHI_ERR("failed to disable force clear\n"); + BUG(); + } + IPA_MHI_DBG("force clear datapath disabled\n"); + ipa3_mhi_ctx->qmi_req_id++; + } return res; } @@ -2731,7 +2833,7 @@ int ipa3_mhi_resume(void) } if (!dl_channel_resumed) { - res = ipa3_mhi_resume_dl_channels(true); + res = ipa3_mhi_resume_dl_channels(false); if (res) { IPA_MHI_ERR("ipa3_mhi_resume_dl_channels failed %d\n", res); -- cgit v1.2.3 From 1d67a4738541b96fff6fdd7eb9f9ac7ae99f5bb1 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Fri, 11 Mar 2016 12:58:45 -0800 Subject: ARM: dts: msm: add IPA smp2p entries for msmcobalt Add smp2p entries to device tree to enable IPA driver handshake with modem. This handshake is used to synchronize IPA clocks vote with modem during apps or modem crash. CRs-Fixed: 988881 Change-Id: Idc8ac5fc273d91035254c675c7cedefa9a92a5a5 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi | 25 ++++++++++++++++++++++++- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 11 +++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi index d20f3ba3ffe6..2926a6889395 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-smp2p.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -241,4 +241,27 @@ compatible = "qcom,smp2pgpio_sleepstate_3_out"; gpios = <&smp2pgpio_sleepstate_3_out 0 0>; }; + + /* ipa - outbound entry to mss */ + smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "ipa"; + qcom,remote-pid = <1>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + /* ipa - inbound entry from mss */ + smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "ipa"; + qcom,remote-pid = <1>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; }; diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index fb5a743ca330..e66e63f9d717 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -780,6 +780,17 @@ <90 512 206000 960000>, <90 585 206000 960000>, /* NOMINAL */ <90 512 206000 3600000>, <90 585 206000 3600000>; /* TURBO */ qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO"; + + /* smp2p gpio information */ + qcom,smp2pgpio_map_ipa_1_out { + compatible = "qcom,smp2pgpio-map-ipa-1-out"; + gpios = <&smp2pgpio_ipa_1_out 0 0>; + }; + + qcom,smp2pgpio_map_ipa_1_in { + compatible = "qcom,smp2pgpio-map-ipa-1-in"; + gpios = <&smp2pgpio_ipa_1_in 0 0>; + }; }; qcom,ipa_fws@1e08000 { -- cgit v1.2.3 From c41f0385ee8d28857dc9bfea86f75380c0178bdc Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Fri, 1 Apr 2016 17:24:25 -0700 Subject: msm: ipa: fix to handle deaggr error In case of deaggregation error in IPA, IPA HW will send the deaggregation error frame as exception. This change fixes a bug in this logic for large frames. 
CRs-Fixed: 999351 Change-Id: I49ae94cea34dda039d03dbeeab2add2bdd1760bd Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 40 ++++++++++++++++++++++---------- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 1 + drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 40 ++++++++++++++++++++++---------- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 1 + 4 files changed, 58 insertions(+), 24 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index 25b29cdb3e32..0a3f6795e92d 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -2115,7 +2115,6 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, int pad_len_byte; int len; unsigned char *buf; - bool drop_packet; int src_pipe; unsigned int used = *(unsigned int *)skb->cb; unsigned int used_align = ALIGN(used, 32); @@ -2135,6 +2134,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, memcpy(buf, sys->prev_skb->data, sys->len_partial); sys->len_partial = 0; sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; goto begin; } @@ -2154,9 +2154,13 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, skb2->len - sys->len_pad); skb2->truesize = skb2->len + sizeof(struct sk_buff); - sys->ep->client_notify(sys->ep->priv, - IPA_RECEIVE, - (unsigned long)(skb2)); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); } else { IPAERR("copy expand failed\n"); } @@ -2187,7 +2191,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, begin: while (skb->len) { - drop_packet = false; + sys->drop_packet = false; IPADBG("LEN_REM %d\n", skb->len); if (skb->len < IPA_PKT_STATUS_SIZE) { @@ -2226,9 +2230,11 @@ begin: IPA_STATS_EXCP_CNT(status->exception, ipa_ctx->stats.rx_excp_pkts); if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes || - status->endp_src_idx >= ipa_ctx->ipa_num_pipes || - status->pkt_len > 
IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + status->endp_src_idx >= ipa_ctx->ipa_num_pipes) { IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); WARN_ON(1); BUG(); } @@ -2270,7 +2276,7 @@ begin: * there was no route match. */ if (!status->exception && !status->route_match) - drop_packet = true; + sys->drop_packet = true; if (skb->len == IPA_PKT_STATUS_SIZE && !status->exception) { @@ -2292,8 +2298,7 @@ begin: if (status->exception == IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) { IPADBG("Dropping packet on DeAggr Exception\n"); - skb_pull(skb, len + IPA_PKT_STATUS_SIZE); - continue; + sys->drop_packet = true; } skb2 = skb_clone(skb, GFP_KERNEL); @@ -2311,9 +2316,20 @@ begin: IPA_PKT_STATUS_SIZE); IPADBG("rx avail for %d\n", status->endp_dest_idx); - if (drop_packet) + if (sys->drop_packet) { dev_kfree_skb_any(skb2); - else { + } else if (status->pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status->status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status->endp_src_idx, + status->endp_dest_idx, + status->pkt_len); + BUG(); + } else { skb2->truesize = skb2->len + sizeof(struct sk_buff) + (ALIGN(len + diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index c243eaef37cc..50e30291bb0f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -619,6 +619,7 @@ struct ipa_sys_context { unsigned int len_rem; unsigned int len_pad; unsigned int len_partial; + bool drop_packet; struct work_struct work; void (*sps_callback)(struct sps_event_notify *notify); enum sps_option sps_option; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index d545de10296d..d3f24b9403f0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ 
b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -2225,7 +2225,6 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, int pad_len_byte; int len; unsigned char *buf; - bool drop_packet; int src_pipe; unsigned int used = *(unsigned int *)skb->cb; unsigned int used_align = ALIGN(used, 32); @@ -2247,6 +2246,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, memcpy(buf, sys->prev_skb->data, sys->len_partial); sys->len_partial = 0; sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; goto begin; } @@ -2266,9 +2266,13 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, skb2->len - sys->len_pad); skb2->truesize = skb2->len + sizeof(struct sk_buff); - sys->ep->client_notify(sys->ep->priv, - IPA_RECEIVE, - (unsigned long)(skb2)); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); } else { IPAERR("copy expand failed\n"); } @@ -2300,7 +2304,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, begin: pkt_status_sz = ipahal_pkt_status_get_size(); while (skb->len) { - drop_packet = false; + sys->drop_packet = false; IPADBG_LOW("LEN_REM %d\n", skb->len); if (skb->len < pkt_status_sz) { @@ -2339,9 +2343,11 @@ begin: IPA_STATS_EXCP_CNT(status.exception, ipa3_ctx->stats.rx_excp_pkts); if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes || - status.endp_src_idx >= ipa3_ctx->ipa_num_pipes || - status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) { IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); WARN_ON(1); BUG(); } @@ -2389,7 +2395,7 @@ begin: if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE && status.rt_rule_id == IPA_RULE_ID_INVALID) - drop_packet = true; + sys->drop_packet = true; if (skb->len == pkt_status_sz && status.exception == @@ -2413,8 +2419,7 @@ begin: 
IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) { IPADBG_LOW( "Dropping packet on DeAggr Exception\n"); - skb_pull(skb, len + pkt_status_sz); - continue; + sys->drop_packet = true; } skb2 = ipa3_skb_copy_for_client(skb, @@ -2433,9 +2438,20 @@ begin: pkt_status_sz); IPADBG_LOW("rx avail for %d\n", status.endp_dest_idx); - if (drop_packet) + if (sys->drop_packet) { dev_kfree_skb_any(skb2); - else { + } else if (status.pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status.status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status.endp_src_idx, + status.endp_dest_idx, + status.pkt_len); + BUG(); + } else { skb2->truesize = skb2->len + sizeof(struct sk_buff) + (ALIGN(len + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index c2e93adb2a17..13639cf8491e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -714,6 +714,7 @@ struct ipa3_sys_context { unsigned int len_rem; unsigned int len_pad; unsigned int len_partial; + bool drop_packet; struct work_struct work; void (*sps_callback)(struct sps_event_notify *notify); enum sps_option sps_option; -- cgit v1.2.3 From f0d99999f889747aeacea5a25fe07f6869c9f3e6 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Thu, 25 Feb 2016 16:10:06 -0800 Subject: soc: qcom: remoteqdss: Implement the new communication protocol Replace the previous format of a unique scm call id per function to a buffer-based protocol. 
CRs-Fixed: 997749 Change-Id: I7738b6369e095868fa6087aac94116f4767dc168 Signed-off-by: Patrick Daly --- drivers/soc/qcom/remoteqdss.c | 111 +++++++++++++++++++++++++++++++++++------- 1 file changed, 93 insertions(+), 18 deletions(-) diff --git a/drivers/soc/qcom/remoteqdss.c b/drivers/soc/qcom/remoteqdss.c index bb10099db83f..e578fb1b8b47 100644 --- a/drivers/soc/qcom/remoteqdss.c +++ b/drivers/soc/qcom/remoteqdss.c @@ -16,6 +16,7 @@ #include #include #include +#include #define REMOTEQDSS_FLAG_QUIET (BIT(0)) @@ -42,12 +43,58 @@ struct qdss_msg_translation { * dir Parent debugfs directory */ struct remoteqdss_data { - u8 id; - u64 sw_entity_group; - u64 sw_event_group; + uint32_t id; + uint32_t sw_entity_group; + uint32_t sw_event_group; struct dentry *dir; }; +static struct device dma_dev; + +/* Allowed message formats */ + +enum remoteqdss_cmd_id { + CMD_ID_QUERY_SWEVENT_TAG, + CMD_ID_FILTER_SWTRACE_STATE, + CMD_ID_QUERY_SWTRACE_STATE, + CMD_ID_FILTER_SWEVENT, + CMD_ID_QUERY_SWEVENT, + CMD_ID_FILTER_SWENTITY, + CMD_ID_QUERY_SWENTITY, +}; + +struct remoteqdss_header_fmt { + uint32_t subsys_id; + uint32_t cmd_id; +}; + +struct remoteqdss_filter_swtrace_state_fmt { + struct remoteqdss_header_fmt h; + uint32_t state; +}; + +struct remoteqdss_filter_swevent_fmt { + struct remoteqdss_header_fmt h; + uint32_t event_group; + uint32_t event_mask; +}; + +struct remoteqdss_query_swevent_fmt { + struct remoteqdss_header_fmt h; + uint32_t event_group; +}; + +struct remoteqdss_filter_swentity_fmt { + struct remoteqdss_header_fmt h; + uint32_t entity_group; + uint32_t entity_mask; +}; + +struct remoteqdss_query_swentity_fmt { + struct remoteqdss_header_fmt h; + uint32_t entity_group; +}; + /* msgs is a null terminated array */ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs, u64 err) @@ -71,9 +118,8 @@ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs, REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", err); } -/* SCM based devices */ -#define 
SCM_FILTER_SWTRACE_ID (0x1) -#define SCM_QUERY_SWTRACE_ID (0x2) +/* Shared across all remoteqdss scm functions */ +#define SCM_CMD_ID (0x1) /* Response Values */ #define SCM_CMD_FAIL (0x80) @@ -118,20 +164,31 @@ static int remoteqdss_scm_query_swtrace(void *priv, u64 *val) struct remoteqdss_data *data = priv; int ret; struct scm_desc desc; + struct remoteqdss_header_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->subsys_id = data->id; + fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE; memset(&desc, 0, sizeof(desc)); - desc.args[0] = data->id; - desc.arginfo = SCM_ARGS(1, SCM_VAL); + desc.args[0] = dma_to_phys(NULL, addr); + desc.args[1] = sizeof(*fmt); + desc.arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL); ret = scm_call2( - SCM_SIP_FNID(SCM_SVC_QDSS, SCM_QUERY_SWTRACE_ID), + SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID), &desc); if (ret) - return ret; + goto out; remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]); ret = desc.ret[0] ? 
-EINVAL : 0; *val = desc.ret[1]; +out: + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; } @@ -140,26 +197,37 @@ static int remoteqdss_scm_filter_swtrace(void *priv, u64 val) struct remoteqdss_data *data = priv; int ret; struct scm_desc desc; + struct remoteqdss_filter_swtrace_state_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->h.subsys_id = data->id; + fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE; + fmt->state = (uint32_t)val; memset(&desc, 0, sizeof(desc)); - desc.args[0] = data->id; - desc.args[1] = val; + desc.args[0] = dma_to_phys(NULL, addr); + desc.args[1] = sizeof(*fmt); desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL); ret = scm_call2( - SCM_SIP_FNID(SCM_SVC_QDSS, SCM_FILTER_SWTRACE_ID), + SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID), &desc); if (ret) - return ret; + goto out; remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]); ret = desc.ret[0] ? -EINVAL : 0; +out: + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; } DEFINE_SIMPLE_ATTRIBUTE(fops_sw_trace_output, remoteqdss_scm_query_swtrace, remoteqdss_scm_filter_swtrace, - "%llu\n"); + "0x%llx\n"); static void __init enumerate_scm_devices(struct dentry *parent) { @@ -189,12 +257,12 @@ static void __init enumerate_scm_devices(struct dentry *parent) if (IS_ERR_OR_NULL(dentry)) goto out; - dentry = debugfs_create_u64("sw_entity_group", S_IRUGO | S_IWUSR, + dentry = debugfs_create_u32("sw_entity_group", S_IRUGO | S_IWUSR, data->dir, &data->sw_entity_group); if (IS_ERR_OR_NULL(dentry)) goto out; - dentry = debugfs_create_u64("sw_event_group", S_IRUGO | S_IWUSR, + dentry = debugfs_create_u32("sw_event_group", S_IRUGO | S_IWUSR, data->dir, &data->sw_event_group); if (IS_ERR_OR_NULL(dentry)) goto out; @@ -209,6 +277,13 @@ out: static int __init remoteqdss_init(void) { unsigned long old_flags = remoteqdss_dbg_flags; + int ret; + + /* Set up DMA */ + arch_setup_dma_ops(&dma_dev, 0, 
U64_MAX, NULL, false); + ret = dma_coerce_mask_and_coherent(&dma_dev, DMA_BIT_MASK(64)); + if (ret) + return ret; /* * disable normal error messages while checking @@ -225,4 +300,4 @@ static int __init remoteqdss_init(void) remoteqdss_dbg_flags = old_flags; return 0; } -module_init(remoteqdss_init); +late_initcall(remoteqdss_init); -- cgit v1.2.3 From 72e7eb12e749d3d83f1529bc861bfa14ba2698d8 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Mon, 28 Mar 2016 18:32:04 -0700 Subject: soc: qcom: remoteqdss: Implement the remaining message types Implement the tag, swevent, and swentity message formats. Additionally, abstract the actual scm call and error checking into a separate shared function. CRs-Fixed: 997749 Change-Id: Ie80846978a84a114b38d69b0bca86639bfab7e91 Signed-off-by: Patrick Daly --- drivers/soc/qcom/remoteqdss.c | 195 +++++++++++++++++++++++++++++++++++------- 1 file changed, 166 insertions(+), 29 deletions(-) diff --git a/drivers/soc/qcom/remoteqdss.c b/drivers/soc/qcom/remoteqdss.c index e578fb1b8b47..e66ca587adca 100644 --- a/drivers/soc/qcom/remoteqdss.c +++ b/drivers/soc/qcom/remoteqdss.c @@ -26,10 +26,10 @@ module_param_named(dbg_flags, remoteqdss_dbg_flags, ulong, 0644); static struct dentry *remoteqdss_dir; #define REMOTEQDSS_ERR(fmt, ...) \ - pr_err("%s: " fmt, __func__, ## __VA_ARGS__) + pr_debug("%s: " fmt, __func__, ## __VA_ARGS__) #define REMOTEQDSS_ERR_CALLER(fmt, ...) 
\ - pr_err("%pf: " fmt, __builtin_return_address(0), ## __VA_ARGS__) + pr_debug("%pf: " fmt, __builtin_return_address(1), ## __VA_ARGS__) struct qdss_msg_translation { u64 val; @@ -159,6 +159,27 @@ static void free_remoteqdss_data(struct remoteqdss_data *data) kfree(data); } +static int remoteqdss_do_scm_call(struct scm_desc *desc, + dma_addr_t addr, size_t size) +{ + int ret; + + memset(desc, 0, sizeof(*desc)); + desc->args[0] = dma_to_phys(NULL, addr); + desc->args[1] = size; + desc->arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL); + + ret = scm_call2( + SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID), + desc); + if (ret) + return ret; + + remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0]); + ret = desc->ret[0] ? -EINVAL : 0; + return ret; +} + static int remoteqdss_scm_query_swtrace(void *priv, u64 *val) { struct remoteqdss_data *data = priv; @@ -173,21 +194,9 @@ static int remoteqdss_scm_query_swtrace(void *priv, u64 *val) fmt->subsys_id = data->id; fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE; - memset(&desc, 0, sizeof(desc)); - desc.args[0] = dma_to_phys(NULL, addr); - desc.args[1] = sizeof(*fmt); - desc.arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL); - - ret = scm_call2( - SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID), - &desc); - if (ret) - goto out; - - remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]); - ret = desc.ret[0] ? 
-EINVAL : 0; + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); *val = desc.ret[1]; -out: + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; } @@ -207,28 +216,141 @@ static int remoteqdss_scm_filter_swtrace(void *priv, u64 val) fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE; fmt->state = (uint32_t)val; - memset(&desc, 0, sizeof(desc)); - desc.args[0] = dma_to_phys(NULL, addr); - desc.args[1] = sizeof(*fmt); - desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL); - - ret = scm_call2( - SCM_SIP_FNID(SCM_SVC_QDSS, SCM_CMD_ID), - &desc); - if (ret) - goto out; + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); - remoteqdss_err_translation(remoteqdss_scm_msgs, desc.ret[0]); - ret = desc.ret[0] ? -EINVAL : 0; -out: dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); return ret; } + DEFINE_SIMPLE_ATTRIBUTE(fops_sw_trace_output, remoteqdss_scm_query_swtrace, remoteqdss_scm_filter_swtrace, "0x%llx\n"); +static int remoteqdss_scm_query_tag(void *priv, u64 *val) +{ + struct remoteqdss_data *data = priv; + int ret; + struct scm_desc desc; + struct remoteqdss_header_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->subsys_id = data->id; + fmt->cmd_id = CMD_ID_QUERY_SWEVENT_TAG; + + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + *val = desc.ret[1]; + + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_tag, + remoteqdss_scm_query_tag, + NULL, + "0x%llx\n"); + +static int remoteqdss_scm_query_swevent(void *priv, u64 *val) +{ + struct remoteqdss_data *data = priv; + int ret; + struct scm_desc desc; + struct remoteqdss_query_swevent_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->h.subsys_id = data->id; + fmt->h.cmd_id = CMD_ID_QUERY_SWEVENT; + fmt->event_group = data->sw_event_group; + + ret = 
remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + *val = desc.ret[1]; + + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); + return ret; +} + +static int remoteqdss_scm_filter_swevent(void *priv, u64 val) +{ + struct remoteqdss_data *data = priv; + int ret; + struct scm_desc desc; + struct remoteqdss_filter_swevent_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->h.subsys_id = data->id; + fmt->h.cmd_id = CMD_ID_FILTER_SWEVENT; + fmt->event_group = data->sw_event_group; + fmt->event_mask = (uint32_t)val; + + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_swevent, + remoteqdss_scm_query_swevent, + remoteqdss_scm_filter_swevent, + "0x%llx\n"); + +static int remoteqdss_scm_query_swentity(void *priv, u64 *val) +{ + struct remoteqdss_data *data = priv; + int ret; + struct scm_desc desc; + struct remoteqdss_query_swentity_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->h.subsys_id = data->id; + fmt->h.cmd_id = CMD_ID_QUERY_SWENTITY; + fmt->entity_group = data->sw_entity_group; + + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + *val = desc.ret[1]; + + dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); + return ret; +} + +static int remoteqdss_scm_filter_swentity(void *priv, u64 val) +{ + struct remoteqdss_data *data = priv; + int ret; + struct scm_desc desc; + struct remoteqdss_filter_swentity_fmt *fmt; + dma_addr_t addr; + + fmt = dma_alloc_coherent(&dma_dev, sizeof(*fmt), &addr, GFP_KERNEL); + if (!fmt) + return -ENOMEM; + fmt->h.subsys_id = data->id; + fmt->h.cmd_id = CMD_ID_FILTER_SWENTITY; + fmt->entity_group = data->sw_entity_group; + fmt->entity_mask = (uint32_t)val; + + ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt)); + + 
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr); + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_swentity, + remoteqdss_scm_query_swentity, + remoteqdss_scm_filter_swentity, + "0x%llx\n"); + static void __init enumerate_scm_devices(struct dentry *parent) { u64 unused; @@ -267,6 +389,21 @@ static void __init enumerate_scm_devices(struct dentry *parent) if (IS_ERR_OR_NULL(dentry)) goto out; + dentry = debugfs_create_file("tag", S_IRUGO, + data->dir, data, &fops_tag); + if (IS_ERR_OR_NULL(dentry)) + goto out; + + dentry = debugfs_create_file("swevent", S_IRUGO | S_IWUSR, + data->dir, data, &fops_swevent); + if (IS_ERR_OR_NULL(dentry)) + goto out; + + dentry = debugfs_create_file("swentity", S_IRUGO | S_IWUSR, + data->dir, data, &fops_swentity); + if (IS_ERR_OR_NULL(dentry)) + goto out; + return; out: -- cgit v1.2.3 From 1e82e660308d5ff2e8729661f49450ed049b64f7 Mon Sep 17 00:00:00 2001 From: Sathish Ambley Date: Tue, 8 Mar 2016 00:44:04 -0800 Subject: msm: ADSPRPC: FastRPC migration to GLINK from SMD FastRPC migration to use GLINK from existing SMD driver for inter- process communication. Updated FastRPC context bank details for msmcobalt. Added "qcom,enable-glink" option in dtsi file for using glink, uses smd if this flag is not defined. Change-Id: I4a933c9b3355b0aa1b653719ec1ec7ded1f368dd Acked-by: Viswanatham Paduchuri Signed-off-by: Sathish Ambley --- .../devicetree/bindings/qdsp/msm-fastrpc.txt | 14 +- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 46 +++++ drivers/char/adsprpc.c | 189 +++++++++++++++++---- drivers/char/adsprpc_shared.h | 3 +- 4 files changed, 217 insertions(+), 35 deletions(-) diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt index dfdf1f8fe1b5..f419655722d4 100644 --- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt +++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt @@ -9,18 +9,22 @@ other tasks. 
Required properties: - compatible : Must be "qcom,msm-fastrpc-adsp" +Optional properties: +- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC + Optional subnodes: - qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context banks -Subnode properties: -- compatible : Must be "qcom,msm-fastrpc-compute-cb" -- label: Label describing the channel this context bank belongs to -- iommus : A list of phandle and IOMMU specifier pairs that describe the - IOMMU master interfaces of the device +Subnode Required properties: +- compatible : Must be "qcom,msm-fastrpc-compute-cb" +- label: Label describing the channel this context bank belongs to +- iommus : A list of phandle and IOMMU specifier pairs that describe the + IOMMU master interfaces of the device Example: qcom,msm_fastrpc { compatible = "qcom,msm-fastrpc-adsp"; + qcom,fastrpc-glink; qcom,msm_fastrpc_compute_cb_1 { compatible = "qcom,msm-fastrpc-compute-cb"; diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index e66e63f9d717..daa870107c3e 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -862,6 +862,52 @@ }; }; + qcom,msm_fastrpc { + compatible = "qcom,msm-fastrpc-adsp"; + qcom,fastrpc-glink; + + qcom,msm_fastrpc_compute_cb1 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 8>; + }; + qcom,msm_fastrpc_compute_cb2 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 9>; + }; + qcom,msm_fastrpc_compute_cb3 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 10>; + }; + qcom,msm_fastrpc_compute_cb4 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 11>; + }; + qcom,msm_fastrpc_compute_cb5 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 12>; + }; + 
qcom,msm_fastrpc_compute_cb6 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 5>; + }; + qcom,msm_fastrpc_compute_cb7 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 6>; + }; + qcom,msm_fastrpc_compute_cb8 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 7>; + }; + }; + rpm_bus: qcom,rpm-smd { compatible = "qcom,rpm-glink"; qcom,glink-edge = "rpm"; diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 0a70bc44dcdb..73e615dabe41 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ struct smq_invoke_ctx { uint32_t sc; struct overlap *overs; struct overlap **overps; + struct smq_msg msg; }; struct fastrpc_ctx_lst { @@ -159,7 +161,7 @@ struct fastrpc_session_ctx { struct fastrpc_channel_ctx { char *name; char *subsys; - smd_channel_t *chan; + void *chan; struct device *dev; struct fastrpc_session_ctx session[NUM_SESSIONS]; struct completion work; @@ -174,6 +176,10 @@ struct fastrpc_channel_ctx { int vmid; int ramdumpenabled; void *remoteheap_ramdump_dev; + struct glink_link_info link_info; + void *link_notify_handle; + struct glink_open_config cfg; + char *edge; }; struct fastrpc_apps { @@ -189,6 +195,7 @@ struct fastrpc_apps { spinlock_t hlock; struct ion_client *client; struct device *dev; + bool glink; }; struct fastrpc_mmap { @@ -231,14 +238,15 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { .name = "adsprpc-smd", .subsys = "adsp", .channel = SMD_APPS_QDSP, + .edge = "lpass", }, { .name = "sdsprpc-smd", .subsys = "dsps", .channel = SMD_APPS_DSPS, + .edge = "dsps", .vmid = VMID_SSC_Q6, }, - }; static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache) @@ -1164,31 +1172,40 @@ static void inv_args(struct smq_invoke_ctx *ctx) static int fastrpc_invoke_send(struct 
smq_invoke_ctx *ctx, uint32_t kernel, uint32_t handle) { - struct smq_msg msg = {0}; + struct smq_msg *msg = &ctx->msg; struct fastrpc_file *fl = ctx->fl; int err = 0, len; VERIFY(err, 0 != fl->apps->channel[fl->cid].chan); if (err) goto bail; - msg.pid = current->tgid; - msg.tid = current->pid; + msg->pid = current->tgid; + msg->tid = current->pid; if (kernel) - msg.pid = 0; - msg.invoke.header.ctx = ptr_to_uint64(ctx); - msg.invoke.header.handle = handle; - msg.invoke.header.sc = ctx->sc; - msg.invoke.page.addr = ctx->buf ? ctx->buf->phys : 0; - msg.invoke.page.size = buf_page_size(ctx->used); - spin_lock(&fl->apps->hlock); - len = smd_write(fl->apps->channel[fl->cid].chan, &msg, sizeof(msg)); - spin_unlock(&fl->apps->hlock); - VERIFY(err, len == sizeof(msg)); + msg->pid = 0; + msg->invoke.header.ctx = ptr_to_uint64(ctx); + msg->invoke.header.handle = handle; + msg->invoke.header.sc = ctx->sc; + msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0; + msg->invoke.page.size = buf_page_size(ctx->used); + + if (fl->apps->glink) { + err = glink_tx(fl->apps->channel[fl->cid].chan, + (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg), + GLINK_TX_REQ_INTENT); + } else { + spin_lock(&fl->apps->hlock); + len = smd_write((smd_channel_t *) + fl->apps->channel[fl->cid].chan, + msg, sizeof(*msg)); + spin_unlock(&fl->apps->hlock); + VERIFY(err, len == sizeof(*msg)); + } bail: return err; } -static void fastrpc_read_handler(int cid) +static void fastrpc_smd_read_handler(int cid) { struct fastrpc_apps *me = &gfa; struct smq_invoke_rsp rsp = {0}; @@ -1216,7 +1233,7 @@ static void smd_event_handler(void *priv, unsigned event) fastrpc_notify_drivers(me, cid); break; case SMD_EVENT_DATA: - fastrpc_read_handler(cid); + fastrpc_smd_read_handler(cid); break; } } @@ -1334,7 +1351,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, int pageslen; } inbuf; inbuf.pgid = current->tgid; - inbuf.namelen = strlen(current->comm); + inbuf.namelen = strlen(current->comm) + 1; 
inbuf.filelen = init->filelen; VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, init->file, init->filelen, mflags, &file)); @@ -1632,7 +1649,12 @@ static void fastrpc_channel_close(struct kref *kref) int cid; ctx = container_of(kref, struct fastrpc_channel_ctx, kref); - smd_close(ctx->chan); + if (!me->glink) { + smd_close(ctx->chan); + } else { + glink_unregister_link_state_cb(ctx->link_notify_handle); + glink_close(ctx->chan); + } ctx->chan = 0; mutex_unlock(&me->smd_mutex); cid = ctx - &gcinfo[0]; @@ -1707,6 +1729,49 @@ static int fastrpc_session_free(struct fastrpc_channel_ctx *chan, int session) return err; } +bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size) +{ + if (glink_queue_rx_intent(h, NULL, size)) + return false; + return true; +} + +void fastrpc_glink_notify_tx_done(void *handle, const void *priv, + const void *pkt_priv, const void *ptr) +{ +} + +void fastrpc_glink_notify_rx(void *handle, const void *priv, + const void *pkt_priv, const void *ptr, size_t size) +{ + struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr; + int len = size; + + while (len >= sizeof(*rsp) && rsp) { + context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval); + rsp++; + len = len - sizeof(*rsp); + } + glink_rx_done(handle, ptr, true); +} + +void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event) +{ + struct fastrpc_apps *me = &gfa; + int cid = (int)(uintptr_t)priv; + + switch (event) { + case GLINK_CONNECTED: + complete(&me->channel[cid].work); + break; + case GLINK_LOCAL_DISCONNECTED: + break; + case GLINK_REMOTE_DISCONNECTED: + fastrpc_notify_drivers(me, cid); + break; + } +} + static int fastrpc_device_release(struct inode *inode, struct file *file) { struct fastrpc_apps *me = &gfa; @@ -1725,6 +1790,55 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) return 0; } +static void fastrpc_glink_register_cb(struct glink_link_state_cb_info *cb_info, + void *priv) +{ + switch 
(cb_info->link_state) { + case GLINK_LINK_STATE_UP: + if (priv) + complete(priv); + break; + case GLINK_LINK_STATE_DOWN: + break; + default: + pr_err("adsprpc: unknown glnk state %d\n", cb_info->link_state); + break; + } +} + +static int fastrpc_glink_open(int cid, struct fastrpc_apps *me) +{ + int err = 0; + struct glink_open_config *cfg = &me->channel[cid].cfg; + struct glink_link_info *link_info = &me->channel[cid].link_info; + + link_info->edge = gcinfo[cid].edge; + link_info->transport = "smem"; + link_info->glink_link_state_notif_cb = fastrpc_glink_register_cb; + me->channel[cid].link_notify_handle = glink_register_link_state_cb( + &me->channel[cid].link_info, + (void *)(&me->channel[cid].work)); + VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link_notify_handle)); + if (err) + goto bail; + + VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work, + RPC_TIMEOUT)); + if (err) + goto bail; + + cfg->priv = (void *)(uintptr_t)cid; + cfg->edge = gcinfo[cid].edge; + cfg->name = FASTRPC_GLINK_GUID; + cfg->notify_rx = fastrpc_glink_notify_rx; + cfg->notify_tx_done = fastrpc_glink_notify_tx_done; + cfg->notify_state = fastrpc_glink_notify_state; + cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req; + VERIFY(err, 0 != (me->channel[cid].chan = glink_open(cfg))); +bail: + return err; +} + static int fastrpc_device_open(struct inode *inode, struct file *filp) { int cid = MINOR(inode->i_rdev); @@ -1756,17 +1870,24 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->ssrcount = me->channel[cid].ssrcount; if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) || (me->channel[cid].chan == 0)) { - VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID, - gcinfo[cid].channel, - &me->channel[cid].chan, - (void *)(uintptr_t)cid, - smd_event_handler)); + if (me->glink) { + VERIFY(err, 0 == fastrpc_glink_open(cid, me)); + } else { + VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID, + gcinfo[cid].channel, + (smd_channel_t 
**)&me->channel[cid].chan, + (void *)(uintptr_t)cid, + smd_event_handler)); + } if (err) goto bail; + VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work, - RPC_TIMEOUT)); - if (err) + RPC_TIMEOUT)); + if (err) { + me->channel[cid].chan = 0; goto bail; + } kref_init(&me->channel[cid].kref); pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); @@ -1884,10 +2005,16 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, mutex_lock(&me->smd_mutex); ctx->ssrcount++; if (ctx->chan) { - smd_close(ctx->chan); + if (me->glink) { + glink_unregister_link_state_cb( + ctx->link_notify_handle); + glink_close(ctx->chan); + } else { + smd_close(ctx->chan); + } ctx->chan = 0; - pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name, - MAJOR(me->dev_no), cid); + pr_info("'restart notifier: closed /dev/%s c %d %d'\n", + gcinfo[cid].name, MAJOR(me->dev_no), cid); } mutex_unlock(&me->smd_mutex); fastrpc_notify_drivers(me, cid); @@ -1956,6 +2083,7 @@ static int fastrpc_cb_probe(struct device *dev) VERIFY(err, chan->sesscount < NUM_SESSIONS); if (err) goto bail; + VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", 0, &iommuspec)); if (err) @@ -2093,6 +2221,9 @@ static int fastrpc_probe(struct platform_device *pdev) return 0; } + me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink"); + pr_debug("adsprpc: channel link type: %d\n", me->glink); + VERIFY(err, !of_platform_populate(pdev->dev.of_node, fastrpc_match_table, NULL, &pdev->dev)); diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h index fdd479df0d8d..d0a1e11871f3 100644 --- a/drivers/char/adsprpc_shared.h +++ b/drivers/char/adsprpc_shared.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,6 +22,7 @@ #define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd) #define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t) #define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init) +#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" #define DEVICE_NAME "adsprpc-smd" -- cgit v1.2.3 From 43351724f4adb3043bcb26093ac95e9dfbc28580 Mon Sep 17 00:00:00 2001 From: Sureshnaidu Laveti Date: Mon, 11 Apr 2016 12:09:51 -0700 Subject: ARM: dts: msm: update the pmcobalt GPIO nodes -Pmcobalt GPIO 9 belongs to GPIO_LV subtype, so update the voltage selection value to 0 which is the allowed selection. -Pull up/down is not needed for pmcobalt GPIOs, so update the pull value to PULL_NO. Change-Id: I7b9faf485dda6f450f6df4410e3ae25efa40aad1 Signed-off-by: Sureshnaidu Laveti --- arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi | 12 ++++++------ arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi index 06719a0fcd06..e68f746bda38 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi @@ -280,9 +280,9 @@ }; &pmcobalt_gpios { gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */ - qcom,mode = <1>; /* Output*/ - qcom,pull = <4>; /* Pulldown 10uA */ - qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ qcom,src-sel = <0>; /* GPIO */ qcom,invert = <0>; /* Invert */ qcom,master-en = <1>; /* Enable GPIO */ @@ -290,9 +290,9 @@ }; gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */ - qcom,mode = <1>; /* Output*/ - 
qcom,pull = <4>; /* Pulldown 10uA */ - qcom,vin-sel = <1>; /* VIN1 GPIO_MV*/ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ qcom,src-sel = <0>; /* GPIO */ qcom,invert = <0>; /* Invert */ qcom,master-en = <1>; /* Enable GPIO */ diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi index 06719a0fcd06..e68f746bda38 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi @@ -280,9 +280,9 @@ }; &pmcobalt_gpios { gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */ - qcom,mode = <1>; /* Output*/ - qcom,pull = <4>; /* Pulldown 10uA */ - qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ qcom,src-sel = <0>; /* GPIO */ qcom,invert = <0>; /* Invert */ qcom,master-en = <1>; /* Enable GPIO */ @@ -290,9 +290,9 @@ }; gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */ - qcom,mode = <1>; /* Output*/ - qcom,pull = <4>; /* Pulldown 10uA */ - qcom,vin-sel = <1>; /* VIN1 GPIO_MV*/ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ qcom,src-sel = <0>; /* GPIO */ qcom,invert = <0>; /* Invert */ qcom,master-en = <1>; /* Enable GPIO */ -- cgit v1.2.3 From 0a8e2adbbc9c732b7c5c460c4664d20bdce165aa Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Sun, 10 Apr 2016 15:01:42 -0700 Subject: USB: QTI: Add missing usb_ctrl_qti.h f_gsi compilation is failing with below error: fatal error: linux/usb/usb_ctrl_qti.h: No such file or directory Hence add missing usb_ctrl_qti.h to resolve above error while compiling f_gsi.c file. This file's snapshot is taken from msm-3.18 kernel as 'commit 24b986908cc1 ("power: qpnp-fg: stop IMA transactions during FG shutdown")'. 
CRs-Fixed: 1001469 Change-Id: Ib7dce6b2ae1670554a29847e4381e71ba7b75edf Signed-off-by: Mayank Rana --- include/uapi/linux/usb/Kbuild | 1 + include/uapi/linux/usb/usb_ctrl_qti.h | 41 +++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 include/uapi/linux/usb/usb_ctrl_qti.h diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild index 4cc4d6e7e523..ba153d582422 100644 --- a/include/uapi/linux/usb/Kbuild +++ b/include/uapi/linux/usb/Kbuild @@ -9,4 +9,5 @@ header-y += g_printer.h header-y += gadgetfs.h header-y += midi.h header-y += tmc.h +header-y += usb_ctrl_qti.h header-y += video.h diff --git a/include/uapi/linux/usb/usb_ctrl_qti.h b/include/uapi/linux/usb/usb_ctrl_qti.h new file mode 100644 index 000000000000..b02272a03e40 --- /dev/null +++ b/include/uapi/linux/usb/usb_ctrl_qti.h @@ -0,0 +1,41 @@ +#ifndef __UAPI_LINUX_USB_CTRL_QTI_H +#define __UAPI_LINUX_USB_CTRL_QTI_H + +#include +#include + +#define MAX_QTI_PKT_SIZE 2048 + +#define QTI_CTRL_IOCTL_MAGIC 'r' +#define QTI_CTRL_GET_LINE_STATE _IOR(QTI_CTRL_IOCTL_MAGIC, 2, int) +#define QTI_CTRL_EP_LOOKUP _IOR(QTI_CTRL_IOCTL_MAGIC, 3, struct ep_info) +#define QTI_CTRL_MODEM_OFFLINE _IO(QTI_CTRL_IOCTL_MAGIC, 4) +#define QTI_CTRL_MODEM_ONLINE _IO(QTI_CTRL_IOCTL_MAGIC, 5) + +enum peripheral_ep_type { + DATA_EP_TYPE_RESERVED = 0x0, + DATA_EP_TYPE_HSIC = 0x1, + DATA_EP_TYPE_HSUSB = 0x2, + DATA_EP_TYPE_PCIE = 0x3, + DATA_EP_TYPE_EMBEDDED = 0x4, + DATA_EP_TYPE_BAM_DMUX = 0x5, +}; + +struct peripheral_ep_info { + enum peripheral_ep_type ep_type; + __u32 peripheral_iface_id; +}; + +struct ipa_ep_pair { + __u32 cons_pipe_num; + __u32 prod_pipe_num; +}; + +struct ep_info { + struct peripheral_ep_info ph_ep_info; + struct ipa_ep_pair ipa_ep_pair; + +}; + +#endif + -- cgit v1.2.3 From bd632ce9dfa34f35a130b5f2e8b02df2e8c6c00b Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Sun, 10 Apr 2016 15:04:13 -0700 Subject: usb: gadget: f_gsi: Add missing includes to compile f_gsi file 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change adds required .h files to resolve below seen compilation errors. In function ‘gsi_update_function_bind_params’: error: implicit declaration of function ‘msm_ep_config’ warning: ‘enum ipa_usb_notify_event’ declared inside parameter list CRs-Fixed: 1001469 Change-Id: Ic62b4dae798726055beb778509e6b65e69f4db34 Signed-off-by: Mayank Rana --- drivers/usb/gadget/function/f_gsi.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 4becac67bcdc..eb42feff0712 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -24,6 +24,8 @@ #include #include #include +#include +#include #define GSI_RMNET_CTRL_NAME "rmnet_ctrl" #define GSI_MBIM_CTRL_NAME "android_mbim" -- cgit v1.2.3 From d40b7c124fec2b1c0163a531e778b9974346e37b Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Sun, 10 Apr 2016 15:18:35 -0700 Subject: defconfig: Enable USB GSI Function driver for msmcobalt This change enables CONFIG_USB_CONFIGFS_F_GSI required for USB GSI (RMNET, RNDIS and DPL) related functionality. 
CRs-Fixed: 1001469 Change-Id: I91b2531a2ce739613181f2e13c692263d9e2454a Signed-off-by: Mayank Rana --- arch/arm64/configs/msmcortex-perf_defconfig | 1 + arch/arm64/configs/msmcortex_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index f20fff11bf93..0b075acbc8e4 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -359,6 +359,7 @@ CONFIG_USB_CONFIGFS_F_ACC=y CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_HID=y CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_USB_CONFIGFS_F_CDEV=y CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 46268d6644ef..9c571aab8fc8 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -373,6 +373,7 @@ CONFIG_USB_CONFIGFS_F_ACC=y CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_HID=y CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_USB_CONFIGFS_F_CDEV=y CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y -- cgit v1.2.3 From 21f86651a66abefcfce33f4611c2de7863a8642b Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Thu, 7 Feb 2013 14:31:36 -0800 Subject: android/lowmemorykiller: Ignore tasks with freed mm A killed task can stay in the task list long after its memory has been returned to the system, therefore ignore any tasks whose mm struct has been freed. 
Change-Id: I76394b203b4ab2312437c839976f0ecb7b6dde4e CRs-fixed: 450383 Signed-off-by: Liam Mark --- arch/arm/include/asm/thread_info.h | 1 + drivers/staging/android/lowmemorykiller.c | 9 +++++++-- include/linux/sched.h | 2 +- kernel/exit.c | 6 +++++- kernel/fork.c | 5 ++++- 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 776757d1604a..f23454db246f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 +#define TIF_MM_RELEASED 21 /* task MM has been released */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index b9be6d9a52ef..976453cc834f 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -155,6 +155,10 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (tsk->flags & PF_KTHREAD) continue; + /* if task no longer has any memory ignore it */ + if (test_task_flag(tsk, TIF_MM_RELEASED)) + continue; + if (time_before_eq(jiffies, lowmem_deathpending_timeout)) { if (test_task_flag(tsk, TIF_MEMDIE)) { rcu_read_unlock(); @@ -220,13 +224,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) free); lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; + rcu_read_unlock(); /* give the system time to free up the memory */ msleep_interruptible(20); - } + } else + rcu_read_unlock(); lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", sc->nr_to_scan, sc->gfp_mask, rem); - rcu_read_unlock(); mutex_unlock(&scan_mutex); return rem; } diff --git a/include/linux/sched.h 
b/include/linux/sched.h index e963ff30a7f6..7ece18efd02b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2740,7 +2740,7 @@ static inline void mmdrop(struct mm_struct * mm) } /* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); +extern int mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* diff --git a/kernel/exit.c b/kernel/exit.c index 77d54139672b..a32e83d567b9 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -388,6 +388,7 @@ static void exit_mm(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; struct core_state *core_state; + int mm_released; mm_release(tsk, mm); if (!mm) @@ -434,9 +435,12 @@ static void exit_mm(struct task_struct *tsk) enter_lazy_tlb(mm, current); task_unlock(tsk); mm_update_next_owner(mm); - mmput(mm); + + mm_released = mmput(mm); if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); + if (mm_released) + set_tsk_thread_flag(tsk, TIF_MM_RELEASED); } static struct task_struct *find_alive_thread(struct task_struct *p) diff --git a/kernel/fork.c b/kernel/fork.c index 859b949d106f..c9eb86b646ab 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -694,8 +694,9 @@ EXPORT_SYMBOL_GPL(__mmdrop); /* * Decrement the use count and release all resources for an mm. */ -void mmput(struct mm_struct *mm) +int mmput(struct mm_struct *mm) { + int mm_freed = 0; might_sleep(); if (atomic_dec_and_test(&mm->mm_users)) { @@ -713,7 +714,9 @@ void mmput(struct mm_struct *mm) if (mm->binfmt) module_put(mm->binfmt->module); mmdrop(mm); + mm_freed = 1; } + return mm_freed; } EXPORT_SYMBOL_GPL(mmput); -- cgit v1.2.3 From e4c1c94265a50d396d7bd4655f3bcc945dc774c4 Mon Sep 17 00:00:00 2001 From: "seungho1.park" Date: Tue, 24 Jul 2012 10:20:44 +0900 Subject: android: lowmemorykiller: add lmk parameters tunning code. There are cases that LMK doesn't run, even when it must run. 
It is due to LMK shrinker not considering memory status per zone. So add LMK parameters(other_free, other_file) tunnig code to consider target zone of LMK shrinker. Change-Id: I6f1f8660d5da920a0e3af45a160499965032081d Git-commit: 22d990a58fc17b3f0155e15eb2dc3efa037bea1c Git-repo: https://android.googlesource.com/kernel/common/ [ohaugan@codeaurora.org: Fix compilation issues] Signed-off-by: Olav Haugan Signed-off-by: Liam Mark --- drivers/staging/android/lowmemorykiller.c | 87 +++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 976453cc834f..b0c2b3eb45be 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -44,6 +44,13 @@ #include #include #include +#include + +#ifdef CONFIG_HIGHMEM +#define _ZONE ZONE_HIGHMEM +#else +#define _ZONE ZONE_NORMAL +#endif #define CREATE_TRACE_POINTS #include "trace/lowmemorykiller.h" @@ -63,6 +70,7 @@ static int lowmem_minfree[6] = { 16 * 1024, /* 64MB */ }; static int lowmem_minfree_size = 4; +static int lmk_fast_run = 1; static unsigned long lowmem_deathpending_timeout; @@ -99,6 +107,82 @@ static int test_task_flag(struct task_struct *p, int flag) static DEFINE_MUTEX(scan_mutex); +void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, + int *other_free, int *other_file) +{ + struct zone *zone; + struct zoneref *zoneref; + int zone_idx; + + for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) { + if ((zone_idx = zonelist_zone_idx(zoneref)) == ZONE_MOVABLE) + continue; + + if (zone_idx > classzone_idx) { + if (other_free != NULL) + *other_free -= zone_page_state(zone, + NR_FREE_PAGES); + if (other_file != NULL) + *other_file -= zone_page_state(zone, + NR_FILE_PAGES) + - zone_page_state(zone, NR_SHMEM); + } else if (zone_idx < classzone_idx) { + if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) + *other_free -= + 
zone->lowmem_reserve[classzone_idx]; + else + *other_free -= + zone_page_state(zone, NR_FREE_PAGES); + } + } +} + +void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc) +{ + gfp_t gfp_mask; + struct zone *preferred_zone; + struct zonelist *zonelist; + enum zone_type high_zoneidx, classzone_idx; + unsigned long balance_gap; + + gfp_mask = sc->gfp_mask; + zonelist = node_zonelist(0, gfp_mask); + high_zoneidx = gfp_zone(gfp_mask); + first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); + classzone_idx = zone_idx(preferred_zone); + + balance_gap = min(low_wmark_pages(preferred_zone), + (preferred_zone->present_pages + + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / + KSWAPD_ZONE_BALANCE_GAP_RATIO); + + if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0, + high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX + + balance_gap, 0, 0))) { + if (lmk_fast_run) + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + other_file); + else + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + NULL); + + if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) + *other_free -= + preferred_zone->lowmem_reserve[_ZONE]; + else + *other_free -= zone_page_state(preferred_zone, + NR_FREE_PAGES); + lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem " + "ofree %d, %d\n", *other_free, *other_file); + } else { + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + other_file); + + lowmem_print(4, "lowmem_shrink tunning for others ofree %d, " + "%d\n", *other_free, *other_file); + } +} + static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) { struct task_struct *tsk; @@ -122,6 +206,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) global_page_state(NR_SHMEM) - total_swapcache_pages(); + tune_lmk_param(&other_free, &other_file, sc); + if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; if (lowmem_minfree_size < array_size) @@ -343,4 +429,5 @@ 
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); +module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR); -- cgit v1.2.3 From 92c1fefed56e839edbcce0d3cc734a53ed477394 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Wed, 27 Mar 2013 12:34:51 -0700 Subject: android/lowmemorykiller: Selectively count free CMA pages In certain memory configurations there can be a large number of CMA pages which are not suitable to satisfy certain memory requests. This large number of unsuitable pages can cause the lowmemorykiller to not kill any tasks because the lowmemorykiller counts all free pages. In order to ensure the lowmemorykiller properly evaluates the free memory only count the free CMA pages if they are suitable for satisfying the memory request. Change-Id: I7f06d53e2d8cfe7439e5561fe6e5209ce73b1c90 CRs-fixed: 437016 Signed-off-by: Liam Mark --- drivers/staging/android/lowmemorykiller.c | 89 +++++++++++++++++++++++++------ include/linux/mmzone.h | 8 +++ mm/page_alloc.c | 5 ++ 3 files changed, 87 insertions(+), 15 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index b0c2b3eb45be..14bdf880e551 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -107,16 +107,47 @@ static int test_task_flag(struct task_struct *p, int flag) static DEFINE_MUTEX(scan_mutex); +int can_use_cma_pages(gfp_t gfp_mask) +{ + int can_use = 0; + int mtype = gfpflags_to_migratetype(gfp_mask); + int i = 0; + int *mtype_fallbacks = get_migratetype_fallbacks(mtype); + + if (is_migrate_cma(mtype)) { + can_use = 1; + } else { + for (i = 0;; i++) { + int fallbacktype = mtype_fallbacks[i]; + + if (is_migrate_cma(fallbacktype)) { + can_use = 1; + break; + } + + if (fallbacktype == 
MIGRATE_TYPES) + break; + } + } + return can_use; +} + void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, - int *other_free, int *other_file) + int *other_free, int *other_file, + int use_cma_pages) { struct zone *zone; struct zoneref *zoneref; int zone_idx; for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) { - if ((zone_idx = zonelist_zone_idx(zoneref)) == ZONE_MOVABLE) + zone_idx = zonelist_zone_idx(zoneref); + if (zone_idx == ZONE_MOVABLE) { + if (!use_cma_pages) + *other_free -= + zone_page_state(zone, NR_FREE_CMA_PAGES); continue; + } if (zone_idx > classzone_idx) { if (other_free != NULL) @@ -127,12 +158,22 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, NR_FILE_PAGES) - zone_page_state(zone, NR_SHMEM); } else if (zone_idx < classzone_idx) { - if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) + if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) { + if (!use_cma_pages) { + *other_free -= min( + zone->lowmem_reserve[classzone_idx] + + zone_page_state( + zone, NR_FREE_CMA_PAGES), + zone_page_state( + zone, NR_FREE_PAGES)); + } else { + *other_free -= + zone->lowmem_reserve[classzone_idx]; + } + } else { *other_free -= - zone->lowmem_reserve[classzone_idx]; - else - *other_free -= - zone_page_state(zone, NR_FREE_PAGES); + zone_page_state(zone, NR_FREE_PAGES); + } } } } @@ -144,12 +185,14 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc) struct zonelist *zonelist; enum zone_type high_zoneidx, classzone_idx; unsigned long balance_gap; + int use_cma_pages; gfp_mask = sc->gfp_mask; zonelist = node_zonelist(0, gfp_mask); high_zoneidx = gfp_zone(gfp_mask); first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); classzone_idx = zone_idx(preferred_zone); + use_cma_pages = can_use_cma_pages(gfp_mask); balance_gap = min(low_wmark_pages(preferred_zone), (preferred_zone->present_pages + @@ -161,22 +204,38 @@ void tune_lmk_param(int *other_free, int *other_file, 
struct shrink_control *sc) balance_gap, 0, 0))) { if (lmk_fast_run) tune_lmk_zone_param(zonelist, classzone_idx, other_free, - other_file); + other_file, use_cma_pages); else tune_lmk_zone_param(zonelist, classzone_idx, other_free, - NULL); - - if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) - *other_free -= - preferred_zone->lowmem_reserve[_ZONE]; - else + NULL, use_cma_pages); + + if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) { + if (!use_cma_pages) { + *other_free -= min( + preferred_zone->lowmem_reserve[_ZONE] + + zone_page_state( + preferred_zone, NR_FREE_CMA_PAGES), + zone_page_state( + preferred_zone, NR_FREE_PAGES)); + } else { + *other_free -= + preferred_zone->lowmem_reserve[_ZONE]; + } + } else { *other_free -= zone_page_state(preferred_zone, NR_FREE_PAGES); + } + lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem " "ofree %d, %d\n", *other_free, *other_file); } else { tune_lmk_zone_param(zonelist, classzone_idx, other_free, - other_file); + other_file, use_cma_pages); + + if (!use_cma_pages) { + *other_free -= + zone_page_state(preferred_zone, NR_FREE_CMA_PAGES); + } lowmem_print(4, "lowmem_shrink tunning for others ofree %d, " "%d\n", *other_free, *other_file); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 423d214f708b..8a5894308eb2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -63,6 +63,14 @@ enum { MIGRATE_TYPES }; +/* + * Returns a list which contains the migrate types on to which + * an allocation falls back when the free list for the migrate + * type mtype is depleted. + * The end of the list is delimited by the type MIGRATE_TYPES. 
+ */ +extern int *get_migratetype_fallbacks(int mtype); + #ifdef CONFIG_CMA bool is_cma_pageblock(struct page *page); # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d63689fda9b4..1eafd75f402e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1466,6 +1466,11 @@ static int fallbacks[MIGRATE_TYPES][4] = { #endif }; +int *get_migratetype_fallbacks(int mtype) +{ + return fallbacks[mtype]; +} + #ifdef CONFIG_CMA static struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) -- cgit v1.2.3 From 55fc1595ecb35f8f3ba263d534047f24f04b75f8 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Fri, 30 Aug 2013 12:10:39 -0700 Subject: android/lowmemorykiller: Account for total_swapcache_pages The lowmemorykiller relies on NR_FILE_PAGES when measuring the amount of reclaimable memory in the system. However when swap is enabled swap cache pages are counted in NR_FILE_PAGES, and swap cache pages aren't as reclaimable in low memory as file cache pages. Therefore a large swap cache can result in the lowmemorykiller not running and an OOM occurring. In order to ensure the lowmemorykiller properly evaluates the amount of reclaimable memory don't count the swap cache pages. 
Change-Id: I38239283e572f814b277c718eaf6be7f92abacbb Signed-off-by: Liam Mark --- drivers/staging/android/lowmemorykiller.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 14bdf880e551..347becf1cc35 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -45,6 +45,7 @@ #include #include #include +#include #ifdef CONFIG_HIGHMEM #define _ZONE ZONE_HIGHMEM @@ -261,9 +262,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) return 0; other_free = global_page_state(NR_FREE_PAGES); - other_file = global_page_state(NR_FILE_PAGES) - + + if (global_page_state(NR_SHMEM) + total_swapcache_pages() < + global_page_state(NR_FILE_PAGES)) + other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM) - total_swapcache_pages(); + else + other_file = 0; tune_lmk_param(&other_free, &other_file, sc); -- cgit v1.2.3 From f47218c220b6fe29c5440bec65ec4b8bb1ac030b Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Mon, 10 Mar 2014 16:06:39 -0700 Subject: lowmemorykiller: Account for highmem during kswapd reclaim Currenlty most memory reclaim is done through kswapd. Since kswapd uses a gfp mask of GFP_KERNEL, and because the lowmemorykiller is zone aware, the lowmemorykiller will ignore highmem most of the time. This results in the lowmemorykiller being overly aggressive. The fix to this issue is to allow the lowmemorykiller to count highmem when being called by the kswapd if the lowmem watermarks are satisfied. 
Change-Id: I938644584f374763d10d429d835e74daa4854a38 Signed-off-by: Liam Mark --- drivers/staging/android/lowmemorykiller.c | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 347becf1cc35..0536ba2672af 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -179,6 +179,36 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, } } +#ifdef CONFIG_HIGHMEM +void adjust_gfp_mask(gfp_t *gfp_mask) +{ + struct zone *preferred_zone; + struct zonelist *zonelist; + enum zone_type high_zoneidx; + + if (current_is_kswapd()) { + zonelist = node_zonelist(0, *gfp_mask); + high_zoneidx = gfp_zone(*gfp_mask); + first_zones_zonelist(zonelist, high_zoneidx, NULL, + &preferred_zone); + + if (high_zoneidx == ZONE_NORMAL) { + if (zone_watermark_ok_safe( + preferred_zone, 0, + high_wmark_pages(preferred_zone), 0, + 0)) + *gfp_mask |= __GFP_HIGHMEM; + } else if (high_zoneidx == ZONE_HIGHMEM) { + *gfp_mask |= __GFP_HIGHMEM; + } + } +} +#else +void adjust_gfp_mask(gfp_t *unused) +{ +} +#endif + void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc) { gfp_t gfp_mask; @@ -189,6 +219,8 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc) int use_cma_pages; gfp_mask = sc->gfp_mask; + adjust_gfp_mask(&gfp_mask); + zonelist = node_zonelist(0, gfp_mask); high_zoneidx = gfp_zone(gfp_mask); first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); -- cgit v1.2.3 From 2f6130c61d0aa147d43b6bb2a2f9b8d2cdd6c239 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Tue, 3 Jun 2014 13:57:47 -0700 Subject: lowmemorykiller: enhance debug information Add extra debug information to make it easier to both determine why the lowmemorykiller killed a process and to help find the source of memory leaks. 
Also increase the debug level for "select" statements to help prevent flooding the log. Change-Id: I3b6876c5ecdf192ecc271aed3f37579f66d47a08 Signed-off-by: Liam Mark --- drivers/staging/android/lowmemorykiller.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 0536ba2672af..752aba43fd7f 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -46,6 +46,7 @@ #include #include #include +#include #ifdef CONFIG_HIGHMEM #define _ZONE ZONE_HIGHMEM @@ -375,7 +376,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) selected = p; selected_tasksize = tasksize; selected_oom_score_adj = oom_score_adj; - lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n", p->comm, p->pid, oom_score_adj, tasksize); } if (selected) { @@ -397,14 +398,33 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \ " to free %ldkB on behalf of '%s' (%d) because\n" \ " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \ - " Free memory is %ldkB above reserved\n", + " Free memory is %ldkB above reserved.\n" \ + " Free CMA is %ldkB\n" \ + " Total reserve is %ldkB\n" \ + " Total free pages is %ldkB\n" \ + " Total file cache is %ldkB\n" \ + " GFP mask is 0x%x\n", selected->comm, selected->pid, selected_oom_score_adj, selected_tasksize * (long)(PAGE_SIZE / 1024), current->comm, current->pid, cache_size, cache_limit, min_score_adj, - free); + free, + global_page_state(NR_FREE_CMA_PAGES) * + (long)(PAGE_SIZE / 1024), + totalreserve_pages * (long)(PAGE_SIZE / 1024), + global_page_state(NR_FREE_PAGES) * + (long)(PAGE_SIZE / 1024), + global_page_state(NR_FILE_PAGES) * + (long)(PAGE_SIZE / 1024), + sc->gfp_mask); + + if (lowmem_debug_level 
>= 2 && selected_oom_score_adj == 0) { + show_mem(SHOW_MEM_FILTER_NODES); + dump_tasks(NULL, NULL); + } + lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; rcu_read_unlock(); -- cgit v1.2.3 From 8e3a1a07e3bd18b17ac29de14823ee56a7d4bf03 Mon Sep 17 00:00:00 2001 From: Susheel Khiani Date: Thu, 12 Feb 2015 19:00:15 +0530 Subject: lowmemorykiller: Do proper NULL checks Pointer other_free is getting dereferenced without performing proper NULL checks which may cause issue. Do proper NULL checks at all points before dereferencing it. Change-Id: I88515703d64730e42598ab16136dcce4c18b099c Signed-off-by: Susheel Khiani --- drivers/staging/android/lowmemorykiller.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 752aba43fd7f..11535e3065ac 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -145,7 +145,7 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) { zone_idx = zonelist_zone_idx(zoneref); if (zone_idx == ZONE_MOVABLE) { - if (!use_cma_pages) + if (!use_cma_pages && other_free) *other_free -= zone_page_state(zone, NR_FREE_CMA_PAGES); continue; @@ -160,7 +160,8 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, NR_FILE_PAGES) - zone_page_state(zone, NR_SHMEM); } else if (zone_idx < classzone_idx) { - if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) { + if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) && + other_free) { if (!use_cma_pages) { *other_free -= min( zone->lowmem_reserve[classzone_idx] + @@ -173,8 +174,9 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, zone->lowmem_reserve[classzone_idx]; } } else { - *other_free -= - zone_page_state(zone, NR_FREE_PAGES); + if (other_free) + *other_free -= + zone_page_state(zone, NR_FREE_PAGES); } } } 
-- cgit v1.2.3 From 2f5e2c732d2b8a2b0ddffe8dfe133c4cfe6850f2 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Wed, 4 Mar 2015 20:41:21 +0530 Subject: lowmemorykiller: adapt to vmpressure There were issues reported, where page cache thrashing was observed because of LMK not killing tasks when required, resulting in sluggishness and higher app launch latency. LMK does not kill a task for the following reasons. 1. The free and file pages are above the LMK thresholds 2. LMK tries to pick task with an adj level corresponding to current thresholds, but fails to do so because of the absence of tasks in that level. But sometimes it is better to kill a lower adj task, than thrashing. And there are cases where the number of file pages are huge, though we dont thrash, the reclaim process becomes time consuming, since LMK triggers will be delayed because of higher number of file pages. Even in such cases, when reclaim path finds it difficult to reclaim pages, it is better to trigger lmk to free up some memory faster. The basic idea here is to make LMK more aggressive dynamically when such a thrashing scenario is detected. To detect thrashing, this patch uses vmpressure events. The values of vmpressure upon which an action has to be taken, was derived empirically. This patch also adds tracepoints to validate this feature, almk_shrink and almk_vmpressure. Two knobs are available for the user to tune adaptive lmk behaviour. /sys/module/lowmemorykiller/parameters/adaptive_lmk - Write 1 to enable the feature, 0 to disable. By default disabled. /sys/module/lowmemorykiller/parameters/vmpressure_file_min - This parameter controls the behaviour of LMK when vmpressure is in the range of 90-94. Adaptive lmk triggers based on number file pages wrt vmpressure_file_min, when vmpressure is in the range of 90-94. Usually this is a pseudo minfree value, higher than the highest configured value in minfree array. 
Change-Id: I1a08160c35d3e33bdfd1d2c789c288fc07d0f0d3 Signed-off-by: Vinayak Menon --- drivers/staging/android/lowmemorykiller.c | 105 +++++++++++++++++++++++++++++- include/trace/events/almk.h | 84 ++++++++++++++++++++++++ 2 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 include/trace/events/almk.h diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 11535e3065ac..e60421299164 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -47,6 +47,10 @@ #include #include #include +#include + +#define CREATE_TRACE_POINTS +#include #ifdef CONFIG_HIGHMEM #define _ZONE ZONE_HIGHMEM @@ -91,6 +95,95 @@ static unsigned long lowmem_count(struct shrinker *s, global_page_state(NR_INACTIVE_FILE); } +static atomic_t shift_adj = ATOMIC_INIT(0); +static short adj_max_shift = 353; + +/* User knob to enable/disable adaptive lmk feature */ +static int enable_adaptive_lmk; +module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int, + S_IRUGO | S_IWUSR); + +/* + * This parameter controls the behaviour of LMK when vmpressure is in + * the range of 90-94. Adaptive lmk triggers based on number of file + * pages wrt vmpressure_file_min, when vmpressure is in the range of + * 90-94. Usually this is a pseudo minfree value, higher than the + * highest configured value in minfree array. 
+ */ +static int vmpressure_file_min; +module_param_named(vmpressure_file_min, vmpressure_file_min, int, + S_IRUGO | S_IWUSR); + +enum { + VMPRESSURE_NO_ADJUST = 0, + VMPRESSURE_ADJUST_ENCROACH, + VMPRESSURE_ADJUST_NORMAL, +}; + +int adjust_minadj(short *min_score_adj) +{ + int ret = VMPRESSURE_NO_ADJUST; + + if (!enable_adaptive_lmk) + return 0; + + if (atomic_read(&shift_adj) && + (*min_score_adj > adj_max_shift)) { + if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1) + ret = VMPRESSURE_ADJUST_ENCROACH; + else + ret = VMPRESSURE_ADJUST_NORMAL; + *min_score_adj = adj_max_shift; + } + atomic_set(&shift_adj, 0); + + return ret; +} + +static int lmk_vmpressure_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int other_free, other_file; + unsigned long pressure = action; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (!enable_adaptive_lmk) + return 0; + + if (pressure >= 95) { + other_file = global_page_state(NR_FILE_PAGES) - + global_page_state(NR_SHMEM) - + total_swapcache_pages(); + other_free = global_page_state(NR_FREE_PAGES); + + atomic_set(&shift_adj, 1); + trace_almk_vmpressure(pressure, other_free, other_file); + } else if (pressure >= 90) { + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + + other_file = global_page_state(NR_FILE_PAGES) - + global_page_state(NR_SHMEM) - + total_swapcache_pages(); + + other_free = global_page_state(NR_FREE_PAGES); + + if ((other_free < lowmem_minfree[array_size - 1]) && + (other_file < vmpressure_file_min)) { + atomic_set(&shift_adj, 1); + trace_almk_vmpressure(pressure, other_free, other_file); + } + } + + return 0; +} + +static struct notifier_block lmk_vmpr_nb = { + .notifier_call = lmk_vmpressure_notifier, +}; + static int test_task_flag(struct task_struct *p, int flag) { struct task_struct *t = p; @@ -285,6 +378,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) 
unsigned long rem = 0; int tasksize; int i; + int ret = 0; short min_score_adj = OOM_SCORE_ADJ_MAX + 1; int minfree = 0; int selected_tasksize = 0; @@ -320,11 +414,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) } } + ret = adjust_minadj(&min_score_adj); + lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n", sc->nr_to_scan, sc->gfp_mask, other_free, other_file, min_score_adj); if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) { + trace_almk_shrink(0, ret, other_free, other_file, 0); lowmem_print(5, "lowmem_scan %lu, %x, return 0\n", sc->nr_to_scan, sc->gfp_mask); mutex_unlock(&scan_mutex); @@ -432,8 +529,13 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) rcu_read_unlock(); /* give the system time to free up the memory */ msleep_interruptible(20); - } else + trace_almk_shrink(selected_tasksize, ret, + other_free, other_file, + selected_oom_score_adj); + } else { + trace_almk_shrink(1, ret, other_free, other_file, 0); rcu_read_unlock(); + } lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", sc->nr_to_scan, sc->gfp_mask, rem); @@ -450,6 +552,7 @@ static struct shrinker lowmem_shrinker = { static int __init lowmem_init(void) { register_shrinker(&lowmem_shrinker); + vmpressure_notifier_register(&lmk_vmpr_nb); return 0; } device_initcall(lowmem_init); diff --git a/include/trace/events/almk.h b/include/trace/events/almk.h new file mode 100644 index 000000000000..85d712d48f50 --- /dev/null +++ b/include/trace/events/almk.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM almk + +#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENT_ALMK_H + +#include +#include + +TRACE_EVENT(almk_vmpressure, + + TP_PROTO(unsigned long pressure, + int other_free, + int other_file), + + TP_ARGS(pressure, other_free, other_file), + + TP_STRUCT__entry( + __field(unsigned long, pressure) + __field(int, other_free) + __field(int, other_file) + ), + + TP_fast_assign( + __entry->pressure = pressure; + __entry->other_free = other_free; + __entry->other_file = other_file; + ), + + TP_printk("%lu, %d, %d", + __entry->pressure, __entry->other_free, + __entry->other_file) +); + +TRACE_EVENT(almk_shrink, + + TP_PROTO(int tsize, + int vmp, + int other_free, + int other_file, + short adj), + + TP_ARGS(tsize, vmp, other_free, other_file, adj), + + TP_STRUCT__entry( + __field(int, tsize) + __field(int, vmp) + __field(int, other_free) + __field(int, other_file) + __field(short, adj) + ), + + TP_fast_assign( + __entry->tsize = tsize; + __entry->vmp = vmp; + __entry->other_free = other_free; + __entry->other_file = other_file; + __entry->adj = adj; + ), + + TP_printk("%d, %d, %d, %d, %d", + __entry->tsize, + __entry->vmp, + __entry->other_free, + __entry->other_file, + __entry->adj) +); + +#endif + +#include + -- cgit v1.2.3 From 1426d1f8d93d9a4eea97c91eb5ebd1357afe7b16 Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Fri, 27 Feb 2015 12:59:00 -0800 Subject: lowmemorykiller: Don't count swap cache pages twice The lowmem_shrink function discounts all the swap cache pages from the file cache count. The zone aware code also discounts all file cache pages from a certain zone. 
This results in some swap cache pages being discounted twice, which can result in the low memory killer being unnecessarily aggressive. Fix the low memory killer to only discount the swap cache pages once. Change-Id: I650bbfbf0fbbabd01d82bdb3502b57ff59c3e14f Signed-off-by: Liam Mark Signed-off-by: Vinayak Menon --- drivers/staging/android/lowmemorykiller.c | 3 ++- include/linux/mmzone.h | 1 + mm/swap_state.c | 2 ++ mm/vmstat.c | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index e60421299164..1aab1f67e571 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -251,7 +251,8 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, if (other_file != NULL) *other_file -= zone_page_state(zone, NR_FILE_PAGES) - - zone_page_state(zone, NR_SHMEM); + - zone_page_state(zone, NR_SHMEM) + - zone_page_state(zone, NR_SWAPCACHE); } else if (zone_idx < classzone_idx) { if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) && other_free) { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8a5894308eb2..04030f756e7c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -167,6 +167,7 @@ enum zone_stat_item { WORKINGSET_NODERECLAIM, NR_ANON_TRANSPARENT_HUGEPAGES, NR_FREE_CMA_PAGES, + NR_SWAPCACHE, NR_VM_ZONE_STAT_ITEMS }; /* diff --git a/mm/swap_state.c b/mm/swap_state.c index 4e166f1c692c..61039e39e25f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -96,6 +96,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) if (likely(!error)) { address_space->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); + __inc_zone_page_state(page, NR_SWAPCACHE); INC_CACHE_INFO(add_total); } spin_unlock_irq(&address_space->tree_lock); @@ -148,6 +149,7 @@ void __delete_from_swap_cache(struct page *page) ClearPageSwapCache(page); address_space->nrpages--; __dec_zone_page_state(page, 
NR_FILE_PAGES); + __dec_zone_page_state(page, NR_SWAPCACHE); INC_CACHE_INFO(del_total); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 4923dfe89983..c0e67319d3e8 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -763,6 +763,7 @@ const char * const vmstat_text[] = { "workingset_nodereclaim", "nr_anon_transparent_hugepages", "nr_free_cma", + "nr_swapcache", /* enum writeback_stat_item counters */ "nr_dirty_threshold", -- cgit v1.2.3 From ef5918f6805d51d9249a59ab6eead53db3e91327 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Thu, 21 May 2015 20:44:14 +0530 Subject: lowmemorykiller: use for_each_thread instead of buggy while_each_thread Couple of cases were reported few months ago, where the cpu was blocked on the following call stack for /seconds/ after which the watchdog fires. test_task_flag(p = 0xE14ABF00, ?) lowmem_shrink(?, sc = 0xD7A03C04) shrink_slab(shrink = 0xD7A03C04, nr_pages_scanned = 0, lru_pages = 120) try_to_free_pages(zonelist = 0xC1116440, ?, ?, ?) __alloc_pages_nodemask(?, order = 0, ?, nodemask = 0x0) __do_page_cache_readahead(mapping = 0xEB819364, filp = 0xCC16DC00, offset = ra_submit(?, ?, ?) filemap_fault(vma = 0xC105D240, vmf = 0xD7A03DC8) There weren't any dumps to analyse the case, but this can be a possible reason. while_each_thread is known to be buggy and can result in the function looping forever if the task exits, even when protected with rcu_read_lock. Use for_each_thread instead. 
More details on the problems with while_each_thread can be found at https://lkml.org/lkml/2013/12/2/320 Change-Id: I5eb6e4b463f81142a2a7824db389201357432ec7 Signed-off-by: Vinayak Menon --- drivers/staging/android/lowmemorykiller.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 1aab1f67e571..d840a4544629 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -186,16 +186,16 @@ static struct notifier_block lmk_vmpr_nb = { static int test_task_flag(struct task_struct *p, int flag) { - struct task_struct *t = p; + struct task_struct *t; - do { + for_each_thread(p, t) { task_lock(t); if (test_tsk_thread_flag(t, flag)) { task_unlock(t); return 1; } task_unlock(t); - } while_each_thread(p, t); + } return 0; } -- cgit v1.2.3 From 59a8d2507c667b6973135d882278a119dca09454 Mon Sep 17 00:00:00 2001 From: Vinayak Menon Date: Wed, 19 Aug 2015 16:11:04 +0530 Subject: lowmemorykiller: avoid false adaptive LMK triggers In vmpressure notifier of LMK, shift_adj would have been set by a previous invocation of notifier, which is not followed by a lowmem_shrink yet. If this is follwed by a lower vmpressure, and then by a lowmem_shrink, ALMK still triggers because of the previous higher vmpressure notification. This is wrong. Since vmpressure has improved, reset shift_adj to avoid false adaptive LMK trigger. 
CRs-fixed: 893699 Change-Id: I2d77103d7c8f4d8a66e4652cba78e619a7bcef9a Signed-off-by: Vinayak Menon --- drivers/staging/android/lowmemorykiller.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index d840a4544629..72e1d437fd1b 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -175,6 +175,15 @@ static int lmk_vmpressure_notifier(struct notifier_block *nb, atomic_set(&shift_adj, 1); trace_almk_vmpressure(pressure, other_free, other_file); } + } else if (atomic_read(&shift_adj)) { + /* + * shift_adj would have been set by a previous invocation + * of notifier, which is not followed by a lowmem_shrink yet. + * Since vmpressure has improved, reset shift_adj to avoid + * false adaptive LMK trigger. + */ + trace_almk_vmpressure(pressure, other_free, other_file); + atomic_set(&shift_adj, 0); } return 0; -- cgit v1.2.3 From d491cf59f01c82ba8c91ff80d984a1bee9186b0d Mon Sep 17 00:00:00 2001 From: Heesub Shin Date: Mon, 7 Jan 2013 11:10:13 +0900 Subject: cma: redirect page allocation to CMA CMA pages are designed to be used as fallback for movable allocations and cannot be used for non-movable allocations. If CMA pages are utilized poorly, non-movable allocations may end up getting starved if all regular movable pages are allocated and the only pages left are CMA. Always using CMA pages first creates unacceptable performance problems. As a midway alternative, use CMA pages for certain userspace allocations. The userspace pages can be migrated or dropped quickly which giving decent utilization. 
Change-Id: I6165dda01b705309eebabc6dfa67146b7a95c174 CRs-Fixed: 452508 Signed-off-by: Kyungmin Park Signed-off-by: Heesub Shin [lmark@codeaurora.org: resolve conflicts relating to MIGRATE_HIGHATOMIC and some other trivial merge conflicts] Signed-off-by: Liam Mark --- include/linux/gfp.h | 11 +++++++++-- include/linux/highmem.h | 15 +++++++++++++++ include/linux/mmzone.h | 3 +++ mm/page_alloc.c | 44 ++++++++++++++++++++++++++++++++++++-------- 4 files changed, 63 insertions(+), 10 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8942af0813e3..994f08fe426f 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -36,6 +36,7 @@ struct vm_area_struct; #define ___GFP_OTHER_NODE 0x800000u #define ___GFP_WRITE 0x1000000u #define ___GFP_KSWAPD_RECLAIM 0x2000000u +#define ___GFP_CMA 0x4000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -50,8 +51,9 @@ struct vm_area_struct; #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ -#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) - +#define __GFP_CMA ((__force gfp_t)___GFP_CMA) +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \ + __GFP_CMA) /* * Page mobility and placement hints * @@ -264,7 +266,12 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) return MIGRATE_UNMOVABLE; /* Group based on mobility */ +#ifndef CONFIG_CMA return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; +#else + return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) | + ((gfp_flags & __GFP_CMA) != 0); +#endif } #undef GFP_MOVABLE_MASK #undef GFP_MOVABLE_SHIFT diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 4c70716759a6..61aff324bd5e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -187,9 +187,24 @@ static inline struct page * 
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr) { +#ifndef CONFIG_CMA return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); +#else + return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma, + vaddr); +#endif } +#ifdef CONFIG_CMA +static inline struct page * +alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma, + vaddr); +} +#endif + static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 04030f756e7c..dfb8a6159997 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -376,6 +376,9 @@ struct zone { * considered dirtyable memory. */ unsigned long dirty_balance_reserve; +#ifdef CONFIG_CMA + bool cma_alloc; +#endif #ifndef CONFIG_SPARSEMEM /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1eafd75f402e..2695ca00653e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1815,11 +1815,26 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { - if (migratetype == MIGRATE_MOVABLE) - page = __rmqueue_cma_fallback(zone, order); + page = __rmqueue_fallback(zone, order, migratetype); + } - if (!page) - page = __rmqueue_fallback(zone, order, migratetype); + trace_mm_page_alloc_zone_locked(page, order, migratetype); + return page; +} + +static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, + int migratetype, gfp_t gfp_flags) +{ + struct page *page = 0; +#ifdef CONFIG_CMA + if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc) + page = __rmqueue_cma_fallback(zone, order); + else +#endif + page = __rmqueue_smallest(zone, order, migratetype); + + if (unlikely(!page)) { + page = __rmqueue_fallback(zone, order, migratetype); } trace_mm_page_alloc_zone_locked(page, order, migratetype); @@ 
-1833,13 +1848,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype, bool cold) + int migratetype, bool cold, int cma) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - struct page *page = __rmqueue(zone, order, migratetype, 0); + struct page *page; + if (cma) + page = __rmqueue_cma(zone, order, migratetype, 0); + else + page = __rmqueue(zone, order, migratetype, 0); if (unlikely(page == NULL)) break; @@ -2229,7 +2248,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, - migratetype, cold); + migratetype, cold, + gfp_flags & __GFP_CMA); if (unlikely(list_empty(list))) goto failed; } @@ -2263,8 +2283,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (page) trace_mm_page_alloc_zone_locked(page, order, migratetype); } - if (!page) + if (!page) { + if (gfp_flags & __GFP_CMA) + page = __rmqueue_cma(zone, order, migratetype, gfp_flags); + else page = __rmqueue(zone, order, migratetype, gfp_flags); + + } spin_unlock(&zone->lock); if (!page) goto failed; @@ -6753,6 +6778,8 @@ int alloc_contig_range(unsigned long start, unsigned long end, if (ret) return ret; + cc.zone->cma_alloc = 1; + ret = __alloc_contig_migrate_range(&cc, start, end); if (ret) goto done; @@ -6811,6 +6838,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, done: undo_isolate_page_range(pfn_max_align_down(start), pfn_max_align_up(end), migratetype); + cc.zone->cma_alloc = 0; return ret; } -- cgit v1.2.3 From 50050f2a1d798b65e049e8e2a910c593d29083fb Mon Sep 17 00:00:00 2001 From: Liam Mark Date: Mon, 23 Jun 2014 14:13:47 -0700 Subject: mm: add cma pcp list Add a cma pcp list in order to increase cma memory utilization. 
Increased cma memory utilization will improve overall memory utilization because free cma pages are ignored when memory reclaim is done with gfp mask GFP_KERNEL. Since most memory reclaim is done by kswapd, which uses a gfp mask of GFP_KERNEL, by increasing cma memory utilization we are therefore ensuring that less aggressive memory reclaim takes place. Increased cma memory utilization will improve performance, for example it will increase app concurrency. Change-Id: I809589a25c6abca51f1c963f118adfc78e955cf9 Signed-off-by: Liam Mark --- include/linux/mmzone.h | 6 ++-- mm/page_alloc.c | 94 ++++++++++++++++++++++++++++++++------------------ mm/vmstat.c | 2 +- 3 files changed, 66 insertions(+), 36 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index dfb8a6159997..ad4c3f186f61 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -39,8 +39,6 @@ enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, - MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ - MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, #ifdef CONFIG_CMA /* * MIGRATE_CMA migration type is designed to mimic the way @@ -57,6 +55,8 @@ enum { */ MIGRATE_CMA, #endif + MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, #ifdef CONFIG_MEMORY_ISOLATION MIGRATE_ISOLATE, /* can't allocate from here */ #endif @@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype); #ifdef CONFIG_CMA bool is_cma_pageblock(struct page *page); # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) +# define get_cma_migrate_type() MIGRATE_CMA #else # define is_cma_pageblock(page) false # define is_migrate_cma(migratetype) false +# define get_cma_migrate_type() MIGRATE_MOVABLE #endif #define for_each_migratetype_order(order, type) \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2695ca00653e..f54a84fb5e6e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1822,22 +1822,13 @@ static struct page 
*__rmqueue(struct zone *zone, unsigned int order, return page; } -static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, - int migratetype, gfp_t gfp_flags) +static struct page *__rmqueue_cma(struct zone *zone, unsigned int order) { struct page *page = 0; -#ifdef CONFIG_CMA - if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc) - page = __rmqueue_cma_fallback(zone, order); - else -#endif - page = __rmqueue_smallest(zone, order, migratetype); - - if (unlikely(!page)) { - page = __rmqueue_fallback(zone, order, migratetype); - } - - trace_mm_page_alloc_zone_locked(page, order, migratetype); + if (IS_ENABLED(CONFIG_CMA)) + if (!zone->cma_alloc) + page = __rmqueue_cma_fallback(zone, order); + trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA); return page; } @@ -1848,15 +1839,21 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype, bool cold, int cma) + int migratetype, bool cold) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { struct page *page; - if (cma) - page = __rmqueue_cma(zone, order, migratetype, 0); + + /* + * If migrate type CMA is being requested only try to + * satisfy the request with CMA pages to try and increase + * CMA utlization. + */ + if (is_migrate_cma(migratetype)) + page = __rmqueue_cma(zone, order); else page = __rmqueue(zone, order, migratetype, 0); if (unlikely(page == NULL)) @@ -1885,6 +1882,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, return i; } +/* + * Return the pcp list that corresponds to the migrate type if that list isn't + * empty. + * If the list is empty return NULL. 
+ */ +static struct list_head *get_populated_pcp_list(struct zone *zone, + unsigned int order, struct per_cpu_pages *pcp, + int migratetype, int cold) +{ + struct list_head *list = &pcp->lists[migratetype]; + + if (list_empty(list)) { + pcp->count += rmqueue_bulk(zone, order, + pcp->batch, list, + migratetype, cold); + + if (list_empty(list)) + list = NULL; + } + return list; +} + #ifdef CONFIG_NUMA /* * Called from the vmstat counter updater to drain pagesets of this @@ -2085,8 +2104,7 @@ void free_hot_cold_page(struct page *page, bool cold) * excessively into the page allocator */ if (migratetype >= MIGRATE_PCPTYPES) { - if (unlikely(is_migrate_isolate(migratetype)) || - is_migrate_cma(migratetype)) { + if (unlikely(is_migrate_isolate(migratetype))) { free_one_page(zone, page, pfn, 0, migratetype); goto out; } @@ -2235,22 +2253,32 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, gfp_t gfp_flags, int alloc_flags, int migratetype) { unsigned long flags; - struct page *page; + struct page *page = NULL; bool cold = ((gfp_flags & __GFP_COLD) != 0); if (likely(order == 0)) { struct per_cpu_pages *pcp; - struct list_head *list; + struct list_head *list = NULL; local_irq_save(flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; - list = &pcp->lists[migratetype]; - if (list_empty(list)) { - pcp->count += rmqueue_bulk(zone, 0, - pcp->batch, list, - migratetype, cold, - gfp_flags & __GFP_CMA); - if (unlikely(list_empty(list))) + + /* First try to get CMA pages */ + if (migratetype == MIGRATE_MOVABLE && + gfp_flags & __GFP_CMA) { + list = get_populated_pcp_list(zone, 0, pcp, + get_cma_migrate_type(), cold); + } + + if (list == NULL) { + /* + * Either CMA is not suitable or there are no free CMA + * pages. 
+ */ + list = get_populated_pcp_list(zone, 0, pcp, + migratetype, cold); + if (unlikely(list == NULL) || + unlikely(list_empty(list))) goto failed; } @@ -2283,13 +2311,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (page) trace_mm_page_alloc_zone_locked(page, order, migratetype); } - if (!page) { - if (gfp_flags & __GFP_CMA) - page = __rmqueue_cma(zone, order, migratetype, gfp_flags); - else + if (!page && migratetype == MIGRATE_MOVABLE && + gfp_flags & __GFP_CMA) + page = __rmqueue_cma(zone, order); + + if (!page) page = __rmqueue(zone, order, migratetype, gfp_flags); - } spin_unlock(&zone->lock); if (!page) goto failed; diff --git a/mm/vmstat.c b/mm/vmstat.c index c0e67319d3e8..ca75eeecbad1 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -927,10 +927,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = { "Unmovable", "Movable", "Reclaimable", - "HighAtomic", #ifdef CONFIG_CMA "CMA", #endif + "HighAtomic", #ifdef CONFIG_MEMORY_ISOLATION "Isolate", #endif -- cgit v1.2.3 From 41728295e3ac3464aefa309c8247fd5cc75f5d90 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Jan 2014 14:30:11 -0800 Subject: mm: Increase number of GFP masks The __GFP_CMA mask is now placed after all available GFP masks. With this we need to increase the total number of GFP flags. Do so accordingly. 
CRs-Fixed: 648978 Change-Id: I53f5f064ac16a50ee10c84ff2bb50fdb7e085bd0 Signed-off-by: Laura Abbott [lmark@codeaurora.org: resolve trivial merge conflicts] Signed-off-by: Liam Mark --- include/linux/gfp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 994f08fe426f..9796b4426710 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -185,7 +185,7 @@ struct vm_area_struct; #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT 26 +#define __GFP_BITS_SHIFT 27 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* -- cgit v1.2.3 From 00eb45fc95b968b9fa0f4228106c18a9efd6464b Mon Sep 17 00:00:00 2001 From: Shashank Mittal Date: Wed, 9 Mar 2016 20:52:42 -0800 Subject: coresight: replace bitmap_scnprintf with scnprintf bitmap_scnprintf has been deprecated, so replace it with scnprintf. Change-Id: I8563fafc56515fde764046f882814c1c6e4c4299 Signed-off-by: Shashank Mittal --- drivers/hwtracing/coresight/coresight-stm.c | 4 ++-- drivers/hwtracing/coresight/coresight-tpdm.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index b911be00b338..e71b8718037c 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -706,8 +706,8 @@ static ssize_t stm_show_entities(struct device *dev, struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); ssize_t len; - len = bitmap_scnprintf(buf, PAGE_SIZE, drvdata->entities, - OST_ENTITY_MAX); + len = scnprintf(buf, PAGE_SIZE, "%*pb\n", + OST_ENTITY_MAX, drvdata->entities); if (PAGE_SIZE - len < 2) len = -EINVAL; diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index bc4246e6724a..8870fceb0350 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ 
b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -727,8 +727,8 @@ static ssize_t tpdm_show_enable_datasets(struct device *dev, struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); ssize_t size; - size = bitmap_scnprintf(buf, PAGE_SIZE, drvdata->enable_ds, - TPDM_DATASETS); + size = scnprintf(buf, PAGE_SIZE, "%*pb\n", TPDM_DATASETS, + drvdata->enable_ds); if (PAGE_SIZE - size < 2) size = -EINVAL; -- cgit v1.2.3 From 4cb670d8db1073c3076d93e88ea299d94c81189e Mon Sep 17 00:00:00 2001 From: Siddartha Mohanadoss Date: Thu, 7 Apr 2016 14:59:33 -0700 Subject: msm: mhi_dev: Add MHI device driver The Modem Host Interface (MHI) device driver supports clients to send control and data packets such as IP data packets, control messages and Diagnostic data between the Host and the device. It follows the MHI specification to transfer data. The driver interfaces with the IPA driver for Hardware accelerated channels and PCIe End point driver to communicate between the Host and the device. The driver exposes to both userspace and kernel space generic IO read/write/open/close system calls and kernel APIs to communicate and transfer data between Host and the device. 
Change-Id: I64990a972cbf7c2022d638c35f7517071de67f19 Signed-off-by: Siddartha Mohanadoss --- .../devicetree/bindings/mhi/msm_mhi_dev.txt | 34 + drivers/platform/msm/Kconfig | 10 + drivers/platform/msm/Makefile | 1 + drivers/platform/msm/mhi_dev/Makefile | 6 + drivers/platform/msm/mhi_dev/mhi.c | 1952 ++++++++++++++++++++ drivers/platform/msm/mhi_dev/mhi.h | 1126 +++++++++++ drivers/platform/msm/mhi_dev/mhi_hwio.h | 191 ++ drivers/platform/msm/mhi_dev/mhi_mmio.c | 999 ++++++++++ drivers/platform/msm/mhi_dev/mhi_ring.c | 438 +++++ drivers/platform/msm/mhi_dev/mhi_sm.c | 1319 +++++++++++++ drivers/platform/msm/mhi_dev/mhi_sm.h | 51 + drivers/platform/msm/mhi_dev/mhi_uci.c | 835 +++++++++ 12 files changed, 6962 insertions(+) create mode 100644 Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt create mode 100644 drivers/platform/msm/mhi_dev/Makefile create mode 100644 drivers/platform/msm/mhi_dev/mhi.c create mode 100644 drivers/platform/msm/mhi_dev/mhi.h create mode 100644 drivers/platform/msm/mhi_dev/mhi_hwio.h create mode 100644 drivers/platform/msm/mhi_dev/mhi_mmio.c create mode 100644 drivers/platform/msm/mhi_dev/mhi_ring.c create mode 100644 drivers/platform/msm/mhi_dev/mhi_sm.c create mode 100644 drivers/platform/msm/mhi_dev/mhi_sm.h create mode 100644 drivers/platform/msm/mhi_dev/mhi_uci.c diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt new file mode 100644 index 000000000000..49d33a3c4440 --- /dev/null +++ b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt @@ -0,0 +1,34 @@ +MSM MHI DEV + +MSM MHI DEV enables communication with the host over a PCIe link using the +Modem Host Interface protocol. The driver interfaces with the IPA for +enabling the HW acceleration channel path and provides interface for +software channels to communicate between Host and device. + +Required properties: + - compatible: should be "qcom,msm-mhi-dev" for MHI device driver. 
+ - reg: MHI MMIO physical register space. + - reg-names: resource names used for the MHI MMIO physical address region, + IPA uC command and event ring doorbell mail box address. + Should be "mhi_mmio_base" for MHI MMIO physical address, + "ipa_uc_mbox_crdb" for IPA uC Command Ring doorbell, + "ipa_uc_mbox_erdb" for IPA uC Event Ring doorbell passed to + the IPA driver. + - qcom,mhi-ifc-id: ID of HW interface via which MHI on device side + communicates with host side. + - qcom,mhi-ep-msi: End point MSI number. + - qcom,mhi-version: MHI specification version supported by the device. + +Example: + + mhi: qcom,msm-mhi-dev { + compatible = "qcom,msm-mhi-dev"; + reg = <0xfc527000 0x1000>, + <0xfd4fa000 0x1>, + <0xfd4fa080 0x1>; + reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb", + "ipa_uc_mbox_erdb"; + qcom,mhi-ifc-id = <0x030017cb>; + qcom,mhi-ep-msi = <1>; + qcom,mhi-version = <0x1000000>; + }; diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 19510e5c2279..18ae7fa5454b 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -171,6 +171,16 @@ config MSM_MHI_DEBUG throughput as individual MHI packets and state transitions will be logged. +config MSM_MHI_DEV + tristate "Modem Device Interface Driver" + depends on EP_PCIE && IPA + help + This kernel module is used to interact with PCIe Root complex + supporting MHI protocol. MHI is a data transmission protocol + involving communication between a host and a device over shared + memory. MHI interacts with the IPA for supporting transfers + on the HW accelerated channels between Host and device. 
+ config MSM_11AD tristate "Platform driver for 11ad chip" depends on PCI diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index c33f5e53c1b3..d5e87c209c21 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_MSM_11AD) += msm_11ad/ obj-$(CONFIG_SEEMP_CORE) += seemp_core/ obj-$(CONFIG_SSM) += ssm.o obj-$(CONFIG_USB_BAM) += usb_bam.o +obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/ diff --git a/drivers/platform/msm/mhi_dev/Makefile b/drivers/platform/msm/mhi_dev/Makefile new file mode 100644 index 000000000000..c1969e20426d --- /dev/null +++ b/drivers/platform/msm/mhi_dev/Makefile @@ -0,0 +1,6 @@ +# Makefile for MHI driver +obj-y += mhi_mmio.o +obj-y += mhi.o +obj-y += mhi_ring.o +obj-y += mhi_uci.o +obj-y += mhi_sm.o diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c new file mode 100644 index 000000000000..142263be23aa --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi.c @@ -0,0 +1,1952 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" +#include "mhi_hwio.h" +#include "mhi_sm.h" + +/* Wait time on the device for Host to set M0 state */ +#define MHI_M0_WAIT_MIN_USLEEP 20000000 +#define MHI_M0_WAIT_MAX_USLEEP 25000000 +#define MHI_DEV_M0_MAX_CNT 30 +/* Wait time before suspend/resume is complete */ +#define MHI_SUSPEND_WAIT_MIN 3100 +#define MHI_SUSPEND_WAIT_MAX 3200 +#define MHI_SUSPEND_WAIT_TIMEOUT 500 +#define MHI_MASK_CH_EV_LEN 32 +#define MHI_RING_CMD_ID 0 +#define MHI_RING_PRIMARY_EVT_ID 1 +#define MHI_1K_SIZE 0x1000 +/* Updated Specification for event start is NER - 2 and end - NER -1 */ +#define MHI_HW_ACC_EVT_RING_START 2 +#define MHI_HW_ACC_EVT_RING_END 1 + +#define MHI_HOST_REGION_NUM 2 + +#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1 +#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2 + +#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32)) +#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF) +#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF) + +#define MHI_IPC_LOG_PAGES (100) +enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR; +enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE; +void *mhi_ipc_log; + +static struct mhi_dev *mhi_ctx; +static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event, + unsigned long data); +static void mhi_ring_init_cb(void *user_data); + +void mhi_dev_read_from_host(struct mhi_addr *host, dma_addr_t dev, size_t size) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0; + + host_addr_pa = ((u64) host->host_pa) | bit_40; + + mhi_log(MHI_MSG_ERROR, "device 0x%x <<-- host 0x%llx, size %d\n", + dev, host_addr_pa, size); + + rc = ipa_dma_sync_memcpy((u64) dev, host_addr_pa, (int) size); + if (rc) + pr_err("error while reading from host:%d\n", rc); +} +EXPORT_SYMBOL(mhi_dev_read_from_host); + +void mhi_dev_write_to_host(struct mhi_addr 
*host, void *dev, size_t size, + struct mhi_dev *mhi) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0; + + if (!mhi) { + pr_err("invalid MHI ctx\n"); + return; + } + + host_addr_pa = ((u64) host->host_pa) | bit_40; + /* Copy the device content to a local device physical address */ + memcpy(mhi->dma_cache, dev, size); + mhi_log(MHI_MSG_ERROR, "device 0x%llx --> host 0x%llx, size %d\n", + (uint64_t) mhi->cache_dma_handle, host_addr_pa, (int) size); + + rc = ipa_dma_sync_memcpy(host_addr_pa, (u64) mhi->cache_dma_handle, + (int) size); + if (rc) + pr_err("error while reading from host:%d\n", rc); +} +EXPORT_SYMBOL(mhi_dev_write_to_host); + +int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len, + struct mhi_dev *mhi) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0; + + if (!mhi) { + pr_err("Invalid mhi device\n"); + return -EINVAL; + } + + if (!dev) { + pr_err("Invalid virt device\n"); + return -EINVAL; + } + + if (!host_pa) { + pr_err("Invalid host pa device\n"); + return -EINVAL; + } + + host_addr_pa = host_pa | bit_40; + mhi_log(MHI_MSG_ERROR, "device 0x%llx <-- host 0x%llx, size %d\n", + (uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len); + rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle, + host_addr_pa, (int) len); + if (rc) { + pr_err("error while reading from host:%d\n", rc); + return rc; + } + + memcpy(dev, mhi->read_handle, len); + + return rc; +} +EXPORT_SYMBOL(mhi_transfer_host_to_device); + +int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len, + struct mhi_dev *mhi) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0; + + if (!mhi || !dev || !host_addr) { + pr_err("%sInvalid parameters\n", __func__); + return -EINVAL; + } + + host_addr_pa = host_addr | bit_40; + memcpy(mhi->write_handle, dev, len); + + mhi_log(MHI_MSG_ERROR, "device 0x%llx ---> host 0x%llx, size %d\n", + (uint64_t) mhi->write_dma_handle, host_addr_pa, (int) len); + rc = 
ipa_dma_sync_memcpy(host_addr_pa, + (u64) mhi->write_dma_handle, + (int) len); + if (rc) + pr_err("error while reading from host:%d\n", rc); + + return rc; +} +EXPORT_SYMBOL(mhi_transfer_device_to_host); + +int mhi_dev_is_list_empty(void) +{ + + if (list_empty(&mhi_ctx->event_ring_list) && + list_empty(&mhi_ctx->process_ring_list)) + return 0; + else + return 1; +} +EXPORT_SYMBOL(mhi_dev_is_list_empty); + +static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi, + struct ep_pcie_db_config *erdb_cfg) +{ + switch (mhi->cfg.event_rings) { + case NUM_CHANNELS: + erdb_cfg->base = HW_CHANNEL_BASE; + erdb_cfg->end = HW_CHANNEL_END; + break; + default: + erdb_cfg->base = mhi->cfg.event_rings - + MHI_HW_ACC_EVT_RING_START; + erdb_cfg->end = mhi->cfg.event_rings - + MHI_HW_ACC_EVT_RING_END; + break; + } +} + +int mhi_pcie_config_db_routing(struct mhi_dev *mhi) +{ + int rc = 0; + struct ep_pcie_db_config chdb_cfg, erdb_cfg; + + if (!mhi) { + pr_err("Invalid MHI context\n"); + return -EINVAL; + } + + /* Configure Doorbell routing */ + chdb_cfg.base = HW_CHANNEL_BASE; + chdb_cfg.end = HW_CHANNEL_END; + chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb; + + mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg); + + mhi_log(MHI_MSG_ERROR, + "Event rings 0x%x => er_base 0x%x, er_end %d\n", + mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end); + erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb; + ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg); + + return rc; +} +EXPORT_SYMBOL(mhi_pcie_config_db_routing); + +static int mhi_hwc_init(struct mhi_dev *mhi) +{ + int rc = 0; + struct ep_pcie_msi_config cfg; + struct ipa_mhi_init_params ipa_init_params; + struct ep_pcie_db_config erdb_cfg; + + /* Call IPA HW_ACC Init with MSI Address and db routing info */ + rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg); + if (rc) { + pr_err("Error retrieving pcie msi logic\n"); + return rc; + } + + rc = mhi_pcie_config_db_routing(mhi); + if (rc) { + pr_err("Error configuring DB 
routing\n"); + return rc; + } + + mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg); + mhi_log(MHI_MSG_ERROR, + "Event rings 0x%x => er_base 0x%x, er_end %d\n", + mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end); + + erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb; + memset(&ipa_init_params, 0, sizeof(ipa_init_params)); + ipa_init_params.msi.addr_hi = cfg.upper; + ipa_init_params.msi.addr_low = cfg.lower; + ipa_init_params.msi.data = cfg.data; + ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1); + ipa_init_params.first_er_idx = erdb_cfg.base; + ipa_init_params.first_ch_idx = HW_CHANNEL_BASE; + ipa_init_params.mmio_addr = ((uint32_t) mhi_ctx->mmio_base_pa_addr); + ipa_init_params.assert_bit40 = true; + + mhi_log(MHI_MSG_ERROR, + "MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n", + ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data); + ipa_init_params.notify = mhi_hwc_cb; + ipa_init_params.priv = mhi; + + rc = ipa_mhi_init(&ipa_init_params); + if (rc) { + pr_err("Error initializing IPA\n"); + return rc; + } + + return rc; +} + +static int mhi_hwc_start(struct mhi_dev *mhi) +{ + int rc = 0; + struct ipa_mhi_start_params ipa_start_params; + + memset(&ipa_start_params, 0, sizeof(ipa_start_params)); + + ipa_start_params.channel_context_array_addr = + mhi->ch_ctx_shadow.host_pa; + ipa_start_params.event_context_array_addr = + mhi->ev_ctx_shadow.host_pa; + + rc = ipa_mhi_start(&ipa_start_params); + if (rc) + pr_err("Error starting IPA (rc = 0x%X)\n", rc); + + return rc; +} + +static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event, + unsigned long data) +{ + int rc = 0; + + switch (event) { + case IPA_MHI_EVENT_READY: + mhi_log(MHI_MSG_ERROR, + "HW Channel uC is ready event=0x%X\n", event); + rc = mhi_hwc_start(mhi_ctx); + if (rc) { + pr_err("hwc_init start failed with %d\n", rc); + return; + } + + rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx); + if (rc) { + pr_err("Failed to enable channel db\n"); + return; + } + + rc = 
mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx); + if (rc) { + pr_err("Failed to enable control interrupt\n"); + return; + } + + rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx); + + if (rc) { + pr_err("Failed to enable command db\n"); + return; + } + break; + case IPA_MHI_EVENT_DATA_AVAILABLE: + rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP); + if (rc) { + pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc); + return; + } + break; + default: + pr_err("HW Channel uC unknown event 0x%X\n", event); + break; + } +} + +static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid, + enum mhi_dev_ring_element_type_id type) +{ + int rc = 0; + struct ipa_mhi_connect_params connect_params; + + memset(&connect_params, 0, sizeof(connect_params)); + + switch (type) { + case MHI_DEV_RING_EL_STOP: + rc = ipa_mhi_disconnect_pipe( + mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]); + if (rc) + pr_err("Stopping HW Channel%d failed 0x%X\n", + chid, rc); + break; + case MHI_DEV_RING_EL_START: + connect_params.channel_id = chid; + connect_params.sys.skip_ep_cfg = true; + if ((chid % 2) == 0x0) + connect_params.sys.client = IPA_CLIENT_MHI_PROD; + else + connect_params.sys.client = IPA_CLIENT_MHI_CONS; + + rc = ipa_mhi_connect_pipe(&connect_params, + &mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]); + if (rc) + pr_err("HW Channel%d start failed 0x%X\n", + chid, rc); + break; + case MHI_DEV_RING_EL_INVALID: + default: + pr_err("Invalid Ring Element type = 0x%X\n", type); + break; + } + + return rc; +} + +static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev, + uint32_t *int_value) +{ + int rc = 0; + + rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value); + if (rc) { + pr_err("Failed to read A7 status\n"); + return; + } + + mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value); + if (rc) { + pr_err("Failed to clear A7 status\n"); + return; + } +} + +static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id) +{ + struct mhi_addr addr; + + addr.host_pa = 
mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + addr.size = sizeof(struct mhi_dev_ch_ctx); + /* Fetch the channel ctx (*dst, *src, size) */ + mhi_dev_read_from_host(&addr, mhi->ch_ctx_cache_dma_handle + + (sizeof(struct mhi_dev_ch_ctx) * ch_id), + sizeof(struct mhi_dev_ch_ctx)); +} + +int mhi_dev_syserr(struct mhi_dev *mhi) +{ + + if (!mhi) { + pr_err("%s: Invalid MHI ctx\n", __func__); + return -EINVAL; + } + + mhi_dev_dump_mmio(mhi); + pr_err("MHI dev sys error\n"); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_syserr); + +int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring, + union mhi_dev_ring_element_type *el) +{ + int rc = 0; + uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring; + struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx]; + union mhi_dev_ring_ctx *ctx; + struct ep_pcie_msi_config cfg; + struct mhi_addr msi_addr; + uint32_t msi = 0; + struct mhi_addr host_rp_addr; + + rc = ep_pcie_get_msi_config(mhi->phandle, + &cfg); + if (rc) { + pr_err("Error retrieving pcie msi logic\n"); + return rc; + } + + if (evnt_ring_idx > mhi->cfg.event_rings) { + pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx); + return -EINVAL; + } + + if (mhi_ring_get_state(ring) == RING_STATE_UINT) { + ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring]; + rc = mhi_ring_start(ring, ctx, mhi); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error starting event ring %d\n", evnt_ring); + return rc; + } + } + + mutex_lock(&mhi->mhi_event_lock); + /* add the ring element */ + mhi_dev_add_element(ring, el); + + ring->ring_ctx_shadow->ev.rp = (ring->rd_offset * + sizeof(union mhi_dev_ring_element_type)) + + ring->ring_ctx->generic.rbase; + + mhi_log(MHI_MSG_ERROR, "ev.rp = %llx for %lld\n", + ring->ring_ctx_shadow->ev.rp, evnt_ring_idx); + + host_rp_addr.host_pa = (mhi->ev_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ev_ctx) * + evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp - + (uint32_t) ring->ring_ctx; + mhi_dev_write_to_host(&host_rp_addr, 
&ring->ring_ctx_shadow->ev.rp, + sizeof(uint64_t), + mhi); + + /* + * rp update in host memory should be flushed + * before sending a MSI to the host + */ + wmb(); + + mutex_unlock(&mhi->mhi_event_lock); + mhi_log(MHI_MSG_ERROR, "event sent:\n"); + mhi_log(MHI_MSG_ERROR, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr); + mhi_log(MHI_MSG_ERROR, "evnt len : 0x%x\n", el->evt_tr_comp.len); + mhi_log(MHI_MSG_ERROR, "evnt code :0x%x\n", el->evt_tr_comp.code); + mhi_log(MHI_MSG_ERROR, "evnt type :0x%x\n", el->evt_tr_comp.type); + mhi_log(MHI_MSG_ERROR, "evnt chid :0x%x\n", el->evt_tr_comp.chid); + + msi_addr.host_pa = (uint64_t)((uint64_t)cfg.upper << 32) | + (uint64_t)cfg.lower; + msi = cfg.data + mhi_ctx->mhi_ep_msi_num; + mhi_log(MHI_MSG_ERROR, "Sending MSI %d to 0x%llx as data = 0x%x\n", + mhi_ctx->mhi_ep_msi_num, msi_addr.host_pa, msi); + mhi_dev_write_to_host(&msi_addr, &msi, 4, mhi); + + return rc; +} + +static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch, + uint32_t rd_ofst, uint32_t len, + enum mhi_dev_cmd_completion_code code) +{ + int rc = 0; + union mhi_dev_ring_element_type compl_event; + struct mhi_dev *mhi = ch->ring->mhi_dev; + + compl_event.evt_tr_comp.chid = ch->ch_id; + compl_event.evt_tr_comp.type = + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT; + compl_event.evt_tr_comp.len = len; + compl_event.evt_tr_comp.code = code; + compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase + + rd_ofst * sizeof(struct mhi_dev_transfer_ring_element); + + rc = mhi_dev_send_event(mhi, + mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event); + + return rc; +} + +int mhi_dev_send_state_change_event(struct mhi_dev *mhi, + enum mhi_dev_state state) +{ + union mhi_dev_ring_element_type event; + int rc = 0; + + event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG; + event.evt_state_change.mhistate = state; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) { + pr_err("Sending state change event failed\n"); + return rc; + } + + return rc; +} 
+EXPORT_SYMBOL(mhi_dev_send_state_change_event); + +int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env) +{ + union mhi_dev_ring_element_type event; + int rc = 0; + + event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY; + event.evt_ee_state.execenv = exec_env; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) { + pr_err("Sending EE change event failed\n"); + return rc; + } + + return rc; +} +EXPORT_SYMBOL(mhi_dev_send_ee_event); + +int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi) +{ + int rc = 0; + + /* + * Expected usuage is when there is HW ACC traffic IPA uC notifes + * Q6 -> IPA A7 -> MHI core -> MHI SM + */ + rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP); + if (rc) { + pr_err("error sending SM event\n"); + return rc; + } + + return rc; +} +EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup); + +static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi) +{ + int rc = 0; + union mhi_dev_ring_element_type event; + + /* send the command completion event to the host */ + event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase + + (mhi->ring[MHI_RING_CMD_ID].rd_offset * + (sizeof(union mhi_dev_ring_element_type))); + mhi_log(MHI_MSG_ERROR, "evt cmd comp ptr :%d\n", + (uint32_t) event.evt_cmd_comp.ptr); + event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT; + event.evt_cmd_comp.code = MHI_CMD_COMPL_CODE_SUCCESS; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) + pr_err("channel start command faied\n"); + + return rc; +} + +static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id, + struct mhi_dev *mhi) +{ + int rc = 0; + struct mhi_addr host_addr; + + if (ring->rd_offset != ring->wr_offset && + mhi->ch_ctx_cache[ch_id].ch_type == + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) { + mhi_log(MHI_MSG_INFO, "Pending transaction to be processed\n"); + return 0; + } else if (mhi->ch_ctx_cache[ch_id].ch_type == + MHI_DEV_CH_TYPE_INBOUND_CHANNEL && + mhi->ch[ch_id].wr_request_active) { + return 0; + } + 
+ /* set the channel to stop */ + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP; + + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + /* update the channel state in the host */ + mhi_dev_write_to_host(&host_addr, &mhi->ch_ctx_cache[ch_id].ch_state, + sizeof(enum mhi_dev_ch_ctx_state), mhi); + + /* send the completion event to the host */ + rc = mhi_dev_send_cmd_comp_event(mhi); + if (rc) + pr_err("Error sending command completion event\n"); + + return rc; +} + +static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi, + union mhi_dev_ring_element_type *el, void *ctx) +{ + int rc = 0; + uint32_t ch_id = 0; + union mhi_dev_ring_element_type event; + struct mhi_addr host_addr; + + mhi_log(MHI_MSG_ERROR, "for channel:%d and cmd:%d\n", + ch_id, el->generic.type); + ch_id = el->generic.chid; + + switch (el->generic.type) { + case MHI_DEV_RING_EL_START: + mhi_log(MHI_MSG_ERROR, "recived start cmd for channel %d\n", + ch_id); + if (ch_id >= (HW_CHANNEL_BASE)) { + rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type); + if (rc) { + pr_err("Error with HW channel cmd :%d\n", rc); + return; + } + goto send_start_completion_event; + } + + /* fetch the channel context from host */ + mhi_dev_fetch_ch_ctx(mhi, ch_id); + + /* Initialize and configure the corresponding channel ring */ + rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id], + (union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id], + mhi); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "start ring failed for ch %d\n", ch_id); + return; + } + + mhi->ring[mhi->ch_ring_start + ch_id].state = + RING_STATE_PENDING; + + /* set the channel to running */ + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING; + mhi->ch[ch_id].ch_id = ch_id; + mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id]; + mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type; + + /* enable DB for event ring */ + rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id); + if (rc) { + pr_err("Failed 
to enable channel db\n"); + return; + } + + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + mhi_dev_write_to_host(&host_addr, + &mhi->ch_ctx_cache[ch_id].ch_state, + sizeof(enum mhi_dev_ch_ctx_state), mhi); + +send_start_completion_event: + rc = mhi_dev_send_cmd_comp_event(mhi); + if (rc) + pr_err("Error sending command completion event\n"); + + break; + case MHI_DEV_RING_EL_STOP: + if (ch_id >= HW_CHANNEL_BASE) { + rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "send channel stop cmd event failed\n"); + return; + } + + /* send the completion event to the host */ + event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase + + (mhi->ring[MHI_RING_CMD_ID].rd_offset * + (sizeof(union mhi_dev_ring_element_type))); + event.evt_cmd_comp.type = + MHI_DEV_RING_EL_CMD_COMPLETION_EVT; + if (rc == 0) + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_SUCCESS; + else + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_UNDEFINED; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) { + pr_err("stop event send failed\n"); + return; + } + } else { + /* + * Check if there are any pending transactions for the + * ring associated with the channel. If no, proceed to + * write disable the channel state else send stop + * channel command to check if one can suspend the + * command. 
+ */ + mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP; + rc = mhi_dev_process_stop_cmd( + &mhi->ring[mhi->ch_ring_start + ch_id], + ch_id, mhi); + if (rc) { + pr_err("stop event send failed\n"); + return; + } + } + break; + case MHI_DEV_RING_EL_RESET: + /* hard stop and set the channel to stop */ + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP; + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + + /* update the channel state in the host */ + mhi_dev_write_to_host(&host_addr, + &mhi->ch_ctx_cache[ch_id].ch_state, + sizeof(enum mhi_dev_ch_ctx_state), mhi); + + /* send the completion event to the host */ + rc = mhi_dev_send_cmd_comp_event(mhi); + if (rc) + pr_err("Error sending command completion event\n"); + break; + default: + pr_err("%s: Invalid command:%d\n", __func__, el->generic.type); + break; + } +} + +static void mhi_dev_process_tre_ring(struct mhi_dev *mhi, + union mhi_dev_ring_element_type *el, void *ctx) +{ + struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx; + struct mhi_dev_channel *ch; + struct mhi_dev_client_cb_reason reason; + + if (ring->id < mhi->ch_ring_start) { + mhi_log(MHI_MSG_ERROR, + "invalid channel ring id (%d), should be < %d\n", + ring->id, mhi->ch_ring_start); + return; + } + + ch = &mhi->ch[ring->id - mhi->ch_ring_start]; + reason.ch_id = ch->ch_id; + reason.reason = MHI_DEV_TRE_AVAILABLE; + + /* Invoke a callback to let the client know its data is ready. + * Copy this event to the clients context so that it can be + * sent out once the client has fetch the data. 
Update the rp + * before sending the data as part of the event completion + */ + if (ch->active_client && ch->active_client->event_trigger != NULL) + ch->active_client->event_trigger(&reason); +} + +static void mhi_dev_process_ring_pending(struct work_struct *work) +{ + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, pending_work); + struct list_head *cp, *q; + struct mhi_dev_ring *ring; + struct mhi_dev_channel *ch; + int rc = 0; + + mutex_lock(&mhi_ctx->mhi_lock); + rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]); + if (rc) { + mhi_log(MHI_MSG_ERROR, "error processing command ring\n"); + goto exit; + } + + list_for_each_safe(cp, q, &mhi->process_ring_list) { + ring = list_entry(cp, struct mhi_dev_ring, list); + list_del(cp); + mhi_log(MHI_MSG_ERROR, "processing ring %d\n", ring->id); + rc = mhi_dev_process_ring(ring); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error processing ring %d\n", ring->id); + goto exit; + } + + if (ring->id < mhi->ch_ring_start) { + mhi_log(MHI_MSG_ERROR, + "ring (%d) is not a channel ring\n", ring->id); + goto exit; + } + + ch = &mhi->ch[ring->id - mhi->ch_ring_start]; + rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error enabling chdb interrupt for %d\n", ch->ch_id); + goto exit; + } + } + +exit: + mutex_unlock(&mhi_ctx->mhi_lock); +} + +static int mhi_dev_get_event_notify(enum mhi_dev_state state, + enum mhi_dev_event *event) +{ + int rc = 0; + + switch (state) { + case MHI_DEV_M0_STATE: + *event = MHI_DEV_EVENT_M0_STATE; + break; + case MHI_DEV_M1_STATE: + *event = MHI_DEV_EVENT_M1_STATE; + break; + case MHI_DEV_M2_STATE: + *event = MHI_DEV_EVENT_M2_STATE; + break; + case MHI_DEV_M3_STATE: + *event = MHI_DEV_EVENT_M3_STATE; + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static void mhi_dev_queue_channel_db(struct mhi_dev *mhi, + uint32_t chintr_value, uint32_t ch_num) +{ + struct mhi_dev_ring *ring; + int rc = 0; + + for (; chintr_value; ch_num++, 
chintr_value >>= 1) { + if (chintr_value & 1) { + ring = &mhi->ring[ch_num + mhi->ch_ring_start]; + if (ring->state == RING_STATE_UINT) { + pr_err("Channel not opened for %d\n", ch_num); + break; + } + mhi_ring_set_state(ring, RING_STATE_PENDING); + list_add(&ring->list, &mhi->process_ring_list); + rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num); + if (rc) { + pr_err("Error disabling chdb\n"); + return; + } + queue_work(mhi->pending_ring_wq, &mhi->pending_work); + } + } +} + +static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi) +{ + int i, rc = 0; + uint32_t chintr_value = 0, ch_num = 0; + + rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi); + if (rc) { + pr_err("Read channel db\n"); + return; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + ch_num = i * MHI_MASK_CH_EV_LEN; + chintr_value = mhi->chdb[i].status; + if (chintr_value) { + mhi_log(MHI_MSG_ERROR, + "processing id: %d, ch interrupt 0x%x\n", + i, chintr_value); + mhi_dev_queue_channel_db(mhi, chintr_value, ch_num); + rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i), + mhi->chdb[i].status); + if (rc) { + pr_err("Error writing interrupt clear for A7\n"); + return; + } + } + } +} + +static void mhi_dev_scheduler(struct work_struct *work) +{ + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, chdb_ctrl_work); + int rc = 0; + uint32_t int_value = 0; + struct mhi_dev_ring *ring; + enum mhi_dev_state state; + enum mhi_dev_event event = 0; + + mutex_lock(&mhi_ctx->mhi_lock); + /* Check for interrupts */ + mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value); + + if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) { + mhi_log(MHI_MSG_ERROR, + "processing ctrl interrupt with %d\n", int_value); + rc = mhi_dev_mmio_get_mhi_state(mhi, &state); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + mutex_unlock(&mhi_ctx->mhi_lock); + return; + } + + rc = mhi_dev_get_event_notify(state, &event); + if (rc) { + pr_err("unsupported state :%d\n", state); + 
mutex_unlock(&mhi_ctx->mhi_lock); + return; + } + + rc = mhi_dev_notify_sm_event(event); + if (rc) { + pr_err("error sending SM event\n"); + mutex_unlock(&mhi_ctx->mhi_lock); + return; + } + } + + if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) { + mhi_log(MHI_MSG_ERROR, + "processing cmd db interrupt with %d\n", int_value); + ring = &mhi->ring[MHI_RING_CMD_ID]; + ring->state = RING_STATE_PENDING; + queue_work(mhi->pending_ring_wq, &mhi->pending_work); + } + + /* get the specific channel interrupts */ + mhi_dev_check_channel_interrupt(mhi); + + mutex_unlock(&mhi_ctx->mhi_lock); + ep_pcie_mask_irq_event(mhi->phandle, + EP_PCIE_INT_EVT_MHI_A7, true); +} + +void mhi_dev_notify_a7_event(struct mhi_dev *mhi) +{ + schedule_work(&mhi->chdb_ctrl_work); + mhi_log(MHI_MSG_ERROR, "mhi irq triggered\n"); +} +EXPORT_SYMBOL(mhi_dev_notify_a7_event); + +int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi) +{ + return 0; +} +EXPORT_SYMBOL(mhi_dev_config_outbound_iatu); + +static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi) +{ + int rc = 0; + struct platform_device *pdev; + uint64_t addr1 = 0; + + pdev = mhi->pdev; + + /* Get host memory region configuration */ + mhi_dev_get_mhi_addr(mhi); + + mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb, + mhi->host_addr.ctrl_base_msb); + mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb, + mhi->host_addr.data_base_msb); + + addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb, + mhi->host_addr.ctrl_limit_msb); + mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa; + addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb, + mhi->host_addr.data_limit_msb); + mhi->data_base.size = addr1 - mhi->data_base.host_pa; + + /* Get Channel, event and command context base pointer */ + rc = mhi_dev_mmio_get_chc_base(mhi); + if (rc) { + pr_err("Fetching channel context failed\n"); + return rc; + } + + rc = mhi_dev_mmio_get_erc_base(mhi); + if (rc) { + pr_err("Fetching event ring context failed\n"); + return rc; + } + + rc = 
mhi_dev_mmio_get_crc_base(mhi); + if (rc) { + pr_err("Fetching command ring context failed\n"); + return rc; + } + + rc = mhi_dev_update_ner(mhi); + if (rc) { + pr_err("Fetching NER failed\n"); + return rc; + } + + mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx); + mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings; + mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels; + + mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_cmd_ctx), + &mhi->cmd_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->cmd_ctx_cache) { + pr_err("no memory while allocating cmd ctx\n"); + return -ENOMEM; + } + memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx)); + + mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings, + &mhi->ev_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->ev_ctx_cache) + return -ENOMEM; + + mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels, + &mhi->ch_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi_ctx->ch_ctx_cache) + return -ENOMEM; + + /* Cache the command and event context */ + mhi_dev_read_from_host(&mhi->cmd_ctx_shadow, + mhi->cmd_ctx_cache_dma_handle, + mhi->cmd_ctx_shadow.size); + + mhi_dev_read_from_host(&mhi->ev_ctx_shadow, + mhi->ev_ctx_cache_dma_handle, + mhi->ev_ctx_shadow.size); + + mhi_log(MHI_MSG_ERROR, + "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n", + mhi->cmd_ctx_cache->rbase, + mhi->cmd_ctx_cache->rp, + mhi->cmd_ctx_cache->wp); + mhi_log(MHI_MSG_ERROR, + "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n", + mhi_ctx->ev_ctx_cache->rbase, + mhi->ev_ctx_cache->rp, + mhi->ev_ctx_cache->wp); + + rc = mhi_ring_start(&mhi->ring[0], + (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi); + if (rc) { + pr_err("error in ring start\n"); + return rc; + } + + return 0; +} + +int mhi_dev_suspend(struct mhi_dev *mhi) +{ + int ch_id = 0, rc = 0; + struct mhi_addr 
host_addr; + + mutex_lock(&mhi_ctx->mhi_write_test); + atomic_set(&mhi->is_suspended, 1); + + for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) { + if (mhi->ch_ctx_cache[ch_id].ch_state != + MHI_DEV_CH_STATE_RUNNING) + continue; + + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED; + + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + + /* update the channel state in the host */ + mhi_dev_write_to_host(&host_addr, + &mhi->ch_ctx_cache[ch_id].ch_state, + sizeof(enum mhi_dev_ch_ctx_state), mhi); + + } + + rc = ipa_dma_disable(); + if (rc) + pr_err("Disable IPA failed\n"); + + mutex_unlock(&mhi_ctx->mhi_write_test); + + return rc; +} +EXPORT_SYMBOL(mhi_dev_suspend); + +int mhi_dev_resume(struct mhi_dev *mhi) +{ + int ch_id = 0, rc = 0; + struct mhi_addr host_addr; + + rc = ipa_dma_enable(); + if (rc) { + pr_err("IPA enable failed\n"); + return rc; + } + + for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) { + if (mhi->ch_ctx_cache[ch_id].ch_state != + MHI_DEV_CH_STATE_SUSPENDED) + continue; + + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING; + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + + /* update the channel state in the host */ + mhi_dev_write_to_host(&host_addr, + &mhi->ch_ctx_cache[ch_id].ch_state, + sizeof(enum mhi_dev_ch_ctx_state), mhi); + } + + atomic_set(&mhi->is_suspended, 0); + + return rc; +} +EXPORT_SYMBOL(mhi_dev_resume); + +static int mhi_dev_ring_init(struct mhi_dev *dev) +{ + int i = 0; + + mhi_log(MHI_MSG_INFO, "initializing all rings"); + dev->cmd_ring_idx = 0; + dev->ev_ring_start = 1; + dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings; + + /* Initialize CMD ring */ + mhi_ring_init(&dev->ring[dev->cmd_ring_idx], + RING_TYPE_CMD, dev->cmd_ring_idx); + + mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx], + mhi_dev_process_cmd_ring); + + /* Initialize Event ring */ + for (i = dev->ev_ring_start; i < (dev->cfg.event_rings 
+ + dev->ev_ring_start); i++) + mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i); + + /* Initialize CH */ + for (i = dev->ch_ring_start; i < (dev->cfg.channels + + dev->ch_ring_start); i++) { + mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i); + mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring); + } + + + return 0; +} + +int mhi_dev_open_channel(uint32_t chan_id, + struct mhi_dev_client **handle_client, + void (*mhi_dev_client_cb_reason) + (struct mhi_dev_client_cb_reason *cb)) +{ + int rc = 0; + struct mhi_dev_channel *ch; + struct platform_device *pdev; + + pdev = mhi_ctx->pdev; + ch = &mhi_ctx->ch[chan_id]; + + mutex_lock(&ch->ch_lock); + + if (ch->active_client) { + mhi_log(MHI_MSG_ERROR, + "Channel (%d) already opened by client\n", chan_id); + rc = -EINVAL; + goto exit; + } + + /* Initialize the channel, client and state information */ + *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL); + if (!(*handle_client)) { + dev_err(&pdev->dev, "can not allocate mhi_dev memory\n"); + rc = -ENOMEM; + goto exit; + } + + ch->active_client = (*handle_client); + (*handle_client)->channel = ch; + (*handle_client)->event_trigger = mhi_dev_client_cb_reason; + + if (ch->state == MHI_DEV_CH_UNINT) { + ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start]; + ch->state = MHI_DEV_CH_PENDING_START; + } else if (ch->state == MHI_DEV_CH_CLOSED) + ch->state = MHI_DEV_CH_STARTED; + else if (ch->state == MHI_DEV_CH_STOPPED) + ch->state = MHI_DEV_CH_PENDING_START; + +exit: + mutex_unlock(&ch->ch_lock); + return rc; +} +EXPORT_SYMBOL(mhi_dev_open_channel); + +int mhi_dev_channel_isempty(struct mhi_dev_client *handle) +{ + struct mhi_dev_channel *ch; + int rc; + + ch = handle->channel; + + rc = ch->ring->rd_offset == ch->ring->wr_offset; + + return rc; +} +EXPORT_SYMBOL(mhi_dev_channel_isempty); + +int mhi_dev_close_channel(struct mhi_dev_client *handle) +{ + struct mhi_dev_channel *ch; + int rc = 0; + + ch = handle->channel; + + mutex_lock(&ch->ch_lock); + if 
(ch->state != MHI_DEV_CH_PENDING_START) { + if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL && + !mhi_dev_channel_isempty(handle)) { + mhi_log(MHI_MSG_ERROR, + "Trying to close an active channel (%d)\n", + ch->ch_id); + mutex_unlock(&ch->ch_lock); + rc = -EAGAIN; + goto exit; + } else if (ch->tre_loc) { + mhi_log(MHI_MSG_ERROR, + "Trying to close channel (%d) when a TRE is active", + ch->ch_id); + mutex_unlock(&ch->ch_lock); + rc = -EAGAIN; + goto exit; + } + } + + ch->state = MHI_DEV_CH_CLOSED; + ch->active_client = NULL; + kfree(handle); +exit: + mutex_unlock(&ch->ch_lock); + return rc; +} +EXPORT_SYMBOL(mhi_dev_close_channel); + +static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch, + struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el, + uint32_t *chain) +{ + uint32_t td_done = 0; + + /* + * A full TRE worth of data was consumed. + * Check if we are at a TD boundary. + */ + if (ch->tre_bytes_left == 0) { + if (el->tre.chain) { + if (el->tre.ieob) + mhi_dev_send_completion_event(ch, + ring->rd_offset, el->tre.len, + MHI_CMD_COMPL_CODE_EOB); + *chain = 1; + } else { + if (el->tre.ieot) + mhi_dev_send_completion_event( + ch, ring->rd_offset, el->tre.len, + MHI_CMD_COMPL_CODE_EOT); + td_done = 1; + *chain = 0; + } + mhi_dev_ring_inc_index(ring, ring->rd_offset); + ch->tre_bytes_left = 0; + ch->tre_loc = 0; + } + + return td_done; +} + +int mhi_dev_read_channel(struct mhi_dev_client *handle_client, + void *buf, uint32_t buf_size, uint32_t *chain) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + union mhi_dev_ring_element_type *el; + uint32_t ch_id; + size_t bytes_to_read, addr_offset; + uint64_t read_from_loc; + ssize_t bytes_read = 0; + uint32_t write_to_loc = 0; + size_t usr_buf_remaining = buf_size; + int td_done = 0, rc = 0; + + if (!handle_client) { + mhi_log(MHI_MSG_ERROR, "invalid client handle\n"); + return -ENXIO; + } + + ch = handle_client->channel; + ring = ch->ring; + ch_id = ch->ch_id; + *chain = 0; + + 
mutex_lock(&ch->ch_lock); + + do { + el = &ring->ring_cache[ring->rd_offset]; + if (ch->tre_loc) { + bytes_to_read = min(usr_buf_remaining, + ch->tre_bytes_left); + *chain = 1; + mhi_log(MHI_MSG_ERROR, + "remaining buffered data size %d\n", + (int) ch->tre_bytes_left); + } else { + if (ring->rd_offset == ring->wr_offset) { + mhi_log(MHI_MSG_ERROR, + "nothing to read, returning\n"); + bytes_read = 0; + goto exit; + } + + if (ch->state == MHI_DEV_CH_STOPPED) { + mhi_log(MHI_MSG_ERROR, + "channel (%d) already stopped\n", + ch_id); + bytes_read = -1; + goto exit; + } + + ch->tre_loc = el->tre.data_buf_ptr; + ch->tre_size = el->tre.len; + ch->tre_bytes_left = ch->tre_size; + + mhi_log(MHI_MSG_ERROR, + "user_buf_remaining %d, ch->tre_size %d\n", + usr_buf_remaining, ch->tre_size); + bytes_to_read = min(usr_buf_remaining, ch->tre_size); + } + + addr_offset = ch->tre_size - ch->tre_bytes_left; + read_from_loc = ch->tre_loc + addr_offset; + write_to_loc = (uint32_t) buf + (buf_size - usr_buf_remaining); + + mhi_log(MHI_MSG_ERROR, "reading %d bytes from chan %d\n", + bytes_to_read, ch_id); + + mhi_transfer_host_to_device((void *) write_to_loc, + read_from_loc, bytes_to_read, mhi_ctx); + + bytes_read += bytes_to_read; + ch->tre_bytes_left -= bytes_to_read; + usr_buf_remaining -= bytes_to_read; + td_done = mhi_dev_check_tre_bytes_left(ch, ring, el, chain); + } while (usr_buf_remaining && !td_done); + + if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) { + ch->state = MHI_DEV_CH_STOPPED; + rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error while stopping channel (%d)\n", ch_id); + bytes_read = -1; + } + } +exit: + mutex_unlock(&ch->ch_lock); + return bytes_read; +} +EXPORT_SYMBOL(mhi_dev_read_channel); + +static void skip_to_next_td(struct mhi_dev_channel *ch) +{ + struct mhi_dev_ring *ring = ch->ring; + union mhi_dev_ring_element_type *el; + uint32_t td_boundary_reached = 0; + + ch->skip_td = 1; + el = 
&ring->ring_cache[ring->rd_offset]; + while (ring->rd_offset != ring->wr_offset) { + if (td_boundary_reached) { + ch->skip_td = 0; + break; + } + if (!el->tre.chain) + td_boundary_reached = 1; + mhi_dev_ring_inc_index(ring, ring->rd_offset); + el = &ring->ring_cache[ring->rd_offset]; + } +} + +int mhi_dev_write_channel(struct mhi_dev_client *handle_client, + void *buf, size_t buf_size) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + union mhi_dev_ring_element_type *el; + enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID; + int rc = 0; + uint64_t ch_id, skip_tres = 0, write_to_loc; + uint32_t read_from_loc; + size_t usr_buf_remaining = buf_size; + size_t usr_buf_offset = 0; + size_t bytes_to_write = 0; + size_t bytes_written = 0; + uint32_t tre_len = 0, suspend_wait_timeout = 0; + + if (!handle_client) { + pr_err("%s: invalid client handle\n", __func__); + return -ENXIO; + } + + if (!buf) { + pr_err("%s: invalid buffer to write data\n", __func__); + return -ENXIO; + } + + mutex_lock(&mhi_ctx->mhi_write_test); + + if (atomic_read(&mhi_ctx->is_suspended)) { + /* + * Expected usage is when there is a write + * to the MHI core -> notify SM. 
+ */ + rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP); + if (rc) { + pr_err("error sending core wakeup event\n"); + mutex_unlock(&mhi_ctx->mhi_write_test); + return rc; + } + } + + atomic_inc(&mhi_ctx->write_active); + while (atomic_read(&mhi_ctx->is_suspended) && + suspend_wait_timeout < MHI_SUSPEND_WAIT_TIMEOUT) { + /* wait for the suspend to finish */ + usleep_range(MHI_SUSPEND_WAIT_MIN, MHI_SUSPEND_WAIT_MAX); + suspend_wait_timeout++; + } + + ch = handle_client->channel; + ch->wr_request_active = true; + + ring = ch->ring; + ch_id = ch->ch_id; + + mutex_lock(&ch->ch_lock); + + if (ch->state == MHI_DEV_CH_STOPPED) { + mhi_log(MHI_MSG_ERROR, + "channel (%lld) already stopped\n", ch_id); + bytes_written = -1; + goto exit; + } + + if (ch->state == MHI_DEV_CH_PENDING_STOP) { + if (mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx) < 0) + bytes_written = -1; + goto exit; + } + + if (ch->skip_td) + skip_to_next_td(ch); + + do { + if (ring->rd_offset == ring->wr_offset) { + mhi_log(MHI_MSG_INFO, "No TREs available\n"); + break; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = el->tre.len; + + bytes_to_write = min(usr_buf_remaining, tre_len); + usr_buf_offset = buf_size - bytes_to_write; + read_from_loc = (uint32_t) buf + usr_buf_offset; + write_to_loc = el->tre.data_buf_ptr; + mhi_transfer_device_to_host(write_to_loc, + (void *) read_from_loc, + bytes_to_write, mhi_ctx); + bytes_written += bytes_to_write; + usr_buf_remaining -= bytes_to_write; + + if (usr_buf_remaining) { + if (!el->tre.chain) + code = MHI_CMD_COMPL_CODE_OVERFLOW; + else if (el->tre.ieob) + code = MHI_CMD_COMPL_CODE_EOB; + } else { + if (el->tre.chain) + skip_tres = 1; + code = MHI_CMD_COMPL_CODE_EOT; + } + + if (mhi_dev_send_completion_event(ch, + ring->rd_offset, bytes_to_write, code) < 0) { + mhi_log(MHI_MSG_ERROR, + "error sending completion event ch_id:%lld\n", + ch_id); + } + + if (ch->state == MHI_DEV_CH_PENDING_STOP) + break; + + mhi_dev_ring_inc_index(ring, ring->rd_offset); + 
} while (!skip_tres && usr_buf_remaining); + + if (skip_tres) + skip_to_next_td(ch); + + if (ch->state == MHI_DEV_CH_PENDING_STOP) { + rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "channel (%lld) stop failed\n", ch_id); + } + } +exit: + mutex_unlock(&ch->ch_lock); + atomic_dec(&mhi_ctx->write_active); + mutex_unlock(&mhi_ctx->mhi_write_test); + return bytes_written; +} +EXPORT_SYMBOL(mhi_dev_write_channel); + +static void mhi_dev_enable(struct work_struct *work) +{ + int rc = 0; + struct ep_pcie_msi_config msi_cfg; + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, ring_init_cb_work); + + enum mhi_dev_state state; + uint32_t max_cnt = 0; + + + rc = ipa_dma_init(); + if (rc) { + pr_err("ipa dma init failed\n"); + return; + } + + rc = ipa_dma_enable(); + if (rc) { + pr_err("ipa enable failed\n"); + return; + } + + rc = mhi_dev_ring_init(mhi); + if (rc) { + pr_err("MHI dev ring init failed\n"); + return; + } + + /* Invoke MHI SM when device is in RESET state */ + mhi_dev_sm_init(mhi); + + /* set the env before setting the ready bit */ + rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE); + if (rc) { + pr_err("%s: env setting failed\n", __func__); + return; + } + mhi_uci_init(); + + /* All set...let's notify the host */ + mhi_dev_sm_set_ready(); + + rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg); + if (rc) + pr_warn("MHI: error geting msi configs\n"); + + rc = mhi_dev_mmio_get_mhi_state(mhi, &state); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + return; + } + + while (state != MHI_DEV_M0_STATE && max_cnt < MHI_DEV_M0_MAX_CNT) { + /* Wait for Host to set the M0 state */ + usleep_range(MHI_M0_WAIT_MIN_USLEEP, MHI_M0_WAIT_MAX_USLEEP); + rc = mhi_dev_mmio_get_mhi_state(mhi, &state); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + return; + } + max_cnt++; + } + + mhi_log(MHI_MSG_INFO, "state:%d\n", state); + + if (state == MHI_DEV_M0_STATE) { + rc = mhi_dev_cache_host_cfg(mhi); + if 
(rc) { + pr_err("Failed to cache the host config\n"); + return; + } + + rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE); + if (rc) { + pr_err("%s: env setting failed\n", __func__); + return; + } + } else { + pr_err("MHI device failed to enter M0\n"); + return; + } + + rc = mhi_hwc_init(mhi_ctx); + if (rc) { + pr_err("error during hwc_init\n"); + return; + } +} + +static void mhi_ring_init_cb(void *data) +{ + struct mhi_dev *mhi = data; + + if (!mhi) { + pr_err("Invalid MHI ctx\n"); + return; + } + + queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work); +} + +static int get_device_tree_data(struct platform_device *pdev) +{ + struct mhi_dev *mhi; + int rc = 0; + struct resource *res_mem = NULL; + + mhi = devm_kzalloc(&pdev->dev, + sizeof(struct mhi_dev), GFP_KERNEL); + if (!mhi) + return -ENOMEM; + + mhi->pdev = pdev; + mhi->dev = &pdev->dev; + res_mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "mhi_mmio_base"); + if (!res_mem) { + rc = -EINVAL; + pr_err("Request MHI MMIO physical memory region failed\n"); + return rc; + } + + mhi->mmio_base_pa_addr = res_mem->start; + mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE); + if (!mhi->mmio_base_addr) { + pr_err("Failed to IO map MMIO registers.\n"); + rc = -EINVAL; + return rc; + } + + res_mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "ipa_uc_mbox_crdb"); + if (!res_mem) { + rc = -EINVAL; + pr_err("Request IPA_UC_MBOX CRDB physical region failed\n"); + return rc; + } + + mhi->ipa_uc_mbox_crdb = res_mem->start; + + res_mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "ipa_uc_mbox_erdb"); + if (!res_mem) { + rc = -EINVAL; + pr_err("Request IPA_UC_MBOX ERDB physical region failed\n"); + return rc; + } + + mhi->ipa_uc_mbox_erdb = res_mem->start; + + mhi_ctx = mhi; + + rc = of_property_read_u32((&pdev->dev)->of_node, + "qcom,mhi-ifc-id", + &mhi_ctx->ifc_id); + + if (rc) { + pr_err("qcom,mhi-ifc-id does not exist.\n"); + return rc; + } + + rc = 
of_property_read_u32((&pdev->dev)->of_node, + "qcom,mhi-ep-msi", + &mhi_ctx->mhi_ep_msi_num); + if (rc) { + pr_err("qcom,mhi-ep-msi does not exist.\n"); + return rc; + } + + rc = of_property_read_u32((&pdev->dev)->of_node, + "qcom,mhi-version", + &mhi_ctx->mhi_version); + if (rc) { + pr_err("qcom,mhi-version does not exist.\n"); + return rc; + } + + return 0; +} + +static int mhi_init(struct mhi_dev *mhi) +{ + int rc = 0, i = 0; + struct platform_device *pdev = mhi->pdev; + + + rc = mhi_dev_mmio_init(mhi); + if (rc) { + pr_err("Failed to update the MMIO init\n"); + return rc; + } + + + mhi->ring = devm_kzalloc(&pdev->dev, + (sizeof(struct mhi_dev_ring) * + (mhi->cfg.channels + mhi->cfg.event_rings + 1)), + GFP_KERNEL); + if (!mhi->ring) + return -ENOMEM; + + mhi->ch = devm_kzalloc(&pdev->dev, + (sizeof(struct mhi_dev_channel) * + (mhi->cfg.channels)), GFP_KERNEL); + if (!mhi->ch) + return -ENOMEM; + + for (i = 0; i < mhi->cfg.channels; i++) + mutex_init(&mhi->ch[i].ch_lock); + + mhi->mmio_backup = devm_kzalloc(&pdev->dev, MHI_DEV_MMIO_RANGE, + GFP_KERNEL); + if (!mhi->mmio_backup) + return -ENOMEM; + + mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0); + if (mhi_ipc_log == NULL) { + dev_err(&pdev->dev, + "Failed to create IPC logging context\n"); + } + + return 0; +} + +static int mhi_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + if (pdev->dev.of_node) { + rc = get_device_tree_data(pdev); + if (rc) { + pr_err("Error reading MHI Dev DT\n"); + return rc; + } + } + + mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id); + if (!mhi_ctx->phandle) { + pr_err("PCIe driver is not ready yet.\n"); + return -EPROBE_DEFER; + } + + if (ep_pcie_get_linkstatus(mhi_ctx->phandle) != EP_PCIE_LINK_ENABLED) { + pr_err("PCIe link is not ready to use.\n"); + return -EPROBE_DEFER; + } + + INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler); + + mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq", + WQ_HIGHPRI, 0); + if 
(!mhi_ctx->pending_ring_wq) { + rc = -ENOMEM; + return rc; + } + + INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending); + + INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable); + + mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq", + WQ_HIGHPRI, 0); + if (!mhi_ctx->ring_init_wq) { + rc = -ENOMEM; + return rc; + } + + INIT_LIST_HEAD(&mhi_ctx->event_ring_list); + INIT_LIST_HEAD(&mhi_ctx->process_ring_list); + mutex_init(&mhi_ctx->mhi_lock); + mutex_init(&mhi_ctx->mhi_event_lock); + mutex_init(&mhi_ctx->mhi_write_test); + + rc = mhi_init(mhi_ctx); + if (rc) + return rc; + + mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 4), + &mhi_ctx->cache_dma_handle, GFP_KERNEL); + if (!mhi_ctx->dma_cache) + return -ENOMEM; + + mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 4), + &mhi_ctx->read_dma_handle, + GFP_KERNEL); + if (!mhi_ctx->read_handle) + return -ENOMEM; + + mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 24), + &mhi_ctx->write_dma_handle, + GFP_KERNEL); + if (!mhi_ctx->write_handle) + return -ENOMEM; + + rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version); + if (rc) { + pr_err("Failed to update the MHI version\n"); + return rc; + } + + mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT | + EP_PCIE_EVENT_PM_D3_COLD | + EP_PCIE_EVENT_PM_D0 | + EP_PCIE_EVENT_PM_RST_DEAST | + EP_PCIE_EVENT_MHI_A7 | + EP_PCIE_EVENT_LINKDOWN; + mhi_ctx->event_reg.user = mhi_ctx; + mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK; + mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler; + + rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg); + if (rc) { + pr_err("Failed to register for events from PCIe\n"); + return rc; + } + + pr_err("Registering with IPA\n"); + + rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx); + if (rc < 0) { + if (rc == -EEXIST) { + mhi_ring_init_cb(mhi_ctx); + } else { + pr_err("Error calling IPA cb with %d\n", rc); + 
return rc; + } + } + + return 0; +} + +static int mhi_dev_remove(struct platform_device *pdev) +{ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id mhi_dev_match_table[] = { + { .compatible = "qcom,msm-mhi-dev" }, + {} +}; + +static struct platform_driver mhi_dev_driver = { + .driver = { + .name = "qcom,msm-mhi-dev", + .of_match_table = mhi_dev_match_table, + }, + .probe = mhi_dev_probe, + .remove = mhi_dev_remove, +}; + +module_param(mhi_msg_lvl, uint, S_IRUGO | S_IWUSR); +module_param(mhi_ipc_msg_lvl, uint, S_IRUGO | S_IWUSR); + +MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl"); +MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl"); + +static int __init mhi_dev_init(void) +{ + return platform_driver_register(&mhi_dev_driver); +} +module_init(mhi_dev_init); + +static void __exit mhi_dev_exit(void) +{ + platform_driver_unregister(&mhi_dev_driver); +} +module_exit(mhi_dev_exit); + +MODULE_DESCRIPTION("MHI device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h new file mode 100644 index 000000000000..6b3c6a8a78b2 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi.h @@ -0,0 +1,1126 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MHI_H +#define __MHI_H + +#include +#include +#include +#include + +/** + * MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings. 
+ */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t 
rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum 
mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 4096 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct 
mhi_dev_transfer_ring_element tre; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + uint32_t size; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +struct mhi_dev_channel; + +struct mhi_dev_ring { + struct list_head list; + struct mhi_dev *mhi_dev; + + uint32_t id; + uint32_t rd_offset; + uint32_t wr_offset; + uint32_t ring_size; + + enum mhi_dev_ring_type type; + enum mhi_dev_ring_state state; + + /* device virtual address location of the cached host ring ctx data */ + union mhi_dev_ring_element_type *ring_cache; + /* Physical address of the cached ring copy on the device side */ + dma_addr_t ring_cache_dma_handle; + /* Physical address of the 
host where we will write/read to/from */ + struct mhi_addr ring_shadow; + /* Ring type - cmd, event, transfer ring and its rp/wp... */ + union mhi_dev_ring_ctx *ring_ctx; + /* ring_ctx_shadow -> tracking ring_ctx in the host */ + union mhi_dev_ring_ctx *ring_ctx_shadow; + void (*ring_cb)(struct mhi_dev *dev, + union mhi_dev_ring_element_type *el, + void *ctx); +}; + +static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring, + uint32_t rd_offset) +{ + ring->rd_offset++; + if (ring->rd_offset == ring->ring_size) + ring->rd_offset = 0; +} + +/* trace information planned to use for read/write */ +#define TRACE_DATA_MAX 128 +#define MHI_DEV_DATA_MAX 512 + +#define MHI_DEV_MMIO_RANGE 0xc80 + +enum cb_reason { + MHI_DEV_TRE_AVAILABLE = 0, +}; + +struct mhi_dev_client_cb_reason { + uint32_t ch_id; + enum cb_reason reason; +}; + +struct mhi_dev_client { + struct list_head list; + struct mhi_dev_channel *channel; + void (*event_trigger)(struct mhi_dev_client_cb_reason *cb); + + /* mhi_dev calls are fully synchronous -- only one call may be + * active per client at a time for now. 
+ */ + struct mutex write_lock; + wait_queue_head_t wait; + + /* trace logs */ + spinlock_t tr_lock; + unsigned tr_head; + unsigned tr_tail; + struct mhi_dev_trace *tr_log; + + /* client buffers */ + struct mhi_dev_iov *iov; + uint32_t nr_iov; +}; + +struct mhi_dev_channel { + struct list_head list; + struct list_head clients; + /* synchronization for changing channel state, + * adding/removing clients, mhi_dev callbacks, etc + */ + spinlock_t lock; + + struct mhi_dev_ring *ring; + + enum mhi_dev_channel_state state; + uint32_t ch_id; + enum mhi_dev_ch_ctx_type ch_type; + struct mutex ch_lock; + /* client which the current inbound/outbound message is for */ + struct mhi_dev_client *active_client; + + /* current TRE being processed */ + uint64_t tre_loc; + /* current TRE size */ + uint32_t tre_size; + /* tre bytes left to read/write */ + uint32_t tre_bytes_left; + /* td size being read/written from/to so far */ + uint32_t td_size; + bool wr_request_active; + bool skip_td; +}; + +/* Structure device for mhi dev */ +struct mhi_dev { + struct platform_device *pdev; + struct device *dev; + /* MHI MMIO related members */ + phys_addr_t mmio_base_pa_addr; + void *mmio_base_addr; + phys_addr_t ipa_uc_mbox_crdb; + phys_addr_t ipa_uc_mbox_erdb; + + uint32_t *mmio_backup; + struct mhi_config cfg; + bool mmio_initialized; + + /* Host control base information */ + struct mhi_host_addr host_addr; + struct mhi_addr ctrl_base; + struct mhi_addr data_base; + struct mhi_addr ch_ctx_shadow; + struct mhi_dev_ch_ctx *ch_ctx_cache; + dma_addr_t ch_ctx_cache_dma_handle; + struct mhi_addr ev_ctx_shadow; + struct mhi_dev_ch_ctx *ev_ctx_cache; + dma_addr_t ev_ctx_cache_dma_handle; + + struct mhi_addr cmd_ctx_shadow; + struct mhi_dev_ch_ctx *cmd_ctx_cache; + dma_addr_t cmd_ctx_cache_dma_handle; + struct mhi_dev_ring *ring; + struct mhi_dev_channel *ch; + + int ctrl_int; + int cmd_int; + /* CHDB and EVDB device interrupt state */ + struct mhi_interrupt_state chdb[4]; + struct 
mhi_interrupt_state evdb[4]; + + /* Scheduler work */ + struct work_struct chdb_ctrl_work; + struct mutex mhi_lock; + struct mutex mhi_event_lock; + + /* process a ring element */ + struct workqueue_struct *pending_ring_wq; + struct work_struct pending_work; + + struct list_head event_ring_list; + struct list_head process_ring_list; + + uint32_t cmd_ring_idx; + uint32_t ev_ring_start; + uint32_t ch_ring_start; + + /* IPA Handles */ + u32 ipa_clnt_hndl[4]; + struct workqueue_struct *ring_init_wq; + struct work_struct ring_init_cb_work; + + /* EP PCIe registration */ + struct ep_pcie_register_event event_reg; + u32 ifc_id; + struct ep_pcie_hw *phandle; + + atomic_t write_active; + atomic_t is_suspended; + struct mutex mhi_write_test; + u32 mhi_ep_msi_num; + u32 mhi_version; + void *dma_cache; + void *read_handle; + void *write_handle; + /* Physical scratch buffer for writing control data to the host */ + dma_addr_t cache_dma_handle; + /* + * Physical scratch buffer address used when picking host data + * from the host used in mhi_read() + */ + dma_addr_t read_dma_handle; + /* + * Physical scratch buffer address used when writing to the host + * region from device used in mhi_write() + */ + dma_addr_t write_dma_handle; +}; + +enum mhi_msg_level { + MHI_MSG_VERBOSE = 0x0, + MHI_MSG_INFO = 0x1, + MHI_MSG_DBG = 0x2, + MHI_MSG_WARNING = 0x3, + MHI_MSG_ERROR = 0x4, + MHI_MSG_CRITICAL = 0x5, + MHI_MSG_reserved = 0x80000000 +}; + +extern enum mhi_msg_level mhi_msg_lvl; +extern enum mhi_msg_level mhi_ipc_msg_lvl; +extern void *mhi_ipc_log; + +#define mhi_log(_msg_lvl, _msg, ...) 
do { \ + if (_msg_lvl >= mhi_msg_lvl) { \ + pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ + } \ + if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \ + ipc_log_string(mhi_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ + } \ +} while (0) + +/* SW channel client list */ +enum mhi_client_channel { + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_IP_CTRL_0_OUT = 16, + MHI_CLIENT_IP_CTRL_0_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_DCI_OUT = 20, + MHI_CLIENT_DCI_IN = 21, + MHI_CLIENT_IP_CTRL_3_OUT = 22, + MHI_CLIENT_IP_CTRL_3_IN = 23, + MHI_CLIENT_IP_CTRL_4_OUT = 24, + MHI_CLIENT_IP_CTRL_4_IN = 25, + MHI_CLIENT_IP_CTRL_5_OUT = 26, + MHI_CLIENT_IP_CTRL_5_IN = 27, + MHI_CLIENT_IP_CTRL_6_OUT = 28, + MHI_CLIENT_IP_CTRL_6_IN = 29, + MHI_CLIENT_IP_CTRL_7_OUT = 30, + MHI_CLIENT_IP_CTRL_7_IN = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_IP_SW_0_OUT = 34, + MHI_CLIENT_IP_SW_0_IN = 35, + MHI_CLIENT_IP_SW_1_OUT = 36, + MHI_CLIENT_IP_SW_1_IN = 37, + MHI_CLIENT_IP_SW_2_OUT = 38, + MHI_CLIENT_IP_SW_2_IN = 39, + MHI_CLIENT_IP_SW_3_OUT = 40, + MHI_CLIENT_IP_SW_3_IN = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_MAX_SOFTWARE_CHANNELS = 46, + MHI_CLIENT_TEST_OUT = 60, + MHI_CLIENT_TEST_IN = 61, + MHI_CLIENT_RESERVED_1_LOWER = 62, + MHI_CLIENT_RESERVED_1_UPPER = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_RESERVED_2_LOWER = 102, + MHI_CLIENT_RESERVED_2_UPPER = 127, + MHI_MAX_CHANNELS = 102, +}; + +struct 
mhi_dev_iov { + void *addr; + uint32_t buf_size; +}; + +/** + * mhi_dev_open_channel() - Channel open for a given client done prior + * to read/write. + * @chan_id: Software Channel ID for the assigned client. + * @handle_client: Structure device for client handle. + * @notifier: Client issued callback notification. + */ +int mhi_dev_open_channel(uint32_t chan_id, + struct mhi_dev_client **handle_client, + void (*event_trigger)(struct mhi_dev_client_cb_reason *cb)); +/** + * mhi_dev_close_channel() - Channel close for a given client. + */ +int mhi_dev_close_channel(struct mhi_dev_client *handle_client); + +/** + * mhi_dev_read_channel() - Channel read for a given client + * @handle_client: Client Handle issued during mhi_dev_open_channel + * @buf: Pointer to the buffer used by the MHI core to copy the data received + * from the Host. + * @buf_size: Size of the buffer pointer. + * @chain : Indicate if the received data is part of chained packet. + */ +int mhi_dev_read_channel(struct mhi_dev_client *handle_client, + void *buf, uint32_t buf_size, uint32_t *chain); + +/** + * mhi_dev_write_channel() - Channel write for a given software client. + * @handle_client: Client Handle issued during mhi_dev_open_channel + * @buf: Pointer to the buffer used by the MHI core to copy the data from the + * device to the host. + * @buf_size: Size of the buffer pointer. + */ +int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf, + uint32_t buf_size); + +/** + * mhi_dev_channel_isempty() - Checks if there is any pending TRE's to process. + * @handle_client: Client Handle issued during mhi_dev_open_channel + */ +int mhi_dev_channel_isempty(struct mhi_dev_client *handle); + +struct mhi_dev_trace { + unsigned timestamp; + uint32_t data[TRACE_DATA_MAX]; +}; + +/* MHI Ring related functions */ + +/** + * mhi_ring_init() - Initializes the Ring id to the default un-initialized + * state. 
Once a start command is received, the respective ring + * is then prepared by fetching the context and updating the + * offset. + * @ring: Ring for the respective context - Channel/Event/Command. + * @type: Command/Event or Channel transfer ring. + * @id: Index to the ring id. For command its usually 1, Event rings + * may vary from 1 to 128. Channels vary from 1 to 256. + */ +void mhi_ring_init(struct mhi_dev_ring *ring, + enum mhi_dev_ring_type type, int id); + +/** + * mhi_ring_start() - Fetches the respective transfer ring's context from + * the host and updates the write offset. + * @ring: Ring for the respective context - Channel/Event/Command. + * @ctx: Transfer ring of type mhi_dev_ring_ctx. + * @dev: MHI device structure. + */ +int mhi_ring_start(struct mhi_dev_ring *ring, + union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi); + +/** + * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally. + * @ring: Ring for the respective context - Channel/Event/Command. + * @wr_offset: Cache the TRE's upto the write offset value. + */ +int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset); + +/** + * mhi_dev_update_wr_offset() - Check for any updates in the write offset. + * @ring: Ring for the respective context - Channel/Event/Command. + */ +int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring); + +/** + * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements + * and invoke the clients callback. + * @ring: Ring for the respective context - Channel/Event/Command. + */ +int mhi_dev_process_ring(struct mhi_dev_ring *ring); + +/** + * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the + * clients callback. + * @ring: Ring for the respective context - Channel/Event/Command. + * @offset: Offset index into the respective ring's cache element. 
 + */ +int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset); + +/** + * mhi_dev_add_element() - Copy the element to the respective transfer rings + * read pointer and increment the index. + * @ring: Ring for the respective context - Channel/Event/Command. + * @element: Transfer ring element to be copied to the host memory. + */ +int mhi_dev_add_element(struct mhi_dev_ring *ring, + union mhi_dev_ring_element_type *element); + +/** + * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data + * from device to the host. + * @dst_pa: Physical destination address. + * @src: Source virtual address. + * @len: Number of bytes to be transferred. + * @mhi: MHI dev structure. + */ +int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len, + struct mhi_dev *mhi); + +/** + * mhi_transfer_host_to_dev() - memcpy equivalent API to transfer data + * from host to the device. + * @dst: Physical destination virtual address. + * @src_pa: Source physical address. + * @len: Number of bytes to be transferred. + * @mhi: MHI dev structure. + */ +int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len, + struct mhi_dev *mhi); + +/** + * mhi_dev_write_to_host() - memcpy equivalent API to transfer data + * from device to host. + * @host: Host and device address details. + * @buf: Data buffer that needs to be written to the host. + * @size: Data buffer size. + */ +void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size, + struct mhi_dev *mhi); + +/** + * mhi_dev_read_from_host() - memcpy equivalent API to transfer data + * from host to device. + * @host: Host and device address details. + * @buf: Data buffer that needs to be read from the host. + * @size: Data buffer size. + */ +void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size); + +/** + * mhi_ring_set_cb() - Sets the callback to be invoked when a ring + * element is processed. + * @ring: Ring for the respective context - Channel/Event/Command. 
 + * @ring_cb: Callback to be invoked for each processed ring element. + */ +void mhi_ring_set_cb(struct mhi_dev_ring *ring, + void (*ring_cb)(struct mhi_dev *dev, + union mhi_dev_ring_element_type *el, void *ctx)); + +/** + * mhi_ring_set_state() - Sets internal state of the ring for tracking whether + * a ring is being processed, idle or uninitialized. + * @ring: Ring for the respective context - Channel/Event/Command. + * @state: state of type mhi_dev_ring_state. + */ +void mhi_ring_set_state(struct mhi_dev_ring *ring, + enum mhi_dev_ring_state state); + +/** + * mhi_ring_get_state() - Obtains the internal state of the ring. + * @ring: Ring for the respective context - Channel/Event/Command. + */ +enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring); + +/* MMIO related functions */ + +/** + * mhi_dev_mmio_read() - Generic MHI MMIO register read API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @reg_val: Pointer the register value is stored to. + */ +int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset, + uint32_t *reg_value); + +/** + * mhi_dev_mmio_write() - Generic MHI MMIO register write API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @val: Value to be written to the register offset. + */ +int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset, + uint32_t val); + +/** + * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @mask: Register field mask. + * @shift: Register field mask shift value. + * @val: Value to be written to the register offset. + */ +int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t val); +/** + * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. 
+ * @mask: Register field mask. + * @shift: Register field mask shift value. + * @reg_val: Pointer the register value is stored to. + */ +int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t *reg_val); +/** + * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_cmdb_interrupt() - Read Command doorbell status. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given + * channel id. + * @dev: MHI device structure. + * @chdb_id: Channel id number. + */ +int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id); +/** + * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given + * channel id. + * @dev: MHI device structure. + * @chdb_id: Channel id number. + */ +int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id); + +/** + * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given + * event ring id. + * @dev: MHI device structure. + * @erdb_id: Event ring id number. 
+ */ +int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id); + +/** + * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given + * event ring id. + * @dev: MHI device structure. + * @erdb_id: Event ring id number. + */ +int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id); + +/** + * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_chdb_interrupts() - Read all Channel doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_erdb_interrupts() - Read all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address. + @dev: MHI device structure. + */ +int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address. + * @dev: MHI device structure. 
 + */ +int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID. + * @dev: MHI device structure. + * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID. + * @dev: MHI device structure. + * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID. + * @dev: MHI device structure. + * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_mmio_set_env() - Write the Execution Environment. + * @dev: MHI device structure. + * @value: Value of the EXEC ENV. + */ +int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value); + +/** + * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_reset(struct mhi_dev *dev); + +/** + * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host. + * @dev: MHI device structure. + */ +int mhi_dev_get_mhi_addr(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3. + * @dev: MHI device structure. + * @state: Pointer of type mhi_dev_state + */ +int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state); + +/** + * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event + * rings, support number of channels, and offsets to the Channel + * and Event doorbell from the host. 
 + * @dev: MHI device structure. + */ +int mhi_dev_mmio_init(struct mhi_dev *dev); + +/** + * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by + * the host. + * @dev: MHI device structure. + */ +int mhi_dev_update_ner(struct mhi_dev *dev); + +/** + * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3. + * @dev: MHI device structure. + */ +int mhi_dev_restore_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_backup_mmio() - Backup MMIO before a MHI transition to M3. + * @dev: MHI device structure. + */ +int mhi_dev_backup_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug. + * @dev: MHI device structure. + */ +int mhi_dev_dump_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation + * unit between device and host to map the Data and Control + * information. + * @dev: MHI device structure. + */ +int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi); + +/** + * mhi_dev_send_state_change_event() - Send state change event to the host + * such as M0/M1/M2/M3. + * @dev: MHI device structure. + * @state: MHI state of type mhi_dev_state + */ +int mhi_dev_send_state_change_event(struct mhi_dev *mhi, + enum mhi_dev_state state); +/** + * mhi_dev_send_ee_event() - Send Execution environment state change + * event to the host. + * @dev: MHI device structure. + * @state: MHI state of type mhi_dev_execenv + */ +int mhi_dev_send_ee_event(struct mhi_dev *mhi, + enum mhi_dev_execenv exec_env); +/** + * mhi_dev_syserr() - System error when unexpected events are received. + * @dev: MHI device structure. + */ +int mhi_dev_syserr(struct mhi_dev *mhi); + +/** + * mhi_dev_suspend() - MHI device suspend to stop channel processing at the + * Transfer ring boundary, update the channel state to suspended. + * @dev: MHI device structure. 
 + */ +int mhi_dev_suspend(struct mhi_dev *mhi); + +/** + * mhi_dev_resume() - MHI device resume to update the channel state to running. + * @dev: MHI device structure. + */ +int mhi_dev_resume(struct mhi_dev *mhi); + +/** + * mhi_dev_trigger_hw_acc_wakeup() - Notify State machine there is HW + * accelerated data to be sent and prevent MHI suspend. + * @dev: MHI device structure. + */ +int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi); + +/** + * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel + * context with IPA when performing a MHI resume. + * @dev: MHI device structure. + */ +int mhi_pcie_config_db_routing(struct mhi_dev *mhi); + +/** + * mhi_uci_init() - Initializes the User control interface (UCI) which + * exposes device nodes for the supported MHI software + * channels. + */ +int mhi_uci_init(void); + +void mhi_dev_notify_a7_event(struct mhi_dev *mhi); + +#endif /* __MHI_H */ diff --git a/drivers/platform/msm/mhi_dev/mhi_hwio.h b/drivers/platform/msm/mhi_dev/mhi_hwio.h new file mode 100644 index 000000000000..bcc4095575b3 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_hwio.h @@ -0,0 +1,191 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _MHI_HWIO_ +#define _MHI_HWIO_ + +/* MHI register definition */ +#define MHI_CTRL_INT_STATUS_A7 (0x0004) +#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK 0xffffffff +#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT 0x0 + +#define MHI_CHDB_INT_STATUS_A7_n(n) (0x0028 + 0x4 * (n)) +#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff +#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0 + +#define MHI_ERDB_INT_STATUS_A7_n(n) (0x0038 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff +#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0 + +#define MHI_CTRL_INT_CLEAR_A7 (0x004C) +#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK 0xffffffff +#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT 0x0 +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x0070 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff +#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0 + +#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x0080 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff +#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0 + +#define MHI_CTRL_INT_MASK_A7 (0x0094) +#define MHI_CTRL_INT_MASK_A7_MASK_MASK 0x3 +#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT 0x0 +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_MHICTRL_SHFT 0 +#define MHI_CTRL_CRDB_MASK BIT(1) +#define MHI_CTRL_CRDB_SHFT 1 + +#define MHI_CHDB_INT_MASK_A7_n(n) (0x00B8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK 0xffffffff +#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT 0x0 + +#define MHI_ERDB_INT_MASK_A7_n(n) (0x00C8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK 0xffffffff +#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT 0x0 + +#define MHIREGLEN (0x0100) +#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff +#define MHIREGLEN_MHIREGLEN_SHIFT 0x0 + +#define MHIVER (0x0108) +#define MHIVER_MHIVER_MASK 0xffffffff +#define MHIVER_MHIVER_SHIFT 0x0 + +#define MHICFG (0x0110) +#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000 +#define 
MHICFG_RESERVED_BITS31_24_SHIFT 0x18 +#define MHICFG_NER_MASK 0xff0000 +#define MHICFG_NER_SHIFT 0x10 +#define MHICFG_RESERVED_BITS15_8_MASK 0xff00 +#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8 +#define MHICFG_NCH_MASK 0xff +#define MHICFG_NCH_SHIFT 0x0 + +#define CHDBOFF (0x0118) +#define CHDBOFF_CHDBOFF_MASK 0xffffffff +#define CHDBOFF_CHDBOFF_SHIFT 0x0 + +#define ERDBOFF (0x0120) +#define ERDBOFF_ERDBOFF_MASK 0xffffffff +#define ERDBOFF_ERDBOFF_SHIFT 0x0 + +#define BHIOFF (0x0128) +#define BHIOFF_BHIOFF_MASK 0xffffffff +#define BHIOFF_BHIOFF_SHIFT 0x0 + +#define DEBUGOFF (0x0130) +#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff +#define DEBUGOFF_DEBUGOFF_SHIFT 0x0 + +#define MHICTRL (0x0138) +#define MHICTRL_MHISTATE_MASK 0x0000FF00 +#define MHICTRL_MHISTATE_SHIFT 0x8 +#define MHICTRL_RESET_MASK 0x2 +#define MHICTRL_RESET_SHIFT 0x1 + +#define MHISTATUS (0x0148) +#define MHISTATUS_MHISTATE_MASK 0x0000ff00 +#define MHISTATUS_MHISTATE_SHIFT 0x8 +#define MHISTATUS_SYSERR_MASK 0x4 +#define MHISTATUS_SYSERR_SHIFT 0x2 +#define MHISTATUS_READY_MASK 0x1 +#define MHISTATUS_READY_SHIFT 0x0 + +#define CCABAP_LOWER (0x0158) +#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0 + +#define CCABAP_HIGHER (0x015C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0 + +#define ECABAP_LOWER (0x0160) +#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0 + +#define ECABAP_HIGHER (0x0164) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0 + +#define CRCBAP_LOWER (0x0168) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0 + +#define CRCBAP_HIGHER (0x016C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0 + +#define CRDB_LOWER (0x0170) +#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff +#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0 
+ +#define CRDB_HIGHER (0x0174) +#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0 + +#define MHICTRLBASE_LOWER (0x0180) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0 + +#define MHICTRLBASE_HIGHER (0x0184) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0 + +#define MHICTRLLIMIT_LOWER (0x0188) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0 + +#define MHICTRLLIMIT_HIGHER (0x018C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0 + +#define MHIDATABASE_LOWER (0x0198) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0 + +#define MHIDATABASE_HIGHER (0x019C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0 + +#define MHIDATALIMIT_LOWER (0x01A0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0 + +#define MHIDATALIMIT_HIGHER (0x01A4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0 + +#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n)) +#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff +#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0 + +#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n)) +#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff +#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0 + +#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n)) +#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff +#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0 + +#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n)) +#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff +#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0 + +#define BHI_EXECENV (0x228) 
+#define BHI_EXECENV_MASK 0xFFFFFFFF +#define BHI_EXECENV_SHIFT 0 + +#endif diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c new file mode 100644 index 000000000000..12e4a0d4851c --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c @@ -0,0 +1,999 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" +#include "mhi_hwio.h" + +int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset, + uint32_t *reg_value) +{ + void __iomem *addr; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + addr = dev->mmio_base_addr + offset; + + *reg_value = readl_relaxed(addr); + + pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read); + +int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset, + uint32_t val) +{ + void __iomem *addr; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + addr = dev->mmio_base_addr + offset; + + writel_relaxed(val, addr); + + pr_debug("reg write:0x%x with value 0x%x\n", offset, val); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_write); + +int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t val) +{ + uint32_t reg_val; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev 
context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, offset, ®_val); + if (rc) { + pr_err("Read error failed for offset:0x%x\n", offset); + return rc; + } + + reg_val &= ~mask; + reg_val |= ((val << shift) & mask); + + rc = mhi_dev_mmio_write(dev, offset, reg_val); + if (rc) { + pr_err("Write error failed for offset:0x%x\n", offset); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_masked_write); + +int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t *reg_val) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, offset, reg_val); + if (rc) { + pr_err("Read error failed for offset:0x%x\n", offset); + return rc; + } + + *reg_val &= mask; + *reg_val >>= shift; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_masked_read); + +static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev, + uint32_t chdb_id, bool enable) +{ + uint32_t chid_mask, chid_idx, chid_shft, val = 0; + int rc = 0; + + chid_shft = chdb_id%32; + chid_mask = (1 << chid_shft); + chid_idx = chdb_id/32; + + if (enable) + val = 1; + + rc = mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx), + chid_mask, chid_shft, val); + if (rc) { + pr_err("Write on channel db interrupt failed\n"); + return rc; + } + + return rc; +} + +int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true); + if (rc) { + pr_err("Setting channel DB failed for ch_id:%d\n", chdb_id); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7); + +int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false); + if (rc) 
{ + pr_err("Disabling channel DB failed for ch_id:%d\n", chdb_id); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7); + +static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev, + uint32_t erdb_ch_id, bool enable) +{ + uint32_t erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0; + int rc = 0; + + erdb_id_shft = erdb_ch_id%32; + erdb_id_mask = (1 << erdb_id_shft); + erdb_id_idx = erdb_ch_id/32; + + if (enable) + val = 1; + + rc = mhi_dev_mmio_masked_write(dev, + MHI_ERDB_INT_MASK_A7_n(erdb_id_idx), + erdb_id_mask, erdb_id_shft, val); + if (rc) { + pr_err("Error setting event ring db for %d\n", erdb_ch_id); + return rc; + } + + return rc; +} + +int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true); + if (rc) { + pr_err("Error setting event ring db for %d\n", erdb_id); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7); + +int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false); + if (rc) { + pr_err("Error disabling event ring db for %d\n", erdb_id); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7); + +int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state) +{ + uint32_t reg_value = 0; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_masked_read(dev, MHICTRL, + MHISTATUS_MHISTATE_MASK, MHISTATUS_MHISTATE_SHIFT, state); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, MHICTRL, ®_value); + if (rc) + return rc; + + pr_debug("MHICTRL is 0x%x\n", reg_value); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state); + +static int 
mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable) +{ + uint32_t mask = 0, i = 0; + int rc = 0; + + if (enable) + mask = MHI_CHDB_INT_MASK_A7_n_MASK_MASK; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_write(dev, + MHI_CHDB_INT_MASK_A7_n(i), mask); + if (rc) { + pr_err("Set channel db on row:%d failed\n", i); + return rc; + } + } + + return rc; +} + +int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_chdb_interrupts(dev, true); + if (rc) { + pr_err("Error setting channel db interrupts\n"); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts); + +int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_chdb_interrupts(dev, false); + if (rc) { + pr_err("Error masking channel db interrupts\n"); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts); + +int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev) +{ + uint32_t i; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_read(dev, + MHI_CHDB_INT_STATUS_A7_n(i), &dev->chdb[i].status); + if (rc) { + pr_err("Error reading chdb status for row:%d\n", i); + return rc; + } + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts); + +static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable) +{ + uint32_t mask = 0, i; + int rc = 0; + + if (enable) + mask = MHI_ERDB_INT_MASK_A7_n_MASK_MASK; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_write(dev, + MHI_ERDB_INT_MASK_A7_n(i), mask); + if (rc) { + pr_err("Error setting erdb status for row:%d\n", i); + return rc; + } + } + + return 0; +} + +int 
mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_erdb_interrupts(dev, true); + if (rc) { + pr_err("Error enabling all erdb interrupts\n"); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts); + +int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_set_erdb_interrupts(dev, false); + if (rc) { + pr_err("Error masking all event db interrupt\n"); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts); + +int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev) +{ + uint32_t i; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i), + &dev->evdb[i].status); + if (rc) { + pr_err("Error setting erdb status for row:%d\n", i); + return rc; + } + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts); + +int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1); + if (rc) { + pr_err("Error enabling control interrupt\n"); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt); + +int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0); + if (rc) { + pr_err("Error disabling control interrupt\n"); + return rc; + } + + return 0; +} 
+EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt); + +int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int); + if (rc) { + pr_err("Error reading control status interrupt\n"); + return rc; + } + + dev->ctrl_int &= 0x1; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt); + +int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int); + if (rc) { + pr_err("Error reading cmd status register\n"); + return rc; + } + + dev->cmd_int &= 0x10; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt); + +int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1); + if (rc) + return rc; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt); + +int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0); + if (rc) + return rc; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt); + +static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev) +{ + int rc = 0; + + rc = mhi_dev_mmio_disable_ctrl_interrupt(dev); + if (rc) { + pr_err("Error disabling control interrupt\n"); + return; + } + + rc = mhi_dev_mmio_disable_cmdb_interrupt(dev); + if (rc) { + pr_err("Error disabling command db interrupt\n"); + return; + } + + rc = mhi_dev_mmio_mask_chdb_interrupts(dev); + if (rc) { + 
pr_err("Error masking all channel db interrupts\n"); + return; + } + + rc = mhi_dev_mmio_mask_erdb_interrupts(dev); + if (rc) { + pr_err("Error masking all erdb interrupts\n"); + return; + } +} + +int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev) +{ + uint32_t i = 0; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i), + MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK); + if (rc) + return rc; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + rc = mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i), + MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK); + if (rc) + return rc; + } + + rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, + MHI_CTRL_INT_CRDB_CLEAR); + if (rc) + return rc; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts); + +int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev) +{ + uint32_t ccabap_value = 0, offset = 0; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, CCABAP_HIGHER, &ccabap_value); + if (rc) + return rc; + + dev->ch_ctx_shadow.host_pa = ccabap_value; + dev->ch_ctx_shadow.host_pa <<= 32; + + rc = mhi_dev_mmio_read(dev, CCABAP_LOWER, &ccabap_value); + if (rc) + return rc; + + dev->ch_ctx_shadow.host_pa |= ccabap_value; + + offset = (uint32_t)(dev->ch_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base); + +int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev) +{ + uint32_t ecabap_value = 0, offset = 0; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, ECABAP_HIGHER, &ecabap_value); + if (rc) + return rc; + + dev->ev_ctx_shadow.host_pa = ecabap_value; + 
dev->ev_ctx_shadow.host_pa <<= 32; + + rc = mhi_dev_mmio_read(dev, ECABAP_LOWER, &ecabap_value); + if (rc) + return rc; + + dev->ev_ctx_shadow.host_pa |= ecabap_value; + + offset = (uint32_t)(dev->ev_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base); + +int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev) +{ + uint32_t crcbap_value = 0, offset = 0; + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, CRCBAP_HIGHER, &crcbap_value); + if (rc) + return rc; + + dev->cmd_ctx_shadow.host_pa = crcbap_value; + dev->cmd_ctx_shadow.host_pa <<= 32; + + rc = mhi_dev_mmio_read(dev, CRCBAP_LOWER, &crcbap_value); + if (rc) + return rc; + + dev->cmd_ctx_shadow.host_pa |= crcbap_value; + + offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base); + +int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0, ch_start_idx = 0; + int rc = 0; + + if (!ring) { + pr_err("Invalid ring context\n"); + return -EINVAL; + } + + ch_start_idx = ring->mhi_dev->ch_ring_start; + + rc = mhi_dev_mmio_read(ring->mhi_dev, + CHDB_HIGHER_n(ring->id-ch_start_idx), &value); + if (rc) + return rc; + + *wr_offset = value; + *wr_offset <<= 32; + + rc = mhi_dev_mmio_read(ring->mhi_dev, + CHDB_LOWER_n(ring->id-ch_start_idx), &value); + if (rc) + return rc; + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db); + +int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0, ev_idx_start = 0; + int rc = 0; + + if (!ring) { + 
pr_err("Invalid ring context\n"); + return -EINVAL; + } + + ev_idx_start = ring->mhi_dev->ev_ring_start; + rc = mhi_dev_mmio_read(ring->mhi_dev, + ERDB_HIGHER_n(ring->id - ev_idx_start), &value); + if (rc) + return rc; + + *wr_offset = value; + *wr_offset <<= 32; + + rc = mhi_dev_mmio_read(ring->mhi_dev, + ERDB_LOWER_n(ring->id - ev_idx_start), &value); + if (rc) + return rc; + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db); + +int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0; + int rc = 0; + + if (!ring) { + pr_err("Invalid ring context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &value); + if (rc) + return rc; + + *wr_offset = value; + *wr_offset <<= 32; + + rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &value); + if (rc) + return rc; + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db); + +int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value) +{ + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + mhi_dev_mmio_write(dev, BHI_EXECENV, value); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_set_env); + +int mhi_dev_mmio_reset(struct mhi_dev *dev) +{ + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + mhi_dev_mmio_write(dev, MHICTRL, 0); + mhi_dev_mmio_write(dev, MHISTATUS, 0); + mhi_dev_mmio_clear_interrupts(dev); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_reset); + +int mhi_dev_restore_mmio(struct mhi_dev *dev) +{ + uint32_t i, reg_cntl_value; + void *reg_cntl_addr; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + mhi_dev_mmio_mask_interrupts(dev); + + for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) { + reg_cntl_addr = dev->mmio_base_addr + (i * 4); + reg_cntl_value = dev->mmio_backup[i]; + writel_relaxed(reg_cntl_value, reg_cntl_addr); + } + + mhi_dev_mmio_clear_interrupts(dev); + mhi_dev_mmio_enable_ctrl_interrupt(dev); 
+ + /* Mask and enable control interrupt */ + mb(); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_restore_mmio); + +int mhi_dev_backup_mmio(struct mhi_dev *dev) +{ + uint32_t i = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++) + dev->mmio_backup[i] = + readl_relaxed(dev->mmio_base_addr + (i * 4)); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_backup_mmio); + +int mhi_dev_get_mhi_addr(struct mhi_dev *dev) +{ + uint32_t data_value = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value); + dev->host_addr.ctrl_base_lsb = data_value; + + mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value); + dev->host_addr.ctrl_base_msb = data_value; + + mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value); + dev->host_addr.ctrl_limit_lsb = data_value; + + mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value); + dev->host_addr.ctrl_limit_msb = data_value; + + mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value); + dev->host_addr.data_base_lsb = data_value; + + mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value); + dev->host_addr.data_base_msb = data_value; + + mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value); + dev->host_addr.data_limit_lsb = data_value; + + mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value); + dev->host_addr.data_limit_msb = data_value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_get_mhi_addr); + +int mhi_dev_mmio_init(struct mhi_dev *dev) +{ + int rc = 0; + + if (!dev) { + pr_err("Invalid MHI dev context\n"); + return -EINVAL; + } + + rc = mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len); + if (rc) + return rc; + + rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK, + MHICFG_NER_SHIFT, &dev->cfg.event_rings); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, ERDBOFF, 
&dev->cfg.erdb_offset); + if (rc) + return rc; + + dev->cfg.channels = NUM_CHANNELS; + + if (!dev->mmio_initialized) { + rc = mhi_dev_mmio_reset(dev); + if (rc) { + pr_err("Error resetting MMIO\n"); + return rc; + } + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_init); + +int mhi_dev_update_ner(struct mhi_dev *dev) +{ + int rc = 0; + + rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK, + MHICFG_NER_SHIFT, &dev->cfg.event_rings); + if (rc) { + pr_err("Error update NER\n"); + return rc; + } + + pr_debug("NER in HW :%d\n", dev->cfg.event_rings); + return 0; +} +EXPORT_SYMBOL(mhi_dev_update_ner); + +int mhi_dev_dump_mmio(struct mhi_dev *dev) +{ + uint32_t r1, r2, r3, r4, i, offset = 0; + int rc = 0; + + for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) { + rc = mhi_dev_mmio_read(dev, offset, &r1); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, offset+4, &r2); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, offset+8, &r3); + if (rc) + return rc; + + rc = mhi_dev_mmio_read(dev, offset+0xC, &r4); + if (rc) + return rc; + + offset += 0x10; + pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + offset, r1, r2, r3, r4); + } + + return rc; +} +EXPORT_SYMBOL(mhi_dev_dump_mmio); diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c b/drivers/platform/msm/mhi_dev/mhi_ring.c new file mode 100644 index 000000000000..b7eab1eb8b64 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_ring.c @@ -0,0 +1,438 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" + +static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p) +{ + uint64_t rbase; + + rbase = ring->ring_ctx->generic.rbase; + + return (p - rbase)/sizeof(union mhi_dev_ring_element_type); +} + +static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring) +{ + return ring->ring_ctx->generic.rlen/ + sizeof(union mhi_dev_ring_element_type); +} + +/* fetch ring elements from stat->end, take care of wrap-around case */ +int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring, + uint32_t start, uint32_t end) +{ + struct mhi_addr host_addr; + + host_addr.device_pa = ring->ring_shadow.device_pa + + sizeof(union mhi_dev_ring_element_type) * start; + host_addr.device_va = ring->ring_shadow.device_va + + sizeof(union mhi_dev_ring_element_type) * start; + host_addr.host_pa = ring->ring_shadow.host_pa + + sizeof(union mhi_dev_ring_element_type) * start; + if (start < end) { + mhi_dev_read_from_host(&host_addr, + (ring->ring_cache_dma_handle + + sizeof(union mhi_dev_ring_element_type) * start), + (end-start) * + sizeof(union mhi_dev_ring_element_type)); + } else if (start > end) { + /* copy from 'start' to ring end, then ring start to 'end'*/ + mhi_dev_read_from_host(&host_addr, + (ring->ring_cache_dma_handle + + sizeof(union mhi_dev_ring_element_type) * start), + (ring->ring_size-start) * + sizeof(union mhi_dev_ring_element_type)); + if (end) { + /* wrapped around */ + host_addr.device_pa = ring->ring_shadow.device_pa; + host_addr.device_va = ring->ring_shadow.device_va; + host_addr.host_pa = ring->ring_shadow.host_pa; + mhi_dev_read_from_host(&host_addr, + (ring->ring_cache_dma_handle + + sizeof(union mhi_dev_ring_element_type) * + start), + end * sizeof(union mhi_dev_ring_element_type)); + } + } + + return 0; +} + +int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset) +{ + 
uint32_t old_offset = 0; + struct mhi_dev *mhi_ctx; + + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return -EINVAL; + } + + mhi_ctx = ring->mhi_dev; + + if (ring->wr_offset == wr_offset) { + mhi_log(MHI_MSG_INFO, + "nothing to cache for ring %d, local wr_ofst %d\n", + ring->id, ring->wr_offset); + mhi_log(MHI_MSG_INFO, + "new wr_offset %d\n", wr_offset); + return 0; + } + + old_offset = ring->wr_offset; + + mhi_log(MHI_MSG_ERROR, + "caching - rng size :%d local ofst:%d new ofst: %d\n", + (uint32_t) ring->ring_size, old_offset, + ring->wr_offset); + + /* + * copy the elements starting from old_offset to wr_offset + * take in to account wrap around case event rings are not + * cached, not required + */ + if (ring->id >= mhi_ctx->ev_ring_start && + ring->id < (mhi_ctx->ev_ring_start + + mhi_ctx->cfg.event_rings)) { + mhi_log(MHI_MSG_ERROR, + "not caching event ring %d\n", ring->id); + return 0; + } + + mhi_log(MHI_MSG_ERROR, "caching ring %d, start %d, end %d\n", + ring->id, old_offset, wr_offset); + + if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) { + mhi_log(MHI_MSG_ERROR, + "failed to fetch elements for ring %d, start %d, end %d\n", + ring->id, old_offset, wr_offset); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_cache_ring); + +int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring) +{ + uint64_t wr_offset = 0; + uint32_t new_wr_offset = 0; + int32_t rc = 0; + + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return -EINVAL; + } + + switch (ring->type) { + case RING_TYPE_CMD: + rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset); + if (rc) { + pr_err("%s: CMD DB read failed\n", __func__); + return rc; + } + mhi_log(MHI_MSG_ERROR, + "ring %d wr_offset from db 0x%x\n", + ring->id, (uint32_t) wr_offset); + break; + case RING_TYPE_ER: + rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset); + if (rc) { + pr_err("%s: EVT DB read failed\n", __func__); + return rc; + } + break; + case RING_TYPE_CH: + rc = 
mhi_dev_mmio_get_ch_db(ring, &wr_offset); + if (rc) { + pr_err("%s: CH DB read failed\n", __func__); + return rc; + } + mhi_log(MHI_MSG_ERROR, + "ring %d wr_offset from db 0x%x\n", + ring->id, (uint32_t) wr_offset); + break; + default: + mhi_log(MHI_MSG_ERROR, "invalid ring type\n"); + return -EINVAL; + } + + new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset); + + mhi_dev_cache_ring(ring, new_wr_offset); + + ring->wr_offset = new_wr_offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_update_wr_offset); + +int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset) +{ + union mhi_dev_ring_element_type *el; + + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return -EINVAL; + } + + /* get the element and invoke the respective callback */ + el = &ring->ring_cache[offset]; + + if (ring->ring_cb) + ring->ring_cb(ring->mhi_dev, el, (void *)ring); + else + mhi_log(MHI_MSG_INFO, "No callback registered for ring %d\n", + ring->id); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_process_ring_element); + +int mhi_dev_process_ring(struct mhi_dev_ring *ring) +{ + int rc = 0; + + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return -EINVAL; + } + + rc = mhi_dev_update_wr_offset(ring); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error updating write-offset for ring %d\n", + ring->id); + return rc; + } + + if (ring->type == RING_TYPE_CH) { + /* notify the clients that there are elements in the ring */ + rc = mhi_dev_process_ring_element(ring, ring->rd_offset); + if (rc) + pr_err("Error fetching elements\n"); + return rc; + } + + while (ring->rd_offset != ring->wr_offset) { + rc = mhi_dev_process_ring_element(ring, ring->rd_offset); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error processing ring (%d) element (%d)\n", + ring->id, ring->rd_offset); + return rc; + } + + mhi_log(MHI_MSG_ERROR, + "Processing ring (%d) rd_offset:%d, wr_offset:%d\n", + ring->id, ring->rd_offset, ring->wr_offset); + + mhi_dev_ring_inc_index(ring, ring->rd_offset); + 
} + + if (!(ring->rd_offset == ring->wr_offset)) { + mhi_log(MHI_MSG_ERROR, + "Error with the rd offset/wr offset\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_process_ring); + +int mhi_dev_add_element(struct mhi_dev_ring *ring, + union mhi_dev_ring_element_type *element) +{ + uint32_t old_offset = 0; + struct mhi_addr host_addr; + + if (!ring || !element) { + pr_err("%s: Invalid context\n", __func__); + return -EINVAL; + } + + mhi_dev_update_wr_offset(ring); + + if ((ring->rd_offset + 1) % ring->ring_size == ring->wr_offset) { + mhi_log(MHI_MSG_INFO, "ring full to insert element\n"); + return -EINVAL; + } + + old_offset = ring->rd_offset; + + mhi_dev_ring_inc_index(ring, ring->rd_offset); + + ring->ring_ctx->generic.rp = (ring->rd_offset * + sizeof(union mhi_dev_ring_element_type)) + + ring->ring_ctx->generic.rbase; + /* + * Write the element, ring_base has to be the + * iomap of the ring_base for memcpy + */ + host_addr.host_pa = ring->ring_shadow.host_pa + + sizeof(union mhi_dev_ring_element_type) * old_offset; + host_addr.device_va = ring->ring_shadow.device_va + + sizeof(union mhi_dev_ring_element_type) * old_offset; + + mhi_log(MHI_MSG_ERROR, "adding element to ring (%d)\n", ring->id); + mhi_log(MHI_MSG_ERROR, "rd_ofset %d\n", ring->rd_offset); + mhi_log(MHI_MSG_ERROR, "type %d\n", element->generic.type); + + mhi_dev_write_to_host(&host_addr, element, + sizeof(union mhi_dev_ring_element_type), ring->mhi_dev); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_add_element); + +int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx, + struct mhi_dev *mhi) +{ + int rc = 0; + uint32_t wr_offset = 0; + uint32_t offset = 0; + + if (!ring || !ctx || !mhi) { + pr_err("%s: Invalid context\n", __func__); + return -EINVAL; + } + + ring->ring_ctx = ctx; + ring->ring_size = mhi_dev_ring_num_elems(ring); + ring->rd_offset = mhi_dev_ring_addr2ofst(ring, + ring->ring_ctx->generic.rp); + ring->wr_offset = mhi_dev_ring_addr2ofst(ring, + 
ring->ring_ctx->generic.rp); + ring->mhi_dev = mhi; + + mhi_ring_set_state(ring, RING_STATE_IDLE); + + wr_offset = mhi_dev_ring_addr2ofst(ring, + ring->ring_ctx->generic.wp); + + ring->ring_cache = dma_alloc_coherent(mhi->dev, + ring->ring_size * + sizeof(union mhi_dev_ring_element_type), + &ring->ring_cache_dma_handle, + GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + offset = (uint32_t)(ring->ring_ctx->generic.rbase - + mhi->ctrl_base.host_pa); + + ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset; + ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset; + ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset; + + if (ring->type == RING_TYPE_ER) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va + + (ring->id - mhi->ev_ring_start) * + sizeof(union mhi_dev_ring_ctx)); + else if (ring->type == RING_TYPE_CMD) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va; + else if (ring->type == RING_TYPE_CH) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va + + (ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx)); + + + ring->ring_ctx_shadow = ring->ring_ctx; + + if (ring->type != RING_TYPE_ER) { + rc = mhi_dev_cache_ring(ring, wr_offset); + if (rc) + return rc; + } + + mhi_log(MHI_MSG_ERROR, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n", + (uint32_t)ring->ring_ctx->generic.rbase, + (uint32_t)ring->ring_ctx->generic.rp, + (uint32_t)ring->ring_ctx->generic.wp); + ring->wr_offset = wr_offset; + + return rc; +} +EXPORT_SYMBOL(mhi_ring_start); + +void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type, + int id) +{ + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return; + } + + ring->id = id; + ring->state = RING_STATE_UINT; + ring->ring_cb = NULL; + ring->type = type; +} +EXPORT_SYMBOL(mhi_ring_init); + +void mhi_ring_set_cb(struct mhi_dev_ring *ring, + void (*ring_cb)(struct mhi_dev *dev, + 
union mhi_dev_ring_element_type *el, void *ctx)) +{ + if (!ring || !ring_cb) { + pr_err("%s: Invalid context\n", __func__); + return; + } + + ring->ring_cb = ring_cb; +} +EXPORT_SYMBOL(mhi_ring_set_cb); + +void mhi_ring_set_state(struct mhi_dev_ring *ring, + enum mhi_dev_ring_state state) +{ + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return; + } + + if (state > RING_STATE_PENDING) { + pr_err("%s: Invalid ring state\n", __func__); + return; + } + + ring->state = state; +} +EXPORT_SYMBOL(mhi_ring_set_state); + +enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring) +{ + if (!ring) { + pr_err("%s: Invalid ring context\n", __func__); + return -EINVAL; + } + + return ring->state; +} +EXPORT_SYMBOL(mhi_ring_get_state); diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c new file mode 100644 index 000000000000..12a4fb229922 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_sm.c @@ -0,0 +1,1319 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "mhi_hwio.h" +#include "mhi_sm.h" + +#define MHI_SM_DBG(fmt, args...) \ + mhi_log(MHI_MSG_DBG, fmt, ##args) + +#define MHI_SM_ERR(fmt, args...) 
\ + mhi_log(MHI_MSG_ERROR, fmt, ##args) + +#define MHI_SM_FUNC_ENTRY() MHI_SM_DBG("ENTRY\n") +#define MHI_SM_FUNC_EXIT() MHI_SM_DBG("EXIT\n") + + +static inline const char *mhi_sm_dev_event_str(enum mhi_dev_event state) +{ + const char *str; + + switch (state) { + case MHI_DEV_EVENT_CTRL_TRIG: + str = "MHI_DEV_EVENT_CTRL_TRIG"; + break; + case MHI_DEV_EVENT_M0_STATE: + str = "MHI_DEV_EVENT_M0_STATE"; + break; + case MHI_DEV_EVENT_M1_STATE: + str = "MHI_DEV_EVENT_M1_STATE"; + break; + case MHI_DEV_EVENT_M2_STATE: + str = "MHI_DEV_EVENT_M2_STATE"; + break; + case MHI_DEV_EVENT_M3_STATE: + str = "MHI_DEV_EVENT_M3_STATE"; + break; + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + str = "MHI_DEV_EVENT_HW_ACC_WAKEUP"; + break; + case MHI_DEV_EVENT_CORE_WAKEUP: + str = "MHI_DEV_EVENT_CORE_WAKEUP"; + break; + default: + str = "INVALID MHI_DEV_EVENT"; + } + + return str; +} + +static inline const char *mhi_sm_mstate_str(enum mhi_dev_state state) +{ + const char *str; + + switch (state) { + case MHI_DEV_RESET_STATE: + str = "RESET"; + break; + case MHI_DEV_READY_STATE: + str = "READY"; + break; + case MHI_DEV_M0_STATE: + str = "M0"; + break; + case MHI_DEV_M1_STATE: + str = "M1"; + break; + case MHI_DEV_M2_STATE: + str = "M2"; + break; + case MHI_DEV_M3_STATE: + str = "M3"; + break; + case MHI_DEV_SYSERR_STATE: + str = "SYSTEM ERROR"; + break; + default: + str = "INVALID"; + break; + } + + return str; +} +enum mhi_sm_ep_pcie_state { + MHI_SM_EP_PCIE_LINK_DISABLE, + MHI_SM_EP_PCIE_D0_STATE, + MHI_SM_EP_PCIE_D3_HOT_STATE, + MHI_SM_EP_PCIE_D3_COLD_STATE, +}; + +static inline const char *mhi_sm_dstate_str(enum mhi_sm_ep_pcie_state state) +{ + const char *str; + + switch (state) { + case MHI_SM_EP_PCIE_LINK_DISABLE: + str = "EP_PCIE_LINK_DISABLE"; + break; + case MHI_SM_EP_PCIE_D0_STATE: + str = "D0_STATE"; + break; + case MHI_SM_EP_PCIE_D3_HOT_STATE: + str = "D3_HOT_STATE"; + break; + case MHI_SM_EP_PCIE_D3_COLD_STATE: + str = "D3_COLD_STATE"; + break; + default: + str = "INVALID D-STATE"; 
+ break; + } + + return str; +} + +static inline const char *mhi_sm_pcie_event_str(enum ep_pcie_event event) +{ + const char *str; + + switch (event) { + case EP_PCIE_EVENT_LINKDOWN: + str = "EP_PCIE_LINKDOWN_EVENT"; + break; + case EP_PCIE_EVENT_LINKUP: + str = "EP_PCIE_LINKUP_EVENT"; + break; + case EP_PCIE_EVENT_PM_D3_HOT: + str = "EP_PCIE_PM_D3_HOT_EVENT"; + break; + case EP_PCIE_EVENT_PM_D3_COLD: + str = "EP_PCIE_PM_D3_COLD_EVENT"; + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + str = "EP_PCIE_PM_RST_DEAST_EVENT"; + break; + case EP_PCIE_EVENT_PM_D0: + str = "EP_PCIE_PM_D0_EVENT"; + break; + case EP_PCIE_EVENT_MHI_A7: + str = "EP_PCIE_MHI_A7"; + break; + default: + str = "INVALID_PCIE_EVENT"; + break; + } + + return str; +} + +/** + * struct mhi_sm_device_event - mhi-core event work + * @event: mhi core state change event + * @work: work struct + * + * used to add work for mhi state change event to mhi_sm_wq + */ +struct mhi_sm_device_event { + enum mhi_dev_event event; + struct work_struct work; +}; + +/** + * struct mhi_sm_ep_pcie_event - ep-pcie event work + * @event: ep-pcie link state change event + * @work: work struct + * + * used to add work for ep-pcie link state change event to mhi_sm_wq + */ +struct mhi_sm_ep_pcie_event { + enum ep_pcie_event event; + struct work_struct work; +}; + +/** + * struct mhi_sm_stats - MHI state machine statistics, viewable using debugfs + * @m0_event_cnt: total number of MHI_DEV_EVENT_M0_STATE events + * @m3_event_cnt: total number of MHI_DEV_EVENT_M3_STATE events + * @hw_acc_wakeup_event_cnt: total number of MHI_DEV_EVENT_HW_ACC_WAKEUP events + * @mhi_core_wakeup_event_cnt: total number of MHI_DEV_EVENT_CORE_WAKEUP events + * @linkup_event_cnt: total number of EP_PCIE_EVENT_LINKUP events + * @rst_deast_event_cnt: total number of EP_PCIE_EVENT_PM_RST_DEAST events + * @d3_hot_event_cnt: total number of EP_PCIE_EVENT_PM_D3_HOT events + * @d3_cold_event_cnt: total number of EP_PCIE_EVENT_PM_D3_COLD events + * @d0_event_cnt: 
total number of EP_PCIE_EVENT_PM_D0 events + * @linkdown_event_cnt: total number of EP_PCIE_EVENT_LINKDOWN events + */ +struct mhi_sm_stats { + int m0_event_cnt; + int m3_event_cnt; + int hw_acc_wakeup_event_cnt; + int mhi_core_wakeup_event_cnt; + int linkup_event_cnt; + int rst_deast_event_cnt; + int d3_hot_event_cnt; + int d3_cold_event_cnt; + int d0_event_cnt; + int linkdown_event_cnt; +}; + +/** + * struct mhi_sm_dev - MHI state manager context information + * @mhi_state: MHI M state of the MHI device + * @d_state: EP-PCIe D state of the MHI device + * @mhi_dev: MHI device struct pointer + * @mhi_state_lock: mutex for mhi_state + * @syserr_occurred:flag to indicate if a syserr condition has occurred. + * @mhi_sm_wq: workqueue for state change events + * @pending_device_events: number of pending mhi state change events in sm_wq + * @pending_pcie_events: number of pending mhi state change events in sm_wq + * @stats: stats on the handled and pending events + */ +struct mhi_sm_dev { + enum mhi_dev_state mhi_state; + enum mhi_sm_ep_pcie_state d_state; + struct mhi_dev *mhi_dev; + struct mutex mhi_state_lock; + bool syserr_occurred; + struct workqueue_struct *mhi_sm_wq; + atomic_t pending_device_events; + atomic_t pending_pcie_events; + struct mhi_sm_stats stats; +}; +static struct mhi_sm_dev *mhi_sm_ctx; + + +#ifdef CONFIG_DEBUG_FS +#define MHI_SM_MAX_MSG_LEN 1024 +static char dbg_buff[MHI_SM_MAX_MSG_LEN]; +static struct dentry *dent; +static struct dentry *dfile_stats; + +static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos); +static ssize_t mhi_sm_debugfs_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos); + +const struct file_operations mhi_sm_stats_ops = { + .read = mhi_sm_debugfs_read, + .write = mhi_sm_debugfs_write, +}; + +static void mhi_sm_debugfs_init(void) +{ + const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH | + S_IWUSR | S_IWGRP | S_IWOTH; + + dent = 
debugfs_create_dir("mhi_sm", 0); + if (IS_ERR(dent)) { + MHI_SM_ERR("fail to create folder mhi_sm\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_write_mode, dent, + 0, &mhi_sm_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + MHI_SM_ERR("fail to create file stats\n"); + goto fail; + } + return; +fail: + debugfs_remove_recursive(dent); +} + +static void mhi_sm_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} +#else +static inline void mhi_sm_debugfs_init(void) {} +static inline void mhi_sm_debugfs_destroy(void) {} +#endif /*CONFIG_DEBUG_FS*/ + + +static void mhi_sm_mmio_set_mhistatus(enum mhi_dev_state state) +{ + struct mhi_dev *dev = mhi_sm_ctx->mhi_dev; + + MHI_SM_FUNC_ENTRY(); + + switch (state) { + case MHI_DEV_READY_STATE: + MHI_SM_DBG("set MHISTATUS to READY mode\n"); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, 1); + + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + case MHI_DEV_SYSERR_STATE: + MHI_SM_DBG("set MHISTATUS to SYSTEM ERROR mode\n"); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_SYSERR_MASK, + MHISTATUS_SYSERR_SHIFT, 1); + + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + case MHI_DEV_M1_STATE: + case MHI_DEV_M2_STATE: + MHI_SM_ERR("Not supported state, can't set MHISTATUS to %s\n", + mhi_sm_mstate_str(state)); + goto exit; + case MHI_DEV_M0_STATE: + case MHI_DEV_M3_STATE: + MHI_SM_DBG("set MHISTATUS.MHISTATE to %s state\n", + mhi_sm_mstate_str(state)); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + default: + MHI_SM_ERR("Invalid mhi state: 0x%x state", state); + goto exit; + } + + mhi_sm_ctx->mhi_state = state; + +exit: + MHI_SM_FUNC_EXIT(); +} + +/** + * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid + * 
@curr_state: current MHI state + * @event: MHI state change event + * + * Determine according to MHI state management if the state change event + * is valid on the current mhi state. + * Note: The decision doesn't take into account M1 and M2 states. + * + * Return: true: transition is valid + * false: transition is not valid + */ +static bool mhi_sm_is_legal_event_on_state(enum mhi_dev_state curr_state, + enum mhi_dev_event event) +{ + bool res; + + switch (event) { + case MHI_DEV_EVENT_M0_STATE: + res = (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D0_STATE && + curr_state != MHI_DEV_RESET_STATE); + break; + case MHI_DEV_EVENT_M3_STATE: + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + case MHI_DEV_EVENT_CORE_WAKEUP: + res = (curr_state == MHI_DEV_M3_STATE || + curr_state == MHI_DEV_M0_STATE); + break; + default: + MHI_SM_ERR("Received invalid event: %s\n", + mhi_sm_dev_event_str(event)); + res = false; + break; + } + + return res; +} + +/** + * mhi_sm_is_legal_pcie_event_on_state() - Determine if EP-PCIe linke state + * transition is valid on the current system state. 
+ * @curr_mstate: current MHI state + * @curr_dstate: current ep-pcie link, d, state + * @event: ep-pcie link state change event + * + * Return: true: transition is valid + * false: transition is not valid + */ +static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate, + enum mhi_sm_ep_pcie_state curr_dstate, enum ep_pcie_event event) +{ + bool res; + + switch (event) { + case EP_PCIE_EVENT_LINKUP: + case EP_PCIE_EVENT_LINKDOWN: + res = true; + break; + case EP_PCIE_EVENT_PM_D3_HOT: + res = (curr_mstate == MHI_DEV_M3_STATE && + curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE); + break; + case EP_PCIE_EVENT_PM_D3_COLD: + res = (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE); + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE); + break; + case EP_PCIE_EVENT_PM_D0: + res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE); + break; + case EP_PCIE_EVENT_MHI_A7: + res = true; + break; + default: + MHI_SM_ERR("Invalid ep_pcie event, received: %s\n", + mhi_sm_pcie_event_str(event)); + res = false; + break; + } + + return res; +} + +/** + * mhi_sm_change_to_M0() - switch to M0 state. + * + * Switch MHI-device state to M0, if possible according to MHI state machine. + * Notify the MHI-host on the transition, in case MHI is suspended- resume MHI. 
+ * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_change_to_M0(void) +{ + enum mhi_dev_state old_state; + struct ep_pcie_msi_config cfg; + int res; + + MHI_SM_FUNC_ENTRY(); + + old_state = mhi_sm_ctx->mhi_state; + + if (old_state == MHI_DEV_M0_STATE) { + MHI_SM_DBG("Nothing to do, already in M0 state\n"); + res = 0; + goto exit; + } else if (old_state == MHI_DEV_M3_STATE || + old_state == MHI_DEV_READY_STATE) { + /* Retrieve MHI configuration*/ + res = mhi_dev_config_outbound_iatu(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Fail to configure iATU, returned %d\n", + res); + goto exit; + } + res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle, + &cfg); + if (res) { + MHI_SM_ERR("Error retrieving pcie msi logic\n"); + goto exit; + } + res = mhi_pcie_config_db_routing(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Error configuring db routing\n"); + goto exit; + + } + } else { + MHI_SM_ERR("unexpected old_state: %s\n", + mhi_sm_mstate_str(old_state)); + goto exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_M0_STATE); + + /* Tell the host, device move to M0 */ + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_M0_STATE); + if (res) { + MHI_SM_ERR("Failed to send event %s to host, returned %d\n", + mhi_sm_dev_event_str(MHI_DEV_EVENT_M0_STATE), res); + goto exit; + } + + if (old_state == MHI_DEV_READY_STATE) { + /* Tell the host the EE */ + res = mhi_dev_send_ee_event(mhi_sm_ctx->mhi_dev, 2); + if (res) { + MHI_SM_ERR("failed sending EE event to host\n"); + goto exit; + } + } else if (old_state == MHI_DEV_M3_STATE) { + /*Resuming MHI operation*/ + res = mhi_dev_resume(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Failed resuming mhi core, returned %d", + res); + goto exit; + } + res = ipa_mhi_resume(); + if (res) { + MHI_SM_ERR("Failed resuming ipa_mhi, returned %d", + res); + goto exit; + } + } + res = 0; + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_change_to_M3() - switch to M3 state + * + * 
Switch MHI-device state to M3, if possible according to MHI state machine. + * Suspend MHI traffic and notify the host on the transition. + * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_change_to_M3(void) +{ + enum mhi_dev_state old_state; + int res = 0; + + MHI_SM_FUNC_ENTRY(); + + old_state = mhi_sm_ctx->mhi_state; + if (old_state == MHI_DEV_M3_STATE) { + MHI_SM_DBG("Nothing to do, already in M3 state\n"); + res = 0; + goto exit; + } + /* Suspending MHI operation*/ + res = mhi_dev_suspend(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Failed to suspend mhi_core, returned %d\n", res); + goto exit; + } + res = ipa_mhi_suspend(true); + if (res) { + MHI_SM_ERR("Failed to suspend ipa_mhi, returned %d\n", res); + goto exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_M3_STATE); + + /* tell the host, device move to M3 */ + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_M3_STATE); + if (res) { + MHI_SM_ERR("Failed sendind event: %s to mhi_host\n", + mhi_sm_dev_event_str(MHI_DEV_EVENT_M3_STATE)); + goto exit; + } + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_wakeup_host() - wakeup MHI-host + *@event: MHI state chenge event + * + * Sends wekup event to MHI-host via EP-PCIe, in case MHI is in M3 state. + * + * Return: 0:success + * negative: failure + */ +static int mhi_sm_wakeup_host(enum mhi_dev_event event) +{ + int res = 0; + + MHI_SM_FUNC_ENTRY(); + + if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) { + /* + * ep_pcie driver is responsible to send the right wakeup + * event, assert WAKE#, according to Link state + */ + res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle); + if (res) { + MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n", + res); + goto exit; + } + } else { + MHI_SM_DBG("Nothing to do, Host is already awake\n"); + } + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_handle_syserr() - switch to system error state. + * + * Called on system error condition. 
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device. + * Synchronic function. + * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_handle_syserr(void) +{ + int res; + enum ep_pcie_link_status link_status; + bool link_enabled = false; + + MHI_SM_FUNC_ENTRY(); + + MHI_SM_ERR("Start handling SYSERR, MHI state: %s and %s", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + if (mhi_sm_ctx->mhi_state == MHI_DEV_SYSERR_STATE) { + MHI_SM_DBG("Nothing to do, already in SYSERR state\n"); + return 0; + } + + mhi_sm_ctx->syserr_occurred = true; + link_status = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle); + if (link_status == EP_PCIE_LINK_DISABLED) { + /* try to power on ep-pcie, restore mmio, and wakup host */ + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_POWER_ON); + if (res) { + MHI_SM_ERR("Failed to power on ep-pcie, returned %d\n", + res); + goto exit; + } + mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev); + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_AST_WAKE | EP_PCIE_OPT_ENUM); + if (res) { + MHI_SM_ERR("Failed to wakup host and enable ep-pcie\n"); + goto exit; + } + } + + link_enabled = true; + mhi_sm_mmio_set_mhistatus(MHI_DEV_SYSERR_STATE); + + /* Tell the host, device move to SYSERR state */ + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_SYSERR_STATE); + if (res) { + MHI_SM_ERR("Failed to send %s state change event to host\n", + mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE)); + goto exit; + } + +exit: + if (!link_enabled) + MHI_SM_ERR("EP-PCIE Link is disable cannot set MMIO to %s\n", + mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE)); + + MHI_SM_ERR("/n/n/nASSERT ON DEVICE !!!!/n/n/n"); + WARN_ON(); + + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_dev_event_manager() - performs MHI state change + * @work: work_struct used by the work queue + * + * This function is called from mhi_sm_wq, and performs mhi 
state change + * if possible according to MHI state machine + */ +static void mhi_sm_dev_event_manager(struct work_struct *work) +{ + int res; + struct mhi_sm_device_event *chg_event = container_of(work, + struct mhi_sm_device_event, work); + + MHI_SM_FUNC_ENTRY(); + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + MHI_SM_DBG("Start handling %s event, current states: %s & %s\n", + mhi_sm_dev_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + if (mhi_sm_ctx->syserr_occurred) { + MHI_SM_DBG("syserr occurred, Ignoring %s\n", + mhi_sm_dev_event_str(chg_event->event)); + goto unlock_and_exit; + } + + if (!mhi_sm_is_legal_event_on_state(mhi_sm_ctx->mhi_state, + chg_event->event)) { + MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n", + mhi_sm_dev_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + } + + switch (chg_event->event) { + case MHI_DEV_EVENT_M0_STATE: + res = mhi_sm_change_to_M0(); + if (res) + MHI_SM_ERR("Failed switching to M0 state\n"); + break; + case MHI_DEV_EVENT_M3_STATE: + res = mhi_sm_change_to_M3(); + if (res) + MHI_SM_ERR("Failed switching to M3 state\n"); + break; + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + case MHI_DEV_EVENT_CORE_WAKEUP: + res = mhi_sm_wakeup_host(chg_event->event); + if (res) + MHI_SM_ERR("Failed to wakeup MHI host\n"); + break; + case MHI_DEV_EVENT_CTRL_TRIG: + case MHI_DEV_EVENT_M1_STATE: + case MHI_DEV_EVENT_M2_STATE: + MHI_SM_ERR("Error: %s event is not supported\n", + mhi_sm_dev_event_str(chg_event->event)); + break; + default: + MHI_SM_ERR("Error: Invalid event, 0x%x", chg_event->event); + break; + } +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + atomic_dec(&mhi_sm_ctx->pending_device_events); + kfree(chg_event); + + MHI_SM_FUNC_EXIT(); +} + +/** + * 
mhi_sm_pcie_event_manager() - performs EP-PCIe linke state change + * @work: work_struct used by the work queue + * + * This function is called from mhi_sm_wq, and performs ep-pcie link state + * change if possible according to current system state and MHI state machine + */ +static void mhi_sm_pcie_event_manager(struct work_struct *work) +{ + int res; + enum mhi_sm_ep_pcie_state old_dstate; + struct mhi_sm_ep_pcie_event *chg_event = container_of(work, + struct mhi_sm_ep_pcie_event, work); + enum ep_pcie_event pcie_event = chg_event->event; + + MHI_SM_FUNC_ENTRY(); + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + old_dstate = mhi_sm_ctx->d_state; + + MHI_SM_DBG("Start handling %s event, current MHI state %s and %s\n", + mhi_sm_pcie_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(old_dstate)); + + if (mhi_sm_ctx->syserr_occurred && + pcie_event != EP_PCIE_EVENT_LINKDOWN) { + MHI_SM_DBG("SYSERR occurred. Ignoring %s", + mhi_sm_pcie_event_str(pcie_event)); + goto unlock_and_exit; + } + + if (!mhi_sm_is_legal_pcie_event_on_state(mhi_sm_ctx->mhi_state, + old_dstate, pcie_event)) { + MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n", + mhi_sm_pcie_event_str(pcie_event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(old_dstate)); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + } + + switch (pcie_event) { + case EP_PCIE_EVENT_LINKUP: + if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_LINK_DISABLE) + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + case EP_PCIE_EVENT_LINKDOWN: + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + case EP_PCIE_EVENT_PM_D3_HOT: + if (old_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE) { + MHI_SM_DBG("cannot move to D3_HOT from D3_COLD\n"); + break; + } + /* Backup MMIO is done on the callback function*/ + mhi_sm_ctx->d_state = 
MHI_SM_EP_PCIE_D3_HOT_STATE; + break; + case EP_PCIE_EVENT_PM_D3_COLD: + if (old_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE) { + MHI_SM_DBG("Nothing to do, already in D3_COLD state\n"); + break; + } + ep_pcie_disable_endpoint(mhi_sm_ctx->mhi_dev->phandle); + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_COLD_STATE; + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) { + MHI_SM_DBG("Nothing to do, already in D0 state\n"); + break; + } + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_POWER_ON); + if (res) { + MHI_SM_ERR("Failed to power on ep_pcie, returned %d\n", + res); + goto unlock_and_exit; + } + + mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev); + + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_ENUM); + if (res) { + MHI_SM_ERR("ep-pcie failed to link train, return %d\n", + res); + goto unlock_and_exit; + } + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + case EP_PCIE_EVENT_PM_D0: + if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) { + MHI_SM_DBG("Nothing to do, already in D0 state\n"); + break; + } + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + default: + MHI_SM_ERR("Invalid EP_PCIE event, received 0x%x\n", + pcie_event); + break; + } + +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + atomic_dec(&mhi_sm_ctx->pending_pcie_events); + kfree(chg_event); + + MHI_SM_FUNC_EXIT(); +} + +/** + * mhi_dev_sm_init() - Initialize MHI state machine. + * @mhi_dev: pointer to mhi device instance + * + * Assuming MHISTATUS register is in RESET state. 
+ * + * Return: 0 success + * -EINVAL: invalid param + * -ENOMEM: allocating memory error + */ +int mhi_dev_sm_init(struct mhi_dev *mhi_dev) +{ + int res; + enum ep_pcie_link_status link_state; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_dev) { + MHI_SM_ERR("Fail: Null argument\n"); + return -EINVAL; + } + + mhi_sm_ctx = devm_kzalloc(mhi_dev->dev, sizeof(*mhi_sm_ctx), + GFP_KERNEL); + if (!mhi_sm_ctx) { + MHI_SM_ERR("devm_kzalloc err: mhi_sm_ctx\n"); + return -ENOMEM; + } + + /*init debugfs*/ + mhi_sm_debugfs_init(); + mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq"); + if (!mhi_sm_ctx->mhi_sm_wq) { + MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n"); + res = -ENOMEM; + goto fail_init_wq; + } + + mutex_init(&mhi_sm_ctx->mhi_state_lock); + mhi_sm_ctx->mhi_dev = mhi_dev; + mhi_sm_ctx->mhi_state = MHI_DEV_RESET_STATE; + mhi_sm_ctx->syserr_occurred = false; + atomic_set(&mhi_sm_ctx->pending_device_events, 0); + atomic_set(&mhi_sm_ctx->pending_pcie_events, 0); + + link_state = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle); + if (link_state == EP_PCIE_LINK_ENABLED) + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + else + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_LINK_DISABLE; + + MHI_SM_FUNC_EXIT(); + return 0; + +fail_init_wq: + mhi_sm_ctx = NULL; + mhi_sm_debugfs_destroy(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_init); + +/** + * mhi_dev_sm_get_mhi_state() -Get current MHI state. + * @state: return param + * + * Returns the current MHI state of the state machine. 
+ * + * Return: 0 success + * -EINVAL: invalid param + * -EFAULT: state machine isn't initialized + */ +int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state) +{ + MHI_SM_FUNC_ENTRY(); + + if (!state) { + MHI_SM_ERR("Fail: Null argument\n"); + return -EINVAL; + } + if (!mhi_sm_ctx) { + MHI_SM_ERR("Fail: MHI SM is not initialized\n"); + return -EFAULT; + } + *state = mhi_sm_ctx->mhi_state; + MHI_SM_DBG("state machine states are: %s and %s\n", + mhi_sm_mstate_str(*state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + MHI_SM_FUNC_EXIT(); + return 0; +} +EXPORT_SYMBOL(mhi_dev_sm_get_mhi_state); + +/** + * mhi_dev_sm_set_ready() -Set MHI state to ready. + * + * Set MHISTATUS register in mmio to READY. + * Synchronic function. + * + * Return: 0: success + * EINVAL: mhi state manager is not initialized + * EPERM: Operation not permitted as EP PCIE link is desable. + * EFAULT: MHI state is not RESET + * negative: other failure + */ +int mhi_dev_sm_set_ready(void) +{ + int res; + int is_ready; + enum mhi_dev_state state; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM isn't initialized\n"); + return -EINVAL; + } + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + if (mhi_sm_ctx->mhi_state != MHI_DEV_RESET_STATE) { + MHI_SM_ERR("Can not switch to READY state from %s state\n", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state)); + res = -EFAULT; + goto unlock_and_exit; + } + + if (mhi_sm_ctx->d_state != MHI_SM_EP_PCIE_D0_STATE) { + if (ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle) == + EP_PCIE_LINK_ENABLED) { + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + } else { + MHI_SM_ERR("ERROR: ep-pcie link is not enabled\n"); + res = -EPERM; + goto unlock_and_exit; + } + } + + /* verify that MHISTATUS is configured to RESET*/ + mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, + MHISTATUS, MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + + mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, 
&is_ready); + + if (state != MHI_DEV_RESET_STATE || is_ready) { + MHI_SM_ERR("Cannot switch to READY, MHI is not in RESET state"); + MHI_SM_ERR("-MHISTATE: %s, READY bit: 0x%x\n", + mhi_sm_mstate_str(state), is_ready); + res = -EFAULT; + goto unlock_and_exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_READY_STATE); + +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_set_ready); + +/** + * mhi_dev_notify_sm_event() - MHI-core notify SM on trigger occurred + * @event - enum of the requierd operation. + * + * Asynchronic function. + * No trigger is sent after operation is done. + * + * Return: 0: success + * -EFAULT: SM isn't initialized or event isn't supported + * -ENOMEM: allocating memory error + * -EINVAL: invalied event + */ +int mhi_dev_notify_sm_event(enum mhi_dev_event event) +{ + struct mhi_sm_device_event *state_change_event; + int res; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return -EFAULT; + } + + MHI_SM_DBG("received: %s\n", + mhi_sm_dev_event_str(event)); + + switch (event) { + case MHI_DEV_EVENT_M0_STATE: + mhi_sm_ctx->stats.m0_event_cnt++; + break; + case MHI_DEV_EVENT_M3_STATE: + mhi_sm_ctx->stats.m3_event_cnt++; + break; + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt++; + break; + case MHI_DEV_EVENT_CORE_WAKEUP: + mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt++; + break; + case MHI_DEV_EVENT_CTRL_TRIG: + case MHI_DEV_EVENT_M1_STATE: + case MHI_DEV_EVENT_M2_STATE: + MHI_SM_ERR("Not supported event: %s\n", + mhi_sm_dev_event_str(event)); + res = -EFAULT; + goto exit; + default: + MHI_SM_ERR("Invalid event, received: 0x%x event\n", event); + res = -EINVAL; + goto exit; + } + + /*init work and push to queue*/ + state_change_event = kzalloc(sizeof(*state_change_event), GFP_ATOMIC); + if (!state_change_event) { + MHI_SM_ERR("kzalloc error\n"); + res = -ENOMEM; + goto exit; + } + + 
state_change_event->event = event; + INIT_WORK(&state_change_event->work, mhi_sm_dev_event_manager); + atomic_inc(&mhi_sm_ctx->pending_device_events); + queue_work(mhi_sm_ctx->mhi_sm_wq, &state_change_event->work); + res = 0; + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_notify_sm_event); + +/** + * mhi_dev_sm_pcie_handler() - handler of ep_pcie events + * @notify - pointer to structure contains the ep_pcie event + * + * Callback function, called by ep_pcie driver to notify on pcie state change + * Asynchronic function + */ +void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify) +{ + struct mhi_sm_ep_pcie_event *dstate_change_evt; + enum ep_pcie_event event; + + MHI_SM_FUNC_ENTRY(); + + if (!notify) { + MHI_SM_ERR("Null argument - notify\n"); + return; + } + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return; + } + + event = notify->event; + MHI_SM_DBG("received: %s\n", + mhi_sm_pcie_event_str(event)); + + dstate_change_evt = kzalloc(sizeof(*dstate_change_evt), GFP_ATOMIC); + if (!dstate_change_evt) { + MHI_SM_ERR("kzalloc error\n"); + goto exit; + } + + switch (event) { + case EP_PCIE_EVENT_LINKUP: + mhi_sm_ctx->stats.linkup_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D3_COLD: + mhi_sm_ctx->stats.d3_cold_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D3_HOT: + mhi_sm_ctx->stats.d3_hot_event_cnt++; + mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev); + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + mhi_sm_ctx->stats.rst_deast_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D0: + mhi_sm_ctx->stats.d0_event_cnt++; + break; + case EP_PCIE_EVENT_LINKDOWN: + mhi_sm_ctx->stats.linkdown_event_cnt++; + mhi_sm_ctx->syserr_occurred = true; + MHI_SM_ERR("got %s, ERROR occurred\n", + mhi_sm_pcie_event_str(event)); + break; + case EP_PCIE_EVENT_MHI_A7: + ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_INT_EVT_MHI_A7, false); + mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev); + goto exit; + default: + 
MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n", + event); + kfree(dstate_change_evt); + goto exit; + } + + dstate_change_evt->event = event; + INIT_WORK(&dstate_change_evt->work, mhi_sm_pcie_event_manager); + queue_work(mhi_sm_ctx->mhi_sm_wq, &dstate_change_evt->work); + atomic_inc(&mhi_sm_ctx->pending_pcie_events); + +exit: + MHI_SM_FUNC_EXIT(); +} +EXPORT_SYMBOL(mhi_dev_sm_pcie_handler); + +/** + * mhi_dev_sm_syserr() - switch to system error state. + * + * Called on system error condition. + * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device. + * Synchronic function. + * + * Return: 0: success + * negative: failure + */ +int mhi_dev_sm_syserr(void) +{ + int res; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return -EFAULT; + } + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("mhi_sm_handle_syserr failed %d\n", res); + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_syserr); + +static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!mhi_sm_ctx) { + nbytes = scnprintf(dbg_buff, MHI_SM_MAX_MSG_LEN, + "Not initialized\n"); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "*************** MHI State machine status ***************\n"); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D state: %s\n", + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M state: %s\n", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "pending device events: %d\n", + atomic_read(&mhi_sm_ctx->pending_device_events)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "pending pcie events: %d\n", + 
atomic_read(&mhi_sm_ctx->pending_pcie_events)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "*************** Statistics ***************\n"); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M0 events: %d\n", mhi_sm_ctx->stats.m0_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M3 events: %d\n", mhi_sm_ctx->stats.m3_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "HW_ACC wakeup events: %d\n", + mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "CORE wakeup events: %d\n", + mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "Linkup events: %d\n", + mhi_sm_ctx->stats.linkup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "De-assert PERST events: %d\n", + mhi_sm_ctx->stats.rst_deast_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D0 events: %d\n", + mhi_sm_ctx->stats.d0_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D3_HOT events: %d\n", + mhi_sm_ctx->stats.d3_hot_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D3_COLD events:%d\n", + mhi_sm_ctx->stats.d3_cold_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "Linkdown events: %d\n", + mhi_sm_ctx->stats.linkdown_event_cnt); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t mhi_sm_debugfs_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Not initialized\n"); + return -EFAULT; + } + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + 
return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + + switch (in_num) { + case 0: + if (atomic_read(&mhi_sm_ctx->pending_device_events) || + atomic_read(&mhi_sm_ctx->pending_pcie_events)) + MHI_SM_DBG("Note, there are pending events in sm_wq\n"); + + memset(&mhi_sm_ctx->stats, 0, sizeof(struct mhi_sm_stats)); + break; + default: + MHI_SM_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + + return count; +} diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.h b/drivers/platform/msm/mhi_dev/mhi_sm.h new file mode 100644 index 000000000000..ebf465e1cc43 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_sm.h @@ -0,0 +1,51 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef MHI_SM_H +#define MHI_SM_H + +#include "mhi.h" +#include +#include + + +/** + * enum mhi_dev_event - MHI state change events + * @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event. + * Not supported,for future use + * @MHI_DEV_EVENT_M0_STATE: M0 state change event + * @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use + * @MHI_DEV_EVENT_M2_STATE: M2 state change event. 
Not supported, for future use + * @MHI_DEV_EVENT_M3_STATE: M3 state change event + * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pending data on IPA, initiate Host wakeup + * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiate Host wakeup + */ +enum mhi_dev_event { + MHI_DEV_EVENT_CTRL_TRIG, + MHI_DEV_EVENT_M0_STATE, + MHI_DEV_EVENT_M1_STATE, + MHI_DEV_EVENT_M2_STATE, + MHI_DEV_EVENT_M3_STATE, + MHI_DEV_EVENT_HW_ACC_WAKEUP, + MHI_DEV_EVENT_CORE_WAKEUP, + MHI_DEV_EVENT_MAX +}; + +int mhi_dev_sm_init(struct mhi_dev *dev); +int mhi_dev_sm_set_ready(void); +int mhi_dev_notify_sm_event(enum mhi_dev_event event); +int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state); +int mhi_dev_sm_syserr(void); +void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify); + +#endif /* MHI_SM_H */ + diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c new file mode 100644 index 000000000000..64b5e7a73ef5 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -0,0 +1,835 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" + +#define MHI_DEV_NODE_NAME_LEN 13 +#define MHI_MAX_NR_OF_CLIENTS 23 +#define MHI_SOFTWARE_CLIENT_START 0 +#define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2) +#define MHI_UCI_IPC_LOG_PAGES (100) + +#define MAX_NR_TRBS_PER_CHAN 1 +#define MHI_QTI_IFACE_ID 4 +#define DEVICE_NAME "mhi" + +enum uci_dbg_level { + UCI_DBG_VERBOSE = 0x0, + UCI_DBG_INFO = 0x1, + UCI_DBG_DBG = 0x2, + UCI_DBG_WARNING = 0x3, + UCI_DBG_ERROR = 0x4, + UCI_DBG_CRITICAL = 0x5, + UCI_DBG_reserved = 0x80000000 +}; + +static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL; +static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO; +static void *mhi_uci_ipc_log; + + +enum mhi_chan_dir { + MHI_DIR_INVALID = 0x0, + MHI_DIR_OUT = 0x1, + MHI_DIR_IN = 0x2, + MHI_DIR__reserved = 0x80000000 +}; + +struct chan_attr { + /* SW maintained channel id */ + enum mhi_client_channel chan_id; + /* maximum buffer size for this channel */ + size_t max_packet_size; + /* number of buffers supported in this channel */ + u32 nr_trbs; + /* direction of the channel, see enum mhi_chan_dir */ + enum mhi_chan_dir dir; + u32 uci_ownership; +}; + +struct uci_client { + u32 client_index; + /* write channel - always odd*/ + u32 out_chan; + /* read channel - always even */ + u32 in_chan; + struct mhi_dev_client *out_handle; + struct mhi_dev_client *in_handle; + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + atomic_t read_data_ready; + struct device *dev; + atomic_t ref_count; + int mhi_status; + void *pkt_loc; + size_t pkt_size; + struct mhi_dev_iov *in_buf_list; + atomic_t write_data_ready; + atomic_t mhi_chans_open; + struct mhi_uci_ctxt_t *uci_ctxt; + struct mutex in_chan_lock; + struct mutex out_chan_lock; +}; + +struct mhi_uci_ctxt_t { + struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS]; + 
struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT]; + void (*event_notifier)(struct mhi_dev_client_cb_reason *cb); + dev_t start_ctrl_nr; + struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS]; + struct class *mhi_uci_class; + atomic_t mhi_disabled; + atomic_t mhi_enable_notif_wq_active; +}; + +#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2) + +#define uci_log(_msg_lvl, _msg, ...) do { \ + if (_msg_lvl >= mhi_uci_msg_lvl) { \ + pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ + } \ + if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \ + ipc_log_string(mhi_uci_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ + } \ +} while (0) + + +module_param(mhi_uci_msg_lvl, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mhi_uci_msg_lvl, "uci dbg lvl"); + +module_param(mhi_uci_ipc_log_lvl, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mhi_uci_ipc_log_lvl, "ipc dbg lvl"); + +static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, + size_t count, loff_t *offp); +static ssize_t mhi_uci_client_write(struct file *file, + const char __user *buf, size_t count, loff_t *offp); +static int mhi_uci_client_open(struct inode *mhi_inode, struct file*); +static int mhi_uci_client_release(struct inode *mhi_inode, + struct file *file_handle); +static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait); +static struct mhi_uci_ctxt_t uci_ctxt; + +static int mhi_init_read_chan(struct uci_client *client_handle, + enum mhi_client_channel chan) +{ + int rc = 0; + u32 i, j; + struct chan_attr *chan_attributes; + size_t buf_size; + void *data_loc; + + if (client_handle == NULL) { + uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n"); + return -EINVAL; + } + if (chan >= MHI_MAX_SOFTWARE_CHANNELS) { + uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan); + return -EINVAL; + } + + chan_attributes = &uci_ctxt.chan_attrib[chan]; + buf_size = chan_attributes->max_packet_size; + + for (i = 0; i < (chan_attributes->nr_trbs); i++) { + data_loc = 
kmalloc(buf_size, GFP_KERNEL); + if (!data_loc) { + rc = -ENOMEM; + goto free_memory; + } + client_handle->in_buf_list[i].addr = data_loc; + client_handle->in_buf_list[i].buf_size = buf_size; + } + + return rc; + +free_memory: + for (j = 0; j < i; j++) + kfree(client_handle->in_buf_list[j].addr); + + return rc; +} + +static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf, + u32 size, u32 is_uspace_buf) +{ + void *data_loc = NULL; + uintptr_t memcpy_result = 0; + u32 data_inserted_so_far = 0; + struct uci_client *uci_handle; + + uci_handle = container_of(client_handle, struct uci_client, + out_handle); + + if (!client_handle || !buf || + !size || !uci_handle) + return -EINVAL; + + if (is_uspace_buf) { + data_loc = kmalloc(size, GFP_KERNEL); + if (!data_loc) { + uci_log(UCI_DBG_ERROR, + "Failed to allocate memory 0x%x\n", + size); + return -ENOMEM; + } + memcpy_result = copy_from_user(data_loc, buf, size); + if (memcpy_result) + goto error_memcpy; + } else { + data_loc = buf; + } + + data_inserted_so_far = mhi_dev_write_channel(*client_handle, data_loc, + size); + +error_memcpy: + kfree(data_loc); + return data_inserted_so_far; +} + +static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + struct uci_client *uci_handle; + + uci_handle = file->private_data; + + if (!uci_handle) + return -ENODEV; + + poll_wait(file, &uci_handle->read_wq, wait); + poll_wait(file, &uci_handle->write_wq, wait); + if (!atomic_read(&uci_ctxt.mhi_disabled) && + !mhi_dev_channel_isempty(uci_handle->in_handle)) { + uci_log(UCI_DBG_VERBOSE, + "Client can read chan %d\n", uci_handle->in_chan); + mask |= POLLIN | POLLRDNORM; + } + if (!atomic_read(&uci_ctxt.mhi_disabled) && + !mhi_dev_channel_isempty(uci_handle->out_handle)) { + uci_log(UCI_DBG_VERBOSE, + "Client can write chan %d\n", uci_handle->out_chan); + mask |= POLLOUT | POLLWRNORM; + } + + uci_log(UCI_DBG_VERBOSE, + "Client attempted to poll chan %d, returning mask 
0x%x\n", + uci_handle->in_chan, mask); + return mask; +} + +static int open_client_mhi_channels(struct uci_client *uci_client) +{ + int rc = 0; + + uci_log(UCI_DBG_DBG, + "Starting channels %d %d.\n", + uci_client->out_chan, + uci_client->in_chan); + mutex_lock(&uci_client->out_chan_lock); + mutex_lock(&uci_client->in_chan_lock); + uci_log(UCI_DBG_DBG, + "Initializing inbound chan %d.\n", + uci_client->in_chan); + + rc = mhi_init_read_chan(uci_client, uci_client->in_chan); + if (rc < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to init inbound 0x%x, ret 0x%x\n", + uci_client->in_chan, rc); + } + + rc = mhi_dev_open_channel(uci_client->out_chan, + &uci_client->out_handle, + uci_ctxt.event_notifier); + if (rc < 0) + goto handle_not_rdy_err; + + rc = mhi_dev_open_channel(uci_client->in_chan, + &uci_client->in_handle, + uci_ctxt.event_notifier); + + if (rc < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to open chan %d, ret 0x%x\n", + uci_client->out_chan, rc); + goto handle_in_err; + } + atomic_set(&uci_client->mhi_chans_open, 1); + mutex_unlock(&uci_client->in_chan_lock); + mutex_unlock(&uci_client->out_chan_lock); + + return 0; + +handle_in_err: + mhi_dev_close_channel(uci_client->out_handle); +handle_not_rdy_err: + mutex_unlock(&uci_client->in_chan_lock); + mutex_unlock(&uci_client->out_chan_lock); + return rc; +} + +static int mhi_uci_client_open(struct inode *mhi_inode, + struct file *file_handle) +{ + struct uci_client *uci_handle; + int rc = 0; + + uci_handle = + &uci_ctxt.client_handles[iminor(mhi_inode)]; + + uci_log(UCI_DBG_DBG, + "Client opened struct device node 0x%x, ref count 0x%x\n", + iminor(mhi_inode), atomic_read(&uci_handle->ref_count)); + if (atomic_add_return(1, &uci_handle->ref_count) == 1) { + if (!uci_handle) { + atomic_dec(&uci_handle->ref_count); + return -ENOMEM; + } + uci_handle->uci_ctxt = &uci_ctxt; + if (!atomic_read(&uci_handle->mhi_chans_open)) { + uci_log(UCI_DBG_INFO, + "Opening channels client %d\n", + iminor(mhi_inode)); + rc = 
open_client_mhi_channels(uci_handle); + if (rc) { + uci_log(UCI_DBG_INFO, + "Failed to open channels ret %d\n", rc); + return rc; + } + } + } + file_handle->private_data = uci_handle; + + return 0; + +} + +static int mhi_uci_client_release(struct inode *mhi_inode, + struct file *file_handle) +{ + struct uci_client *uci_handle = file_handle->private_data; + struct mhi_uci_ctxt_t *uci_ctxt = uci_handle->uci_ctxt; + u32 nr_in_bufs = 0; + int rc = 0; + int in_chan = 0; + u32 buf_size = 0; + + in_chan = iminor(mhi_inode) + 1; + nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs; + buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size; + + if (!uci_handle) + return -EINVAL; + if (atomic_sub_return(1, &uci_handle->ref_count) == 0) { + uci_log(UCI_DBG_DBG, + "Last client left, closing channel 0x%x\n", + iminor(mhi_inode)); + if (atomic_read(&uci_handle->mhi_chans_open)) { + atomic_set(&uci_handle->mhi_chans_open, 0); + + mutex_lock(&uci_handle->out_chan_lock); + rc = mhi_dev_close_channel(uci_handle->out_handle); + wake_up(&uci_handle->write_wq); + mutex_unlock(&uci_handle->out_chan_lock); + + mutex_lock(&uci_handle->in_chan_lock); + rc = mhi_dev_close_channel(uci_handle->in_handle); + wake_up(&uci_handle->read_wq); + mutex_unlock(&uci_handle->in_chan_lock); + + } + atomic_set(&uci_handle->read_data_ready, 0); + atomic_set(&uci_handle->write_data_ready, 0); + file_handle->private_data = NULL; + } else { + uci_log(UCI_DBG_DBG, + "Client close chan %d, ref count 0x%x\n", + iminor(mhi_inode), + atomic_read(&uci_handle->ref_count)); + } + return rc; +} + +static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, + size_t uspace_buf_size, loff_t *bytes_pending) +{ + struct uci_client *uci_handle = NULL; + struct mhi_dev_client *client_handle = NULL; + int bytes_avail = 0; + int ret_val = 0; + struct mutex *mutex; + u32 chan = 0; + ssize_t bytes_copied = 0; + u32 addr_offset = 0; + uint32_t buf_size; + uint32_t chained = 0; + void *local_buf = NULL; + + if 
(!file || !buf || !uspace_buf_size || + !file->private_data) + return -EINVAL; + + uci_handle = file->private_data; + client_handle = uci_handle->in_handle; + mutex = &uci_handle->in_chan_lock; + chan = uci_handle->in_chan; + + mutex_lock(mutex); + + local_buf = uci_handle->in_buf_list[0].addr; + buf_size = uci_handle->in_buf_list[0].buf_size; + + + uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", chan); + do { + if (!uci_handle->pkt_loc && + !atomic_read(&uci_ctxt.mhi_disabled)) { + + bytes_avail = mhi_dev_read_channel(client_handle, + local_buf, buf_size, &chained); + + uci_log(UCI_DBG_VERBOSE, + "reading from mhi_core local_buf = %p,buf_size = 0x%x bytes_read = 0x%x\n", + local_buf, buf_size, bytes_avail); + + if (bytes_avail < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to read channel ret %d\n", + bytes_avail); + ret_val = -EIO; + goto error; + } + + if (bytes_avail > 0) { + uci_handle->pkt_loc = (void *)local_buf; + uci_handle->pkt_size = bytes_avail; + + *bytes_pending = (loff_t)uci_handle->pkt_size; + uci_log(UCI_DBG_VERBOSE, + "Got pkt of size 0x%x at addr %p, chan %d\n", + uci_handle->pkt_size, local_buf, chan); + } else { + uci_handle->pkt_loc = 0; + uci_handle->pkt_size = 0; + } + } + if (bytes_avail == 0) { + + /* If nothing was copied yet, wait for data */ + uci_log(UCI_DBG_VERBOSE, + "No data read_data_ready %d, chan %d\n", + atomic_read(&uci_handle->read_data_ready), + chan); + + ret_val = wait_event_interruptible(uci_handle->read_wq, + (!mhi_dev_channel_isempty(client_handle))); + + if (ret_val == -ERESTARTSYS) { + uci_log(UCI_DBG_ERROR, "Exit signal caught\n"); + goto error; + } + uci_log(UCI_DBG_VERBOSE, + "Thread woke up. 
Got data on chan %d read_data_ready %d\n", + chan, + atomic_read(&uci_handle->read_data_ready)); + + /* A valid packet was returned from MHI */ + } else if (bytes_avail > 0) { + uci_log(UCI_DBG_VERBOSE, + "Got packet: avail pkts %d phy_adr %p, chan %d\n", + atomic_read(&uci_handle->read_data_ready), + local_buf, + chan); + break; + /* + * MHI did not return a valid packet, but we have one + * which we did not finish returning to user + */ + } else { + uci_log(UCI_DBG_CRITICAL, + "chan %d err: avail pkts %d phy_adr %p", + chan, + atomic_read(&uci_handle->read_data_ready), + local_buf); + return -EIO; + } + } while (!uci_handle->pkt_loc); + + if (uspace_buf_size >= *bytes_pending) { + addr_offset = uci_handle->pkt_size - *bytes_pending; + if (copy_to_user(buf, uci_handle->pkt_loc + addr_offset, + *bytes_pending)) { + ret_val = -EIO; + goto error; + } + + bytes_copied = *bytes_pending; + *bytes_pending = 0; + uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n", + bytes_copied, (u32)*bytes_pending, chan); + } else { + addr_offset = uci_handle->pkt_size - *bytes_pending; + if (copy_to_user(buf, (void *) (uintptr_t)uci_handle->pkt_loc + + addr_offset, uspace_buf_size)) { + ret_val = -EIO; + goto error; + } + bytes_copied = uspace_buf_size; + *bytes_pending -= uspace_buf_size; + uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n", + bytes_copied, + (u32)*bytes_pending, + chan); + } + /* We finished with this buffer, map it back */ + if (*bytes_pending == 0) { + uci_log(UCI_DBG_VERBOSE, + "All data consumed. 
Pkt loc %p ,chan %d\n", + uci_handle->pkt_loc, chan); + uci_handle->pkt_loc = 0; + uci_handle->pkt_size = 0; + } + uci_log(UCI_DBG_VERBOSE, + "Returning 0x%x bytes, 0x%x bytes left\n", + bytes_copied, (u32)*bytes_pending); + mutex_unlock(mutex); + return bytes_copied; +error: + mutex_unlock(mutex); + uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val); + return ret_val; +} + +static ssize_t mhi_uci_client_write(struct file *file, + const char __user *buf, + size_t count, loff_t *offp) +{ + struct uci_client *uci_handle = NULL; + int ret_val = 0; + u32 chan = 0xFFFFFFFF; + + if (file == NULL || buf == NULL || + !count || file->private_data == NULL) + return -EINVAL; + + uci_handle = file->private_data; + + if (atomic_read(&uci_ctxt.mhi_disabled)) { + uci_log(UCI_DBG_ERROR, + "Client %d attempted to write while MHI is disabled\n", + uci_handle->out_chan); + return -EIO; + } + chan = uci_handle->out_chan; + mutex_lock(&uci_handle->out_chan_lock); + while (!ret_val) { + ret_val = mhi_uci_send_packet(&uci_handle->out_handle, + (void *)buf, count, 1); + if (ret_val < 0) { + uci_log(UCI_DBG_ERROR, + "Error while writing data to MHI, chan %d, buf %p, size %d\n", + chan, (void *)buf, count); + ret_val = -EIO; + break; + } + if (!ret_val) { + uci_log(UCI_DBG_VERBOSE, + "No descriptors available, did we poll, chan %d?\n", + chan); + mutex_unlock(&uci_handle->out_chan_lock); + ret_val = wait_event_interruptible(uci_handle->write_wq, + !mhi_dev_channel_isempty( + uci_handle->out_handle)); + + mutex_lock(&uci_handle->out_chan_lock); + if (-ERESTARTSYS == ret_val) { + uci_log(UCI_DBG_WARNING, + "Waitqueue cancelled by system\n"); + break; + } + } + } + mutex_unlock(&uci_handle->out_chan_lock); + return ret_val; +} + +static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt) +{ + u32 i = 0; + u32 data_size = TRB_MAX_DATA_SIZE; + u32 index = 0; + struct uci_client *client; + struct chan_attr *chan_attrib = NULL; + + for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) 
{ + chan_attrib = &uci_ctxt->chan_attrib[i]; + switch (i) { + case MHI_CLIENT_LOOPBACK_OUT: + case MHI_CLIENT_LOOPBACK_IN: + case MHI_CLIENT_SAHARA_OUT: + case MHI_CLIENT_SAHARA_IN: + case MHI_CLIENT_EFS_OUT: + case MHI_CLIENT_EFS_IN: + case MHI_CLIENT_QMI_OUT: + case MHI_CLIENT_QMI_IN: + case MHI_CLIENT_IP_CTRL_0_OUT: + case MHI_CLIENT_IP_CTRL_0_IN: + case MHI_CLIENT_IP_CTRL_1_OUT: + case MHI_CLIENT_IP_CTRL_1_IN: + case MHI_CLIENT_DUN_OUT: + case MHI_CLIENT_DUN_IN: + chan_attrib->uci_ownership = 1; + break; + default: + chan_attrib->uci_ownership = 0; + break; + } + if (chan_attrib->uci_ownership) { + chan_attrib->chan_id = i; + chan_attrib->max_packet_size = data_size; + index = CHAN_TO_CLIENT(i); + client = &uci_ctxt->client_handles[index]; + chan_attrib->nr_trbs = 9; + client->in_buf_list = + kmalloc(sizeof(struct mhi_dev_iov) * + chan_attrib->nr_trbs, + GFP_KERNEL); + if (client->in_buf_list == NULL) + return -ENOMEM; + } + if (i % 2 == 0) + chan_attrib->dir = MHI_DIR_OUT; + else + chan_attrib->dir = MHI_DIR_IN; + } + return 0; +} + + +static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason) +{ + int client_index = 0; + struct uci_client *uci_handle = NULL; + + if (reason->reason == MHI_DEV_TRE_AVAILABLE) { + client_index = reason->ch_id / 2; + uci_handle = &uci_ctxt.client_handles[client_index]; + uci_log(UCI_DBG_DBG, + "recived TRE available event for chan %d\n", + uci_handle->in_chan); + + if (reason->ch_id % 2) { + atomic_set(&uci_handle->write_data_ready, 1); + wake_up(&uci_handle->write_wq); + } else { + atomic_set(&uci_handle->read_data_ready, 1); + wake_up(&uci_handle->read_wq); + } + } +} + +static int mhi_register_client(struct uci_client *mhi_client, int index) +{ + init_waitqueue_head(&mhi_client->read_wq); + init_waitqueue_head(&mhi_client->write_wq); + mhi_client->out_chan = index * 2 + 1; + mhi_client->in_chan = index * 2; + mhi_client->client_index = index; + + mutex_init(&mhi_client->in_chan_lock); + 
mutex_init(&mhi_client->out_chan_lock); + + uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan); + return 0; +} + +static long mhi_uci_client_ioctl(struct file *file, unsigned cmd, + unsigned long arg) +{ + struct uci_client *uci_handle = NULL; + int rc = 0; + struct ep_info epinfo; + + if (file == NULL || file->private_data == NULL) + return -EINVAL; + + uci_handle = file->private_data; + + uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n", + cmd, uci_handle->client_index); + + if (cmd == MHI_UCI_EP_LOOKUP) { + uci_log(UCI_DBG_DBG, "EP_LOOKUP for client:%d\n", + uci_handle->client_index); + epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE; + epinfo.ph_ep_info.peripheral_iface_id = MHI_QTI_IFACE_ID; + epinfo.ipa_ep_pair.cons_pipe_num = + ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD); + epinfo.ipa_ep_pair.prod_pipe_num = + ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS); + + uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n", + uci_handle->client_index, + epinfo.ph_ep_info.ep_type, + epinfo.ph_ep_info.peripheral_iface_id); + + uci_log(UCI_DBG_DBG, "ipa_cons_idx:%d ipa_prod_idx:%d\n", + epinfo.ipa_ep_pair.cons_pipe_num, + epinfo.ipa_ep_pair.prod_pipe_num); + + rc = copy_to_user((void __user *)arg, &epinfo, + sizeof(epinfo)); + if (rc) + uci_log(UCI_DBG_ERROR, "copying to user space failed"); + } else { + uci_log(UCI_DBG_ERROR, "wrong parameter:%d\n", cmd); + rc = -EINVAL; + } + + return rc; +} + +static const struct file_operations mhi_uci_client_fops = { + .read = mhi_uci_client_read, + .write = mhi_uci_client_write, + .open = mhi_uci_client_open, + .release = mhi_uci_client_release, + .poll = mhi_uci_client_poll, + .unlocked_ioctl = mhi_uci_client_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mhi_uci_client_ioctl, +#endif +}; + +int mhi_uci_init(void) +{ + u32 i = 0; + int ret_val = 0; + struct uci_client *mhi_client = NULL; + s32 r = 0; + + mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES, + "mhi-uci", 0); + if (mhi_uci_ipc_log == NULL) 
{ + uci_log(UCI_DBG_WARNING, + "Failed to create IPC logging context\n"); + } + uci_ctxt.event_notifier = uci_event_notifier; + + uci_log(UCI_DBG_DBG, "Setting up channel attributes.\n"); + + ret_val = uci_init_client_attributes(&uci_ctxt); + if (ret_val < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to init client attributes\n"); + return -EIO; + } + + uci_log(UCI_DBG_DBG, "Initializing clients\n"); + uci_log(UCI_DBG_INFO, "Registering for MHI events.\n"); + + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) { + if (uci_ctxt.chan_attrib[i * 2].uci_ownership) { + mhi_client = &uci_ctxt.client_handles[i]; + + r = mhi_register_client(mhi_client, i); + + if (r) { + uci_log(UCI_DBG_CRITICAL, + "Failed to reg client %d ret %d\n", + r, i); + } + } + } + uci_log(UCI_DBG_INFO, "Allocating char devices.\n"); + r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr, + 0, MHI_MAX_SOFTWARE_CHANNELS, + DEVICE_NAME); + + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to alloc char devs, ret 0x%x\n", r); + goto failed_char_alloc; + } + uci_log(UCI_DBG_INFO, "Creating class\n"); + uci_ctxt.mhi_uci_class = class_create(THIS_MODULE, + DEVICE_NAME); + if (IS_ERR(uci_ctxt.mhi_uci_class)) { + uci_log(UCI_DBG_ERROR, + "Failed to instantiate class, ret 0x%x\n", r); + r = -ENOMEM; + goto failed_class_add; + } + + uci_log(UCI_DBG_INFO, "Setting up device nodes.\n"); + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) { + if (uci_ctxt.chan_attrib[i*2].uci_ownership) { + cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops); + uci_ctxt.cdev[i].owner = THIS_MODULE; + r = cdev_add(&uci_ctxt.cdev[i], + uci_ctxt.start_ctrl_nr + i, 1); + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to add cdev %d, ret 0x%x\n", + i, r); + goto failed_char_add; + } + uci_ctxt.client_handles[i].dev = + device_create(uci_ctxt.mhi_uci_class, NULL, + uci_ctxt.start_ctrl_nr + i, + NULL, DEVICE_NAME "_pipe_%d", + i * 2); + + if (IS_ERR(uci_ctxt.client_handles[i].dev)) { + uci_log(UCI_DBG_ERROR, + "Failed to add 
cdev %d\n", i); + cdev_del(&uci_ctxt.cdev[i]); + goto failed_device_create; + } + } + } + return 0; + +failed_char_add: +failed_device_create: + while (--i >= 0) { + cdev_del(&uci_ctxt.cdev[i]); + device_destroy(uci_ctxt.mhi_uci_class, + MKDEV(MAJOR(uci_ctxt.start_ctrl_nr), i * 2)); + }; + class_destroy(uci_ctxt.mhi_uci_class); +failed_class_add: + unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr), + MHI_MAX_SOFTWARE_CHANNELS); +failed_char_alloc: + return r; +} -- cgit v1.2.3 From 96b933ebbf337e38f968140706db42d2c9318bfb Mon Sep 17 00:00:00 2001 From: Siddartha Mohanadoss Date: Fri, 8 Apr 2016 11:34:35 -0700 Subject: uapi: Add MHI device Export MHI device header for user space clients. Change-Id: I0f68975dfcad9483182e5af5477153f39a98ac1f Signed-off-by: Siddartha Mohanadoss --- include/uapi/linux/Kbuild | 1 + include/uapi/linux/mhi.h | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 include/uapi/linux/mhi.h diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 96a79417671a..29e3be2ce18c 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -271,6 +271,7 @@ header-y += membarrier.h header-y += memfd.h header-y += mempolicy.h header-y += meye.h +header-y += mhi.h header-y += mic_common.h header-y += mic_ioctl.h header-y += mii.h diff --git a/include/uapi/linux/mhi.h b/include/uapi/linux/mhi.h new file mode 100644 index 000000000000..834c1dc77173 --- /dev/null +++ b/include/uapi/linux/mhi.h @@ -0,0 +1,37 @@ +#ifndef _UAPI_MHI_H +#define _UAPI_MHI_H + +#include +#include + +enum peripheral_ep_type { + DATA_EP_TYPE_RESERVED, + DATA_EP_TYPE_HSIC, + DATA_EP_TYPE_HSUSB, + DATA_EP_TYPE_PCIE, + DATA_EP_TYPE_EMBEDDED, + DATA_EP_TYPE_BAM_DMUX, +}; + +struct peripheral_ep_info { + enum peripheral_ep_type ep_type; + __u32 peripheral_iface_id; +}; + +struct ipa_ep_pair { + __u32 cons_pipe_num; + __u32 prod_pipe_num; +}; + +struct ep_info { + struct peripheral_ep_info ph_ep_info; + struct 
ipa_ep_pair ipa_ep_pair; + +}; + +#define MHI_UCI_IOCTL_MAGIC 'm' + +#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info) + +#endif /* _UAPI_MHI_H */ + -- cgit v1.2.3 From 217cb4db86edfff435195d5f536f76888b59249a Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Thu, 7 Apr 2016 17:51:45 -0700 Subject: ARM: dts:msm: Disable USB QMP PHY on msmcobalt This change disables USB QMP PHY on msmcobalt until SSUSB functionality is validated. With this only USB high speed functionality is supported now. CRs-Fixed: 1001222 Change-Id: I74a00cc76ab86ee96905d270b1f6e09fb3fb9db7 Signed-off-by: Mayank Rana --- arch/arm/boot/dts/qcom/msmcobalt.dtsi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index daa870107c3e..01f0e6a4fd2a 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -1278,10 +1278,11 @@ reg = <0x0a800000 0xcd00>; interrupt-parent = <&intc>; interrupts = <0 131 0>; - usb-phy = <&qusb_phy0>, <&ssphy>; + usb-phy = <&qusb_phy0>, <&usb_nop_phy>; tx-fifo-resize; snps,nominal-elastic-buffer; snps,hird_thresh = <0x10>; + maximum-speed = "high-speed"; }; qcom,usbbam@a904000 { @@ -1496,6 +1497,10 @@ qcom,reset-ep-after-lpm-resume; }; + usb_nop_phy: usb_nop_phy { + compatible = "usb-nop-xceiv"; + }; + qcom,lpass@17300000 { compatible = "qcom,pil-tz-generic"; reg = <0x17300000 0x00100>; -- cgit v1.2.3