summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-06-13 10:48:26 -0600
committerLinux Build Service Account <lnxbuild@localhost>2016-06-13 10:48:26 -0600
commit5ec373331d6321fa6175c71039eb5d57a749b0d8 (patch)
tree2975e63b0748486f13cd86e735fd8401612e2d4e
parentcc4502ce833996b835f986fe0443474b947e5ef4 (diff)
parente3f1df3ed2de39b7b963c5237eec4f7905473407 (diff)
Promotion of kernel.lnx.4.4-160610.
CRs Change ID Subject -------------------------------------------------------------------------------------------------------------- 1024406 If286c5b80874a95be0b5ecc533d0e5c7a14f39d3 usb: gadget: f_mtp: Change the icon to camera for PTP co 983795 I25a00441f91087a370764b451ecc73a49a75b0a6 ARM: dts: msm: vote for PCNOC from IPA on msmcobalt 1023239 Ibf7fc53d3ff7084b252a44c44e3ce29326659787 input: qpnp-power-on: use restart reason bits based upon 1002026 Icb6ec9d060ca4fb02e95c1e98bded89422bb1fff msm: ipa: Add napi support to rmnet ipa device 1024406 I7e3527b7e08ccfe566d85e3009d8a015d8daa707 USB: gadget: mtp: Fix bug in freeing memory on allocatio 1026703 I462a750aa6ee69f588b4bbcf5cb030f9e7292951 scsi: ufs: fix comments in ufshcd_quirk_tune_host_pa_tac 1024406 I252e830568704e9e557660c1ae0f7597823e4b17 usb: gadget: f_mtp: Add support to capture time taken wi 1024406 Icbee5fe7ae2c02b2bca185a0dc7587eb4940058a usb: gadget: f_mtp: Fix issue of NULL pointer access in 1024406 I909bebe0d22c19329cebb0a4a76424e08c82328c USB: gadget: f_mtp: Fix bug in receive_file work 1024406 Idb4e075c89bdf94790a321bc464d30eba546eeaa usb: gadget: f_mtp: Fix mtp enumeration failure issue 999619 Ic5b38b038508802b0b5779dc5d54ec6772f24b65 msm: camera: isp: Enable write master for stats 1024406 Ieacdc8dd76bc45638002eb749ff87aa95f496fa3 USB: f_mtp: Don't reset string id during function_unbind 1024406 I26cc10986e28a28eab6f3c65f28f4d2b808112d9 USB: gadget: Implement COMPAT_IOCTL for ioctls 1021696 Ic8f02dcc89e716ec88b711496d1e43754b95968d NFC: Error handling correction in probe Change-Id: I5b07f77ffc5c6fbf19101159090d3464cbbe6284 CRs-Fixed: 1021696, 1024406, 1023239, 983795, 1026703, 1002026, 999619
-rw-r--r--Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt2
-rw-r--r--Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi22
-rw-r--r--drivers/input/qpnp-power-on.c9
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp32.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c27
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.h15
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp48.c42
-rw-r--r--drivers/nfc/nq-nci.c160
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c31
-rw-r--r--drivers/platform/msm/ipa/ipa_api.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c680
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h10
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_trace.h19
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_utils.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c129
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c287
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h9
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_trace.h20
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c103
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/usb/gadget/function/f_accessory.c3
-rw-r--r--drivers/usb/gadget/function/f_mtp.c460
-rw-r--r--include/linux/ipa.h15
-rw-r--r--include/linux/usb/f_mtp.h30
30 files changed, 1552 insertions, 543 deletions
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e07a71e..102b304f5fb3 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,6 +10,8 @@ Optional:
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean context flag to indicate whether
+ to enable napi framework or not
Example:
qcom,rmnet-ipa {
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f5531278700..7ee28664668b 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,6 +10,8 @@ Optional:
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean context flag to indicate whether
+ to enable napi framework or not
Example:
qcom,rmnet-ipa3 {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 58e0d0b25c1b..5e31da726464 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -900,12 +900,24 @@
clocks = <&clock_gcc clk_ipa_clk>;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,num-paths = <3>;
qcom,msm-bus,vectors-KBps =
- <90 512 0 0>, <90 585 0 0>, /* No vote */
- <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */
- <90 512 206000 960000>, <90 585 206000 960000>, /* NOMINAL */
- <90 512 206000 3600000>, <90 585 206000 3600000>; /* TURBO */
+ /* No vote */
+ <90 512 0 0>,
+ <90 585 0 0>,
+ <1 676 0 0>,
+ /* SVS */
+ <90 512 80000 640000>,
+ <90 585 80000 640000>,
+ <1 676 80000 160000>,
+ /* NOMINAL */
+ <90 512 206000 960000>,
+ <90 585 206000 960000>,
+ <1 676 206000 200000>,
+ /* TURBO */
+ <90 512 206000 3600000>,
+ <90 585 206000 3600000>,
+ <1 676 206000 960000>;
qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
/* IPA RAM mmap */
diff --git a/drivers/input/qpnp-power-on.c b/drivers/input/qpnp-power-on.c
index 8c3c523c83de..a4057045e3e4 100644
--- a/drivers/input/qpnp-power-on.c
+++ b/drivers/input/qpnp-power-on.c
@@ -347,8 +347,13 @@ int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
if (!pon->store_hard_reset_reason)
return 0;
- rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
- PON_MASK(7, 1), (reason << 1));
+ if (is_pon_gen2(pon))
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
+ PON_MASK(7, 1), (reason << 1));
+ else
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
+ PON_MASK(7, 2), (reason << 2));
+
if (rc)
dev_err(&pon->pdev->dev,
"Unable to write to addr=%x, rc(%d)\n",
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index b5de2d83580e..e0e768d39612 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -277,6 +277,8 @@ struct msm_vfe_stats_ops {
void (*update_cgc_override)(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable);
+ void (*enable_stats_wm)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
};
enum msm_isp_hw_client {
@@ -510,6 +512,7 @@ struct msm_vfe_axi_shared_data {
struct msm_vfe_stats_hardware_info {
uint32_t stats_capability_mask;
uint8_t *stats_ping_pong_offset;
+ uint8_t *stats_wm_index;
uint8_t num_stats_type;
uint8_t num_stats_comp_mask;
};
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 062ff3bea7e6..b336bdf9b7d8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1531,6 +1531,7 @@ struct msm_vfe_hardware_info vfe32_hw_info = {
.get_wm_mask = msm_vfe32_stats_get_wm_mask,
.get_frame_id = msm_vfe32_stats_get_frame_id,
.get_pingpong_status = msm_vfe32_get_pingpong_status,
+ .enable_stats_wm = NULL,
},
},
.dmi_reg_offset = 0x5A0,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d42988132ce4..fdd08da436d4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -2275,6 +2275,7 @@ struct msm_vfe_hardware_info vfe40_hw_info = {
.get_pingpong_status = msm_vfe40_get_pingpong_status,
.update_cgc_override =
msm_vfe40_stats_update_cgc_override,
+ .enable_stats_wm = NULL,
},
.platform_ops = {
.get_platform_data = msm_vfe47_get_platform_data,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 8c00214dc241..c927dcaee449 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -1925,6 +1925,7 @@ struct msm_vfe_hardware_info vfe44_hw_info = {
.get_pingpong_status = msm_vfe44_get_pingpong_status,
.update_cgc_override =
msm_vfe44_stats_update_cgc_override,
+ .enable_stats_wm = NULL,
},
.platform_ops = {
.get_platform_data = msm_vfe47_get_platform_data,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 3ca74506f640..b8e1838d0a7e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -2020,6 +2020,7 @@ struct msm_vfe_hardware_info vfe46_hw_info = {
.get_pingpong_status = msm_vfe46_get_pingpong_status,
.update_cgc_override =
msm_vfe46_stats_update_cgc_override,
+ .enable_stats_wm = NULL,
},
.platform_ops = {
.get_platform_data = msm_vfe47_get_platform_data,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 2b64ddd9b5dc..3a1adbadbb80 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -47,20 +47,9 @@
#define VFE47_PING_PONG_BASE(wm, ping_pong) \
(VFE47_WM_BASE(wm) + 0x4 * (1 + (((~ping_pong) & 0x1) * 2)))
#define SHIFT_BF_SCALE_BIT 1
-#define VFE47_NUM_STATS_COMP 2
#define VFE47_BUS_RD_CGC_OVERRIDE_BIT 16
-/*composite mask order*/
-#define STATS_COMP_IDX_HDR_BE 0
-#define STATS_COMP_IDX_BG 1
-#define STATS_COMP_IDX_BF 2
-#define STATS_COMP_IDX_HDR_BHIST 3
-#define STATS_COMP_IDX_RS 4
-#define STATS_COMP_IDX_CS 5
-#define STATS_COMP_IDX_IHIST 6
-#define STATS_COMP_IDX_BHIST 7
-#define STATS_COMP_IDX_AEC_BG 8
#define VFE47_VBIF_CLK_OFFSET 0x4
static uint32_t stats_base_addr[] = {
@@ -98,7 +87,6 @@ static uint8_t stats_irq_map_comp_mask[] = {
23, /* BHIST (SKIN_BHIST) */
15, /* AEC_BG */
};
-#define VFE47_NUM_STATS_TYPE 9
#define VFE47_STATS_BASE(idx) (stats_base_addr[idx])
#define VFE47_STATS_PING_PONG_BASE(idx, ping_pong) \
(VFE47_STATS_BASE(idx) + 0x4 * \
@@ -2157,16 +2145,10 @@ void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
module_cfg &= ~module_cfg_mask;
msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x44);
-
-/* need to move to userspace
- uint32_t stats_cfg;
- stats_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x9B8);
- if (enable)
- stats_cfg |= stats_cfg_mask;
- else
- stats_cfg &= ~stats_cfg_mask;
- msm_camera_io_w(stats_cfg, vfe_dev->vfe_base + 0x9B8);
-*/
+ /* enable wm if needed */
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm)
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_stats_wm(vfe_dev,
+ stats_mask, enable);
}
void msm_vfe47_stats_update_ping_pong_addr(
@@ -2674,6 +2656,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
.get_pingpong_status = msm_vfe47_get_pingpong_status,
.update_cgc_override =
msm_vfe47_stats_update_cgc_override,
+ .enable_stats_wm = NULL,
},
.platform_ops = {
.get_platform_data = msm_vfe47_get_platform_data,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
index b5975b13eb5b..737f845c7272 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
@@ -13,6 +13,21 @@
#ifndef __MSM_ISP47_H__
#define __MSM_ISP47_H__
+#define VFE47_NUM_STATS_COMP 2
+#define VFE47_NUM_STATS_TYPE 9
+/*composite mask order*/
+enum msm_vfe47_stats_comp_idx {
+ STATS_COMP_IDX_HDR_BE = 0,
+ STATS_COMP_IDX_BG,
+ STATS_COMP_IDX_BF,
+ STATS_COMP_IDX_HDR_BHIST,
+ STATS_COMP_IDX_RS,
+ STATS_COMP_IDX_CS,
+ STATS_COMP_IDX_IHIST,
+ STATS_COMP_IDX_BHIST,
+ STATS_COMP_IDX_AEC_BG,
+};
+
extern struct msm_vfe_hardware_info vfe47_hw_info;
void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
index c9955e01125b..ffcd88dc44f3 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
@@ -47,8 +47,18 @@ static uint8_t stats_pingpong_offset_map[] = {
11, /* AEC_BG */
};
-#define VFE48_NUM_STATS_TYPE 9
-#define VFE48_NUM_STATS_COMP 2
+static uint8_t stats_wm_index[] = {
+ 7, /* HDR_BE */
+ 11, /* BG(AWB_BG) */
+ 9, /* BF */
+ 8, /* HDR_BHIST */
+ 13, /* RS */
+ 14, /* CS */
+ 15, /* IHIST */
+ 12, /* BHIST (SKIN_BHIST) */
+ 10, /* AEC_BG */
+};
+
#define VFE48_SRC_CLK_DTSI_IDX 3
static struct msm_vfe_stats_hardware_info msm_vfe48_stats_hw_info = {
@@ -59,8 +69,9 @@ static struct msm_vfe_stats_hardware_info msm_vfe48_stats_hw_info = {
1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
1 << MSM_ISP_STATS_AEC_BG,
.stats_ping_pong_offset = stats_pingpong_offset_map,
- .num_stats_type = VFE48_NUM_STATS_TYPE,
- .num_stats_comp_mask = VFE48_NUM_STATS_COMP,
+ .stats_wm_index = stats_wm_index,
+ .num_stats_type = VFE47_NUM_STATS_TYPE,
+ .num_stats_comp_mask = VFE47_NUM_STATS_COMP,
};
static void msm_vfe48_axi_enable_wm(void __iomem *vfe_base,
@@ -68,15 +79,31 @@ static void msm_vfe48_axi_enable_wm(void __iomem *vfe_base,
{
uint32_t val;
- val = msm_camera_io_r(vfe_base + 0xCEC);
if (enable)
- val |= (0x3 << (2 * wm_idx));
+ val = (0x2 << (2 * wm_idx));
else
- val &= ~(0x3 << (2 * wm_idx));
+ val = (0x1 << (2 * wm_idx));
msm_camera_io_w_mb(val, vfe_base + 0xCEC);
}
+static void msm_vfe48_enable_stats_wm(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ if (!(stats_mask & 0x1)) {
+ stats_mask >>= 1;
+ continue;
+ }
+ stats_mask >>= 1;
+ msm_vfe48_axi_enable_wm(vfe_dev->vfe_base,
+ vfe_dev->hw_info->stats_hw_info->stats_wm_index[i],
+ enable);
+ }
+}
+
static void msm_vfe48_deinit_bandwidth_mgr(
struct msm_isp_bandwidth_mgr *isp_bandwidth_mgr)
{
@@ -285,6 +312,7 @@ struct msm_vfe_hardware_info vfe48_hw_info = {
.get_pingpong_status = msm_vfe47_get_pingpong_status,
.update_cgc_override =
msm_vfe47_stats_update_cgc_override,
+ .enable_stats_wm = msm_vfe48_enable_stats_wm,
},
.platform_ops = {
.get_platform_data = msm_vfe47_get_platform_data,
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 9053944ef459..a2ff438627ed 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -69,7 +69,7 @@ struct nqx_dev {
/* read buffer*/
size_t kbuflen;
u8 *kbuf;
-
+ struct nqx_platform_data *pdata;
};
static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
@@ -150,6 +150,11 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
int ret;
int irq_gpio_val = 0;
+ if (!nqx_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (count > nqx_dev->kbuflen)
count = nqx_dev->kbuflen;
@@ -178,23 +183,27 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
}
tmp = nqx_dev->kbuf;
-
+ if (!tmp) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: device doesn't exist anymore\n", __func__);
+ ret = -ENODEV;
+ goto err;
+ }
memset(tmp, 0x00, count);
+
/* Read data */
ret = i2c_master_recv(nqx_dev->client, tmp, count);
-
- mutex_unlock(&nqx_dev->read_mutex);
-
if (ret < 0) {
dev_err(&nqx_dev->client->dev,
"%s: i2c_master_recv returned %d\n", __func__, ret);
- return ret;
+ goto err;
}
if (ret > count) {
dev_err(&nqx_dev->client->dev,
"%s: received too many bytes from i2c (%d)\n",
__func__, ret);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
#ifdef NFC_KERNEL_BU
dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
@@ -203,12 +212,15 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
if (copy_to_user(buf, tmp, ret)) {
dev_warn(&nqx_dev->client->dev,
"%s : failed to copy to user space\n", __func__);
- return -EFAULT;
+ ret = -EFAULT;
+ goto err;
}
+ mutex_unlock(&nqx_dev->read_mutex);
return ret;
err:
mutex_unlock(&nqx_dev->read_mutex);
+out:
return ret;
}
@@ -216,25 +228,34 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
struct nqx_dev *nqx_dev = filp->private_data;
- char *tmp;
+ char *tmp = NULL;
int ret = 0;
+ if (!nqx_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
if (count > nqx_dev->kbuflen) {
dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
__func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
tmp = memdup_user(buf, count);
-
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
+ if (IS_ERR(tmp)) {
+ dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+ __func__);
+ ret = PTR_ERR(tmp);
+ goto out;
+ }
ret = i2c_master_send(nqx_dev->client, tmp, count);
if (ret != count) {
dev_err(&nqx_dev->client->dev,
"%s: failed to write %d\n", __func__, ret);
ret = -EIO;
+ goto out_free;
}
#ifdef NFC_KERNEL_BU
dev_dbg(&nqx_dev->client->dev,
@@ -243,7 +264,9 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
tmp[0], tmp[1], tmp[2]);
#endif
usleep_range(1000, 1100);
+out_free:
kfree(tmp);
+out:
return ret;
}
@@ -559,28 +582,35 @@ static int nqx_probe(struct i2c_client *client,
if (client->dev.of_node) {
platform_data = devm_kzalloc(&client->dev,
sizeof(struct nqx_platform_data), GFP_KERNEL);
- if (!platform_data)
- return -ENOMEM;
+ if (!platform_data) {
+ r = -ENOMEM;
+ goto err_platform_data;
+ }
r = nfc_parse_dt(&client->dev, platform_data);
if (r)
- return r;
- } else {
+ goto err_free_data;
+ } else
platform_data = client->dev.platform_data;
- }
+
dev_dbg(&client->dev,
"%s, inside nfc-nci flags = %x\n",
__func__, client->flags);
+
if (platform_data == NULL) {
dev_err(&client->dev, "%s: failed\n", __func__);
- return -ENODEV;
+ r = -ENODEV;
+ goto err_platform_data;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
- return -ENODEV;
+ r = -ENODEV;
+ goto err_free_data;
}
nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
- if (nqx_dev == NULL)
- return -ENOMEM;
+ if (nqx_dev == NULL) {
+ r = -ENOMEM;
+ goto err_free_data;
+ }
nqx_dev->client = client;
nqx_dev->kbuflen = MAX_BUFFER_SIZE;
nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
@@ -595,7 +625,7 @@ static int nqx_probe(struct i2c_client *client,
r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
if (r) {
dev_err(&client->dev,
- "%s: unable to request gpio [%d]\n",
+ "%s: unable to request nfc reset gpio [%d]\n",
__func__,
platform_data->en_gpio);
goto err_mem;
@@ -603,36 +633,36 @@ static int nqx_probe(struct i2c_client *client,
r = gpio_direction_output(platform_data->en_gpio, 0);
if (r) {
dev_err(&client->dev,
- "%s: unable to set direction for gpio [%d]\n",
+ "%s: unable to set direction for nfc reset gpio [%d]\n",
__func__,
platform_data->en_gpio);
goto err_en_gpio;
}
} else {
- dev_err(&client->dev, "%s: dis gpio not provided\n", __func__);
+ dev_err(&client->dev,
+ "%s: nfc reset gpio not provided\n", __func__);
goto err_mem;
}
if (gpio_is_valid(platform_data->irq_gpio)) {
r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
if (r) {
- dev_err(&client->dev, "%s: unable to req irq gpio [%d]\n",
+ dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
__func__, platform_data->irq_gpio);
goto err_en_gpio;
}
r = gpio_direction_input(platform_data->irq_gpio);
if (r) {
-
dev_err(&client->dev,
- "%s: unable to set direction for irq gpio [%d]\n",
+ "%s: unable to set direction for nfc irq gpio [%d]\n",
__func__,
platform_data->irq_gpio);
- goto err_irq;
+ goto err_irq_gpio;
}
irqn = gpio_to_irq(platform_data->irq_gpio);
if (irqn < 0) {
r = irqn;
- goto err_irq;
+ goto err_irq_gpio;
}
client->irq = irqn;
} else {
@@ -644,47 +674,49 @@ static int nqx_probe(struct i2c_client *client,
"nfc_firm_gpio");
if (r) {
dev_err(&client->dev,
- "%s: unable to request firm gpio [%d]\n",
+ "%s: unable to request nfc firmware gpio [%d]\n",
__func__, platform_data->firm_gpio);
- goto err_irq;
+ goto err_irq_gpio;
}
r = gpio_direction_output(platform_data->firm_gpio, 0);
if (r) {
dev_err(&client->dev,
- "%s: cannot set direction for firm gpio [%d]\n",
+ "%s: cannot set direction for nfc firmware gpio [%d]\n",
__func__, platform_data->firm_gpio);
- goto err_irq;
+ goto err_firm_gpio;
}
- nqx_dev->firm_gpio = platform_data->firm_gpio;
} else {
dev_err(&client->dev,
"%s: firm gpio not provided\n", __func__);
+ goto err_irq_gpio;
}
if (gpio_is_valid(platform_data->clkreq_gpio)) {
r = gpio_request(platform_data->clkreq_gpio,
"nfc_clkreq_gpio");
if (r) {
dev_err(&client->dev,
- "%s: unable to request clk gpio [%d]\n",
+ "%s: unable to request nfc clkreq gpio [%d]\n",
__func__, platform_data->clkreq_gpio);
- goto err_clkreq_gpio;
+ goto err_firm_gpio;
}
r = gpio_direction_input(platform_data->clkreq_gpio);
if (r) {
dev_err(&client->dev,
- "%s: cannot set direction for clk gpio [%d]\n",
+ "%s: cannot set direction for nfc clkreq gpio [%d]\n",
__func__, platform_data->clkreq_gpio);
goto err_clkreq_gpio;
}
- nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
} else {
dev_err(&client->dev,
"%s: clkreq gpio not provided\n", __func__);
+ goto err_firm_gpio;
}
nqx_dev->en_gpio = platform_data->en_gpio;
nqx_dev->irq_gpio = platform_data->irq_gpio;
nqx_dev->firm_gpio = platform_data->firm_gpio;
+ nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+ nqx_dev->pdata = platform_data;
/* init mutex and queues */
init_waitqueue_head(&nqx_dev->read_wq);
@@ -719,6 +751,8 @@ static int nqx_probe(struct i2c_client *client,
r = nfcc_hw_check(client, platform_data->en_gpio);
if (r) {
+ /* make sure NFCC is not enabled */
+ gpio_set_value(platform_data->en_gpio, 0);
/* We don't think there is hardware switch NFC OFF */
goto err_request_hw_check_failed;
}
@@ -729,39 +763,46 @@ static int nqx_probe(struct i2c_client *client,
dev_err(&client->dev,
"%s: cannot register reboot notifier(err = %d)\n",
__func__, r);
- goto err_request_notifier_failed;
+ /*
+ * nfcc_hw_check function not doing memory
+ * allocation so using same goto target here
+ */
+ goto err_request_hw_check_failed;
}
- device_init_wakeup(&client->dev, true);
- device_set_wakeup_capable(&client->dev, true);
- i2c_set_clientdata(client, nqx_dev);
#ifdef NFC_KERNEL_BU
r = nqx_clock_select(nqx_dev);
if (r < 0) {
dev_err(&client->dev,
"%s: nqx_clock_select failed\n", __func__);
- goto err_request_notifier_failed;
+ goto err_clock_en_failed;
}
gpio_set_value(platform_data->en_gpio, 1);
#endif
+ device_init_wakeup(&client->dev, true);
+ device_set_wakeup_capable(&client->dev, true);
+ i2c_set_clientdata(client, nqx_dev);
+
dev_err(&client->dev,
"%s: probing NFCC NQxxx exited successfully\n",
__func__);
return 0;
-err_request_notifier_failed:
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
unregister_reboot_notifier(&nfcc_notifier);
+#endif
err_request_hw_check_failed:
- /* make sure NFCC is not enabled */
- gpio_set_value(platform_data->en_gpio, 0);
+ free_irq(client->irq, nqx_dev);
err_request_irq_failed:
misc_deregister(&nqx_dev->nqx_device);
err_misc_register:
mutex_destroy(&nqx_dev->read_mutex);
err_clkreq_gpio:
gpio_free(platform_data->clkreq_gpio);
-err_irq:
- free_irq(client->irq, nqx_dev);
+err_firm_gpio:
+ gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
gpio_free(platform_data->irq_gpio);
err_en_gpio:
gpio_free(platform_data->en_gpio);
@@ -769,6 +810,10 @@ err_mem:
kfree(nqx_dev->kbuf);
err_free_dev:
kfree(nqx_dev);
+err_free_data:
+ if (client->dev.of_node)
+ devm_kfree(&client->dev, platform_data);
+err_platform_data:
dev_err(&client->dev,
"%s: probing nqxx failed, check hardware\n",
__func__);
@@ -777,17 +822,32 @@ err_free_dev:
static int nqx_remove(struct i2c_client *client)
{
+ int ret = 0;
struct nqx_dev *nqx_dev;
nqx_dev = i2c_get_clientdata(client);
+ if (!nqx_dev) {
+ dev_err(&client->dev,
+ "%s: device doesn't exist anymore\n", __func__);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ unregister_reboot_notifier(&nfcc_notifier);
free_irq(client->irq, nqx_dev);
misc_deregister(&nqx_dev->nqx_device);
mutex_destroy(&nqx_dev->read_mutex);
+ gpio_free(nqx_dev->clkreq_gpio);
+ gpio_free(nqx_dev->firm_gpio);
gpio_free(nqx_dev->irq_gpio);
gpio_free(nqx_dev->en_gpio);
kfree(nqx_dev->kbuf);
+ if (client->dev.of_node)
+ devm_kfree(&client->dev, nqx_dev->pdata);
+
kfree(nqx_dev);
- return 0;
+err:
+ return ret;
}
static int nqx_suspend(struct device *device)
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 72249ca07886..82402d7d1545 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2654,6 +2654,37 @@ void ipa_assert(void)
BUG();
}
+/**
+ * ipa_rx_poll() - Poll the rx packets from IPA HW in the
+ * softirq context
+ *
+ * @budget: number of packets to be polled in single iteration
+ *
+ * Return codes: >= 0 : Actual number of packets polled
+ *
+ */
+int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_rx_poll);
+
+/**
+ * ipa_recycle_wan_skb() - Recycle the Wan skb
+ *
+ * @skb: skb that needs to recycle
+ *
+ */
+void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+ IPA_API_DISPATCH(ipa_recycle_wan_skb, skb);
+}
+EXPORT_SYMBOL(ipa_recycle_wan_skb);
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index e3fa4144cb84..862bdc475025 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -356,6 +356,10 @@ struct ipa_api_controller {
void *(*ipa_get_ipc_logbuf_low)(void);
+ int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
+
+ void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 9a2797537712..510b4c584764 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -77,6 +77,7 @@ static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
struct sps_iovec *iovec);
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
{
@@ -762,6 +763,29 @@ static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
}
/**
+ * ipa_poll_pkt() - Poll packet from SPS BAM
+ * return 0 to caller on poll successfully
+ * else -EIO
+ *
+ */
+static int ipa_poll_pkt(struct ipa_sys_context *sys,
+ struct sps_iovec *iov)
+{
+ int ret;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov->addr == 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* ipa_handle_rx_core() - The core functionality of packet reception. This
* function is read from multiple code paths.
*
@@ -787,14 +811,10 @@ static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
if (cnt && !process_all)
break;
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
+ ret = ipa_poll_pkt(sys, &iov);
+ if (ret)
break;
- }
- if (iov.addr == 0)
- break;
if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
ipa_dma_memcpy_notify(sys, &iov);
else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
@@ -851,7 +871,8 @@ static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
goto fail;
}
atomic_set(&sys->curr_polling_state, 0);
- ipa_handle_rx_core(sys, true, false);
+ if (!sys->ep->napi_enabled)
+ ipa_handle_rx_core(sys, true, false);
ipa_dec_release_wakelock(sys->ep->wakelock_client);
return;
@@ -965,26 +986,30 @@ static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
case SPS_EVENT_EOT:
if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
- if (!atomic_read(&sys->curr_polling_state)) {
- ret = sps_get_config(sys->ep->ep_hdl,
+
+ if (atomic_read(&sys->curr_polling_state)) {
+ sys->ep->eot_in_poll_err++;
+ break;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl,
&sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
&sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
- atomic_set(&sys->curr_polling_state, 1);
- trace_intr_to_poll(sys->ep->client);
- queue_work(sys->wq, &sys->work);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
}
+ ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
break;
default:
IPAERR("received unexpected event id %d\n", notify->event_id);
@@ -1041,6 +1066,58 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
+/**
+ * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is exectued in the softirq context
+ *
+ * if input budget is zero, the driver switches back to
+ * interrupt mode
+ *
+ * return number of polled packets, on error 0(zero)
+ */
+int ipa2_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct sps_iovec iov;
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ ret = ipa_poll_pkt(ep->sys, &iov);
+ if (ret)
+ break;
+
+ ipa_wq_rx_common(ep->sys, iov.size);
+ cnt += 5;
+ };
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
+ return cnt;
+}
+
static void switch_to_intr_rx_work_func(struct work_struct *work)
{
struct delayed_work *dwork;
@@ -1048,7 +1125,18 @@ static void switch_to_intr_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
- ipa_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
}
/**
@@ -1196,6 +1284,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
INIT_LIST_HEAD(&ep->sys->head_desc_list);
+ INIT_LIST_HEAD(&ep->sys->rcycl_list);
spin_lock_init(&ep->sys->spinlock);
} else {
memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
@@ -1211,6 +1300,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->valid = 1;
ep->client = sys_in->client;
ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
ep->priv = sys_in->priv;
ep->keep_ipa_awake = sys_in->keep_ipa_awake;
atomic_set(&ep->avail_fifo_desc,
@@ -1334,9 +1424,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (nr_cpu_ids > 1 &&
- (sys_in->client == IPA_CLIENT_APPS_LAN_CONS ||
- sys_in->client == IPA_CLIENT_APPS_WAN_CONS)) {
+ if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
sizeof(void *), GFP_KERNEL);
@@ -1425,7 +1513,12 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_disable_data_path(clnt_hdl);
- ep->valid = 0;
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
if (IPA_CLIENT_IS_PROD(ep->client)) {
do {
@@ -1471,6 +1564,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
ipa_cleanup_wlan_rx_common_cache();
+ ep->valid = 0;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1721,7 +1815,13 @@ static void ipa_wq_handle_rx(struct work_struct *work)
struct ipa_sys_context *sys;
sys = container_of(work, struct ipa_sys_context, work);
- ipa_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
}
static void ipa_wq_repl_rx(struct work_struct *work)
@@ -2024,6 +2124,63 @@ fail_kmem_cache_alloc:
msecs_to_jiffies(1));
}
+static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+
+	rx_len_cached = sys->len;	/* descriptors already queued to HW */
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		spin_lock_bh(&sys->spinlock);
+		if (list_empty(&sys->rcycl_list))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = list_first_entry(&sys->rcycl_list,
+			struct ipa_rx_pkt_wrapper, link);
+		list_del(&rx_pkt->link);
+		spin_unlock_bh(&sys->spinlock);
+		INIT_LIST_HEAD(&rx_pkt->link);
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
+			ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+			rx_pkt->data.dma_addr == ~0)
+			goto fail_dma_mapping;
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+
+		ret = sps_transfer_one(sys->ep->ep_hdl,
+			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+	}
+
+	return;
+fail_sps_transfer:
+	rx_len_cached = --sys->len;
+	list_del(&rx_pkt->link);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+	/* fall through while still holding the lock: the shared label
+	 * below releases it exactly once for both error paths */
+fail_kmem_cache_alloc:
+	spin_unlock_bh(&sys->spinlock);
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+			msecs_to_jiffies(1));
+}
+
static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
{
struct ipa_rx_pkt_wrapper *rx_pkt;
@@ -2035,8 +2192,10 @@ static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
curr = atomic_read(&sys->repl.head_idx);
while (rx_len_cached < sys->rx_pool_sz) {
- if (curr == atomic_read(&sys->repl.tail_idx))
+ if (curr == atomic_read(&sys->repl.tail_idx)) {
+ queue_work(sys->repl_wq, &sys->repl_work);
break;
+ }
rx_pkt = sys->repl.cache[curr];
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
@@ -2107,6 +2266,15 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->rcycl_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
if (sys->repl.cache) {
head = atomic_read(&sys->repl.head_idx);
tail = atomic_read(&sys->repl.tail_idx);
@@ -2471,6 +2639,10 @@ static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
IPA_RECEIVE, (unsigned long)(skb));
return rc;
}
+ if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
+ IPAERR("Recycle should enable only with GRO Aggr\n");
+ ipa_assert();
+ }
/*
* payload splits across 2 buff or more,
* take the start of the payload from prev_skb
@@ -2721,6 +2893,37 @@ void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}
+void ipa2_recycle_wan_skb(struct sk_buff *skb) /* return consumed WAN skb to recycle pool */
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ep_idx = ipa2_get_ep_mapping(
+		IPA_CLIENT_APPS_WAN_CONS);
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+		(ipa_ctx->use_dma_zone ? GFP_DMA : 0); /* atomic ctx alloc */
+
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist\n");
+		ipa_assert(); /* NOTE(review): ep_idx used below - confirm ipa_assert() does not return */
+	}
+
+	rx_pkt = kmem_cache_zalloc(
+		ipa_ctx->rx_pkt_wrapper_cache, flag);
+	if (!rx_pkt)
+		ipa_assert(); /* NOTE(review): rx_pkt deref'd below - confirm ipa_assert() panics */
+
+	INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+	rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
+
+	rx_pkt->data.skb = skb;
+	rx_pkt->data.dma_addr = 0; /* not DMA-mapped until replenish */
+	ipa_skb_recycle(rx_pkt->data.skb);
+	skb_reserve(rx_pkt->data.skb, IPA_HEADROOM); /* presumably restores rx headroom - verify vs ipa_get_skb_ipa_rx_headroom */
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_lock_bh(&rx_pkt->sys->spinlock); /* rcycl_list shared with replenish path */
+	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
{
struct ipa_rx_pkt_wrapper *rx_pkt_expected;
@@ -2858,6 +3061,220 @@ static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
return 0;
}
+static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
+		struct ipa_sys_context *sys)
+{
+	unsigned long int aggr_byte_limit;
+
+	sys->ep->status.status_en = true;
+	sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
+	if (IPA_CLIENT_IS_PROD(in->client)) {
+		if (!sys->ep->skip_ep_cfg) {
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE;
+			sys->sps_callback = NULL;
+			sys->ep->status.status_ep = ipa2_get_ep_mapping(
+				IPA_CLIENT_APPS_LAN_CONS);
+			if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
+				sys->ep->status.status_en = false;
+		} else {
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE |
+				SPS_O_EOT);
+			sys->sps_callback =
+				ipa_sps_irq_tx_no_aggr_notify;
+		}
+		return 0;
+	}
+
+	aggr_byte_limit =
+		(unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
+			ipa_adjust_ra_buff_base_sz(
+				in->ipa_ep_cfg.aggr.aggr_byte_limit));
+
+	if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+		in->client == IPA_CLIENT_APPS_WAN_CONS) {
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+			| SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+			replenish_rx_work_func);
+		INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+			IPA_GENERIC_RX_BUFF_BASE_SZ) -
+			IPA_HEADROOM;
+		sys->get_skb = ipa_get_skb_ipa_rx_headroom;
+		sys->free_skb = ipa_free_skb_rx;
+		in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+		in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+		in->ipa_ep_cfg.aggr.aggr_time_limit =
+			IPA_GENERIC_AGGR_TIME_LIMIT;
+		if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+			sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
+			sys->rx_pool_sz =
+				IPA_GENERIC_RX_POOL_SZ;
+			if (nr_cpu_ids > 1) {
+				sys->repl_hdlr =
+					ipa_fast_replenish_rx_cache;
+				sys->repl_trig_thresh =
+					sys->rx_pool_sz / 8;
+			} else {
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache;
+			}
+			in->ipa_ep_cfg.aggr.aggr_byte_limit =
+				IPA_GENERIC_AGGR_BYTE_LIMIT;
+			in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+				IPA_GENERIC_AGGR_PKT_LIMIT;
+			sys->ep->wakelock_client =
+				IPA_WAKELOCK_REF_CLIENT_LAN_RX;
+		} else if (in->client ==
+			IPA_CLIENT_APPS_WAN_CONS) {
+			sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
+			if (in->napi_enabled) {
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache_recycle;
+				sys->rx_pool_sz =
+					IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+			} else {
+				sys->rx_pool_sz =
+					ipa_ctx->wan_rx_ring_size;
+				if (nr_cpu_ids > 1) {
+					sys->repl_hdlr =
+						ipa_fast_replenish_rx_cache;
+					sys->repl_trig_thresh =
+						sys->rx_pool_sz / 8;
+				} else {
+					sys->repl_hdlr =
+						ipa_replenish_rx_cache;
+				}
+			}
+			sys->ep->wakelock_client =
+				IPA_WAKELOCK_REF_CLIENT_WAN_RX;
+			in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+				= true;
+			if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+				IPAERR("get close-by %u\n",
+					ipa_adjust_ra_buff_base_sz(
+						in->ipa_ep_cfg.aggr.
+						aggr_byte_limit));
+				IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
+				/* disable ipa_status */
+				sys->ep->status.
+					status_en = false;
+				sys->rx_buff_sz =
+					IPA_GENERIC_RX_BUFF_SZ(
+					ipa_adjust_ra_buff_base_sz(
+					in->ipa_ep_cfg.aggr.
+					aggr_byte_limit));
+				in->ipa_ep_cfg.aggr.
+					aggr_byte_limit =
+					sys->rx_buff_sz < in->
+					ipa_ep_cfg.aggr.aggr_byte_limit ?
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+					sys->rx_buff_sz) :
+					IPA_ADJUST_AGGR_BYTE_LIMIT(
+					in->ipa_ep_cfg.
+					aggr.aggr_byte_limit);
+				IPAERR("set aggr_limit %lu\n",
+					(unsigned long int)
+					in->ipa_ep_cfg.aggr.
+					aggr_byte_limit);
+			} else {
+				in->ipa_ep_cfg.aggr.
+					aggr_byte_limit =
+					IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.
+					aggr_pkt_limit =
+					IPA_GENERIC_AGGR_PKT_LIMIT;
+			}
+		}
+	} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d",
+			in->client);
+
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+			| SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+			replenish_rx_work_func);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+		sys->rx_pool_sz = in->desc_fifo_sz /
+			sizeof(struct sps_iovec) - 1;
+		if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+			sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+		sys->pyld_hdlr = NULL;
+		sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
+		sys->get_skb = ipa_get_skb_ipa_rx;
+		sys->free_skb = ipa_free_skb_rx;
+		in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		sys->ep->wakelock_client =
+			IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
+	} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d",
+			in->client);
+
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+			| SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+			replenish_rx_work_func);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+		sys->rx_pool_sz = in->desc_fifo_sz /
+			sizeof(struct sps_iovec) - 1;
+		if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+			sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+		sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
+		sys->get_skb = ipa_get_skb_ipa_rx;
+		sys->free_skb = ipa_free_skb_rx;
+		sys->repl_hdlr = ipa_replenish_rx_cache;
+		sys->ep->wakelock_client =
+			IPA_WAKELOCK_REF_CLIENT_ODU_RX;
+	} else if (in->client ==
+		IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+		IPADBG("assigning policy to client:%d",
+			in->client);
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+			| SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			switch_to_intr_rx_work_func);
+	} else if (in->client ==
+		IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPADBG("assigning policy to client:%d",
+			in->client);
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_NOINTR_MODE;
+		sys->sps_option = SPS_O_AUTO_ENABLE |
+			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	} else {
+		IPAERR("Need to install a RX pipe hdlr\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
static int ipa_assign_policy(struct ipa_sys_connect_params *in,
struct ipa_sys_context *sys)
{
@@ -2904,203 +3321,14 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
WARN_ON(1);
return -EINVAL;
}
- } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
- sys->ep->status.status_en = true;
- sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
- if (IPA_CLIENT_IS_PROD(in->client)) {
- if (!sys->ep->skip_ep_cfg) {
- sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE;
- sys->sps_callback = NULL;
- sys->ep->status.status_ep = ipa2_get_ep_mapping(
- IPA_CLIENT_APPS_LAN_CONS);
- if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
- sys->ep->status.status_en = false;
- } else {
- sys->policy = IPA_POLICY_INTR_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE |
- SPS_O_EOT);
- sys->sps_callback =
- ipa_sps_irq_tx_no_aggr_notify;
- }
- } else {
- if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_CONS) {
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
- IPA_GENERIC_RX_BUFF_BASE_SZ) -
- IPA_HEADROOM;
- sys->get_skb = ipa_get_skb_ipa_rx_headroom;
- sys->free_skb = ipa_free_skb_rx;
- in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
- in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
- in->ipa_ep_cfg.aggr.aggr_time_limit =
- IPA_GENERIC_AGGR_TIME_LIMIT;
- if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
- sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
- sys->rx_pool_sz =
- IPA_GENERIC_RX_POOL_SZ;
- in->ipa_ep_cfg.aggr.aggr_byte_limit =
- IPA_GENERIC_AGGR_BYTE_LIMIT;
- in->ipa_ep_cfg.aggr.aggr_pkt_limit =
- IPA_GENERIC_AGGR_PKT_LIMIT;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_LAN_RX;
- } else if (in->client ==
- IPA_CLIENT_APPS_WAN_CONS) {
- sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
- sys->rx_pool_sz =
- ipa_ctx->wan_rx_ring_size;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_WAN_RX;
- in->ipa_ep_cfg.aggr.aggr_sw_eof_active
- = true;
- if (ipa_ctx->
- ipa_client_apps_wan_cons_agg_gro) {
- IPAERR("get close-by %u\n",
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
- IPAERR("set rx_buff_sz %lu\n",
- (unsigned long int)
- IPA_GENERIC_RX_BUFF_SZ(
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.
- aggr.aggr_byte_limit)));
- /* disable ipa_status */
- sys->ep->status.
- status_en = false;
- sys->rx_buff_sz =
- IPA_GENERIC_RX_BUFF_SZ(
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit =
- sys->rx_buff_sz < in->
- ipa_ep_cfg.aggr.
- aggr_byte_limit ?
- IPA_ADJUST_AGGR_BYTE_LIMIT(
- sys->rx_buff_sz) :
- IPA_ADJUST_AGGR_BYTE_LIMIT(
- in->ipa_ep_cfg.
- aggr.aggr_byte_limit);
- IPAERR("set aggr_limit %lu\n",
- (unsigned long int)
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit);
- } else {
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit =
- IPA_GENERIC_AGGR_BYTE_LIMIT;
- in->ipa_ep_cfg.aggr.
- aggr_pkt_limit =
- IPA_GENERIC_AGGR_PKT_LIMIT;
- }
- }
- sys->repl_trig_thresh = sys->rx_pool_sz / 8;
- if (nr_cpu_ids > 1)
- sys->repl_hdlr =
- ipa_fast_replenish_rx_cache;
- else
- sys->repl_hdlr = ipa_replenish_rx_cache;
- } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
- IPADBG("assigning policy to client:%d",
- in->client);
-
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
- sys->rx_pool_sz = in->desc_fifo_sz/
- sizeof(struct sps_iovec) - 1;
- if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
- sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
- sys->pyld_hdlr = NULL;
- sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
- sys->get_skb = ipa_get_skb_ipa_rx;
- sys->free_skb = ipa_free_skb_rx;
- in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
- } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
- IPADBG("assigning policy to client:%d",
- in->client);
-
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
- sys->rx_pool_sz = in->desc_fifo_sz /
- sizeof(struct sps_iovec) - 1;
- if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
- sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
- sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
- sys->get_skb = ipa_get_skb_ipa_rx;
- sys->free_skb = ipa_free_skb_rx;
- sys->repl_hdlr = ipa_replenish_rx_cache;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_ODU_RX;
- } else if (in->client ==
- IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
- IPADBG("assigning policy to client:%d",
- in->client);
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- } else if (in->client ==
- IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
- IPADBG("assigning policy to client:%d",
- in->client);
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- } else {
- IPAERR("Need to install a RX pipe hdlr\n");
- WARN_ON(1);
- return -EINVAL;
- }
- }
- } else {
- IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
- WARN_ON(1);
- return -EINVAL;
- }
+ return 0;
+ } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+ return ipa_assign_policy_v2(in, sys);
- return 0;
+ IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
+ WARN_ON(1);
+ return -EINVAL;
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index cb41f8429771..f0b25132df33 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -39,6 +39,8 @@
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x2000
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -515,6 +517,7 @@ enum ipa_wakelock_ref_client {
* @disconnect_in_progress: Indicates client disconnect in progress.
* @qmi_request_sent: Indicates whether QMI request to enable clear data path
* request is sent or not.
+ * @napi_enabled: when true, IPA call client callback to start polling
*/
struct ipa_ep_context {
int valid;
@@ -546,6 +549,10 @@ struct ipa_ep_context {
bool disconnect_in_progress;
u32 qmi_request_sent;
enum ipa_wakelock_ref_client wakelock_client;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@@ -603,6 +610,7 @@ struct ipa_sys_context {
/* ordering is important - mutable fields go above */
struct ipa_ep_context *ep;
struct list_head head_desc_list;
+ struct list_head rcycl_list;
spinlock_t spinlock;
struct workqueue_struct *wq;
struct workqueue_struct *repl_wq;
@@ -1929,4 +1937,6 @@ void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+int ipa2_rx_poll(u32 clnt_hdl, int budget);
+void ipa2_recycle_wan_skb(struct sk_buff *skb);
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
index d70abdfa0469..a03a49a33f97 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,23 @@ TRACE_EVENT(
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
+TRACE_EVENT( /* fires when the NAPI rx path hands an skb to netif_receive_skb() */
+	rmnet_ipa_netif_rcv_skb,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt) /* netdev rx_packets at rcv time */
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 9fc67548f6ac..ae709c54cec1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -5128,7 +5128,8 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
ipa2_set_required_perf_profile;
api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
-
+ api_ctrl->ipa_rx_poll = ipa2_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index eeecc508e8db..9336250352f0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -59,6 +59,8 @@
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
+
static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
@@ -76,6 +78,8 @@ static struct mutex ipa_to_apps_pipe_handle_guard;
static int wwan_add_ul_flt_rule_to_ipa(void);
static int wwan_del_ul_flt_rule_to_ipa(void);
static void ipa_wwan_msg_free_cb(void*, u32, u32);
+static void ipa_rmnet_rx_cb(void *priv);
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
static void wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
@@ -93,8 +97,10 @@ struct ipa_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
};
+static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
/**
* struct wwan_private - WWAN private data
* @net: network interface struct implemented by this driver
@@ -119,6 +125,7 @@ struct wwan_private {
spinlock_t lock;
struct completion resource_granted_completion;
enum wwan_device_status device_status;
+ struct napi_struct napi;
};
/**
@@ -936,6 +943,9 @@ static int __ipa_wwan_open(struct net_device *dev)
if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
reinit_completion(&wwan_ptr->resource_granted_completion);
wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
return 0;
}
@@ -970,6 +980,8 @@ static int __ipa_wwan_close(struct net_device *dev)
/* do not close wwan port once up, this causes
remote side to hang if tried to open again */
reinit_completion(&wwan_ptr->resource_granted_completion);
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_disable(&(wwan_ptr->napi));
rc = ipa2_deregister_intf(dev->name);
if (rc) {
IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
@@ -1168,38 +1180,49 @@ static void apps_ipa_packet_receive_notify(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
{
- struct sk_buff *skb = (struct sk_buff *)data;
struct net_device *dev = (struct net_device *)priv;
- int result;
- unsigned int packet_len = skb->len;
- IPAWANDBG("Rx packet was received");
- if (evt != IPA_RECEIVE) {
- IPAWANERR("A none IPA_RECEIVE event in wan_ipa_receive\n");
- return;
- }
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
- skb->dev = ipa_netdevs[0];
- skb->protocol = htons(ETH_P_MAP);
+ IPAWANDBG("Rx packet was received");
+ skb->dev = ipa_netdevs[0];
+ skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
- trace_rmnet_ipa_netifni(dev->stats.rx_packets);
- result = netif_rx_ni(skb);
- } else {
- trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
- result = netif_rx(skb);
- }
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
- if (result) {
- pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
- __func__, __LINE__);
- dev->stats.rx_dropped++;
- }
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += packet_len;
-}
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_complete(&(wwan_ptr->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
-static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
+}
/**
* ipa_wwan_ioctl() - I/O control for wwan network driver.
@@ -1555,9 +1578,17 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
ipa_to_apps_ep_cfg.notify =
apps_ipa_packet_receive_notify;
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
ipa_to_apps_ep_cfg.priv = dev;
+ ipa_to_apps_ep_cfg.napi_enabled =
+ ipa_rmnet_res.ipa_napi_enable;
+ if (ipa_to_apps_ep_cfg.napi_enabled)
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_SYS_DESC_FIFO_SZ;
+
mutex_lock(&ipa_to_apps_pipe_handle_guard);
if (atomic_read(&is_ssr)) {
IPAWANDBG("In SSR sequence/recovery\n");
@@ -1899,6 +1930,12 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-advertise-sg-support");
pr_info("IPA SG support = %s\n",
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_napi_enable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
+ ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
return 0;
}
@@ -2044,6 +2081,12 @@ static int ipa_wwan_probe(struct platform_device *pdev)
if (ipa_rmnet_res.ipa_advertise_sg_support)
dev->hw_features |= NETIF_F_SG;
+ /* Enable NAPI support in netdevice. */
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ netif_napi_add(dev, &(wwan_ptr->napi),
+ ipa_rmnet_poll, NAPI_WEIGHT);
+ }
+
ret = register_netdev(dev);
if (ret) {
IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
@@ -2068,6 +2111,8 @@ static int ipa_wwan_probe(struct platform_device *pdev)
pr_info("rmnet_ipa completed initialization\n");
return 0;
config_err:
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
unregister_netdev(ipa_netdevs[0]);
set_perf_err:
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2107,6 +2152,9 @@ setup_a7_qmap_hdr_err:
static int ipa_wwan_remove(struct platform_device *pdev)
{
int ret;
+ struct wwan_private *wwan_ptr;
+
+ wwan_ptr = netdev_priv(ipa_netdevs[0]);
pr_info("rmnet_ipa started deinitialization\n");
mutex_lock(&ipa_to_apps_pipe_handle_guard);
@@ -2115,6 +2163,8 @@ static int ipa_wwan_remove(struct platform_device *pdev)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");
else
ipa_to_apps_hdl = -1;
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
mutex_unlock(&ipa_to_apps_pipe_handle_guard);
unregister_netdev(ipa_netdevs[0]);
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2802,6 +2852,31 @@ static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
kfree(buff);
}
+/* IPA_CLIENT_START_POLL callback: kick NAPI polling on the WWAN netdev */
+static void ipa_rmnet_rx_cb(void *priv)
+{
+	struct net_device *net = priv;
+	struct wwan_private *priv_ptr;
+
+	IPAWANDBG("\n");
+
+	if (net != ipa_netdevs[0]) {
+		IPAWANERR("Not matching with netdev\n");
+		return;
+	}
+	priv_ptr = netdev_priv(net);
+	napi_schedule(&priv_ptr->napi);
+}
+
+/* NAPI poll callback: never poll more than the budget the core granted */
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, min(budget, NAPI_WEIGHT));
+	IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
+	return rcvd_pkts;
+}
+
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f5d202bfebce..f480cba044eb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1010,25 +1010,28 @@ static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
if (!atomic_read(&sys->curr_polling_state)) {
- ret = sps_get_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa3_inc_acquire_wakelock();
- atomic_set(&sys->curr_polling_state, 1);
- trace_intr_to_poll3(sys->ep->client);
- queue_work(sys->wq, &sys->work);
+ sys->ep->eot_in_poll_err++;
+ break;
}
+
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll3(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
break;
default:
IPAERR("received unexpected event id %d\n", notify->event_id);
@@ -1089,7 +1092,18 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
- ipa3_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa3_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
}
/**
@@ -1217,6 +1231,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->valid = 1;
ep->client = sys_in->client;
ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
ep->priv = sys_in->priv;
ep->keep_ipa_awake = sys_in->keep_ipa_awake;
atomic_set(&ep->avail_fifo_desc,
@@ -1423,6 +1438,12 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
if (IPA_CLIENT_IS_PROD(ep->client)) {
do {
@@ -1772,7 +1793,13 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
struct ipa3_sys_context *sys;
sys = container_of(work, struct ipa3_sys_context, work);
- ipa3_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
}
static void ipa3_wq_repl_rx(struct work_struct *work)
@@ -2717,6 +2744,11 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
IPA_RECEIVE, (unsigned long)(skb));
return rc;
}
+ if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+ IPAERR("Recycle should enable only with GRO Aggr\n");
+ ipa_assert();
+ }
+
/*
* payload splits across 2 buff or more,
* take the start of the payload from prev_skb
@@ -2909,6 +2941,30 @@ static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
spin_unlock_bh(&rx_pkt->sys->spinlock);
}
+void ipa3_recycle_wan_skb(struct sk_buff *skb)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ep_idx = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_WAN_CONS);
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist\n");
+ ipa_assert();
+ }
+
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt)
+ ipa_assert();
+
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
+
+ rx_pkt->data.skb = skb;
+ ipa3_recycle_rx_wrapper(rx_pkt);
+}
+
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
{
struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
@@ -3123,14 +3179,22 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
sys->free_rx_wrapper = ipa3_free_rx_wrapper;
- if (nr_cpu_ids > 1)
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- else
+ if (in->napi_enabled) {
sys->repl_hdlr =
- ipa3_replenish_rx_cache;
- sys->rx_pool_sz =
- ipa3_ctx->wan_rx_ring_size;
+ ipa3_replenish_rx_cache_recycle;
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ } else {
+ if (nr_cpu_ids > 1) {
+ sys->repl_hdlr =
+ ipa3_fast_replenish_rx_cache;
+ } else {
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
+ }
+ sys->rx_pool_sz =
+ ipa3_ctx->wan_rx_ring_size;
+ }
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
= true;
if (ipa3_ctx->
@@ -3941,68 +4005,88 @@ static int ipa_populate_tag_field(struct ipa3_desc *desc,
return 0;
}
-static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state)
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
{
int ret;
- int cnt = 0;
- struct ipa3_sys_context *sys_ptr;
- struct ipa3_rx_pkt_wrapper *rx_pkt;
struct gsi_chan_xfer_notify xfer_notify;
- struct ipa_mem_buffer mem_info = {0};
- enum ipa_client_type client;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
if (sys->ep->bytes_xfered_valid) {
- mem_info.phys_base = sys->ep->phys_base;
- mem_info.size = (u32)sys->ep->bytes_xfered;
- sys_ptr = sys;
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
- ipa3_dma_memcpy_notify(sys_ptr, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
- ipa3_wlan_wq_rx_common(sys_ptr, mem_info.size);
- else
- ipa3_wq_rx_common(sys_ptr, mem_info.size);
-
- cnt++;
+ mem_info->phys_base = sys->ep->phys_base;
+ mem_info->size = (u32)sys->ep->bytes_xfered;
sys->ep->bytes_xfered_valid = false;
+ return GSI_STATUS_SUCCESS;
+ }
+
+ ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
+ &xfer_notify);
+ if (ret == GSI_STATUS_POLL_EMPTY)
+ return ret;
+ else if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Poll channel err: %d\n", ret);
+ return ret;
}
+ rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+ xfer_notify.xfer_user_data;
+ mem_info->phys_base = rx_pkt->data.dma_addr;
+ mem_info->size = xfer_notify.bytes_xfered;
+
+ return ret;
+}
+
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state)
+{
+ int ret;
+ int cnt = 0;
+ struct ipa_mem_buffer mem_info = {0};
+
while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
!atomic_read(&sys->curr_polling_state))) {
if (cnt && !process_all)
break;
- ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
- &xfer_notify);
- if (ret == GSI_STATUS_POLL_EMPTY)
+ ret = ipa_poll_gsi_pkt(sys, &mem_info);
+ if (ret)
break;
- else if (ret == GSI_STATUS_SUCCESS) {
- sys_ptr = (struct ipa3_sys_context *)
- xfer_notify.chan_user_data;
- rx_pkt = (struct ipa3_rx_pkt_wrapper *)
- xfer_notify.xfer_user_data;
- mem_info.phys_base = rx_pkt->data.dma_addr;
- mem_info.size = xfer_notify.bytes_xfered;
-
- client = sys->ep->client;
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(client))
- ipa3_dma_memcpy_notify(sys_ptr, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(client))
- ipa3_wlan_wq_rx_common(sys_ptr, mem_info.size);
- else
- ipa3_wq_rx_common(sys_ptr, mem_info.size);
- cnt++;
- } else
- IPAERR("Poll channel err: %d\n", ret);
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa3_dma_memcpy_notify(sys, &mem_info);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa3_wlan_wq_rx_common(sys, mem_info.size);
+ else
+ ipa3_wq_rx_common(sys, mem_info.size);
+
+ cnt++;
}
return cnt;
}
+static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
+{
+ int ret;
+ struct sps_iovec iov;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov.addr == 0)
+ return -EIO;
+
+ mem_info->phys_base = iov.addr;
+ mem_info->size = iov.size;
+ return 0;
+}
+
static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
bool process_all, bool in_poll_state)
{
- struct sps_iovec iov;
int ret;
int cnt = 0;
struct ipa_mem_buffer mem_info = {0};
@@ -4012,17 +4096,10 @@ static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
if (cnt && !process_all)
break;
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
- break;
- }
-
- if (iov.addr == 0)
+ ret = ipa_poll_sps_pkt(sys, &mem_info);
+ if (ret)
break;
- mem_info.phys_base = iov.addr;
- mem_info.size = iov.size;
if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
ipa3_dma_memcpy_notify(sys, &mem_info);
else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
@@ -4032,6 +4109,64 @@ static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
cnt++;
}
+
+ return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in the softirq context
+ *
+ * if the input budget is zero, the driver switches back to
+ * interrupt mode
+ *
+ * Return: number of polled packets, or 0 (zero) on error
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa3_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct ipa_mem_buffer mem_info = {0};
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
+ else
+ ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
+
+ if (ret)
+ break;
+
+ ipa3_wq_rx_common(ep->sys, mem_info.size);
+ cnt += 5;
+ };
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
return cnt;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index d50a25aa84f4..36cb5cbb8d34 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -41,6 +41,8 @@
#define MTU_BYTE 1500
#define IPA3_MAX_NUM_PIPES 31
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x800
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -550,6 +552,7 @@ struct ipa3_status_stats {
* @disconnect_in_progress: Indicates client disconnect in progress.
* @qmi_request_sent: Indicates whether QMI request to enable clear data path
* request is sent or not.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
*/
struct ipa3_ep_context {
int valid;
@@ -586,6 +589,10 @@ struct ipa3_ep_context {
u32 wdi_state;
bool disconnect_in_progress;
u32 qmi_request_sent;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
@@ -2262,4 +2269,6 @@ int ipa3_load_fws(const struct firmware *firmware);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+void ipa3_recycle_wan_skb(struct sk_buff *skb);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
index 5ea6c6daf240..b67899ba894e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,24 @@ TRACE_EVENT(
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
+TRACE_EVENT(
+ rmnet_ipa_netif_rcv_skb3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2481d6e7bfb7..c2a70bca80b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4657,6 +4657,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
ipa3_set_required_perf_profile;
api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+ api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 5dcb25876fd4..a4eab02cb571 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -57,6 +57,7 @@
#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
#define IPA_NETDEV() \
((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
@@ -66,6 +67,8 @@
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
static void ipa3_wwan_msg_free_cb(void*, u32, u32);
+static void ipa3_rmnet_rx_cb(void *priv);
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
static void ipa3_wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
@@ -83,6 +86,7 @@ struct ipa3_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
};
/**
@@ -109,6 +113,7 @@ struct ipa3_wwan_private {
spinlock_t lock;
struct completion resource_granted_completion;
enum ipa3_wwan_device_status device_status;
+ struct napi_struct napi;
};
struct rmnet_ipa3_context {
@@ -134,6 +139,7 @@ struct rmnet_ipa3_context {
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
/**
* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
@@ -957,6 +963,9 @@ static int __ipa_wwan_open(struct net_device *dev)
if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
reinit_completion(&wwan_ptr->resource_granted_completion);
wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
return 0;
}
@@ -1189,39 +1198,47 @@ static void apps_ipa_packet_receive_notify(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
{
- struct sk_buff *skb = (struct sk_buff *)data;
struct net_device *dev = (struct net_device *)priv;
- int result;
- unsigned int packet_len = skb->len;
- IPAWANDBG_LOW("Rx packet was received");
- if (evt != IPA_RECEIVE) {
- IPAWANERR("A none IPA_RECEIVE event in wan_ipa_receive\n");
- return;
- }
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
- skb->dev = IPA_NETDEV();
- skb->protocol = htons(ETH_P_MAP);
+ IPAWANDBG_LOW("Rx packet was received");
+ skb->dev = IPA_NETDEV();
+ skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
- trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
- result = netif_rx_ni(skb);
- } else {
- trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
- result = netif_rx(skb);
- }
+ if (ipa3_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
- if (result) {
- pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
- __func__, __LINE__);
- dev->stats.rx_dropped++;
- }
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += packet_len;
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa3_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
}
-static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res = {0, };
-
/**
* ipa3_wwan_ioctl() - I/O control for wwan network driver.
*
@@ -1595,10 +1612,17 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
IPA_CLIENT_APPS_WAN_CONS;
rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
apps_ipa_packet_receive_notify;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.desc_fifo_sz =
- IPA_SYS_DESC_FIFO_SZ;
rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
+ ipa3_rmnet_res.ipa_napi_enable;
+ if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
mutex_lock(
&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
@@ -2126,6 +2150,9 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
if (ipa3_rmnet_res.ipa_advertise_sg_support)
dev->hw_features |= NETIF_F_SG;
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
+ ipa3_rmnet_poll, NAPI_WEIGHT);
ret = register_netdev(dev);
if (ret) {
IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
@@ -2149,6 +2176,8 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
pr_info("rmnet_ipa completed initialization\n");
return 0;
config_err:
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
unregister_netdev(dev);
set_perf_err:
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2196,6 +2225,8 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");
else
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
unregister_netdev(IPA_NETDEV());
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2903,6 +2934,22 @@ static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
kfree(buff);
}
+static void ipa3_rmnet_rx_cb(void *priv)
+{
+ IPAWANDBG_LOW("\n");
+ napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
+}
+
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
+{
+ int rcvd_pkts = 0;
+
+ rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
+ NAPI_WEIGHT);
+ IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
+ return rcvd_pkts;
+}
+
late_initcall(ipa3_wwan_init);
module_exit(ipa3_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 75db00874979..40dcaa8f0eee 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6616,7 +6616,7 @@ out:
/**
* ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
- * more than device PA_TACTIVATE time.
+ * less than device PA_TACTIVATE time.
* @hba: per-adapter instance
*
* Some UFS devices require host PA_TACTIVATE to be lower than device
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 45cfc01573d9..908cfb33bf99 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -818,6 +818,9 @@ static const struct file_operations acc_fops = {
.read = acc_read,
.write = acc_write,
.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = acc_ioctl,
+#endif
.open = acc_open,
.release = acc_release,
};
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index ea3721fe73e9..6b3ed12ede73 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -26,6 +26,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
@@ -73,6 +75,8 @@
#define MTP_RESPONSE_DEVICE_BUSY 0x2019
#define DRIVER_NAME "mtp"
+#define MAX_ITERATION 100
+
unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
@@ -122,6 +126,15 @@ struct mtp_dev {
uint16_t xfer_command;
uint32_t xfer_transaction_id;
int xfer_result;
+ struct {
+ unsigned long vfs_rbytes;
+ unsigned long vfs_wbytes;
+ unsigned vfs_rtime;
+ unsigned vfs_wtime;
+ } perf[MAX_ITERATION];
+ unsigned dbg_read_index;
+ unsigned dbg_write_index;
+ bool is_ptp;
};
static struct usb_interface_descriptor mtp_interface_desc = {
@@ -328,10 +341,12 @@ struct mtp_ext_config_desc_function {
};
/* MTP Extended Configuration Descriptor */
-struct {
+struct ext_mtp_desc {
struct mtp_ext_config_desc_header header;
struct mtp_ext_config_desc_function function;
-} mtp_ext_config_desc = {
+};
+
+struct ext_mtp_desc mtp_ext_config_desc = {
.header = {
.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
.bcdVersion = __constant_cpu_to_le16(0x0100),
@@ -345,6 +360,20 @@ struct {
},
};
+struct ext_mtp_desc ptp_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = cpu_to_le16(1),
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'P', 'T', 'P' },
+ },
+};
+
struct mtp_device_status {
__le16 wLength;
__le16 wCode;
@@ -447,7 +476,7 @@ static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->tx_idle, req);
@@ -460,7 +489,7 @@ static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
struct mtp_dev *dev = _mtp_dev;
dev->rx_done = 1;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
wake_up(&dev->read_wq);
@@ -470,7 +499,7 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->intr_idle, req);
@@ -552,7 +581,7 @@ retry_rx_alloc:
if (!req) {
if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
goto fail;
- for (; i > 0; i--)
+ for (--i; i >= 0; i--)
mtp_request_free(dev->rx_req[i], dev->ep_out);
mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
goto retry_rx_alloc;
@@ -584,11 +613,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
ssize_t r = count, xfer, len;
int ret = 0;
- DBG(cdev, "mtp_read(%zu)\n", count);
-
- len = ALIGN(count, dev->ep_out->maxpacket);
- if (len > mtp_rx_req_len)
- return -EINVAL;
+ DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
/* we will block until we're online */
DBG(cdev, "mtp_read: waiting for online state\n");
@@ -598,6 +623,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
r = ret;
goto done;
}
+
+ len = ALIGN(count, dev->ep_out->maxpacket);
+ if (len > mtp_rx_req_len)
+ return -EINVAL;
+
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
/* report cancelation to userspace */
@@ -659,7 +689,7 @@ done:
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_read returning %zd\n", r);
+ DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -674,7 +704,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
int sendZLP = 0;
int ret;
- DBG(cdev, "mtp_write(%zu)\n", count);
+ DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
@@ -713,6 +743,8 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
((req = mtp_req_get(dev, &dev->tx_idle))
|| dev->state != STATE_BUSY));
if (!req) {
+ DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
@@ -751,7 +783,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_write returning %zd\n", r);
+ DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -769,6 +801,7 @@ static void send_file_work(struct work_struct *data)
int xfer, ret, hdr_size;
int r = 0;
int sendZLP = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -806,6 +839,9 @@ static void send_file_work(struct work_struct *data)
break;
}
if (!req) {
+ DBG(cdev,
+ "send_file_work request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
@@ -832,14 +868,19 @@ static void send_file_work(struct work_struct *data)
header->transaction_id =
__cpu_to_le32(dev->xfer_transaction_id);
}
-
+ start_time = ktime_get();
ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
&offset);
if (ret < 0) {
r = ret;
break;
}
+
xfer = ret + hdr_size;
+ dev->perf[dev->dbg_read_index].vfs_rtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+ dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
hdr_size = 0;
req->length = xfer;
@@ -861,7 +902,7 @@ static void send_file_work(struct work_struct *data)
if (req)
mtp_req_put(dev, &dev->tx_idle, req);
- DBG(cdev, "send_file_work returning %d\n", r);
+ DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
/* write the result */
dev->xfer_result = r;
smp_wmb();
@@ -879,6 +920,7 @@ static void receive_file_work(struct work_struct *data)
int64_t count;
int ret, cur_buf = 0;
int r = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -912,6 +954,7 @@ static void receive_file_work(struct work_struct *data)
if (write_req) {
DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ start_time = ktime_get();
ret = vfs_write(filp, write_req->buf, write_req->actual,
&offset);
DBG(cdev, "vfs_write %d\n", ret);
@@ -921,6 +964,11 @@ static void receive_file_work(struct work_struct *data)
dev->state = STATE_ERROR;
break;
}
+ dev->perf[dev->dbg_write_index].vfs_wtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+ dev->dbg_write_index =
+ (dev->dbg_write_index + 1) % MAX_ITERATION;
write_req = NULL;
}
@@ -930,7 +978,10 @@ static void receive_file_work(struct work_struct *data)
dev->rx_done || dev->state != STATE_BUSY);
if (dev->state == STATE_CANCELED
|| dev->state == STATE_OFFLINE) {
- r = -ECANCELED;
+ if (dev->state == STATE_OFFLINE)
+ r = -EIO;
+ else
+ r = -ECANCELED;
if (!dev->rx_done)
usb_ep_dequeue(dev->ep_out, read_req);
break;
@@ -996,85 +1047,107 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
return ret;
}
-static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
+ struct mtp_file_range *mfr)
{
struct mtp_dev *dev = fp->private_data;
struct file *filp = NULL;
+ struct work_struct *work;
int ret = -EINVAL;
- if (mtp_lock(&dev->ioctl_excl))
+ if (mtp_lock(&dev->ioctl_excl)) {
+ DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
return -EBUSY;
+ }
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancellation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr->fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr->offset;
+ dev->xfer_file_length = mfr->length;
+ /* make sure write is done before parameters are read */
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr->command;
+ dev->xfer_transaction_id = mfr->transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct mtp_event event;
+ int ret = -EINVAL;
switch (code) {
case MTP_SEND_FILE:
case MTP_RECEIVE_FILE:
case MTP_SEND_FILE_WITH_HEADER:
- {
- struct mtp_file_range mfr;
- struct work_struct *work;
-
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED) {
- /* report cancelation to userspace */
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
- ret = -ECANCELED;
- goto out;
- }
- if (dev->state == STATE_OFFLINE) {
- spin_unlock_irq(&dev->lock);
- ret = -ENODEV;
- goto out;
- }
- dev->state = STATE_BUSY;
- spin_unlock_irq(&dev->lock);
-
if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
ret = -EFAULT;
goto fail;
}
- /* hold a reference to the file while we are working with it */
- filp = fget(mfr.fd);
- if (!filp) {
- ret = -EBADF;
- goto fail;
- }
-
- /* write the parameters */
- dev->xfer_file = filp;
- dev->xfer_file_offset = mfr.offset;
- dev->xfer_file_length = mfr.length;
- smp_wmb();
-
- if (code == MTP_SEND_FILE_WITH_HEADER) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 1;
- dev->xfer_command = mfr.command;
- dev->xfer_transaction_id = mfr.transaction_id;
- } else if (code == MTP_SEND_FILE) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 0;
- } else {
- work = &dev->receive_file_work;
- }
-
- /* We do the file transfer on a work queue so it will run
- * in kernel context, which is necessary for vfs_read and
- * vfs_write to use our buffers in the kernel address space.
- */
- queue_work(dev->wq, work);
- /* wait for operation to complete */
- flush_workqueue(dev->wq);
- fput(filp);
-
- /* read the result */
- smp_rmb();
- ret = dev->xfer_result;
- break;
- }
+ ret = mtp_send_receive_ioctl(fp, code, &mfr);
+ break;
case MTP_SEND_EVENT:
- {
- struct mtp_event event;
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
/* return here so we don't change dev->state below,
* which would interfere with bulk transfer state.
*/
@@ -1082,28 +1155,93 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
ret = -EFAULT;
else
ret = mtp_send_event(dev, &event);
- goto out;
+ mtp_unlock(&dev->ioctl_excl);
+ break;
+ default:
+ DBG(dev->cdev, "unknown ioctl code: %d\n", code);
}
+fail:
+ return ret;
+}
+
+/*
+ * 32-bit userspace calling into a 64-bit kernel: handle the ioctl code
+ * and the userspace pointer.
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned code,
+ unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct __compat_mtp_file_range cmfr;
+ struct mtp_event event;
+ struct __compat_mtp_event cevent;
+ unsigned cmd;
+ bool send_file = false;
+ int ret = -EINVAL;
+
+ switch (code) {
+ case COMPAT_MTP_SEND_FILE:
+ cmd = MTP_SEND_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_RECEIVE_FILE:
+ cmd = MTP_RECEIVE_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+ cmd = MTP_SEND_FILE_WITH_HEADER;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_EVENT:
+ cmd = MTP_SEND_EVENT;
+ break;
+ default:
+ DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+ ret = -ENOIOCTLCMD;
+ goto fail;
}
+ if (send_file) {
+ if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ mfr.fd = cmfr.fd;
+ mfr.offset = cmfr.offset;
+ mfr.length = cmfr.length;
+ mfr.command = cmfr.command;
+ mfr.transaction_id = cmfr.transaction_id;
+ ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+ } else {
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&cevent, (void __user *)value,
+ sizeof(cevent))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ event.length = cevent.length;
+ event.data = compat_ptr(cevent.data);
+ ret = mtp_send_event(dev, &event);
+ mtp_unlock(&dev->ioctl_excl);
+ }
fail:
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED)
- ret = -ECANCELED;
- else if (dev->state != STATE_OFFLINE)
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
-out:
- mtp_unlock(&dev->ioctl_excl);
- DBG(dev->cdev, "ioctl returning %d\n", ret);
return ret;
}
+#endif
static int mtp_open(struct inode *ip, struct file *fp)
{
printk(KERN_INFO "mtp_open\n");
- if (mtp_lock(&_mtp_dev->open_excl))
+ if (mtp_lock(&_mtp_dev->open_excl)) {
+ pr_err("%s mtp_release not called returning EBUSY\n", __func__);
return -EBUSY;
+ }
/* clear any error condition */
if (_mtp_dev->state != STATE_OFFLINE)
@@ -1127,6 +1265,9 @@ static const struct file_operations mtp_fops = {
.read = mtp_read,
.write = mtp_write,
.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_mtp_ioctl,
+#endif
.open = mtp_open,
.release = mtp_release,
};
@@ -1169,9 +1310,21 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
if (ctrl->bRequest == 1
&& (ctrl->bRequestType & USB_DIR_IN)
&& (w_index == 4 || w_index == 5)) {
- value = (w_length < sizeof(mtp_ext_config_desc) ?
- w_length : sizeof(mtp_ext_config_desc));
- memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ if (!dev->is_ptp) {
+ value = (w_length <
+ sizeof(mtp_ext_config_desc) ?
+ w_length :
+ sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc,
+ value);
+ } else {
+ value = (w_length <
+ sizeof(ptp_ext_config_desc) ?
+ w_length :
+ sizeof(ptp_ext_config_desc));
+ memcpy(cdev->req->buf, &ptp_ext_config_desc,
+ value);
+ }
}
} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
@@ -1287,7 +1440,6 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_request *req;
int i;
- mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
while ((req = mtp_req_get(dev, &dev->tx_idle)))
mtp_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
@@ -1295,6 +1447,7 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
dev->state = STATE_OFFLINE;
+ dev->is_ptp = false;
}
static int mtp_function_set_alt(struct usb_function *f,
@@ -1358,6 +1511,120 @@ static void mtp_function_disable(struct usb_function *f)
VDBG(cdev, "%s disabled\n", dev->function.name);
}
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int i;
+ unsigned long flags;
+ unsigned min, max = 0, sum = 0, iteration = 0;
+
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Write Stats:\n");
+ seq_puts(s, "\n=======================\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ min = dev->perf[0].vfs_wtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_wbytes,
+ dev->perf[i].vfs_wtime);
+ if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+ sum += dev->perf[i].vfs_wtime;
+ if (min > dev->perf[i].vfs_wtime)
+ min = dev->perf[i].vfs_wtime;
+ if (max < dev->perf[i].vfs_wtime)
+ max = dev->perf[i].vfs_wtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, sum / iteration);
+ min = max = sum = iteration = 0;
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Read Stats:\n");
+ seq_puts(s, "\n=======================\n");
+
+ min = dev->perf[0].vfs_rtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_rbytes,
+ dev->perf[i].vfs_rtime);
+ if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+ sum += dev->perf[i].vfs_rtime;
+ if (min > dev->perf[i].vfs_rtime)
+ min = dev->perf[i].vfs_rtime;
+ if (max < dev->perf[i].vfs_rtime)
+ max = dev->perf[i].vfs_rtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, sum / iteration);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int clear_stats;
+ unsigned long flags;
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (buf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
+ pr_err("Wrong value. To clear stats, enter value as 0.\n");
+ goto done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+ dev->dbg_read_index = 0;
+ dev->dbg_write_index = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+done:
+ return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+ .open = debug_mtp_open,
+ .read = seq_read,
+ .write = debug_mtp_reset_stats,
+};
+
+struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+ struct dentry *dent_mtp_status;
+
+ dent_mtp = debugfs_create_dir("usb_mtp", 0);
+ if (!dent_mtp || IS_ERR(dent_mtp))
+ return;
+
+ dent_mtp_status = debugfs_create_file("status", S_IRUGO | S_IWUSR,
+ dent_mtp, 0, &debug_mtp_ops);
+ if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+ debugfs_remove(dent_mtp);
+ dent_mtp = NULL;
+ return;
+ }
+}
+
+static void mtp_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dent_mtp);
+}
+
static int __mtp_setup(struct mtp_instance *fi_mtp)
{
struct mtp_dev *dev;
@@ -1394,6 +1661,7 @@ static int __mtp_setup(struct mtp_instance *fi_mtp)
if (ret)
goto err2;
+ mtp_debugfs_init();
return 0;
err2:
@@ -1418,6 +1686,7 @@ static void mtp_cleanup(void)
if (!dev)
return;
+ mtp_debugfs_remove();
misc_deregister(&mtp_device);
destroy_workqueue(dev->wq);
_mtp_dev = NULL;
@@ -1564,6 +1833,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
dev->function.disable = mtp_function_disable;
dev->function.setup = mtp_ctrlreq_configfs;
dev->function.free_func = mtp_free;
+ dev->is_ptp = mtp_config;
return &dev->function;
}
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index a0dd21d215d2..d152057af385 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -93,6 +93,8 @@ enum ipa_aggr_mode {
enum ipa_dp_evt_type {
IPA_RECEIVE,
IPA_WRITE_DONE,
+ IPA_CLIENT_START_POLL,
+ IPA_CLIENT_COMP_NAPI,
};
/**
@@ -538,6 +540,7 @@ struct ipa_ext_intf {
* @skip_ep_cfg: boolean field that determines if EP should be configured
* by IPA driver
* @keep_ipa_awake: when true, IPA will not be clock gated
+ * @napi_enabled: when true, IPA call client callback to start polling
*/
struct ipa_sys_connect_params {
struct ipa_ep_cfg ipa_ep_cfg;
@@ -547,6 +550,7 @@ struct ipa_sys_connect_params {
ipa_notify_cb notify;
bool skip_ep_cfg;
bool keep_ipa_awake;
+ bool napi_enabled;
};
/**
@@ -1233,6 +1237,8 @@ int ipa_tx_dp_mul(enum ipa_client_type dst,
struct ipa_tx_data_desc *data_desc);
void ipa_free_skb(struct ipa_rx_data *);
+int ipa_rx_poll(u32 clnt_hdl, int budget);
+void ipa_recycle_wan_skb(struct sk_buff *skb);
/*
* System pipes
@@ -1763,6 +1769,15 @@ static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
return;
}
/*
 * NOTE(review): these look like the no-op stubs for builds without the
 * IPA driver — the surrounding #ifdef is outside this hunk, confirm.
 * Polling is not available; callers get -EPERM.
 */
static inline int ipa_rx_poll(u32 clnt_hdl, int budget)
{
	return -EPERM;
}

/* Stub counterpart of ipa_recycle_wan_skb(): nothing to recycle. */
static inline void ipa_recycle_wan_skb(struct sk_buff *skb)
{
}
+
/*
* System pipes
*/
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
index 4e8417791bea..8def1431f03e 100644
--- a/include/linux/usb/f_mtp.h
+++ b/include/linux/usb/f_mtp.h
@@ -19,5 +19,35 @@
#define __LINUX_USB_F_MTP_H
#include <uapi/linux/usb/f_mtp.h>
+#include <linux/ioctl.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct mtp_file_range. compat_mtp_ioctl()
 * copies this in and widens the compat_* members into the native struct.
 */
struct __compat_mtp_file_range {
	compat_int_t fd;		/* file descriptor to send/receive */
	compat_loff_t offset;		/* 32-bit loff_t */
	int64_t length;			/* already 64-bit in both ABIs */
	uint16_t command;		/* MTP command for the data header */
	uint32_t transaction_id;	/* MTP transaction for the header */
};

/* 32-bit userspace layout of struct mtp_event; data is a compat pointer. */
struct __compat_mtp_event {
	compat_size_t length;
	compat_caddr_t data;
};

/*
 * Compat ioctl numbers: same 'M' type and nr as the native ioctls, but
 * sized for the compat structures so _IOW encodes the 32-bit length.
 */
#define COMPAT_MTP_SEND_FILE              _IOW('M', 0, \
						struct __compat_mtp_file_range)
#define COMPAT_MTP_RECEIVE_FILE           _IOW('M', 1, \
						struct __compat_mtp_file_range)
#define COMPAT_MTP_SEND_EVENT             _IOW('M', 3, \
						struct __compat_mtp_event)
#define COMPAT_MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, \
						struct __compat_mtp_file_range)
+#endif
+#endif
#endif /* __LINUX_USB_F_MTP_H */