summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/usb/gadget/function/f_mbim.c2150
-rw-r--r--drivers/usb/gadget/function/f_qc_ecm.c1165
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c1421
-rw-r--r--drivers/usb/gadget/function/f_rmnet.c1469
-rw-r--r--drivers/usb/gadget/function/u_bam.c2523
-rw-r--r--drivers/usb/gadget/function/u_bam_data.c2113
-rw-r--r--drivers/usb/gadget/function/u_bam_data.h71
-rw-r--r--drivers/usb/gadget/function/u_ctrl_qti.c837
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.c877
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.h35
-rw-r--r--drivers/usb/gadget/function/u_qc_ether.c454
-rw-r--r--drivers/usb/gadget/function/u_qc_ether.h101
-rw-r--r--drivers/usb/gadget/function/u_rmnet.h79
13 files changed, 13295 insertions, 0 deletions
diff --git a/drivers/usb/gadget/function/f_mbim.c b/drivers/usb/gadget/function/f_mbim.c
new file mode 100644
index 000000000000..017a2a9cb6ea
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mbim.c
@@ -0,0 +1,2150 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <linux/usb/cdc.h>
+
+#include <linux/usb/composite.h>
+#include <linux/platform_device.h>
+
+#include <linux/spinlock.h>
+
+/*
+ * This function is a "Mobile Broadband Interface Model" (MBIM) link.
+ * MBIM is intended to be used with high-speed network attachments.
+ *
+ * Note that MBIM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+#define MBIM_BULK_BUFFER_SIZE 4096
+#define MAX_CTRL_PKT_SIZE 4096
+
+/* Peripheral endpoint classes reported through the MBIM_EP_LOOKUP ioctl. */
+enum mbim_peripheral_ep_type {
+ MBIM_DATA_EP_TYPE_RESERVED = 0x0,
+ MBIM_DATA_EP_TYPE_HSIC = 0x1,
+ MBIM_DATA_EP_TYPE_HSUSB = 0x2,
+ MBIM_DATA_EP_TYPE_PCIE = 0x3,
+ MBIM_DATA_EP_TYPE_EMBEDDED = 0x4,
+ MBIM_DATA_EP_TYPE_BAM_DMUX = 0x5,
+};
+
+struct mbim_peripheral_ep_info {
+ /* NOTE(review): field uses enum peripheral_ep_type, presumably declared
+ * in a platform header; the local mbim_peripheral_ep_type above mirrors
+ * its values — confirm the two stay in sync.
+ */
+ enum peripheral_ep_type ep_type;
+ u32 peripheral_iface_id;
+};
+
+/* IPA consumer/producer pipe pair backing the data interface. */
+struct mbim_ipa_ep_pair {
+ u32 cons_pipe_num;
+ u32 prod_pipe_num;
+};
+
+/* Reply payload for the MBIM_EP_LOOKUP ioctl below. */
+struct mbim_ipa_ep_info {
+ struct mbim_peripheral_ep_info ph_ep_info;
+ struct mbim_ipa_ep_pair ipa_ep_pair;
+};
+
+#define MBIM_IOCTL_MAGIC 'o'
+#define MBIM_GET_NTB_SIZE _IOR(MBIM_IOCTL_MAGIC, 2, u32)
+#define MBIM_GET_DATAGRAM_COUNT _IOR(MBIM_IOCTL_MAGIC, 3, u16)
+
+#define MBIM_EP_LOOKUP _IOR(MBIM_IOCTL_MAGIC, 4, struct mbim_ipa_ep_info)
+
+
+#define NR_MBIM_PORTS 1
+#define MBIM_DEFAULT_PORT 0
+
+/* ID for Microsoft OS String */
+#define MBIM_OS_STRING_ID 0xEE
+
+/* One queued encapsulated control message (host command or response). */
+struct ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+/* Saved endpoint descriptors for one link speed. */
+struct mbim_ep_descs {
+ struct usb_endpoint_descriptor *in;
+ struct usb_endpoint_descriptor *out;
+ struct usb_endpoint_descriptor *notify;
+};
+
+/* State of the interrupt-IN notification endpoint. */
+struct mbim_notify_port {
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ /* notifications queued on the endpoint but not yet completed */
+ atomic_t notify_count;
+};
+
+enum mbim_notify_state {
+ MBIM_NOTIFY_NONE,
+ MBIM_NOTIFY_CONNECT,
+ MBIM_NOTIFY_SPEED,
+ MBIM_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+/* Per-instance state for one MBIM gadget function. */
+struct f_mbim {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ /* set once set_alt() has configured an interface */
+ atomic_t online;
+
+ /* single-user guards for the character-device file operations */
+ atomic_t open_excl;
+ atomic_t ioctl_excl;
+ atomic_t read_excl;
+ atomic_t write_excl;
+
+ /* readers of cpkt_req_q sleep here until a command arrives */
+ wait_queue_head_t read_wq;
+
+ enum transport_type xport;
+ u8 port_num;
+ struct data_port bam_port;
+ struct mbim_notify_port not_port;
+
+ struct mbim_ep_descs fs;
+ struct mbim_ep_descs hs;
+
+ u8 ctrl_id, data_id;
+ bool data_interface_up;
+
+ /* protects cpkt_req_q, cpkt_resp_q and notify state */
+ spinlock_t lock;
+
+ /* host->device commands awaiting userspace read */
+ struct list_head cpkt_req_q;
+ /* device->host responses awaiting GET_ENCAPSULATED_RESPONSE */
+ struct list_head cpkt_resp_q;
+
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+
+ atomic_t error;
+ unsigned int cpkt_drop_cnt;
+ bool remote_wakeup_enabled;
+};
+
+/* Wire layout of the 8-byte SET_NTB_INPUT_SIZE control payload. */
+struct mbim_ntb_input_size {
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+ u16 reserved;
+};
+
+/* temporary variable used between mbim_open() and mbim_gadget_bind() */
+static struct f_mbim *_mbim_dev;
+
+/* number of ports actually initialized; bounded by NR_MBIM_PORTS */
+static unsigned int nr_mbim_ports;
+
+static struct mbim_ports {
+ struct f_mbim *port;
+ unsigned port_num;
+} mbim_ports[NR_MBIM_PORTS];
+
+/* Map the embedded usb_function back to its enclosing f_mbim. */
+static inline struct f_mbim *func_to_mbim(struct usb_function *f)
+{
+ return container_of(f, struct f_mbim, function);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* default/maximum NTB sizes for the BAM-DMUX data path */
+#define MBIM_NTB_DEFAULT_IN_SIZE (0x4000)
+#define MBIM_NTB_OUT_SIZE (0x1000)
+#define MBIM_NDP_IN_DIVISOR (0x4)
+
+/* larger NTB sizes used when the IPA transport carries the data path */
+#define NTB_DEFAULT_IN_SIZE_IPA (0x4000)
+#define MBIM_NTB_OUT_SIZE_IPA (0x4000)
+
+#define MBIM_FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED
+
+/* Reply for GET_NTB_PARAMETERS; dwNtbInMaxSize also caps what the host
+ * may request via SET_NTB_INPUT_SIZE (see mbim_ep0out_complete()).
+ */
+static struct usb_cdc_ncm_ntb_parameters mbim_ntb_parameters = {
+ .wLength = sizeof mbim_ntb_parameters,
+ .bmNtbFormatsSupported = cpu_to_le16(MBIM_FORMATS_SUPPORTED),
+ .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+ .wNdpInDivisor = cpu_to_le16(MBIM_NDP_IN_DIVISOR),
+ .wNdpInPayloadRemainder = cpu_to_le16(0),
+ .wNdpInAlignment = cpu_to_le16(4),
+
+ .dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE),
+ .wNdpOutDivisor = cpu_to_le16(4),
+ .wNdpOutPayloadRemainder = cpu_to_le16(0),
+ .wNdpOutAlignment = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = 0,
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* IAD grouping the control + data interfaces as one MBIM function. */
+static struct usb_interface_assoc_descriptor mbim_iad_desc = {
+ .bLength = sizeof mbim_iad_desc,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = 2,
+ .bFunctionSubClass = 0x0e,
+ .bFunctionProtocol = 0,
+ /* .iFunction = DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_control_intf = {
+ .bLength = sizeof mbim_control_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0x02,
+ .bInterfaceSubClass = 0x0e,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_header_desc = {
+ .bLength = sizeof mbim_header_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_union_desc = {
+ .bLength = sizeof(mbim_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* MBIM functional descriptor (limits advertised to the host). */
+static struct usb_cdc_mbim_desc mbim_desc = {
+ .bLength = sizeof mbim_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_TYPE,
+
+ .bcdMBIMVersion = cpu_to_le16(0x0100),
+
+ .wMaxControlMessage = cpu_to_le16(0x1000),
+ .bNumberFilters = 0x20,
+ .bMaxFilterSize = 0x80,
+ .wMaxSegmentSize = cpu_to_le16(0x800),
+ .bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc ext_mbb_desc = {
+ .bLength = sizeof ext_mbb_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_EXTENDED_TYPE,
+
+ .bcdMBIMExtendedVersion = cpu_to_le16(0x0100),
+ .bMaxOutstandingCommandMessages = 64,
+ .wMTU = cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_data_nop_intf = {
+ .bLength = sizeof mbim_data_nop_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_data_intf = {
+ .bLength = sizeof mbim_data_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /* NOTE(review): 4*cpu_to_le16(x) is only correct on little-endian
+ * hosts (multiplying the byte-swapped value); harmless on ARM/x86
+ * but would break on big-endian — confirm intended targets.
+ */
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+/* full-speed descriptor set, in configuration order */
+static struct usb_descriptor_header *mbim_fs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &fs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &fs_mbim_in_desc,
+ (struct usb_descriptor_header *) &fs_mbim_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ /* HS bInterval is log2-based: 2^(n-1) microframes */
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor hs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+/* high-speed descriptor set; mirrors mbim_fs_function */
+static struct usb_descriptor_header *mbim_hs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &hs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &hs_mbim_in_desc,
+ (struct usb_descriptor_header *) &hs_mbim_out_desc,
+ NULL,
+};
+
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ss_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_notify_comp_desc = {
+ .bLength = sizeof(ss_mbim_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_in_comp_desc = {
+ .bLength = sizeof(ss_mbim_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ss_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_out_comp_desc = {
+ .bLength = sizeof(ss_mbim_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+/* super-speed descriptor set; adds companion descriptors per endpoint */
+static struct usb_descriptor_header *mbim_ss_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &ss_mbim_in_desc,
+ (struct usb_descriptor_header *) &ss_mbim_in_comp_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+#define STRING_CTRL_IDX 0
+#define STRING_DATA_IDX 1
+
+/* .id fields are assigned at bind time by the composite framework */
+static struct usb_string mbim_string_defs[] = {
+ [STRING_CTRL_IDX].s = "MBIM Control",
+ [STRING_DATA_IDX].s = "MBIM Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = mbim_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_strings[] = {
+ &mbim_string_table,
+ NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE 0xA5
+
+/* Microsoft OS String */
+static u8 mbim_os_string[] = {
+ 18, /* sizeof(mbim_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ MBIM_VENDOR_CODE,
+ /* padding */
+ 0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_ext_config_desc_header {
+ __le32 dwLength;
+ /* NOTE(review): bcdVersion is plain __u16 while the other multi-byte
+ * fields are __le*; value is byte-order-symmetric only by accident —
+ * confirm on big-endian targets.
+ */
+ __u16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* Microsoft Extended Configuration Descriptor */
+static struct {
+ struct mbim_ext_config_desc_header header;
+ struct mbim_ext_config_desc_function function;
+} mbim_ext_config_desc = {
+ .header = {
+ .dwLength = __constant_cpu_to_le32(sizeof mbim_ext_config_desc),
+ .bcdVersion = __constant_cpu_to_le16(0x0100),
+ .wIndex = __constant_cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
+ /* .subCompatibleID = DYNAMIC */
+ },
+};
+
+/* Claim exclusive ownership of @excl: 0 on success, -EBUSY if already held. */
+static inline int mbim_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		/* somebody else got here first; undo our increment */
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/* Drop ownership previously taken with mbim_lock(). */
+static inline void mbim_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*
+ * Allocate a control packet carrying a @len byte payload buffer.
+ * Returns ERR_PTR(-ENOMEM) on failure — never NULL — so callers must
+ * test the result with IS_ERR().
+ */
+static struct ctrl_pkt *mbim_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct ctrl_pkt *cpkt;
+
+	cpkt = kzalloc(sizeof(*cpkt), flags);
+	if (!cpkt)
+		return ERR_PTR(-ENOMEM);
+
+	cpkt->buf = kmalloc(len, flags);
+	if (!cpkt->buf) {
+		kfree(cpkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cpkt->len = len;
+	return cpkt;
+}
+
+/* Release a control packet and its payload; a NULL @pkt is a no-op. */
+static void mbim_free_ctrl_pkt(struct ctrl_pkt *pkt)
+{
+	if (!pkt)
+		return;
+
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/*
+ * Allocate a usb_request on @ep backed by a kmalloc'd buffer of
+ * @buffer_size + @extra_buf bytes; req->length is set to @buffer_size.
+ * Returns NULL on failure.
+ */
+static struct usb_request *mbim_alloc_req(struct usb_ep *ep, int buffer_size,
+	size_t extra_buf)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size + extra_buf, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	req->length = buffer_size;
+	return req;
+}
+
+/* Free a request allocated by mbim_alloc_req(); a NULL @req is a no-op. */
+void fmbim_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	if (!req)
+		return;
+
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* ---------------------------- BAM INTERFACE ----------------------------- */
+
+/* Register @no_ports MBIM ports with the BAM data layer; 0 on success. */
+static int mbim_bam_setup(int no_ports)
+{
+	int rc;
+
+	pr_info("no_ports:%d\n", no_ports);
+
+	rc = bam_data_setup(USB_FUNC_MBIM, no_ports);
+	if (rc != 0) {
+		pr_err("bam_data_setup failed err: %d\n", rc);
+		return rc;
+	}
+
+	pr_info("Initialized %d ports\n", no_ports);
+	return 0;
+}
+
+/* -------------------------------------------------------------------------*/
+
+/* Restore per-connection defaults and mark the function offline. */
+static inline void mbim_reset_values(struct f_mbim *mbim)
+{
+ mbim->ntb_input_size = MBIM_NTB_DEFAULT_IN_SIZE;
+
+ atomic_set(&mbim->online, 0);
+}
+
+/*
+ * Queue a zero-length packet on the request queue so the userspace
+ * reader (QBI) wakes up and observes the reset/disconnect.
+ */
+static void mbim_reset_function_queue(struct f_mbim *dev)
+{
+	struct ctrl_pkt *cpkt;
+
+	pr_debug("Queue empty packet for QBI\n");
+
+	/*
+	 * Fix: mbim_alloc_ctrl_pkt() returns ERR_PTR(-ENOMEM) on failure,
+	 * never NULL, so the old "if (!cpkt)" check could never fire and an
+	 * ERR_PTR would have been linked into the list.  Allocate before
+	 * taking the lock — there is no need to hold it across kzalloc().
+	 */
+	cpkt = mbim_alloc_ctrl_pkt(0, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+	spin_unlock(&dev->lock);
+
+	pr_debug("%s: Wake up read queue\n", __func__);
+	wake_up(&dev->read_wq);
+}
+
+/* ep0 completion for USB_CDC_RESET_FUNCTION: wake the userspace reader. */
+static void fmbim_reset_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *dev = req->context;
+
+ mbim_reset_function_queue(dev);
+}
+
+/*
+ * Drop and free every pending control packet on both the request and
+ * response queues.  Uses the type-safe list_for_each_entry_safe()
+ * iterator instead of the manual list_for_each + list_entry pairing.
+ */
+static void mbim_clear_queues(struct f_mbim *mbim)
+{
+	struct ctrl_pkt *cpkt, *tmp;
+
+	spin_lock(&mbim->lock);
+	list_for_each_entry_safe(cpkt, tmp, &mbim->cpkt_req_q, list) {
+		list_del(&cpkt->list);
+		mbim_free_ctrl_pkt(cpkt);
+	}
+	list_for_each_entry_safe(cpkt, tmp, &mbim->cpkt_resp_q, list) {
+		list_del(&cpkt->list);
+		mbim_free_ctrl_pkt(cpkt);
+	}
+	spin_unlock(&mbim->lock);
+}
+
+/*
+ * Context: mbim->lock held
+ */
+static void mbim_do_notify(struct f_mbim *mbim)
+{
+ struct usb_request *req = mbim->not_port.notify_req;
+ struct usb_cdc_notification *event;
+ int status;
+
+ pr_debug("notify_state: %d\n", mbim->not_port.notify_state);
+
+ if (!req)
+ return;
+
+ event = req->buf;
+
+ switch (mbim->not_port.notify_state) {
+
+ case MBIM_NOTIFY_NONE:
+ if (atomic_read(&mbim->not_port.notify_count) > 0)
+ pr_err("Pending notifications in MBIM_NOTIFY_NONE\n");
+ else
+ pr_debug("No pending notifications\n");
+
+ return;
+
+ case MBIM_NOTIFY_RESPONSE_AVAILABLE:
+ pr_debug("Notification %02x sent\n", event->bNotificationType);
+
+ /* nothing outstanding: notify_count is incremented by the
+ * fall-through path below before each queue attempt
+ */
+ if (atomic_read(&mbim->not_port.notify_count) <= 0) {
+ pr_debug("notify_response_avaliable: done\n");
+ return;
+ }
+
+ /* drop the lock around usb_func_ep_queue(): the completion
+ * callback may run inline and retake mbim->lock
+ */
+ spin_unlock(&mbim->lock);
+ status = usb_func_ep_queue(&mbim->function,
+ mbim->not_port.notify,
+ req, GFP_ATOMIC);
+ spin_lock(&mbim->lock);
+ if (status) {
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_err("Queue notify request failed, err: %d\n",
+ status);
+ }
+
+ return;
+ }
+
+ /* unreachable for the states handled above; retained for any other
+ * notify_state: (re)build the RESPONSE_AVAILABLE header and queue it
+ */
+ event->bmRequestType = 0xA1;
+ event->wIndex = cpu_to_le16(mbim->ctrl_id);
+
+ /*
+ * In double buffering if there is a space in FIFO,
+ * completion callback can be called right after the call,
+ * so unlocking
+ */
+ atomic_inc(&mbim->not_port.notify_count);
+ pr_debug("queue request: notify_count = %d\n",
+ atomic_read(&mbim->not_port.notify_count));
+ spin_unlock(&mbim->lock);
+ status = usb_func_ep_queue(&mbim->function, mbim->not_port.notify, req,
+ GFP_ATOMIC);
+ spin_lock(&mbim->lock);
+ if (status) {
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_err("usb_func_ep_queue failed, err: %d\n", status);
+ }
+}
+
+/*
+ * Completion handler for the interrupt-IN notify endpoint.  Accounts for
+ * the finished notification and re-arms via mbim_do_notify(); on cable
+ * loss it flushes both control queues and wakes the reader.
+ */
+static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *mbim = req->context;
+ struct usb_cdc_notification *event = req->buf;
+
+ pr_debug("dev:%p\n", mbim);
+
+ spin_lock(&mbim->lock);
+ switch (req->status) {
+ case 0:
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_debug("notify_count = %d\n",
+ atomic_read(&mbim->not_port.notify_count));
+ break;
+
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ mbim->not_port.notify_state = MBIM_NOTIFY_NONE;
+ atomic_set(&mbim->not_port.notify_count, 0);
+ pr_info("ESHUTDOWN/ECONNRESET, connection gone\n");
+ /* queue manipulation takes mbim->lock itself, so drop it */
+ spin_unlock(&mbim->lock);
+ mbim_clear_queues(mbim);
+ mbim_reset_function_queue(mbim);
+ spin_lock(&mbim->lock);
+ break;
+ default:
+ pr_err("Unknown event %02x --> %d\n",
+ event->bNotificationType, req->status);
+ break;
+ }
+
+ /* mbim_do_notify() requires mbim->lock held */
+ mbim_do_notify(mbim);
+ spin_unlock(&mbim->lock);
+
+ pr_debug("dev:%p Exit\n", mbim);
+}
+
+/*
+ * ep0-OUT completion for USB_CDC_SET_NTB_INPUT_SIZE.  Validates the
+ * 4-byte (size only) or 8-byte (size + max datagrams) payload and stores
+ * the negotiated values; stalls ep0 on any malformed transfer.
+ */
+static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* now for SET_NTB_INPUT_SIZE only */
+	unsigned in_size = 0;
+	struct usb_function *f = req->context;
+	struct f_mbim *mbim = func_to_mbim(f);
+	struct mbim_ntb_input_size *ntb = NULL;
+
+	pr_debug("dev:%p\n", mbim);
+
+	req->context = NULL;
+	if (req->status || req->actual != req->length) {
+		pr_err("Bad control-OUT transfer\n");
+		goto invalid;
+	}
+
+	if (req->length == 4) {
+		/* short form: dwNtbInMaxSize only */
+		in_size = get_unaligned_le32(req->buf);
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		    in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+			pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+			goto invalid;
+		}
+	} else if (req->length == 8) {
+		/* long form additionally carries wNtbInMaxDatagrams */
+		ntb = (struct mbim_ntb_input_size *)req->buf;
+		in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		    in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+			pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+			goto invalid;
+		}
+		mbim->ntb_max_datagrams =
+			get_unaligned_le16(&(ntb->ntb_max_datagrams));
+	} else {
+		/*
+		 * Fix: report the offending wire length — the original logged
+		 * in_size, which is still 0 on this branch.
+		 */
+		pr_err("Illegal NTB length %d\n", req->length);
+		goto invalid;
+	}
+
+	pr_debug("Set NTB INPUT SIZE %d\n", in_size);
+
+	mbim->ntb_input_size = in_size;
+	return;
+
+invalid:
+	usb_ep_set_halt(ep);
+
+	pr_err("dev:%p Failed\n", mbim);
+}
+
+/*
+ * ep0-OUT completion for USB_CDC_SEND_ENCAPSULATED_COMMAND.  Copies the
+ * host command into a ctrl_pkt, queues it on cpkt_req_q and wakes the
+ * userspace reader.  Commands arriving before the device node is opened
+ * are dropped, except the very first one (queued so bootup isn't racy).
+ */
+static void
+fmbim_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_mbim *dev = req->context;
+	struct ctrl_pkt *cpkt = NULL;
+	int len = req->actual;
+	static bool first_command_sent;
+
+	if (!dev) {
+		pr_err("mbim dev is null\n");
+		return;
+	}
+
+	if (req->status < 0) {
+		pr_err("mbim command error %d\n", req->status);
+		return;
+	}
+
+	/*
+	 * Wait for user to process prev MBIM_OPEN cmd before handling new one.
+	 * However don't drop first command during bootup as file may not be
+	 * opened by now. Queue the command in this case.
+	 */
+	if (!atomic_read(&dev->open_excl) && first_command_sent) {
+		pr_err("mbim not opened yet, dropping cmd pkt = %d\n", len);
+		return;
+	}
+	if (!first_command_sent)
+		first_command_sent = true;
+
+	pr_debug("dev:%p port#%d\n", dev, dev->port_num);
+
+	cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	/*
+	 * Fix: mbim_alloc_ctrl_pkt() returns ERR_PTR(-ENOMEM), never NULL,
+	 * so the original "if (!cpkt)" could never detect failure and the
+	 * subsequent memcpy() would have dereferenced an ERR_PTR.
+	 */
+	if (IS_ERR(cpkt)) {
+		pr_err("Unable to allocate ctrl pkt\n");
+		return;
+	}
+
+	pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
+	memcpy(cpkt->buf, req->buf, len);
+
+	spin_lock(&dev->lock);
+	list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+	spin_unlock(&dev->lock);
+
+	/* wakeup read thread */
+	pr_debug("Wake up read queue\n");
+	wake_up(&dev->read_wq);
+}
+
+/*
+ * ep0 class-request dispatcher for the MBIM function.  Handles the CDC
+ * encapsulated command/response pair, NTB parameter queries and NTB
+ * input-size negotiation; everything else is rejected (stall).
+ * Returns the number of bytes to transfer, or a negative error.
+ */
+static int
+mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+ struct usb_request *req = cdev->req;
+ struct ctrl_pkt *cpkt = NULL;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /*
+ * composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+
+ if (!atomic_read(&mbim->online)) {
+ pr_warning("usb cable is not connected\n");
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_RESET_FUNCTION:
+
+ pr_debug("USB_CDC_RESET_FUNCTION\n");
+ value = 0;
+ req->complete = fmbim_reset_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+
+ pr_debug("USB_CDC_SEND_ENCAPSULATED_COMMAND\n");
+
+ if (w_length > req->length) {
+ pr_debug("w_length > req->length: %d > %d\n",
+ w_length, req->length);
+ }
+ /* data phase lands in fmbim_cmd_complete() */
+ value = w_length;
+ req->complete = fmbim_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+
+ pr_debug("USB_CDC_GET_ENCAPSULATED_RESPONSE\n");
+
+ /* NOTE(review): condition tests w_value but the message prints
+ * w_length — looks like a copy-paste slip in the log text.
+ */
+ if (w_value) {
+ pr_err("w_length > 0: %d\n", w_length);
+ break;
+ }
+
+ pr_debug("req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ spin_lock(&mbim->lock);
+ if (list_empty(&mbim->cpkt_resp_q)) {
+ pr_err("ctrl resp queue empty\n");
+ spin_unlock(&mbim->lock);
+ break;
+ }
+
+ /* pop the oldest queued response and hand it to the host */
+ cpkt = list_first_entry(&mbim->cpkt_resp_q,
+ struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&mbim->lock);
+
+ value = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, value);
+ mbim_free_ctrl_pkt(cpkt);
+
+ pr_debug("copied encapsulated_response %d bytes\n",
+ value);
+
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_PARAMETERS:
+
+ pr_debug("USB_CDC_GET_NTB_PARAMETERS\n");
+
+ if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ value = w_length > sizeof mbim_ntb_parameters ?
+ sizeof mbim_ntb_parameters : w_length;
+ memcpy(req->buf, &mbim_ntb_parameters, value);
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_GET_NTB_INPUT_SIZE\n");
+
+ if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ put_unaligned_le32(mbim->ntb_input_size, req->buf);
+ value = 4;
+ pr_debug("Reply to host INPUT SIZE %d\n",
+ mbim->ntb_input_size);
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_SET_NTB_INPUT_SIZE\n");
+
+ if (w_length != 4 && w_length != 8) {
+ pr_err("wrong NTB length %d\n", w_length);
+ break;
+ }
+
+ if (w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ /* payload validated later in mbim_ep0out_complete() */
+ req->complete = mbim_ep0out_complete;
+ req->length = w_length;
+ req->context = f;
+
+ value = req->length;
+ break;
+
+ /* optional in mbim descriptor: */
+ /* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
+ /* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */
+
+ default:
+ pr_err("invalid control req: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("control request: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+
+ if (value < 0) {
+ pr_err("queueing req failed: %02x.%02x, err %d\n",
+ ctrl->bRequestType,
+ ctrl->bRequest, value);
+ }
+ } else {
+ pr_err("ctrl req err %d: %02x.%02x v%04x i%04x l%d\n",
+ value, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * This function handles the Microsoft-specific OS descriptor control
+ * requests that are issued by Windows host drivers to determine the
+ * configuration containing the MBIM function.
+ *
+ * Unlike mbim_setup() this function handles two specific device requests,
+ * and only when a configuration has not yet been selected.
+ */
+static int mbim_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* only respond to OS descriptors when no configuration selected */
+ if (cdev->config || !mbim_ext_config_desc.function.subCompatibleID[0])
+ return value;
+
+ pr_debug("%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MSFT OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MBIM_OS_STRING_ID) {
+
+ /* copy at most the full OS string, truncated to w_length */
+ value = (w_length < sizeof(mbim_os_string) ?
+ w_length : sizeof(mbim_os_string));
+ memcpy(cdev->req->buf, mbim_os_string, value);
+
+ } else if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+ && ctrl->bRequest == MBIM_VENDOR_CODE && w_index == 4) {
+
+ /* Handle Extended OS descriptor */
+ value = (w_length < sizeof(mbim_ext_config_desc) ?
+ w_length : sizeof(mbim_ext_config_desc));
+ memcpy(cdev->req->buf, &mbim_ext_config_desc, value);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ pr_err("response queue error: %d\n", rc);
+ }
+ return value;
+}
+
+/*
+ * mbim_set_alt() - SET_INTERFACE handler for the control and data interfaces.
+ *
+ * The control interface (ctrl_id) supports only altsetting 0: (re)configure
+ * and enable the interrupt notify endpoint.  The data interface (data_id)
+ * supports altsettings 0 and 1: alt 0 performs the BAM/IPA disconnect
+ * handshake, alt 1 configures the bulk endpoints and connects the data path.
+ *
+ * Fix: the BAM2BAM_IPA case in the alt-0 teardown switch was missing a
+ * break and fell through into default, logging a spurious "unknown
+ * transport" error whenever the dwc3 unconfig path succeeded.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+	struct usb_composite_dev *cdev = mbim->cdev;
+	int ret = 0;
+
+	pr_debug("intf=%u, alt=%u\n", intf, alt);
+
+	/* Control interface has only altsetting 0 */
+	if (intf == mbim->ctrl_id) {
+
+		pr_info("CONTROL_INTERFACE\n");
+
+		if (alt != 0)
+			goto fail;
+
+		if (mbim->not_port.notify->driver_data) {
+			pr_info("reset mbim control %d\n", intf);
+			usb_ep_disable(mbim->not_port.notify);
+		}
+
+		ret = config_ep_by_speed(cdev->gadget, f,
+					mbim->not_port.notify);
+		if (ret) {
+			mbim->not_port.notify->desc = NULL;
+			pr_err("Failed configuring notify ep %s: err %d\n",
+				mbim->not_port.notify->name, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(mbim->not_port.notify);
+		if (ret) {
+			pr_err("usb ep#%s enable failed, err#%d\n",
+				mbim->not_port.notify->name, ret);
+			return ret;
+		}
+		mbim->not_port.notify->driver_data = mbim;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == mbim->data_id) {
+
+		pr_info("DATA_INTERFACE id %d, data interface status %d\n",
+			mbim->data_id, mbim->data_interface_up);
+
+		if (alt > 1)
+			goto fail;
+
+		/* nothing to do if the requested altsetting is already set */
+		if (mbim->data_interface_up == alt)
+			return 0;
+
+		if (mbim->bam_port.in->driver_data) {
+			pr_info("reset mbim, alt-%d\n", alt);
+			mbim_reset_values(mbim);
+		}
+
+		if (alt == 0) {
+			/*
+			 * perform bam data disconnect handshake upon usb
+			 * disconnect
+			 */
+			switch (mbim->xport) {
+			case USB_GADGET_XPORT_BAM_DMUX:
+				gbam_mbim_disconnect();
+				break;
+			case USB_GADGET_XPORT_BAM2BAM_IPA:
+				bam_data_disconnect(&mbim->bam_port,
+					USB_FUNC_MBIM, mbim->port_num);
+				/* dwc3 also needs its eps unconfigured */
+				if (!gadget_is_dwc3(cdev->gadget))
+					break;
+
+				if (msm_ep_unconfig(mbim->bam_port.in) ||
+					msm_ep_unconfig(mbim->bam_port.out)) {
+					pr_err("ep_unconfig failed\n");
+					goto fail;
+				}
+				break;
+			default:
+				pr_err("unknown transport\n");
+			}
+			goto notify_ready;
+		}
+
+		pr_info("Alt set 1, initialize ports\n");
+
+		/*
+		 * CDC Network only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		pr_info("Choose endpoints\n");
+
+		ret = config_ep_by_speed(cdev->gadget, f,
+				mbim->bam_port.in);
+		if (ret) {
+			mbim->bam_port.in->desc = NULL;
+			pr_err("IN ep %s failed: %d\n",
+					mbim->bam_port.in->name, ret);
+			return ret;
+		}
+
+		pr_info("Set mbim port in_desc = 0x%p\n",
+				mbim->bam_port.in->desc);
+
+		ret = config_ep_by_speed(cdev->gadget, f,
+				mbim->bam_port.out);
+		if (ret) {
+			mbim->bam_port.out->desc = NULL;
+			pr_err("OUT ep %s failed: %d\n",
+					mbim->bam_port.out->name, ret);
+			return ret;
+		}
+
+		pr_info("Set mbim port out_desc = 0x%p\n",
+				mbim->bam_port.out->desc);
+
+		pr_debug("Activate mbim\n");
+		switch (mbim->xport) {
+		case USB_GADGET_XPORT_BAM_DMUX:
+			gbam_mbim_connect(cdev->gadget, mbim->bam_port.in,
+					mbim->bam_port.out);
+			break;
+		case USB_GADGET_XPORT_BAM2BAM_IPA:
+			ret = bam_data_connect(&mbim->bam_port,
+				mbim->xport, mbim->port_num,
+				USB_FUNC_MBIM);
+			if (ret) {
+				pr_err("bam_data_setup failed:err:%d\n",
+						ret);
+				goto fail;
+			}
+			break;
+		default:
+			pr_err("unknown transport\n");
+		}
+notify_ready:
+		mbim->data_interface_up = alt;
+		spin_lock(&mbim->lock);
+		mbim->not_port.notify_state = MBIM_NOTIFY_RESPONSE_AVAILABLE;
+		spin_unlock(&mbim->lock);
+	} else {
+		goto fail;
+	}
+
+	atomic_set(&mbim->online, 1);
+
+	pr_info("SET DEVICE ONLINE\n");
+
+	return 0;
+
+fail:
+	pr_err("ERROR: Illegal Interface\n");
+	return -EINVAL;
+}
+
+/*
+ * Because the data interface supports multiple altsettings, this MBIM
+ * function *MUST* implement a get_alt() method.  The control interface
+ * is always at altsetting 0; the data interface reports whichever
+ * altsetting mbim_set_alt() last activated.
+ */
+static int mbim_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+
+	if (intf == mbim->data_id)
+		return mbim->data_interface_up;
+
+	return (intf == mbim->ctrl_id) ? 0 : -EINVAL;
+}
+
+/*
+ * mbim_disable() - composite "disable" callback; take the function offline.
+ *
+ * Marks the instance offline, disables the notify endpoint and clears the
+ * pending-notification count, flushes both control-packet queues, and then
+ * tears down the data path - but only if altsetting 1 had actually been
+ * selected (data_interface_up).  The transport-specific teardown mirrors
+ * the alt==0 handling in mbim_set_alt().
+ */
+static void mbim_disable(struct usb_function *f)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+	struct usb_composite_dev *cdev = mbim->cdev;
+
+	pr_info("SET DEVICE OFFLINE\n");
+	atomic_set(&mbim->online, 0);
+	mbim->remote_wakeup_enabled = 0;
+
+	/* Disable Control Path */
+	if (mbim->not_port.notify->driver_data) {
+		usb_ep_disable(mbim->not_port.notify);
+		mbim->not_port.notify->driver_data = NULL;
+	}
+	atomic_set(&mbim->not_port.notify_count, 0);
+	mbim->not_port.notify_state = MBIM_NOTIFY_NONE;
+
+	mbim_clear_queues(mbim);
+	mbim_reset_function_queue(mbim);
+
+	/* Disable Data Path - only if it was initialized already (alt=1) */
+	if (!mbim->data_interface_up) {
+		pr_debug("MBIM data interface is not opened. Returning\n");
+		return;
+	}
+
+	switch (mbim->xport) {
+	case USB_GADGET_XPORT_BAM_DMUX:
+		gbam_mbim_disconnect();
+		break;
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		/* dwc3 controllers need their eps unconfigured first */
+		if (gadget_is_dwc3(cdev->gadget)) {
+			msm_ep_unconfig(mbim->bam_port.out);
+			msm_ep_unconfig(mbim->bam_port.in);
+		}
+		bam_data_disconnect(&mbim->bam_port, USB_FUNC_MBIM,
+				mbim->port_num);
+		break;
+	default:
+		pr_err("unknown transport\n");
+	}
+
+	mbim->data_interface_up = false;
+	pr_info("mbim deactivated\n");
+}
+
+/* NOTE(review): not referenced in this chunk - confirm it is used elsewhere */
+#define MBIM_ACTIVE_PORT 0
+
+/*
+ * mbim_suspend() - composite "suspend" callback.
+ *
+ * No-op for BAM-DMUX transport.  At super speed, a function that is already
+ * in Function Suspend state must not be suspended again.  Records whether
+ * remote wakeup is permitted (per-function flag at SS, device flag below
+ * SS) and hands the data path to bam_data_suspend() if altsetting 1 was
+ * active; without remote wakeup the device is also marked offline.
+ */
+static void mbim_suspend(struct usb_function *f)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+
+	pr_info("mbim suspended\n");
+
+	pr_debug("%s(): remote_wakeup:%d\n:", __func__,
+			mbim->cdev->gadget->remote_wakeup);
+
+	if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
+		return;
+
+	/* If the function is in Function Suspend state, avoid suspending the
+	 * MBIM function again.
+	 */
+	if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
+		f->func_is_suspended)
+		return;
+
+	if (mbim->cdev->gadget->speed == USB_SPEED_SUPER)
+		mbim->remote_wakeup_enabled = f->func_wakeup_allowed;
+	else
+		mbim->remote_wakeup_enabled = mbim->cdev->gadget->remote_wakeup;
+
+	/* MBIM data interface is up only when alt setting is set to 1. */
+	if (!mbim->data_interface_up) {
+		pr_debug("MBIM data interface is not opened. Returning\n");
+		return;
+	}
+
+	if (!mbim->remote_wakeup_enabled)
+		atomic_set(&mbim->online, 0);
+
+	bam_data_suspend(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
+			mbim->remote_wakeup_enabled);
+}
+
+/*
+ * mbim_resume() - composite "resume" callback; counterpart of mbim_suspend().
+ *
+ * No-op for BAM-DMUX.  A super-speed function still in Function Suspend
+ * state is resumed later via a Function Resume request instead.  Re-arms
+ * the control path by queuing the notify request, then resumes the data
+ * path via bam_data_resume() if altsetting 1 was active; without remote
+ * wakeup the device is marked online again here.
+ */
+static void mbim_resume(struct usb_function *f)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+
+	pr_info("mbim resumed\n");
+
+	if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
+		return;
+
+	/*
+	 * If the function is in USB3 Function Suspend state, resume is
+	 * canceled. In this case resume is done by a Function Resume request.
+	 */
+	if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
+		f->func_is_suspended)
+		return;
+
+	/* resume control path by queuing notify req */
+	spin_lock(&mbim->lock);
+	mbim_do_notify(mbim);
+	spin_unlock(&mbim->lock);
+
+	/* MBIM data interface is up only when alt setting is set to 1. */
+	if (!mbim->data_interface_up) {
+		pr_debug("MBIM data interface is not opened. Returning\n");
+		return;
+	}
+
+	if (!mbim->remote_wakeup_enabled)
+		atomic_set(&mbim->online, 1);
+
+	bam_data_resume(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
+			mbim->remote_wakeup_enabled);
+}
+
+/*
+ * mbim_func_suspend() - USB3 Function Suspend feature handler.
+ * @options: bit 0 = function suspend requested, bit 1 = function remote
+ *           wakeup enable.
+ *
+ * Suspends or resumes the MBIM function and records whether function
+ * remote wakeup is allowed.  Supported at super speed only.
+ *
+ * Fix: the NULL check of @f now precedes any use of it; the original
+ * converted f via func_to_mbim() (and later dereferenced the result)
+ * before checking for NULL.
+ *
+ * Returns 0 on success, -EINVAL for a NULL function, -ENOTSUPP below
+ * super speed.
+ */
+static int mbim_func_suspend(struct usb_function *f, unsigned char options)
+{
+	enum {
+		MBIM_FUNC_SUSPEND_MASK = 0x1,
+		MBIM_FUNC_WAKEUP_EN_MASK = 0x2
+	};
+
+	bool func_wakeup_allowed;
+	struct f_mbim *mbim;
+
+	/* validate before use */
+	if (f == NULL)
+		return -EINVAL;
+
+	mbim = func_to_mbim(f);
+
+	pr_debug("Got Function Suspend(%u) command for %s function\n",
+		options, f->name ? f->name : "");
+
+	/* Function Suspend is supported by Super Speed devices only */
+	if (mbim->cdev->gadget->speed != USB_SPEED_SUPER)
+		return -ENOTSUPP;
+
+	func_wakeup_allowed =
+		((options & MBIM_FUNC_WAKEUP_EN_MASK) != 0);
+
+	if (options & MBIM_FUNC_SUSPEND_MASK) {
+		/* latch the wakeup permission, then suspend once */
+		f->func_wakeup_allowed = func_wakeup_allowed;
+		if (!f->func_is_suspended) {
+			mbim_suspend(f);
+			f->func_is_suspended = true;
+		}
+	} else {
+		if (f->func_is_suspended) {
+			f->func_is_suspended = false;
+			mbim_resume(f);
+		}
+		f->func_wakeup_allowed = func_wakeup_allowed;
+	}
+
+	return 0;
+}
+
+/*
+ * mbim_get_status() - GET_STATUS reply bits for this function.
+ *
+ * Bit 0 (wakeup-capable) is always set; bit 1 (wakeup-enabled) reflects
+ * the current func_wakeup_allowed flag.
+ */
+static int mbim_get_status(struct usb_function *f)
+{
+	enum {
+		MBIM_STS_FUNC_WAKEUP_CAP_SHIFT = 0,
+		MBIM_STS_FUNC_WAKEUP_EN_SHIFT = 1
+	};
+	unsigned status = 1 << MBIM_STS_FUNC_WAKEUP_CAP_SHIFT;
+
+	if (f->func_wakeup_allowed)
+		status |= 1 << MBIM_STS_FUNC_WAKEUP_EN_SHIFT;
+
+	return status;
+}
+
+/*---------------------- function driver setup/binding ---------------------*/
+
+/*
+ * mbim_bind() - bind the MBIM function to a configuration.
+ *
+ * Allocates the control and data interface IDs, autoconfigures the bulk
+ * IN/OUT and interrupt notify endpoints, allocates the notify request with
+ * a pre-built RESPONSE_AVAILABLE CDC notification, and copies the FS/HS/SS
+ * descriptor sets.  On any failure everything allocated so far is released
+ * and the endpoint claims are dropped.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int
+mbim_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_mbim *mbim = func_to_mbim(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_cdc_notification *event;
+
+	pr_info("Enter\n");
+
+	mbim->cdev = cdev;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	/* control interface id is patched into the IAD and union descs */
+	mbim->ctrl_id = status;
+	mbim_iad_desc.bFirstInterface = status;
+
+	mbim_control_intf.bInterfaceNumber = status;
+	mbim_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	mbim->data_id = status;
+	mbim->data_interface_up = false;
+
+	mbim_data_nop_intf.bInterfaceNumber = status;
+	mbim_data_intf.bInterfaceNumber = status;
+	mbim_union_desc.bSlaveInterface0 = status;
+
+	mbim->bam_port.cdev = cdev;
+	mbim->bam_port.func = &mbim->function;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_in_desc);
+	if (!ep) {
+		pr_err("usb epin autoconfig failed\n");
+		goto fail;
+	}
+	pr_info("usb epin autoconfig succeeded\n");
+	ep->driver_data = cdev;	/* claim */
+	mbim->bam_port.in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_out_desc);
+	if (!ep) {
+		pr_err("usb epout autoconfig failed\n");
+		goto fail;
+	}
+	pr_info("usb epout autoconfig succeeded\n");
+	ep->driver_data = cdev;	/* claim */
+	mbim->bam_port.out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_notify_desc);
+	if (!ep) {
+		pr_err("usb notify ep autoconfig failed\n");
+		goto fail;
+	}
+	pr_info("usb notify ep autoconfig succeeded\n");
+	mbim->not_port.notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	mbim->not_port.notify_req = mbim_alloc_req(ep, NCM_STATUS_BYTECOUNT,
+					cdev->gadget->extra_buf_alloc);
+	if (!mbim->not_port.notify_req) {
+		pr_info("failed to allocate notify request\n");
+		goto fail;
+	}
+	pr_info("allocated notify ep request & request buffer\n");
+
+	/* pre-build the RESPONSE_AVAILABLE notification; reused for life */
+	mbim->not_port.notify_req->context = mbim;
+	mbim->not_port.notify_req->complete = mbim_notify_complete;
+	mbim->not_port.notify_req->length = sizeof(*event);
+	event = mbim->not_port.notify_req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(mbim->ctrl_id);
+	event->wLength = cpu_to_le16(0);
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(mbim_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_mbim_in_desc.bEndpointAddress =
+				fs_mbim_in_desc.bEndpointAddress;
+		hs_mbim_out_desc.bEndpointAddress =
+				fs_mbim_out_desc.bEndpointAddress;
+		hs_mbim_notify_desc.bEndpointAddress =
+				fs_mbim_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(mbim_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_mbim_in_desc.bEndpointAddress =
+				fs_mbim_in_desc.bEndpointAddress;
+		ss_mbim_out_desc.bEndpointAddress =
+				fs_mbim_out_desc.bEndpointAddress;
+		ss_mbim_notify_desc.bEndpointAddress =
+				fs_mbim_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(mbim_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	/*
+	 * If MBIM is bound in a config other than the first, tell Windows
+	 * about it by returning the num as a string in the OS descriptor's
+	 * subCompatibleID field. Windows only supports up to config #4.
+	 */
+	if (c->bConfigurationValue >= 2 && c->bConfigurationValue <= 4) {
+		pr_debug("MBIM in configuration %d\n", c->bConfigurationValue);
+		mbim_ext_config_desc.function.subCompatibleID[0] =
+			c->bConfigurationValue + '0';
+	}
+
+	pr_info("mbim(%d): %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			mbim->port_num,
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			mbim->bam_port.in->name, mbim->bam_port.out->name,
+			mbim->not_port.notify->name);
+
+	return 0;
+
+fail:
+	pr_err("%s failed to bind, err %d\n", f->name, status);
+
+	/* unwind: descriptor copies, notify request, endpoint claims */
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (mbim->not_port.notify_req) {
+		kfree(mbim->not_port.notify_req->buf);
+		usb_ep_free_request(mbim->not_port.notify,
+				mbim->not_port.notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (mbim->not_port.notify)
+		mbim->not_port.notify->driver_data = NULL;
+	if (mbim->bam_port.out)
+		mbim->bam_port.out->driver_data = NULL;
+	if (mbim->bam_port.in)
+		mbim->bam_port.in->driver_data = NULL;
+
+	return status;
+}
+
+/*
+ * mbim_unbind() - undo mbim_bind(): free descriptor copies, the notify
+ * request and its buffer, and clear any per-configuration subCompatibleID
+ * override set in mbim_bind().
+ */
+static void mbim_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_mbim *mbim = func_to_mbim(f);
+
+	pr_debug("unbinding mbim\n");
+
+	/* SS/HS sets exist only if the gadget supports those speeds */
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(mbim->not_port.notify_req->buf);
+	usb_ep_free_request(mbim->not_port.notify, mbim->not_port.notify_req);
+
+	mbim_ext_config_desc.function.subCompatibleID[0] = 0;
+}
+
+/**
+ * mbim_bind_config - add MBIM link to a configuration
+ * @c: the configuration to support the network link
+ * @portno: index of the pre-allocated mbim port (see mbim_init())
+ * @xport_name: transport name, mapped via str_to_xport()
+ * Context: single threaded during gadget setup
+ *
+ * Sets up the selected transport (BAM-DMUX or BAM2BAM-IPA), allocates the
+ * device-global string IDs on first use, and registers the usb_function.
+ * (Cleanup: the BAM_DMUX case carried a redundant "if (status) break;"
+ * immediately followed by an unconditional break.)
+ *
+ * Returns zero on success, else negative errno.
+ */
+int mbim_bind_config(struct usb_configuration *c, unsigned portno,
+					 char *xport_name)
+{
+	struct f_mbim *mbim = NULL;
+	int status = 0;
+
+	pr_info("port number %u\n", portno);
+
+	if (portno >= nr_mbim_ports) {
+		pr_err("Can not add port %u. Max ports = %d\n",
+		       portno, nr_mbim_ports);
+		return -ENODEV;
+	}
+
+	/* allocate and initialize one new instance */
+	mbim = mbim_ports[portno].port;
+	if (!mbim) {
+		pr_err("mbim struct not allocated\n");
+		return -ENOMEM;
+	}
+
+	mbim->xport = str_to_xport(xport_name);
+	switch (mbim->xport) {
+	case USB_GADGET_XPORT_BAM2BAM:
+		/* Override BAM2BAM to BAM_DMUX for old ABI compatibility */
+		mbim->xport = USB_GADGET_XPORT_BAM_DMUX;
+		/* fall-through */
+	case USB_GADGET_XPORT_BAM_DMUX:
+		status = gbam_mbim_setup();
+		break;
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		status = mbim_bam_setup(nr_mbim_ports);
+		if (status)
+			break;
+		mbim_ntb_parameters.wNtbOutMaxDatagrams = 16;
+		/* For IPA this is proven to give maximum throughput */
+		mbim_ntb_parameters.dwNtbInMaxSize =
+				cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
+		mbim_ntb_parameters.dwNtbOutMaxSize =
+				cpu_to_le32(MBIM_NTB_OUT_SIZE_IPA);
+		/* update rx buffer size to be used by usb rx request buffer */
+		mbim->bam_port.rx_buffer_size = MBIM_NTB_OUT_SIZE_IPA;
+		mbim_ntb_parameters.wNdpInDivisor = 1;
+		pr_debug("MBIM: dwNtbOutMaxSize:%d\n", MBIM_NTB_OUT_SIZE_IPA);
+		break;
+	default:
+		status = -EINVAL;
+	}
+
+	if (status) {
+		pr_err("%s transport setup failed\n", xport_name);
+		return status;
+	}
+
+	/* maybe allocate device-global string IDs */
+	if (mbim_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		mbim_string_defs[STRING_CTRL_IDX].id = status;
+		mbim_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		mbim_string_defs[STRING_DATA_IDX].id = status;
+		mbim_data_nop_intf.iInterface = status;
+		mbim_data_intf.iInterface = status;
+	}
+
+	mbim->cdev = c->cdev;
+
+	mbim_reset_values(mbim);
+
+	mbim->function.name = "usb_mbim";
+	mbim->function.strings = mbim_strings;
+	mbim->function.bind = mbim_bind;
+	mbim->function.unbind = mbim_unbind;
+	mbim->function.set_alt = mbim_set_alt;
+	mbim->function.get_alt = mbim_get_alt;
+	mbim->function.setup = mbim_setup;
+	mbim->function.disable = mbim_disable;
+	mbim->function.suspend = mbim_suspend;
+	mbim->function.func_suspend = mbim_func_suspend;
+	mbim->function.get_status = mbim_get_status;
+	mbim->function.resume = mbim_resume;
+
+	INIT_LIST_HEAD(&mbim->cpkt_req_q);
+	INIT_LIST_HEAD(&mbim->cpkt_resp_q);
+
+	status = usb_add_function(c, &mbim->function);
+
+	pr_info("Exit status %d\n", status);
+
+	return status;
+}
+
+/* ------------ MBIM DRIVER File Operations API for USER SPACE ------------ */
+
+/*
+ * mbim_read() - userspace reads one MBIM control request from the host.
+ *
+ * Blocks (interruptibly) until a control packet has been queued by the
+ * setup handler, then copies exactly one packet to @buf.  Only one reader
+ * may be active at a time (read_excl).
+ *
+ * Fix: a copy_to_user() fault is now reported as -EFAULT; the original
+ * returned -ENOMEM, which misrepresents a bad user buffer as an
+ * allocation failure.
+ *
+ * Returns the packet length, or a negative errno.
+ */
+static ssize_t
+mbim_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct f_mbim *dev = fp->private_data;
+	struct ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("Enter(%zu)\n", count);
+
+	if (!dev) {
+		pr_err("Received NULL mbim pointer\n");
+		return -ENODEV;
+	}
+
+	if (count > MBIM_BULK_BUFFER_SIZE) {
+		pr_err("Buffer size is too big %zu, should be at most %d\n",
+			count, MBIM_BULK_BUFFER_SIZE);
+		return -EINVAL;
+	}
+
+	/* single reader at a time */
+	if (mbim_lock(&dev->read_excl)) {
+		pr_err("Previous reading is not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (atomic_read(&dev->error)) {
+		mbim_unlock(&dev->read_excl);
+		return -EIO;
+	}
+
+	/* wait until a request packet shows up on the queue */
+	spin_lock_irqsave(&dev->lock, flags);
+	while (list_empty(&dev->cpkt_req_q)) {
+		pr_debug("Requests list is empty. Wait.\n");
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = wait_event_interruptible(dev->read_wq,
+			!list_empty(&dev->cpkt_req_q));
+		if (ret < 0) {
+			pr_err("Waiting failed\n");
+			mbim_unlock(&dev->read_excl);
+			return -ERESTARTSYS;
+		}
+		pr_debug("Received request packet\n");
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+
+	cpkt = list_first_entry(&dev->cpkt_req_q, struct ctrl_pkt,
+							list);
+	if (cpkt->len > count) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		mbim_unlock(&dev->read_excl);
+		pr_err("cpkt size too big:%d > buf size:%zu\n",
+				cpkt->len, count);
+		return -ENOMEM;
+	}
+
+	pr_debug("cpkt size:%d\n", cpkt->len);
+
+	/* detach the packet before dropping the lock */
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	mbim_unlock(&dev->read_excl);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		pr_err("copy_to_user failed: err %d\n", ret);
+		ret = -EFAULT;
+	} else {
+		pr_debug("copied %d bytes to user\n", cpkt->len);
+		ret = cpkt->len;
+	}
+
+	mbim_free_ctrl_pkt(cpkt);
+
+	return ret;
+}
+
+/*
+ * mbim_write() - userspace writes one MBIM control response for the host.
+ *
+ * Queues the packet on cpkt_resp_q and, if no notification is already in
+ * flight, queues the RESPONSE_AVAILABLE notify request.  Only one writer
+ * may be active at a time (write_excl).
+ *
+ * Fixes: the func-suspended reject path returned -ENOTSUPP while still
+ * holding write_excl, permanently wedging the writer; a copy_from_user()
+ * fault returned the positive "bytes not copied" count (seen by userspace
+ * as a short write) instead of -EFAULT; "lenght" typo in the log.
+ *
+ * Returns @count on success, or a negative errno.
+ */
+static ssize_t
+mbim_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+	struct f_mbim *dev = fp->private_data;
+	struct ctrl_pkt *cpkt = NULL;
+	struct usb_request *req = dev->not_port.notify_req;
+	int ret = 0;
+	unsigned long flags;
+
+	pr_debug("Enter(%zu)\n", count);
+
+	if (!dev || !req || !req->buf) {
+		pr_err("%s: dev %p req %p req->buf %p\n",
+			__func__, dev, req, req ? req->buf : req);
+		return -ENODEV;
+	}
+
+	if (!count || count > MAX_CTRL_PKT_SIZE) {
+		pr_err("error: ctrl pkt length %zu\n", count);
+		return -EINVAL;
+	}
+
+	/* single writer at a time */
+	if (mbim_lock(&dev->write_excl)) {
+		pr_err("Previous writing not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("USB cable not connected\n");
+		mbim_unlock(&dev->write_excl);
+		return -EPIPE;
+	}
+
+	if (dev->not_port.notify_state != MBIM_NOTIFY_RESPONSE_AVAILABLE) {
+		pr_err("dev:%p state=%d error\n", dev,
+			dev->not_port.notify_state);
+		mbim_unlock(&dev->write_excl);
+		return -EINVAL;
+	}
+
+	if (dev->function.func_is_suspended &&
+			!dev->function.func_wakeup_allowed) {
+		dev->cpkt_drop_cnt++;
+		pr_err("drop ctrl pkt of len %zu\n", count);
+		/* release the writer exclusion before bailing out */
+		mbim_unlock(&dev->write_excl);
+		return -ENOTSUPP;
+	}
+
+	cpkt = mbim_alloc_ctrl_pkt(count, GFP_KERNEL);
+	if (!cpkt) {
+		pr_err("failed to allocate ctrl pkt\n");
+		mbim_unlock(&dev->write_excl);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cpkt->buf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed err:%d\n", ret);
+		mbim_free_ctrl_pkt(cpkt);
+		mbim_unlock(&dev->write_excl);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+
+	/* only the first pending response actually queues the notify req */
+	if (atomic_inc_return(&dev->not_port.notify_count) != 1) {
+		pr_debug("delay ep_queue: notifications queue is busy[%d]\n",
+			atomic_read(&dev->not_port.notify_count));
+		spin_unlock_irqrestore(&dev->lock, flags);
+		mbim_unlock(&dev->write_excl);
+		return count;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_func_ep_queue(&dev->function, dev->not_port.notify,
+			   req, GFP_ATOMIC);
+	if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+		spin_lock_irqsave(&dev->lock, flags);
+		/* check if device disconnected while we dropped lock */
+		if (atomic_read(&dev->online)) {
+			list_del(&cpkt->list);
+			atomic_dec(&dev->not_port.notify_count);
+			mbim_free_ctrl_pkt(cpkt);
+		}
+		dev->cpkt_drop_cnt++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_err("drop ctrl pkt of len %d error %d\n", cpkt->len, ret);
+	} else {
+		ret = 0;
+	}
+	mbim_unlock(&dev->write_excl);
+
+	pr_debug("Exit(%zu)\n", count);
+
+	return ret ? ret : count;
+}
+
+/*
+ * mbim_open() - open /dev/android_mbim; only a single opener is allowed
+ * (open_excl).  Clears any sticky error flag on a successful open.
+ *
+ * Fix: the original used "while (!_mbim_dev) { ... return -ENODEV; }",
+ * a loop that can execute at most once - a plain conditional states the
+ * intent.
+ */
+static int mbim_open(struct inode *ip, struct file *fp)
+{
+	pr_info("Open mbim driver\n");
+
+	if (!_mbim_dev) {
+		pr_err("mbim_dev not created yet\n");
+		return -ENODEV;
+	}
+
+	if (mbim_lock(&_mbim_dev->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	pr_info("Lock mbim_dev->open_excl for open\n");
+
+	/* opening while disconnected is allowed; just warn */
+	if (!atomic_read(&_mbim_dev->online))
+		pr_err("USB cable not connected\n");
+
+	fp->private_data = _mbim_dev;
+
+	atomic_set(&_mbim_dev->error, 0);
+
+	pr_info("Exit, mbim file opened\n");
+
+	return 0;
+}
+
+/*
+ * mbim_release() - close /dev/android_mbim; drop the single-open
+ * exclusion taken in mbim_open().
+ */
+static int mbim_release(struct inode *ip, struct file *fp)
+{
+	struct f_mbim *dev = fp->private_data;
+
+	pr_info("Close mbim file\n");
+
+	mbim_unlock(&dev->open_excl);
+
+	return 0;
+}
+
+#define BAM_DMUX_CHANNEL_ID 8
+/*
+ * mbim_ioctl() - query NTB sizing and data-path endpoint info from
+ * userspace.  Serialized via ioctl_excl.
+ *
+ * Fixes: the EP_LOOKUP not-connected path returned -ENOTCONN while still
+ * holding ioctl_excl, wedging all further ioctls; on an unknown transport
+ * the uninitialized stack 'info' was still copied to userspace (kernel
+ * stack disclosure) and copy_to_user()'s result overwrote the -ENODEV.
+ */
+static long mbim_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	struct f_mbim *mbim = fp->private_data;
+	struct data_port *port;
+	struct mbim_ipa_ep_info info;
+	int ret = 0;
+
+	pr_debug("Received command %d\n", cmd);
+
+	if (!mbim) {
+		pr_err("Bad parameter\n");
+		return -EINVAL;
+	}
+
+	if (mbim_lock(&mbim->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case MBIM_GET_NTB_SIZE:
+		ret = copy_to_user((void __user *)arg,
+			&mbim->ntb_input_size, sizeof(mbim->ntb_input_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent NTB size %d\n", mbim->ntb_input_size);
+		break;
+	case MBIM_GET_DATAGRAM_COUNT:
+		ret = copy_to_user((void __user *)arg,
+			&mbim->ntb_max_datagrams,
+			sizeof(mbim->ntb_max_datagrams));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent NTB datagrams count %d\n",
+			mbim->ntb_max_datagrams);
+		break;
+
+	case MBIM_EP_LOOKUP:
+		if (!atomic_read(&mbim->online)) {
+			pr_warn("usb cable is not connected\n");
+			/* break (not return) so ioctl_excl is released */
+			ret = -ENOTCONN;
+			break;
+		}
+
+		switch (mbim->xport) {
+		case USB_GADGET_XPORT_BAM_DMUX:
+			/*
+			 * Rmnet and MBIM share the same BAM-DMUX channel.
+			 * This channel number 8 should be in sync with
+			 * the one defined in u_bam.c.
+			 */
+			info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_BAM_DMUX;
+			info.ph_ep_info.peripheral_iface_id =
+						BAM_DMUX_CHANNEL_ID;
+			info.ipa_ep_pair.cons_pipe_num = 0;
+			info.ipa_ep_pair.prod_pipe_num = 0;
+			break;
+		case USB_GADGET_XPORT_BAM2BAM_IPA:
+			port = &mbim->bam_port;
+			if ((port->ipa_producer_ep == -1) ||
+				(port->ipa_consumer_ep == -1)) {
+				pr_err("EP_LOOKUP failed - IPA pipes not updated\n");
+				ret = -EAGAIN;
+				break;
+			}
+
+			info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = mbim->data_id;
+			info.ipa_ep_pair.cons_pipe_num = port->ipa_consumer_ep;
+			info.ipa_ep_pair.prod_pipe_num = port->ipa_producer_ep;
+			break;
+		default:
+			ret = -ENODEV;
+			pr_err("unknown transport\n");
+			break;
+		}
+
+		/* never copy an unfilled 'info' out, and keep the errno */
+		if (ret)
+			break;
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		break;
+
+	default:
+		pr_err("wrong parameter\n");
+		ret = -EINVAL;
+	}
+
+	mbim_unlock(&mbim->ioctl_excl);
+
+	return ret;
+}
+
+/* file operations for MBIM device /dev/android_mbim */
+static const struct file_operations mbim_fops = {
+	.owner = THIS_MODULE,
+	.open = mbim_open,
+	.release = mbim_release,
+	.read = mbim_read,		/* host -> user: control requests */
+	.write = mbim_write,		/* user -> host: control responses */
+	.unlocked_ioctl = mbim_ioctl,	/* NTB sizing / EP lookup queries */
+};
+
+/* dynamic-minor misc char device backing /dev/android_mbim */
+static struct miscdevice mbim_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_mbim",
+	.fops = &mbim_fops,
+};
+
+/*
+ * mbim_init() - allocate @instances f_mbim ports and register the
+ * /dev/android_mbim misc device.  _mbim_dev points at the last-allocated
+ * instance, which backs the char device.
+ *
+ * Fix: the failure path freed the ports but left nr_mbim_ports nonzero
+ * and _mbim_dev possibly dangling; both are now reset so a later retry
+ * or lookup cannot touch freed memory.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int mbim_init(int instances)
+{
+	int i;
+	struct f_mbim *dev = NULL;
+	int ret;
+
+	pr_info("initialize %d instances\n", instances);
+
+	if (instances > NR_MBIM_PORTS) {
+		pr_err("Max-%d instances supported\n", NR_MBIM_PORTS);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < instances; i++) {
+		dev = kzalloc(sizeof(struct f_mbim), GFP_KERNEL);
+		if (!dev) {
+			pr_err("Failed to allocate mbim dev\n");
+			ret = -ENOMEM;
+			goto fail_probe;
+		}
+
+		dev->port_num = i;
+		/* -1 marks the IPA pipes as not yet assigned */
+		dev->bam_port.ipa_consumer_ep = -1;
+		dev->bam_port.ipa_producer_ep = -1;
+
+		spin_lock_init(&dev->lock);
+		INIT_LIST_HEAD(&dev->cpkt_req_q);
+		INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+		mbim_ports[i].port = dev;
+		mbim_ports[i].port_num = i;
+
+		init_waitqueue_head(&dev->read_wq);
+
+		atomic_set(&dev->open_excl, 0);
+		atomic_set(&dev->ioctl_excl, 0);
+		atomic_set(&dev->read_excl, 0);
+		atomic_set(&dev->write_excl, 0);
+
+		nr_mbim_ports++;
+
+	}
+
+	_mbim_dev = dev;
+	ret = misc_register(&mbim_device);
+	if (ret) {
+		pr_err("mbim driver failed to register\n");
+		goto fail_probe;
+	}
+
+	pr_info("Initialized %d ports\n", nr_mbim_ports);
+
+	return ret;
+
+fail_probe:
+	pr_err("Failed\n");
+	for (i = 0; i < nr_mbim_ports; i++) {
+		kfree(mbim_ports[i].port);
+		mbim_ports[i].port = NULL;
+	}
+	/* leave no dangling state behind on failure */
+	nr_mbim_ports = 0;
+	_mbim_dev = NULL;
+
+	return ret;
+}
+
+/*
+ * fmbim_cleanup() - undo mbim_init(): remove the char device and free all
+ * port instances.
+ *
+ * Fix: the misc device is now deregistered *before* the f_mbim structs
+ * are freed; the original freed first, leaving a window in which an
+ * open()/ioctl() on /dev/android_mbim could dereference freed memory.
+ */
+static void fmbim_cleanup(void)
+{
+	int i = 0;
+
+	pr_info("Enter\n");
+
+	/* stop userspace access before tearing down the backing state */
+	misc_deregister(&mbim_device);
+	_mbim_dev = NULL;
+
+	for (i = 0; i < nr_mbim_ports; i++) {
+		kfree(mbim_ports[i].port);
+		mbim_ports[i].port = NULL;
+	}
+	nr_mbim_ports = 0;
+}
+
diff --git a/drivers/usb/gadget/function/f_qc_ecm.c b/drivers/usb/gadget/function/f_qc_ecm.c
new file mode 100644
index 000000000000..847eb953ec61
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_ecm.c
@@ -0,0 +1,1165 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+#include "u_bam_data.h"
+#include <linux/ecm_ipa.h>
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link. The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support. It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.) A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on USB CDC Ethernet link function driver and
+ * contains MSM specific implementation.
+ */
+
+
+/* Notification state machine for the interrupt endpoint: a network
+ * CONNECT notification is followed (while the link is open) by a
+ * SPEED_CHANGE notification.
+ */
+enum ecm_qc_notify_state {
+	ECM_QC_NOTIFY_NONE,		/* don't notify */
+	ECM_QC_NOTIFY_CONNECT,		/* issue CONNECT next */
+	ECM_QC_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
+
+/* Per-instance state for the QC (BAM/IPA-backed) CDC ECM function. */
+struct f_ecm_qc {
+	struct qc_gether port;		/* generic ethernet-over-USB link */
+	u8 ctrl_id, data_id;		/* allocated interface numbers */
+	enum transport_type xport;	/* data path: BAM2BAM or BAM2BAM_IPA */
+	u8 port_num;
+	char ethaddr[14];		/* host MAC rendered as a hex string */
+
+	struct usb_ep *notify;
+	struct usb_request *notify_req;	/* NULL while a notification is queued */
+	u8 notify_state;		/* enum ecm_qc_notify_state */
+	bool is_open;
+	struct data_port bam_port;
+	bool ecm_mdm_ready_trigger;	/* set once ECM_IPA reports link-up */
+
+	bool data_interface_up;		/* current data altsetting (0 or 1) */
+};
+
+/* Single active instance; only one std ECM port is supported. */
+static struct f_ecm_qc *__ecm;
+
+/* Parameters shared with the ECM_IPA driver (MACs, callbacks, priv). */
+static struct ecm_ipa_params ipa_params;
+
+/* Map the generic usb_function back to its containing f_ecm_qc. */
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+	bool hs = gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH;
+
+	/* 13 x 512B bulk packets per HS microframe vs 19 x 64B per FS frame */
+	return hs ? 13 * 512 * 8 * 1000 * 8 : 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide. More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* Currently only one std ecm instance is supported - port index 0. */
+#define ECM_QC_NO_PORTS 1
+#define ECM_QC_DEFAULT_PORT 0
+#define ECM_QC_ACTIVE_PORT 0
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+	.bLength = sizeof ecm_qc_control_intf,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_COMM,
+	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol = USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+	.bLength = sizeof ecm_qc_header_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+	.bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+	.bLength = sizeof(ecm_qc_union_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 = DYNAMIC */
+	/* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+	.bLength = sizeof ecm_qc_desc,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters = cpu_to_le16(0),
+	.bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+	.bLength = sizeof ecm_qc_data_nop_intf,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bInterfaceNumber = 1,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass = 0,
+	.bInterfaceProtocol = 0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+	.bLength = sizeof ecm_qc_data_intf,
+	.bDescriptorType = USB_DT_INTERFACE,
+
+	.bInterfaceNumber = 1,
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 2,
+	.bInterfaceClass = USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass = 0,
+	.bInterfaceProtocol = 0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval = 1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+/* full-speed descriptor set, in configuration-descriptor order */
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	/* HS bInterval is log2-encoded in 125us units (+4 vs FS msec) */
+	.bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(512),
+};
+
+/* high-speed descriptor set, in configuration-descriptor order */
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_ss_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_notify_comp_desc = {
+	.bLength = sizeof(ecm_qc_ss_notify_comp_desc),
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+	.wBytesPerInterval = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_ss_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_in_comp_desc = {
+	.bLength = sizeof(ecm_qc_ss_in_comp_desc),
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ecm_qc_ss_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_out_comp_desc = {
+	.bLength = sizeof(ecm_qc_ss_out_comp_desc),
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+};
+
+/* super-speed descriptor set; each endpoint needs a companion desc */
+static struct usb_descriptor_header *ecm_qc_ss_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &ecm_qc_ss_notify_comp_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &ecm_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &ecm_qc_ss_out_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_qc_string_defs[] = {
+	[0].s = "CDC Ethernet Control Model (ECM)",
+	[1].s = NULL /* DYNAMIC */,	/* host MAC, set at bind_config time */
+	[2].s = "CDC Ethernet Data",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+	.language = 0x0409,	/* en-us */
+	.strings = ecm_qc_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+	&ecm_qc_string_table,
+	NULL,
+};
+
+/* Queue the next pending notification (CONNECT or SPEED_CHANGE) on the
+ * interrupt endpoint. ecm->notify_req doubles as an "in flight" flag:
+ * it is NULL while a request is queued and is restored by the
+ * completion handler (or here, on queueing failure).
+ */
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+	struct usb_request *req = ecm->notify_req;
+	struct usb_cdc_notification *event;
+	struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+	__le32 *data;
+	int status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ecm->notify_state) {
+	case ECM_QC_NOTIFY_NONE:
+		return;
+
+	case ECM_QC_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ecm->is_open) {
+			event->wValue = cpu_to_le16(1);
+			/* follow CONNECT with a SPEED_CHANGE */
+			ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+		} else {
+			event->wValue = cpu_to_le16(0);
+			ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		}
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+			ecm->is_open ? "true" : "false");
+		break;
+
+	case ECM_QC_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = ECM_QC_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	}
+	event->bmRequestType = 0xA1;	/* device-to-host, class, interface */
+	event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+	ecm->notify_req = NULL;	/* mark in flight before queueing */
+	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+	if (status < 0) {
+		ecm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+/* Restart the notification state machine from CONNECT and kick it. */
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+	/* NOTE on most versions of Linux, host side cdc-ethernet
+	 * won't listen for notifications until its netdevice opens.
+	 * The first notification then sits in the FIFO for a long
+	 * time, and the second one is queued.
+	 */
+	ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+	ecm_qc_do_notify(ecm);
+}
+
+/* Accessors exposing ECM_IPA callback/state to the u_bam_data layer. */
+
+/* RX data-path notification callback registered by ECM_IPA. */
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+	return ipa_params.ecm_ipa_rx_dp_notify;
+}
+
+/* TX data-path notification callback registered by ECM_IPA. */
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+	return ipa_params.ecm_ipa_tx_dp_notify;
+}
+
+/* Opaque ECM_IPA context handed back on IPA connect/disconnect. */
+void *ecm_qc_get_ipa_priv(void)
+{
+	return ipa_params.private;
+}
+
+/* Whether ECM_IPA asked us to skip USB endpoint configuration. */
+bool ecm_qc_get_skip_ep_config(void)
+{
+	return ipa_params.skip_ep_cfg;
+}
+/*-------------------------------------------------------------------------*/
+
+
+
+/* Interrupt-endpoint completion: return the request to the "free" slot
+ * and send the next notification in the sequence, if any.
+ */
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ecm_qc *ecm = req->context;
+	struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+	struct usb_cdc_notification *event = req->buf;
+
+	switch (req->status) {
+	case 0:
+		/* no fault */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* endpoint was disabled/unlinked; stop the sequence */
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ecm->notify_req = req;	/* no longer in flight */
+	ecm_qc_do_notify(ecm);
+}
+
+/* Handle CDC class control requests on ep0. Only
+ * SET_ETHERNET_PACKET_FILTER is supported; everything else stalls.
+ * Returns the data/status-phase length (>= 0) or a negative errno.
+ */
+static int ecm_qc_setup(struct usb_function *f,
+	const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	pr_debug("Enter\n");
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != ecm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/* REVISIT locking of cdc_filter. This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ecm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+
+	/* and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("ecm req %02x.%02x response err %d\n",
+				ctrl->bRequestType, ctrl->bRequest,
+				value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+/* SET_INTERFACE handler. The control interface only has altsetting 0
+ * (re-enables the notify endpoint). The data interface toggles between
+ * altsetting 0 (tear down the BAM/IPA data path) and altsetting 1
+ * (bring it up and start the CONNECT/SPEED notification sequence).
+ * Returns 0 on success, -EINVAL to stall.
+ */
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ecm->ctrl_id) {
+		if (alt != 0) {
+			pr_warning("fail, alt setting is not 0\n");
+			goto fail;
+		}
+
+		if (ecm->notify->driver_data) {
+			VDBG(cdev, "reset ecm control %d\n", intf);
+			usb_ep_disable(ecm->notify);
+		}
+		if (!(ecm->notify->desc)) {
+			VDBG(cdev, "init ecm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+				goto fail;
+		}
+		usb_ep_enable(ecm->notify);
+		ecm->notify->driver_data = ecm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ecm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		/* nothing to do if the altsetting did not change */
+		if (ecm->data_interface_up == alt)
+			return 0;
+
+		if (!ecm->port.in_ep->desc ||
+			!ecm->port.out_ep->desc) {
+			DBG(cdev, "init ecm\n");
+			__ecm->ecm_mdm_ready_trigger = false;
+			if (config_ep_by_speed(cdev->gadget, f,
+					ecm->port.in_ep) ||
+				config_ep_by_speed(cdev->gadget, f,
+					ecm->port.out_ep)) {
+				ecm->port.in_ep->desc = NULL;
+				ecm->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		if (alt == 0 && ecm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ecm\n");
+			__ecm->ecm_mdm_ready_trigger = false;
+			/* ecm->port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+				ecm->port_num);
+			if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+				gether_qc_disconnect_name(&ecm->port, "ecm0");
+			} else if (ecm->data_interface_up &&
+					gadget_is_dwc3(cdev->gadget)) {
+				if (msm_ep_unconfig(ecm->port.in_ep) ||
+					msm_ep_unconfig(ecm->port.out_ep)) {
+					pr_err("%s: ep_unconfig failed\n",
+						__func__);
+					goto fail;
+				}
+			}
+		}
+		/* CDC Ethernet only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device *net;
+
+			/* Enable zlps by default for ECM conformance;
+			 * override for musb_hdrc (avoids txdma ovhead).
+			 */
+			ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
+				);
+			ecm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ecm\n");
+			if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+				net = gether_qc_connect_name(&ecm->port,
+					"ecm0", true);
+				if (IS_ERR(net))
+					return PTR_ERR(net);
+			}
+
+			ecm->bam_port.cdev = cdev;
+			ecm->bam_port.func = &ecm->port.func;
+			ecm->bam_port.in = ecm->port.in_ep;
+			ecm->bam_port.out = ecm->port.out_ep;
+			if (bam_data_connect(&ecm->bam_port, ecm->xport,
+					ecm->port_num, USB_FUNC_ECM))
+				goto fail;
+		}
+
+		ecm->data_interface_up = alt;
+		/* NOTE this can be a minor disagreement with the ECM spec,
+		 * which says speed notifications will "always" follow
+		 * connection notifications. But we allow one connect to
+		 * follow another (if the first is in flight), and instead
+		 * just guarantee that a speed notification is always sent.
+		 */
+		ecm_qc_notify(ecm);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+	/* control interface only ever has altsetting 0 */
+	if (intf == ecm->ctrl_id)
+		return 0;
+
+	/* data interface: altsetting 1 iff the bulk IN endpoint is active */
+	if (ecm->port.in_ep->driver_data)
+		return 1;
+
+	return 0;
+}
+
+/* Bus disconnect / SET_CONFIGURATION(0): tear down the data path and
+ * the notify endpoint, and clear endpoint descriptors.
+ */
+static void ecm_qc_disable(struct usb_function *f)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+
+	DBG(cdev, "ecm deactivated\n");
+
+	if (ecm->port.in_ep->driver_data) {
+		/* data path was up: disconnect BAM first, then ether link */
+		bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+				ecm->port_num);
+		if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+			gether_qc_disconnect_name(&ecm->port, "ecm0");
+	} else {
+		/* release EPs incase no set_alt(1) yet */
+		ecm->port.in_ep->desc = NULL;
+		ecm->port.out_ep->desc = NULL;
+	}
+
+	if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA &&
+			gadget_is_dwc3(cdev->gadget)) {
+		msm_ep_unconfig(ecm->port.out_ep);
+		msm_ep_unconfig(ecm->port.in_ep);
+	}
+
+	if (ecm->notify->driver_data) {
+		usb_ep_disable(ecm->notify);
+		ecm->notify->driver_data = NULL;
+		ecm->notify->desc = NULL;
+	}
+
+	ecm->data_interface_up = false;
+}
+
+/* Bus suspend: hand the data path to the BAM layer. Remote-wakeup
+ * capability comes from func_wakeup_allowed on SS (per-function) and
+ * from the gadget-wide flag otherwise.
+ */
+static void ecm_qc_suspend(struct usb_function *f)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	bool remote_wakeup_allowed;
+
+	/* Is DATA interface initialized? */
+	if (!ecm->data_interface_up) {
+		pr_err("%s(): data interface not up\n", __func__);
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed =
+			f->config->cdev->gadget->remote_wakeup;
+
+	pr_debug("%s(): remote_wakeup:%d\n:", __func__, remote_wakeup_allowed);
+	if (!remote_wakeup_allowed)
+		/* modem must re-announce readiness after resume */
+		__ecm->ecm_mdm_ready_trigger = false;
+
+	bam_data_suspend(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+			remote_wakeup_allowed);
+
+	pr_debug("ecm suspended\n");
+}
+
+/* Bus resume: re-arm the BAM data path; if remote wakeup was not
+ * allowed the link state may have changed while suspended, so re-issue
+ * the connect notification based on the modem-ready trigger.
+ */
+static void ecm_qc_resume(struct usb_function *f)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	bool remote_wakeup_allowed;
+
+	if (!ecm->data_interface_up) {
+		pr_err("%s(): data interface was not up\n", __func__);
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed =
+			f->config->cdev->gadget->remote_wakeup;
+
+	bam_data_resume(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+			remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		ecm->is_open = ecm->ecm_mdm_ready_trigger ? true : false;
+		ecm_qc_notify(ecm);
+	}
+
+	pr_debug("ecm resumed\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ * - disconnected/unconfigured
+ * - configured but inactive (data alt 0)
+ * - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+/* Net device opened: report link-up to the host. */
+static void ecm_qc_open(struct qc_gether *geth)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = true;
+	ecm_qc_notify(ecm);
+}
+
+/* Net device closed: report link-down to the host. */
+static void ecm_qc_close(struct qc_gether *geth)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = false;
+	ecm_qc_notify(ecm);
+}
+
+/* Callback to let ECM_IPA trigger us when network interface is up.
+ * One-shot per session: notifies the host of link-up and (re)starts
+ * BAM rx/tx on the active port.
+ */
+void ecm_mdm_ready(void)
+{
+	struct f_ecm_qc *ecm = __ecm;
+	int port_num;
+
+	if (!ecm) {
+		pr_err("can't set ecm_ready_trigger, no ecm instance\n");
+		return;
+	}
+
+	if (ecm->ecm_mdm_ready_trigger) {
+		pr_err("already triggered - can't set ecm_ready_trigger\n");
+		return;
+	}
+
+	pr_debug("set ecm_ready_trigger\n");
+	ecm->ecm_mdm_ready_trigger = true;
+	ecm->is_open = true;
+	ecm_qc_notify(ecm);
+	port_num = (u_bam_data_func_to_port(USB_FUNC_ECM,
+		ECM_QC_ACTIVE_PORT));
+	if (port_num < 0)
+		return;
+	bam_data_start_rx_tx(port_num);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+
+ ecm->ctrl_id = status;
+
+ ecm_qc_control_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0) {
+ pr_debug("no more interface IDs can be allocated\n");
+ goto fail;
+ }
+
+ ecm->data_id = status;
+
+ ecm_qc_data_nop_intf.bInterfaceNumber = status;
+ ecm_qc_data_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bSlaveInterface0 = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_in)\n");
+ goto fail;
+ }
+
+ ecm->port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_out)\n");
+ goto fail;
+ }
+
+ ecm->port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* NOTE: a status/notification endpoint is *OPTIONAL* but we
+ * don't treat it that way. It's simpler, and some newer CDC
+ * profiles (wireless handsets) no longer treat it as optional.
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_notify)\n");
+ goto fail;
+ }
+ ecm->notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!ecm->notify_req) {
+ pr_debug("can not allocate notification request\n");
+ goto fail;
+ }
+ ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!ecm->notify_req->buf)
+ goto fail;
+ ecm->notify_req->context = ecm;
+ ecm->notify_req->complete = ecm_qc_notify_complete;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ ecm_qc_hs_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_hs_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_hs_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ecm_qc_ss_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_ss_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_ss_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ f->ss_descriptors = usb_copy_descriptors(ecm_qc_ss_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+ */
+
+ ecm->port.open = ecm_qc_open;
+ ecm->port.close = ecm_qc_close;
+
+ DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ ecm->port.in_ep->name, ecm->port.out_ep->name,
+ ecm->notify->name);
+ return 0;
+
+fail:
+
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (ecm->notify_req) {
+ kfree(ecm->notify_req->buf);
+ usb_ep_free_request(ecm->notify, ecm->notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+ if (ecm->port.out_ep->desc)
+ ecm->port.out_ep->driver_data = NULL;
+ if (ecm->port.in_ep->desc)
+ ecm->port.in_ep->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+/* Unbind: free descriptor copies, the notification request, the IPA
+ * context (for the IPA transport), and the instance itself.
+ */
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+	DBG(c->cdev, "ecm unbind\n");
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(ecm->notify_req->buf);
+	usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+	ecm_qc_string_defs[1].s = NULL;
+
+	if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		/*
+		 * call flush_workqueue to make sure that any pending
+		 * disconnect_work() from u_bam_data.c file is being
+		 * flushed before calling this ecm_ipa_cleanup API
+		 * as the ecm ipa disconnect API is required to be
+		 * called before this.
+		 */
+		bam_data_flush_workqueue();
+		ecm_ipa_cleanup(ipa_params.private);
+	}
+
+	kfree(ecm);
+	__ecm = NULL;
+}
+
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				char *xport_name)
+{
+	struct f_ecm_qc *ecm;
+	int status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	pr_debug("data transport type is %s\n", xport_name);
+
+	/* maybe allocate device-global string IDs */
+	if (ecm_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[0].id = status;
+		ecm_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[2].id = status;
+		ecm_qc_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[1].id = status;
+		ecm_qc_desc.iMACAddress = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
+	if (!ecm)
+		return -ENOMEM;
+	__ecm = ecm;
+
+	ecm->xport = str_to_xport(xport_name);
+	pr_debug("set xport = %d\n", ecm->xport);
+
+	/* export host's Ethernet address in CDC format */
+	if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		/* for IPA the MAC pair comes from the gether layer */
+		gether_qc_get_macs(ipa_params.device_ethaddr,
+				ipa_params.host_ethaddr);
+		snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ipa_params.host_ethaddr[0], ipa_params.host_ethaddr[1],
+		ipa_params.host_ethaddr[2], ipa_params.host_ethaddr[3],
+		ipa_params.host_ethaddr[4], ipa_params.host_ethaddr[5]);
+		ipa_params.device_ready_notify = ecm_mdm_ready;
+	} else
+		snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+
+	ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+	ecm->port.cdc_filter = DEFAULT_FILTER;
+
+	ecm->port.func.name = "cdc_ethernet";
+	ecm->port.func.strings = ecm_qc_strings;
+	/* descriptors are per-instance copies */
+	ecm->port.func.bind = ecm_qc_bind;
+	ecm->port.func.unbind = ecm_qc_unbind;
+	ecm->port.func.set_alt = ecm_qc_set_alt;
+	ecm->port.func.get_alt = ecm_qc_get_alt;
+	ecm->port.func.setup = ecm_qc_setup;
+	ecm->port.func.disable = ecm_qc_disable;
+	ecm->port.func.suspend = ecm_qc_suspend;
+	ecm->port.func.resume = ecm_qc_resume;
+	ecm->ecm_mdm_ready_trigger = false;
+
+	status = usb_add_function(c, &ecm->port.func);
+	if (status) {
+		pr_err("failed to add function\n");
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+		__ecm = NULL;
+		return status;
+	}
+
+	if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+		return status;
+
+	pr_debug("setting ecm_ipa, host_ethaddr=%pM, device_ethaddr=%pM",
+			ipa_params.host_ethaddr, ipa_params.device_ethaddr);
+	status = ecm_ipa_init(&ipa_params);
+	if (status) {
+		pr_err("failed to initialize ecm_ipa\n");
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+		__ecm = NULL;
+
+	} else {
+		pr_debug("ecm_ipa successful created\n");
+	}
+
+	return status;
+}
+
+/* One-time setup of the BAM data port backing the single ECM instance. */
+static int ecm_qc_init(void)
+{
+	int ret;
+
+	pr_debug("initialize ecm qc port instance\n");
+
+	ret = bam_data_setup(USB_FUNC_ECM, ECM_QC_NO_PORTS);
+	if (ret)
+		pr_err("bam_data_setup failed err: %d\n", ret);
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 000000000000..6a616595f4d4
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1421 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+#include "rndis.h"
+#include "u_bam_data.h"
+#include <linux/rndis_ipa.h>
+
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+ "Max size of bus transfer to host");
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet. The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex. Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short: it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets. Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data. The control model is built around
+ * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored). RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface. That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely. Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ * - Power management ... references data that's scattered around lots
+ * of other documentation, which is incorrect/incomplete there too.
+ *
+ * - There are various undocumented protocol requirements, like the need
+ * to send garbage in some control-OUT messages.
+ *
+ * - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+/* Per-instance state of the QC RNDIS function. */
+struct f_rndis_qc {
+	struct qc_gether		port;	/* ethernet-over-USB link state */
+	u8				ctrl_id, data_id; /* interface numbers from usb_interface_id() */
+	u8				ethaddr[ETH_ALEN]; /* host-side MAC, given to rndis_set_host_mac() */
+	u32				vendorID;
+	u8				ul_max_pkt_per_xfer; /* aggregation limit passed to rndis_set_max_pkt_xfer() */
+	u8				pkt_alignment_factor;
+	u32				max_pkt_size;
+	const char			*manufacturer;
+	int				config;	/* handle returned by rndis_register() */
+	atomic_t			ioctl_excl; /* single-owner guards, see rndis_qc_lock() */
+	atomic_t			open_excl;
+
+	struct usb_ep			*notify; /* interrupt IN status endpoint */
+	struct usb_request		*notify_req;
+	atomic_t			notify_count; /* pending RESPONSE_AVAILABLE notifications */
+	struct data_port		bam_port; /* BAM/IPA data path descriptor */
+	enum transport_type		xport;
+	u8				port_num;
+	bool				net_ready_trigger; /* set once IPA reports the net iface is up */
+};
+
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct qc_gether *geth);
+
+/* Map a usb_function back to its enclosing f_rndis_qc instance. */
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, port.func);
+}
+
+/*
+ * Peak (theoretical) bulk transfer rate in bits-per-second for the
+ * connection speed currently negotiated by the gadget.
+ */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+
+	return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT 8 /* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS 1
+#define RNDIS_QC_ACTIVE_PORT 0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define RNDIS_QC_IOCTL_MAGIC 'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE _IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+ .bLength = sizeof rndis_qc_control_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
+ .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+ .bLength = sizeof rndis_qc_header_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+ .bLength = sizeof rndis_qc_call_mgmt_descriptor,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+
+ .bmCapabilities = 0x00,
+ .bDataInterface = 0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+ .bLength = sizeof rndis_qc_acm_descriptor,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+
+ .bmCapabilities = 0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+ .bLength = sizeof(rndis_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+ .bLength = sizeof rndis_qc_data_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+ .bLength = sizeof rndis_qc_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+/*
+ * SuperSpeed interrupt IN (status) endpoint.  Uses this file's
+ * RNDIS_QC_STATUS_BYTECOUNT, matching the FS/HS notify descriptors;
+ * the bare STATUS_BYTECOUNT identifier is not defined here.
+ */
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+/*
+ * SS endpoint companion for the interrupt endpoint.  bLength must be
+ * sizeof of this descriptor itself (ss_intr_comp_desc does not exist in
+ * this file), and the byte count is RNDIS_QC_STATUS_BYTECOUNT.
+ */
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof rndis_qc_ss_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+/*
+ * SS endpoint companion shared by both bulk endpoints.  bLength must
+ * reference this descriptor's own name; ss_bulk_comp_desc is undefined
+ * in this file.
+ */
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof rndis_qc_ss_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+ &rndis_qc_string_table,
+ NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
+/*
+ * Try to take a single-owner lock built on an atomic counter: the first
+ * caller gets 0, any concurrent caller gets -EBUSY.  Pair with
+ * rndis_qc_unlock().
+ */
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		/* somebody else already holds it; undo our increment */
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/* Release a lock taken with rndis_qc_lock(). */
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Prepend an RNDIS packet header to an outgoing skb.  Reallocates
+ * headroom for the header, always frees the original skb, and returns
+ * the new skb (NULL if the reallocation failed).
+ */
+static struct sk_buff *rndis_qc_add_header(struct qc_gether *port,
+					struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	if (skb2)
+		rndis_add_hdr(skb2);
+
+	dev_kfree_skb_any(skb);
+	return skb2;
+}
+
+/*
+ * Strip the RNDIS packet header from an incoming skb and queue the bare
+ * Ethernet frame on @list.  Frees the skb and returns a negative errno
+ * if the header is not a RNDIS_MSG_PACKET or the claimed offsets exceed
+ * the buffer.
+ */
+int rndis_qc_rm_hdr(struct qc_gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	/* tmp points to a struct rndis_packet_msg_type */
+	__le32 *tmp = (void *)skb->data;
+
+	/* MessageType, MessageLength */
+	if (cpu_to_le32(RNDIS_MSG_PACKET)
+			!= get_unaligned(tmp++)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+	tmp++;	/* skip MessageLength */
+
+	/* DataOffset, DataLength: pull header (+8 covers the two fields
+	 * preceding the payload offset base), then trim to payload length
+	 */
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+		dev_kfree_skb_any(skb);
+		return -EOVERFLOW;
+	}
+	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	skb_queue_tail(list, skb);
+	return 0;
+}
+
+
+/*
+ * Callback from the RNDIS core when a response is ready for the host.
+ * Posts a single RESPONSE_AVAILABLE interrupt; further responses while
+ * one is in flight only bump notify_count and are drained by
+ * rndis_qc_response_complete().  May run in atomic context.
+ */
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc *rndis = _rndis;
+	struct usb_request *req = rndis->notify_req;
+	__le32 *data = req->buf;
+	int status;
+
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* NOTE(review): returning here leaves notify_count elevated with no
+	 * request queued; presumably reset via atomic_set() in the complete
+	 * handler on disconnect — confirm.
+	 */
+	if (!rndis->notify->driver_data)
+		return;
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
+
+/*
+ * Completion handler shared by the notify endpoint and ep0 response
+ * transfers.  On success it re-queues the interrupt request while
+ * notify_count shows more pending RESPONSE_AVAILABLE notifications;
+ * on disconnect errors it resets the pending count.
+ */
+static void rndis_qc_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc *rndis = req->context;
+	int status = req->status;
+	struct usb_composite_dev *cdev;
+
+	if (!rndis->port.func.config || !rndis->port.func.config->cdev) {
+		pr_err("%s(): cdev or config is NULL.\n", __func__);
+		return;
+	} else {
+		cdev = rndis->port.func.config->cdev;
+	}
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone; drop all pending notifications */
+		atomic_set(&rndis->notify_count, 0);
+		break;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		/* ep0 completions end here; only the notify ep re-queues */
+		if (ep != rndis->notify)
+			break;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			break;
+		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&rndis->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+		}
+		break;
+	}
+}
+
+/*
+ * ep0 OUT completion for USB_CDC_SEND_ENCAPSULATED_COMMAND: parse the
+ * RNDIS message and, on RNDIS_MSG_INIT, propagate the negotiated UL/DL
+ * max transfer sizes to the BAM data layer.
+ */
+static void rndis_qc_command_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc *rndis = req->context;
+	int status;
+	rndis_init_msg_type *buf;
+	u32 ul_max_xfer_size, dl_max_xfer_size;
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->config);
+		u_bam_data_set_ul_max_xfer_size(ul_max_xfer_size);
+		/*
+		 * For consistent data throughput from IPA, it is required to
+		 * fine tune aggregation byte limit as 7KB. RNDIS IPA driver
+		 * use provided this value to calculate aggregation byte limit
+		 * and program IPA hardware for aggregation.
+		 * Host provides 8KB or 16KB as Max Transfer size, hence select
+		 * minimum out of host provided value and optimum transfer size
+		 * to get 7KB as aggregation byte limit.
+		 */
+		if (rndis_dl_max_xfer_size)
+			dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+				rndis_get_dl_max_xfer_size(rndis->config));
+		else
+			dl_max_xfer_size =
+				rndis_get_dl_max_xfer_size(rndis->config);
+		u_bam_data_set_dl_max_xfer_size(dl_max_xfer_size);
+	}
+}
+
+/*
+ * ep0 class-request handler.  RNDIS tunnels its RPC over the two CDC
+ * encapsulated-command requests: OUT commands are read and parsed later
+ * in rndis_qc_command_complete(); IN requests return the next queued
+ * response.  Returns the data-stage length, or a negative value to
+ * stall.
+ */
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	pr_debug("%s: Enter\n", __func__);
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->config, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->config, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->context = rndis;
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+/*
+ * Activate an interface (host SET_INTERFACE).  For the control
+ * interface: (re)enable the notify endpoint.  For the data interface:
+ * tear down any previous BAM/network connection, configure the bulk
+ * endpoints for the current speed, then bring up the BAM data path and
+ * the network link.  Returns 0 or -EINVAL.
+ */
+static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt == 0 */
+
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device *net;
+
+		rndis->net_ready_trigger = false;
+		if (rndis->port.in_ep->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			/* rndis->port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			bam_data_disconnect(&rndis->bam_port, USB_FUNC_RNDIS,
+				rndis->port_num);
+
+			if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+				gether_qc_disconnect_name(&rndis->port,
+					"rndis0");
+		}
+
+		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					rndis->port.in_ep) ||
+				config_ep_by_speed(cdev->gadget, f,
+					rndis->port.out_ep)) {
+				/* clear both so a retry reconfigures both */
+				rndis->port.in_ep->desc = NULL;
+				rndis->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* Avoid ZLPs; they can be troublesome. */
+		rndis->port.is_zlp_ok = false;
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated. It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time. We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->port.cdc_filter = 0;
+
+		rndis->bam_port.cdev = cdev;
+		rndis->bam_port.func = &rndis->port.func;
+		rndis->bam_port.in = rndis->port.in_ep;
+		rndis->bam_port.out = rndis->port.out_ep;
+
+		if (bam_data_connect(&rndis->bam_port, rndis->xport,
+				rndis->port_num, USB_FUNC_RNDIS))
+			goto fail;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+			net = gether_qc_connect_name(&rndis->port, "rndis0",
+				false);
+		} else {
+			/* IPA path: signal connect and look up the net dev */
+			rndis_qc_open(&rndis->port);
+			net = gether_qc_get_net("rndis0");
+		}
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+
+		rndis_set_param_dev(rndis->config, net,
+				&rndis->port.cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Deactivate the function (host selected another config, or cable
+ * pulled).  Resets the RNDIS state machine, drops the BAM data path
+ * and network link, and disables the notify endpoint.  A no-op if the
+ * function was never activated (driver_data unset).
+ */
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	pr_info("rndis deactivated\n");
+
+	rndis_uninit(rndis->config);
+	bam_data_disconnect(&rndis->bam_port, USB_FUNC_RNDIS, rndis->port_num);
+	if (rndis->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+		gether_qc_disconnect_name(&rndis->port, "rndis0");
+
+	/* DWC3 + IPA keeps MSM endpoint config that must be undone here */
+	if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA &&
+			gadget_is_dwc3(cdev->gadget)) {
+		msm_ep_unconfig(rndis->port.out_ep);
+		msm_ep_unconfig(rndis->port.in_ep);
+	}
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*
+ * Bus-suspend handler.  If remote wakeup is not permitted for this
+ * session, flow-control the RNDIS link first (Linux hosts never send a
+ * zero PACKET_FILTER before suspending), then suspend the BAM data
+ * path.  Fix: the pr_info format string used to end in "\n:", which put
+ * a stray ':' at the start of the next log line.
+ */
+static void rndis_qc_suspend(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	/* SS uses per-function wakeup; HS/FS use the device-level flag */
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+					__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		/* This is required as Linux host side RNDIS driver doesn't
+		 * send RNDIS_MESSAGE_PACKET_FILTER before suspending USB bus.
+		 * Hence we perform same operations explicitly here for Linux
+		 * host case. In case of windows, this RNDIS state machine is
+		 * already updated due to receiving of PACKET_FILTER.
+		 */
+		rndis_flow_control(rndis->config, true);
+		pr_debug("%s(): Disconnecting\n", __func__);
+	}
+
+	bam_data_suspend(&rndis->bam_port, rndis->port_num, USB_FUNC_RNDIS,
+			remote_wakeup_allowed);
+	pr_debug("rndis suspended\n");
+}
+
+/*
+ * Bus-resume handler: resume the BAM data path and, when remote wakeup
+ * was not allowed during suspend, undo the flow control applied by
+ * rndis_qc_suspend().  Skipped entirely if set_alt() never initialized
+ * the data interface.
+ */
+static void rndis_qc_resume(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	pr_debug("%s: rndis resumed\n", __func__);
+
+	/* Nothing to do if DATA interface wasn't initialized */
+	if (!rndis->bam_port.cdev) {
+		pr_debug("data interface was not up\n");
+		return;
+	}
+
+	/* SS uses per-function wakeup; HS/FS use the device-level flag */
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	bam_data_resume(&rndis->bam_port, rndis->port_num, USB_FUNC_RNDIS,
+		remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
+			rndis_qc_open(&rndis->port);
+		/*
+		 * A Linux host doesn't send RNDIS_MSG_INIT or a non-zero
+		 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume,
+		 * so trigger the USB IPA transfer functionality explicitly
+		 * here.  The Windows host case is handled by the RNDIS
+		 * state machine instead.
+		 */
+		rndis_flow_control(rndis->config, false);
+	}
+
+	pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested. A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+/*
+ * Link-up callback: advertise the 802.3 medium at the current bus
+ * bitrate (in 100 bps units, as the RNDIS core expects) and signal
+ * "connected" to the host.
+ */
+static void rndis_qc_open(struct qc_gether *geth)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
+	struct usb_composite_dev *cdev = geth->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->config);
+}
+
+/* Link-down callback: zero the medium speed and signal "disconnected". */
+static void rndis_qc_close(struct qc_gether *geth)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
+
+	DBG(geth->func.config->cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+	rndis_signal_disconnect(rndis->config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+/*
+ * Bind the RNDIS function into configuration @c: allocate interface
+ * IDs, autoconfigure the two bulk endpoints and the notify endpoint,
+ * allocate the notification request, copy FS/HS/SS descriptors, and
+ * register with the RNDIS core.  On any failure everything allocated
+ * so far is released and a negative errno is returned.
+ */
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	int status;
+	struct usb_ep *ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	rndis->port.open = rndis_qc_open;
+	rndis->port.close = rndis_qc_close;
+
+	status = rndis_register(rndis_qc_response_available, rndis,
+			bam_data_flow_control_enable);
+	if (status < 0)
+		goto fail;
+	rndis->config = status;
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->config, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+		rndis_set_param_vendor(rndis->config, rndis->vendorID,
+			rndis->manufacturer))
+		goto fail;
+
+	pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+				rndis->ul_max_pkt_per_xfer);
+	rndis_set_max_pkt_xfer(rndis->config, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets QC device will request
+	 * alignment to 4 (2^2).
+	 */
+	pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+				rndis->pkt_alignment_factor);
+	rndis_set_pkt_alignment_factor(rndis->config,
+				rndis->pkt_alignment_factor);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->port.in_ep->name, rndis->port.out_ep->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->port.out_ep->desc)
+		rndis->port.out_ep->driver_data = NULL;
+	if (rndis->port.in_ep->desc)
+		rndis->port.in_ep->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ unsigned long flags;
+
+ pr_debug("rndis_qc_unbind: free\n");
+ rndis_deregister(rndis->config);
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->fs_descriptors);
+
+ kfree(rndis->notify_req->buf);
+ usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+ if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ /*
+ * call flush_workqueue to make sure that any pending
+ * disconnect_work() from u_bam_data.c file is being
+ * flushed before calling this rndis_ipa_cleanup API
+ * as rndis ipa disconnect API is required to be
+ * called before this.
+ */
+ bam_data_flush_workqueue();
+ rndis_ipa_cleanup(rndis_ipa_params.private);
+ rndis_ipa_supported = false;
+ }
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ kfree(rndis);
+ _rndis_qc = NULL;
+ spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+void rndis_ipa_reset_trigger(void)
+{
+ struct f_rndis_qc *rndis;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ rndis = _rndis_qc;
+ if (!rndis) {
+ pr_err("%s: No RNDIS instance", __func__);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return;
+ }
+
+ rndis->net_ready_trigger = false;
+ spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests
+ */
+void rndis_net_ready_notify(void)
+{
+ struct f_rndis_qc *rndis;
+ unsigned long flags;
+ int port_num;
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ rndis = _rndis_qc;
+ if (!rndis) {
+ pr_err("%s: No RNDIS instance", __func__);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return;
+ }
+ if (rndis->net_ready_trigger) {
+ pr_err("%s: Already triggered", __func__);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return;
+ }
+
+ pr_debug("%s: Set net_ready_trigger", __func__);
+ rndis->net_ready_trigger = true;
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ port_num = (u_bam_data_func_to_port(USB_FUNC_RNDIS,
+ RNDIS_QC_ACTIVE_PORT));
+ if (port_num < 0)
+ return;
+ bam_data_start_rx_tx(port_num);
+}
+
+
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis_qc(struct usb_configuration *c)
+{
+ /* everything else is *presumably* fine */
+ return true;
+}
+
+/**
+ * rndis_qc_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ * side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+rndis_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+ return rndis_qc_bind_config_vendor(c, ethaddr, 0, NULL, 1, 0, NULL);
+}
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer,
+ u8 max_pkt_per_xfer,
+ u8 pkt_alignment_factor,
+ char *xport_name)
+{
+ struct f_rndis_qc *rndis;
+ int status;
+
+ if (!can_support_rndis_qc(c) || !ethaddr) {
+ pr_debug("%s: invalid argument\n", __func__);
+ return -EINVAL;
+ }
+
+ /* maybe allocate device-global string IDs */
+ if (rndis_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[0].id = status;
+ rndis_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[1].id = status;
+ rndis_qc_data_intf.iInterface = status;
+
+ /* IAD iFunction label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[2].id = status;
+ rndis_qc_iad_descriptor.iFunction = status;
+ }
+
+ /* allocate and initialize one new instance */
+ status = -ENOMEM;
+ rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
+ if (!rndis) {
+ pr_err("%s: fail allocate and initialize new instance\n",
+ __func__);
+ goto fail;
+ }
+
+ rndis->xport = str_to_xport(xport_name);
+
+ /* export host's Ethernet address in CDC format */
+ if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_get_macs(rndis_ipa_params.device_ethaddr,
+ rndis_ipa_params.host_ethaddr);
+ pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+ rndis_ipa_params.host_ethaddr,
+ rndis_ipa_params.device_ethaddr);
+ rndis_ipa_supported = true;
+ memcpy(rndis->ethaddr, &rndis_ipa_params.host_ethaddr,
+ ETH_ALEN);
+ rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+ } else
+ memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+
+ rndis->vendorID = vendorID;
+ rndis->manufacturer = manufacturer;
+
+ /* if max_pkt_per_xfer was not configured set to default value */
+ rndis->ul_max_pkt_per_xfer =
+ max_pkt_per_xfer ? max_pkt_per_xfer :
+ DEFAULT_MAX_PKT_PER_XFER;
+ u_bam_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+ /*
+ * Check no RNDIS aggregation, and alignment if not mentioned,
+ * use alignment factor as zero. If aggregated RNDIS data transfer,
+ * max packet per transfer would be default if it is not set
+ * explicitly, and same way use alignment factor as 2 by default.
+ * This would eliminate need of writing to sysfs if default RNDIS
+ * aggregation setting required. Writing to both sysfs entries,
+ * those values will always override default values.
+ */
+ if ((rndis->pkt_alignment_factor == 0) &&
+ (rndis->ul_max_pkt_per_xfer == 1))
+ rndis->pkt_alignment_factor = 0;
+ else
+ rndis->pkt_alignment_factor = pkt_alignment_factor ?
+ pkt_alignment_factor :
+ DEFAULT_PKT_ALIGNMENT_FACTOR;
+
+ /* RNDIS activates when the host changes this filter */
+ rndis->port.cdc_filter = 0;
+
+ /* RNDIS has special (and complex) framing */
+ rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+ rndis->port.wrap = rndis_qc_add_header;
+ rndis->port.unwrap = rndis_qc_rm_hdr;
+
+ rndis->port.func.name = "rndis";
+ rndis->port.func.strings = rndis_qc_strings;
+ /* descriptors are per-instance copies */
+ rndis->port.func.bind = rndis_qc_bind;
+ rndis->port.func.unbind = rndis_qc_unbind;
+ rndis->port.func.set_alt = rndis_qc_set_alt;
+ rndis->port.func.setup = rndis_qc_setup;
+ rndis->port.func.disable = rndis_qc_disable;
+ rndis->port.func.suspend = rndis_qc_suspend;
+ rndis->port.func.resume = rndis_qc_resume;
+
+ _rndis_qc = rndis;
+
+ if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ status = rndis_ipa_init(&rndis_ipa_params);
+ if (status) {
+ pr_err("%s: failed to init rndis_ipa\n", __func__);
+ goto fail;
+ }
+ }
+
+ status = usb_add_function(c, &rndis->port.func);
+ if (status) {
+ if (rndis->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
+ rndis_ipa_cleanup(rndis_ipa_params.private);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ kfree(rndis);
+ _rndis_qc = NULL;
+ return status;
+}
+
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+ pr_info("Open rndis QC driver\n");
+
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not created yet\n");
+ return -ENODEV;
+ }
+
+ if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+ pr_err("Already opened\n");
+ return -EBUSY;
+ }
+
+ fp->private_data = _rndis_qc;
+ pr_info("rndis QC file opened\n");
+
+ return 0;
+}
+
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+ struct f_rndis_qc *rndis = fp->private_data;
+
+ pr_info("Close rndis QC file\n");
+ rndis_qc_unlock(&rndis->open_excl);
+
+ return 0;
+}
+
+static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+ struct f_rndis_qc *rndis = fp->private_data;
+ int ret = 0;
+
+ pr_info("Received command %d\n", cmd);
+
+ if (rndis_qc_lock(&rndis->ioctl_excl))
+ return -EBUSY;
+
+ switch (cmd) {
+ case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+ ret = copy_to_user((void __user *)arg,
+ &rndis->ul_max_pkt_per_xfer,
+ sizeof(rndis->ul_max_pkt_per_xfer));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_info("Sent UL max packets per xfer %d\n",
+ rndis->ul_max_pkt_per_xfer);
+ break;
+ case RNDIS_QC_GET_MAX_PKT_SIZE:
+ ret = copy_to_user((void __user *)arg,
+ &rndis->max_pkt_size,
+ sizeof(rndis->max_pkt_size));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_debug("Sent max packet size %d\n",
+ rndis->max_pkt_size);
+ break;
+ default:
+ pr_err("Unsupported IOCTL\n");
+ ret = -EINVAL;
+ }
+
+ rndis_qc_unlock(&rndis->ioctl_excl);
+
+ return ret;
+}
+
/* file operations for the /dev/android_rndis_qc misc device */
static const struct file_operations rndis_qc_fops = {
	.owner = THIS_MODULE,
	.open = rndis_qc_open_dev,
	.release = rndis_qc_release_dev,
	.unlocked_ioctl	= rndis_qc_ioctl,
};
+
/* userspace control node; minor number assigned dynamically */
static struct miscdevice rndis_qc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "android_rndis_qc",
	.fops = &rndis_qc_fops,
};
+
+static int rndis_qc_init(void)
+{
+ int ret;
+
+ pr_info("initialize rndis QC instance\n");
+
+ ret = misc_register(&rndis_qc_device);
+ if (ret)
+ pr_err("rndis QC driver failed to register\n");
+ spin_lock_init(&rndis_lock);
+
+ ret = bam_data_setup(USB_FUNC_RNDIS, RNDIS_QC_NO_PORTS);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
/* Module-level teardown: remove the userspace control node. */
static void rndis_qc_cleanup(void)
{
	pr_info("rndis QC cleanup\n");

	misc_deregister(&rndis_qc_device);
}
+
/* Accessor: IPA RX notify callback registered via rndis_ipa_params. */
void *rndis_qc_get_ipa_rx_cb(void)
{
	return rndis_ipa_params.ipa_rx_notify;
}
+
/* Accessor: IPA TX notify callback registered via rndis_ipa_params. */
void *rndis_qc_get_ipa_tx_cb(void)
{
	return rndis_ipa_params.ipa_tx_notify;
}
+
/* Accessor: opaque private handle returned by rndis_ipa_init(). */
void *rndis_qc_get_ipa_priv(void)
{
	return rndis_ipa_params.private;
}
+
/* Accessor: whether the IPA layer asked to skip endpoint config. */
bool rndis_qc_get_skip_ep_config(void)
{
	return rndis_ipa_params.skip_ep_cfg;
}
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
new file mode 100644
index 000000000000..efce7c382f2b
--- /dev/null
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -0,0 +1,1469 @@
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/usb_bam.h>
+
+#include "usb_gadget_xport.h"
+#include "u_ether.h"
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
/* DL aggregation limit; runtime-tunable via module parameter */
static unsigned int rmnet_dl_max_pkt_per_xfer = 7;
module_param(rmnet_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rmnet_dl_max_pkt_per_xfer,
	"Maximum packets per transfer for DL aggregation");

/* interrupt endpoint polling exponent / max notification payload */
#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)


/* DTR bit in the CDC line-state bitmap (set_control_line_state) */
#define ACM_CTRL_DTR	(1 << 0)
+
/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct gether		gether_port;	/* ETHER data transport */
	struct grmnet		port;		/* rmnet control/data port */
	int			ifc_id;		/* USB interface number */
	u8			port_num;	/* index into rmnet_ports[] */
	atomic_t		online;		/* data path configured (set_alt) */
	atomic_t		ctrl_online;	/* control path connected */
	struct usb_composite_dev *cdev;

	spinlock_t		lock;		/* guards cpkt_resp_q/notify_count */

	/* usb eps*/
	struct usb_ep		*notify;	/* interrupt IN endpoint */
	struct usb_request	*notify_req;	/* single reusable notify request */

	/* control info */
	struct list_head	cpkt_resp_q;	/* queued control responses */
	unsigned long		notify_count;	/* outstanding notifications */
	unsigned long		cpkts_len;
	/* endpoint descriptors saved across a no-remote-wakeup suspend,
	 * restored on resume (see frmnet_suspend/frmnet_resume)
	 */
	const struct usb_endpoint_descriptor	*in_ep_desc_backup;
	const struct usb_endpoint_descriptor	*out_ep_desc_backup;
};
+
/* per-transport port counts, filled in while instances are created */
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_qti_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static unsigned int no_data_hsuart_ports;
/* one entry per rmnet instance: which transports carry its data and
 * control traffic, and the per-transport port indices
 */
static struct rmnet_ports {
	enum transport_type	data_xport;
	enum transport_type	ctrl_xport;
	unsigned		data_xport_num;
	unsigned		ctrl_xport_num;
	unsigned		port_num;
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
+
/* vendor-specific interface: one interrupt (notify) + bulk IN/OUT */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize	=	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* full-speed descriptor set, in bus order */
static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
+
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interrupt interval is an exponent, unlike FS */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* high-speed descriptor set, in bus order */
static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};
+
/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

/* SS endpoints require a companion descriptor after each endpoint */
static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
	.bLength =		sizeof rmnet_ss_notify_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
	.wBytesPerInterval =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
};

static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
	.bLength =		sizeof rmnet_ss_in_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
	.bLength =		sizeof rmnet_ss_out_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

/* super-speed descriptor set, in bus order */
static struct usb_descriptor_header *rmnet_ss_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_ss_notify_desc,
	(struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
	(struct usb_descriptor_header *) &rmnet_ss_in_desc,
	(struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
	(struct usb_descriptor_header *) &rmnet_ss_out_desc,
	(struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
	NULL,
};
+
/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",	/* interface label; id filled in at bind */
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev);
+
+/* ------- misc functions --------------------*/
+
/* map a usb_function back to its enclosing f_rmnet instance */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, gether_port.func);
}
+
/* map a grmnet port back to its enclosing f_rmnet instance */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
+
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, size_t extra_buf_alloc,
+ gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->buf = kmalloc(len + extra_buf_alloc, flags);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->length = len;
+
+ return req;
+}
+
/* Free a request allocated by frmnet_alloc_req (buffer first). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
/* Free a control packet allocated by rmnet_alloc_ctrl_pkt. */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
+
+/* -------------------------------------------*/
+
/*
 * One-time setup of every transport backend that any rmnet instance
 * uses: BAM, BAM2BAM, SMD control, HSIC data/control.  For SMD the
 * returned base is added to each port's number; for HSIC the returned
 * starting index is distributed across the HSIC-backed ports in order.
 *
 * Returns 0 on success or the first backend's negative error.  Note
 * that earlier backends are not torn down when a later one fails --
 * callers treat setup failure as fatal.
 */
static int rmnet_gport_setup(void)
{
	int	ret;
	int	port_idx;
	int	i;
	u8 base;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
		" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
	" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);

	if (no_data_bam_ports) {
		ret = gbam_setup(no_data_bam_ports);
		if (ret < 0)
			return ret;
	}

	if (no_data_bam2bam_ports) {
		ret = gbam2bam_setup(no_data_bam2bam_ports);
		if (ret < 0)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT,
				no_ctrl_smd_ports, &base);
		if (ret)
			return ret;
		/* shift every instance's port number by the SMD base */
		for (i = 0; i < nr_rmnet_ports; i++)
			if (rmnet_ports[i].port)
				rmnet_ports[i].port->port_num += base;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* hand out consecutive HSIC data indices in port order */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* hand out consecutive HSIC ctrl indices in port order */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}
+
+static int gport_rmnet_connect(struct f_rmnet *dev, unsigned intf)
+{
+ int ret;
+ unsigned port_num;
+ enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
+ enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
+ int src_connection_idx = 0, dst_connection_idx = 0;
+ struct usb_gadget *gadget = dev->cdev->gadget;
+ enum usb_ctrl usb_bam_type;
+ void *net;
+
+ pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
+ __func__, xport_to_str(cxport), xport_to_str(dxport),
+ dev, dev->port_num);
+
+ port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
+ switch (cxport) {
+ case USB_GADGET_XPORT_SMD:
+ ret = gsmd_ctrl_connect(&dev->port, port_num);
+ if (ret) {
+ pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_QTI:
+ ret = gqti_ctrl_connect(&dev->port, port_num, dev->ifc_id,
+ dxport, USB_GADGET_RMNET);
+ if (ret) {
+ pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_HSIC:
+ ret = ghsic_ctrl_connect(&dev->port, port_num);
+ if (ret) {
+ pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_NONE:
+ break;
+ default:
+ pr_err("%s: Un-supported transport: %s\n", __func__,
+ xport_to_str(cxport));
+ return -ENODEV;
+ }
+
+ port_num = rmnet_ports[dev->port_num].data_xport_num;
+
+ switch (dxport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ ret = gbam_connect(&dev->port, port_num,
+ dxport, src_connection_idx, dst_connection_idx);
+ if (ret) {
+ pr_err("%s: gbam_connect failed: err:%d\n",
+ __func__, ret);
+ gsmd_ctrl_disconnect(&dev->port, port_num);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ port_num);
+ if (dst_connection_idx < 0 || src_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n",
+ __func__);
+ gsmd_ctrl_disconnect(&dev->port, port_num);
+ return -EINVAL;
+ }
+ ret = gbam_connect(&dev->port, port_num,
+ dxport, src_connection_idx, dst_connection_idx);
+ if (ret) {
+ pr_err("%s: gbam_connect failed: err:%d\n",
+ __func__, ret);
+ if (cxport == USB_GADGET_XPORT_QTI)
+ gqti_ctrl_disconnect(&dev->port, port_num);
+ else
+ gsmd_ctrl_disconnect(&dev->port, port_num);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_HSIC:
+ ret = ghsic_data_connect(&dev->port, port_num);
+ if (ret) {
+ pr_err("%s: ghsic_data_connect failed: err:%d\n",
+ __func__, ret);
+ ghsic_ctrl_disconnect(&dev->port, port_num);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_ETHER:
+ gether_enable_sg(&dev->gether_port, true);
+ net = gether_connect(&dev->gether_port);
+ if (IS_ERR(net)) {
+ pr_err("%s: gether_connect failed: err:%ld\n",
+ __func__, PTR_ERR(net));
+ if (cxport == USB_GADGET_XPORT_QTI)
+ gqti_ctrl_disconnect(&dev->port, port_num);
+ else
+ gsmd_ctrl_disconnect(&dev->port, port_num);
+
+ return PTR_ERR(net);
+ }
+ gether_update_dl_max_pkts_per_xfer(&dev->gether_port,
+ rmnet_dl_max_pkt_per_xfer);
+ gether_update_dl_max_xfer_size(&dev->gether_port, 16384);
+ break;
+ case USB_GADGET_XPORT_NONE:
+ break;
+ default:
+ pr_err("%s: Un-supported transport: %s\n", __func__,
+ xport_to_str(dxport));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
/*
 * Tear down both halves of the port: control transport first, then
 * data transport, mirroring gport_rmnet_connect().
 *
 * Returns 0, or -ENODEV for an unrecognized transport type.
 */
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned		port_num;
	enum transport_type	cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(cxport), xport_to_str(dxport),
		dev, dev->port_num);

	/* --- control transport --- */
	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_QTI:
		gqti_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	/* --- data transport --- */
	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM_DMUX:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_ETHER:
		gether_disconnect(&dev->gether_port);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}
+
/*
 * Release everything frmnet_bind allocated: the per-speed descriptor
 * copies, the notify request/buffer, and the kasprintf'd function
 * name.
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);
	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->fs_descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
+
+static void frmnet_purge_responses(struct f_rmnet *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ dev->notify_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
/*
 * Bus-suspend handler.  Flushes the notify endpoint and discards
 * pending responses, then for the BAM2BAM_IPA data transport either
 * suspends the BAM (remote wakeup allowed) or fully disconnects it
 * (remote wakeup not allowed) after saving the endpoint descriptors
 * so frmnet_resume() can restore them.
 */
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;
	bool remote_wakeup_allowed;

	/* SS uses per-function wakeup; HS/FS use the device-wide flag */
	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
		remote_wakeup_allowed = f->func_wakeup_allowed;
	else
		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;

	pr_debug("%s: data xport: %s dev: %p portno: %d remote_wakeup: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num, remote_wakeup_allowed);

	usb_ep_fifo_flush(dev->notify);
	frmnet_purge_responses(dev);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM_DMUX:
		break;
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		if (remote_wakeup_allowed) {
			gbam_suspend(&dev->port, port_num, dxport);
		} else {
			/*
			 * When remote wakeup is disabled, IPA is disconnected
			 * because it cannot send new data until the USB bus is
			 * resumed. Endpoint descriptors info is saved before it
			 * gets reset by the BAM disconnect API. This lets us
			 * restore this info when the USB bus is resumed.
			 */
			dev->in_ep_desc_backup  = dev->port.in->desc;
			dev->out_ep_desc_backup  = dev->port.out->desc;
			pr_debug("in_ep_desc_bkup = %p, out_ep_desc_bkup = %p",
				dev->in_ep_desc_backup, dev->out_ep_desc_backup);
			pr_debug("%s(): Disconnecting\n", __func__);
			gport_rmnet_disconnect(dev);
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_HSUART:
		break;
	case USB_GADGET_XPORT_ETHER:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}
+
/*
 * Bus-resume handler, the inverse of frmnet_suspend(): for the
 * BAM2BAM_IPA transport either resumes the suspended BAM or, when the
 * suspend path had to fully disconnect (no remote wakeup), restores
 * the saved endpoint descriptors and reconnects the port.
 */
static void frmnet_resume(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;
	int  ret;
	bool remote_wakeup_allowed;

	/* SS uses per-function wakeup; HS/FS use the device-wide flag */
	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
		remote_wakeup_allowed = f->func_wakeup_allowed;
	else
		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;

	pr_debug("%s: data xport: %s dev: %p portno: %d remote_wakeup: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num, remote_wakeup_allowed);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM_DMUX:
		break;
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		if (remote_wakeup_allowed) {
			gbam_resume(&dev->port, port_num, dxport);
		} else {
			/* descriptors were saved by frmnet_suspend() */
			dev->port.in->desc = dev->in_ep_desc_backup;
			dev->port.out->desc = dev->out_ep_desc_backup;
			pr_debug("%s(): Connecting\n", __func__);
			ret = gport_rmnet_connect(dev, dev->ifc_id);
			if (ret) {
				pr_err("%s: gport_rmnet_connect failed: err:%d\n",
					__func__, ret);
			}
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_HSUART:
		break;
	case USB_GADGET_XPORT_ETHER:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}
+
/*
 * Host deconfigured the interface: stop notifications, mark the port
 * offline, drop pending responses, unconfigure the DWC3 BAM endpoints
 * if applicable, and disconnect both transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;
	struct usb_composite_dev *cdev = dev->cdev;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	frmnet_purge_responses(dev);

	/* DWC3 + BAM2BAM_IPA needs explicit msm endpoint unconfig */
	if (dxport == USB_GADGET_XPORT_BAM2BAM_IPA &&
			gadget_is_dwc3(cdev->gadget)) {
		msm_ep_unconfig(dev->port.out);
		msm_ep_unconfig(dev->port.in);
	}
	gport_rmnet_disconnect(dev);
}
+
/*
 * SET_INTERFACE handler: (re)enable the notify endpoint, configure the
 * bulk endpoints for the current speed on first activation, connect
 * the transports, and mark the port online.  Re-queues a notification
 * for any responses still pending from before a reset.
 *
 * Returns 0 on success or a negative errno; on failure the endpoint
 * descriptors are cleared and the notify endpoint disabled.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	int				ret;
	struct list_head *cpkt;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* driver_data set means a previous alt-setting left it enabled */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
	if (ret) {
		dev->notify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
					dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->notify);

	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		dev->notify->desc = NULL;
		return ret;
	}
	dev->notify->driver_data = dev;

	/* bulk endpoints only need (re)configuration when descriptors
	 * were cleared (first activation, or after suspend/disconnect)
	 */
	if (!dev->port.in->desc || !dev->port.out->desc) {
		if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
			config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
				pr_err("%s(): config_ep_by_speed failed.\n",
						__func__);
				ret = -EINVAL;
				goto err_disable_ep;
		}
		dev->port.gadget = dev->cdev->gadget;
	}

	ret = gport_rmnet_connect(dev, intf);
	if (ret) {
		pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
				__func__, ret);
		goto err_disable_ep;
	}

	atomic_set(&dev->online, 1);

	/* In case notifications were aborted, but there are pending control
	   packets in the response queue, re-add the notifications.
	   NOTE(review): this walk does not hold dev->lock while other
	   paths mutate cpkt_resp_q under it -- verify no concurrent
	   queue updates can occur at set_alt time. */
	list_for_each(cpkt, &dev->cpkt_resp_q)
		frmnet_ctrl_response_available(dev);

	return ret;
err_disable_ep:
	dev->port.in->desc = NULL;
	dev->port.out->desc = NULL;
	usb_ep_disable(dev->notify);

	return ret;
}
+
+/*
+ * frmnet_ctrl_response_available() - tell the host a control response is
+ * queued by sending a CDC RESPONSE_AVAILABLE interrupt notification.
+ *
+ * notify_count counts notifications in flight: only the 0->1 transition
+ * queues the (single, shared) notify request; further increments are
+ * drained by frmnet_notify_complete() re-queueing it.
+ */
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+	struct usb_request *req = dev->notify_req;
+	struct usb_cdc_notification *event;
+	unsigned long flags;
+	int ret;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!atomic_read(&dev->online) || !req || !req->buf) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	/* A notification is already in flight; it will be re-queued on
+	 * completion, so nothing more to do here. */
+	if (++dev->notify_count != 1) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	event = req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (ret) {
+		/* Undo the count and drop the response the host will
+		 * never be told about. */
+		spin_lock_irqsave(&dev->lock, flags);
+		if (!list_empty(&dev->cpkt_resp_q)) {
+			if (dev->notify_count > 0)
+				dev->notify_count--;
+			else {
+				pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					 __func__, dev->notify_count);
+				spin_unlock_irqrestore(&dev->lock, flags);
+				return;
+			}
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_debug("ep enqueue error %d\n", ret);
+	}
+}
+
+/* Control transport callback: the control channel has come up. */
+static void frmnet_connect(struct grmnet *gr)
+{
+	struct f_rmnet *dev;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+	atomic_set(&dev->ctrl_online, 1);
+}
+
+/*
+ * frmnet_disconnect() - control transport callback: control channel went
+ * down.  If the USB link is still up, sends a NETWORK_CONNECTION
+ * notification (wValue 0 = disconnected) and purges queued responses.
+ */
+static void frmnet_disconnect(struct grmnet *gr)
+{
+	struct f_rmnet *dev;
+	struct usb_cdc_notification *event;
+	int status;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	atomic_set(&dev->ctrl_online, 0);
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: nothing to do\n", __func__);
+		return;
+	}
+
+	/* Drop any RESPONSE_AVAILABLE notification still in the fifo so
+	 * notify_req can be reused for the disconnect event below. */
+	usb_ep_fifo_flush(dev->notify);
+
+	event = dev->notify_req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		/* Queueing races with disable; only log if still online */
+		if (!atomic_read(&dev->online))
+			return;
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+			__func__, status);
+	}
+
+	frmnet_purge_responses(dev);
+}
+
+/*
+ * frmnet_send_cpkt_response() - queue a control response from the modem
+ * side and notify the host.  Responses arriving while the link is down
+ * are silently discarded (returns 0).
+ */
+static int
+frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+	struct f_rmnet *dev;
+	struct rmnet_ctrl_pkt *pkt;
+	unsigned long flags;
+
+	if (!gr || !buf) {
+		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
+				__func__, gr, buf);
+		return -ENODEV;
+	}
+
+	pkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(pkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(pkt->buf, buf, len);
+	pkt->len = len;
+
+	dev = port_to_rmnet(gr);
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	/* Link or control channel down: drop the response quietly */
+	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+		rmnet_free_ctrl_pkt(pkt);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&pkt->list, &dev->cpkt_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	frmnet_ctrl_response_available(dev);
+
+	return 0;
+}
+
+/*
+ * frmnet_cmd_complete() - ep0 OUT data-stage completion for
+ * USB_CDC_SEND_ENCAPSULATED_COMMAND.  Forwards the received payload to
+ * the control transport, if one is registered.
+ *
+ * Note: the original declared and assigned a 'cdev' local that was never
+ * used; it has been removed.
+ */
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+	unsigned port_num;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	/* Hand the encapsulated command to the control transport */
+	if (dev->port.send_encap_cmd) {
+		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
+		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
+	}
+}
+
+/*
+ * frmnet_notify_complete() - completion handler for the interrupt notify
+ * request.  On abort (-ECONNRESET/-ESHUTDOWN) the pending-notification
+ * counter is reset; on success (or any other error, which deliberately
+ * falls through to the success path) the request is re-queued while
+ * notify_count indicates further notifications are owed to the host.
+ */
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+	int status = req->status;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->notify_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	default:
+		pr_err("rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (!atomic_read(&dev->ctrl_online))
+			break;
+
+		/* Consume one owed notification; stop when none remain */
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->notify_count > 0) {
+			dev->notify_count--;
+			if (dev->notify_count == 0) {
+				spin_unlock_irqrestore(&dev->lock, flags);
+				break;
+			}
+		} else {
+			pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					__func__, dev->notify_count);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/* More notifications owed: re-queue outside the lock */
+		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+		if (status) {
+			/* Re-queue failed: roll back the count and drop the
+			 * response the host will never fetch. */
+			spin_lock_irqsave(&dev->lock, flags);
+			if (!list_empty(&dev->cpkt_resp_q)) {
+				if (dev->notify_count > 0)
+					dev->notify_count--;
+				else {
+					pr_err("%s: Invalid notify_count=%lu to decrement\n",
+						__func__, dev->notify_count);
+					spin_unlock_irqrestore(&dev->lock,
+								flags);
+					break;
+				}
+				cpkt = list_first_entry(&dev->cpkt_resp_q,
+						struct rmnet_ctrl_pkt, list);
+				list_del(&cpkt->list);
+				rmnet_free_ctrl_pkt(cpkt);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("ep enqueue error %d\n", status);
+		}
+		break;
+	}
+}
+
+/*
+ * frmnet_setup() - class-specific ep0 request handler.
+ * Handles SEND_ENCAPSULATED_COMMAND (host->device control data),
+ * GET_ENCAPSULATED_RESPONSE (host fetches a queued response) and
+ * SET_CONTROL_LINE_STATE (DTR changes).  Returns the data-stage length
+ * (>= 0, then queues the ep0 request) or a negative errno.
+ */
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = cdev->req;
+	unsigned port_num;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	int ret = -EOPNOTSUPP;
+
+	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (!atomic_read(&dev->online)) {
+		pr_warning("%s: usb cable is not connected\n", __func__);
+		return -ENOTCONN;
+	}
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n"
+			 , __func__);
+		/* Accept the OUT data stage; payload is forwarded to the
+		 * control transport from frmnet_cmd_complete(). */
+		ret = w_length;
+		req->complete = frmnet_cmd_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__);
+		if (w_value) {
+			pr_err("%s: invalid w_value = %04x\n",
+				   __func__ , w_value);
+			goto invalid;
+		} else {
+			unsigned len;
+			struct rmnet_ctrl_pkt *cpkt;
+
+			/* Pop the oldest queued response and copy (up to
+			 * w_length bytes of) it into the ep0 buffer. */
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->cpkt_resp_q)) {
+				pr_err("ctrl resp queue empty "
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				ret = 0;
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d\n",
+				__func__, w_value & ACM_CTRL_DTR ? 1 : 0);
+		/* Relay DTR state to the modem via the control transport */
+		if (dev->port.notify_modem) {
+			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
+			dev->port.notify_modem(&dev->port, port_num, w_value);
+		}
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * frmnet_bind() - allocate the interface id, data/notify endpoints, the
+ * notify request and the speed-specific descriptor arrays for one rmnet
+ * instance.  Returns 0 on success or a negative errno.
+ *
+ * Fix vs. original: the failure path now resets each pointer after
+ * freeing it (descriptor arrays, notify_req) so that a subsequent unbind
+ * or rebind cannot double-free or reuse stale pointers.
+ */
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct usb_ep *ep;
+	struct usb_composite_dev *cdev = c->cdev;
+	int ret = -ENODEV;
+
+	pr_debug("%s: start binding\n", __func__);
+	dev->ifc_id = usb_interface_id(c, f);
+	if (dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d\n",
+			__func__, dev->ifc_id);
+		return dev->ifc_id;
+	}
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	/* IN (device-to-host) data endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep) {
+		pr_err("%s: usb epin autoconfig failed\n", __func__);
+		return -ENODEV;
+	}
+	dev->port.in = ep;
+	/* Update same for u_ether which uses gether port struct */
+	dev->gether_port.in_ep = ep;
+	ep->driver_data = cdev;
+
+	/* OUT (host-to-device) data endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep) {
+		pr_err("%s: usb epout autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_out_fail;
+	}
+	dev->port.out = ep;
+	/* Update same for u_ether which uses gether port struct */
+	dev->gether_port.out_ep = ep;
+	ep->driver_data = cdev;
+
+	/* Interrupt endpoint for CDC notifications */
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep) {
+		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_notify_fail;
+	}
+	dev->notify = ep;
+	ep->driver_data = cdev;
+
+	dev->notify_req = frmnet_alloc_req(ep,
+				sizeof(struct usb_cdc_notification),
+				cdev->gadget->extra_buf_alloc,
+				GFP_KERNEL);
+	if (IS_ERR(dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+			__func__);
+		ret = -ENOMEM;
+		dev->notify_req = NULL;
+		goto ep_notify_alloc_fail;
+	}
+
+	dev->notify_req->complete = frmnet_notify_complete;
+	dev->notify_req->context = dev;
+
+	ret = -ENOMEM;
+	f->fs_descriptors = usb_copy_descriptors(rmnet_fs_function);
+
+	if (!f->fs_descriptors) {
+		pr_err("%s: no descriptors,usb_copy descriptors(fs)failed\n",
+			__func__);
+		goto fail;
+	}
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		/* HS endpoints share addresses with their FS counterparts */
+		rmnet_hs_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
+
+		if (!f->hs_descriptors) {
+			pr_err("%s: no hs_descriptors,usb_copy descriptors(hs)failed\n",
+				__func__);
+			goto fail;
+		}
+	}
+
+	if (gadget_is_superspeed(cdev->gadget)) {
+		rmnet_ss_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_ss_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_ss_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);
+
+		if (!f->ss_descriptors) {
+			pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
+				__func__);
+			goto fail;
+		}
+	}
+
+	pr_debug("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
+			__func__, dev->port_num,
+			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+			dev->port.in->name, dev->port.out->name);
+
+	return 0;
+
+fail:
+	/* Free in reverse order and NULL each pointer to avoid a later
+	 * double free from unbind. */
+	if (f->ss_descriptors) {
+		usb_free_descriptors(f->ss_descriptors);
+		f->ss_descriptors = NULL;
+	}
+	if (f->hs_descriptors) {
+		usb_free_descriptors(f->hs_descriptors);
+		f->hs_descriptors = NULL;
+	}
+	if (f->fs_descriptors) {
+		usb_free_descriptors(f->fs_descriptors);
+		f->fs_descriptors = NULL;
+	}
+	if (dev->notify_req) {
+		frmnet_free_req(dev->notify, dev->notify_req);
+		dev->notify_req = NULL;
+	}
+ep_notify_alloc_fail:
+	dev->notify->driver_data = NULL;
+	dev->notify = NULL;
+ep_auto_notify_fail:
+	dev->port.out->driver_data = NULL;
+	dev->port.out = NULL;
+ep_auto_out_fail:
+	dev->port.in->driver_data = NULL;
+	dev->port.in = NULL;
+
+	return ret;
+}
+
+/*
+ * frmnet_bind_config() - add one rmnet function instance to a
+ * configuration.  For the ETHER data transport this also creates and
+ * registers the u_ether netdev.
+ *
+ * Fix vs. original: error paths after the netdev was registered
+ * (usb_string_id, kasprintf, usb_add_function failures) used to leak the
+ * netdev; they now release it via gether_cleanup(), and ioport is reset
+ * after free_netdev() as well.
+ */
+static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
+{
+	int status;
+	struct f_rmnet *dev;
+	struct usb_function *f;
+	unsigned long flags;
+
+	pr_debug("%s: usb config:%p\n", __func__, c);
+
+	if (portno >= nr_rmnet_ports) {
+		pr_err("%s: supporting ports#%u port_id:%u\n", __func__,
+			nr_rmnet_ports, portno);
+		return -ENODEV;
+	}
+
+	dev = rmnet_ports[portno].port;
+
+	if (rmnet_ports[portno].data_xport == USB_GADGET_XPORT_ETHER) {
+		struct net_device *net = gether_setup_name_default("usb_rmnet");
+		if (IS_ERR(net)) {
+			pr_err("%s: gether_setup failed\n", __func__);
+			return PTR_ERR(net);
+		}
+		dev->gether_port.ioport = netdev_priv(net);
+		gether_set_gadget(net, c->cdev->gadget);
+		status = gether_register_netdev(net);
+		if (status < 0) {
+			pr_err("%s: gether_register_netdev failed\n", __func__);
+			free_netdev(net);
+			dev->gether_port.ioport = NULL;
+			return status;
+		}
+	}
+
+	if (rmnet_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0) {
+			pr_err("%s: failed to get string id, err:%d\n",
+					__func__, status);
+			goto fail_cleanup_ether;
+		}
+		rmnet_string_defs[0].id = status;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->cdev = c->cdev;
+	f = &dev->gether_port.func;
+	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!f->name) {
+		pr_err("%s: cannot allocate memory for name\n", __func__);
+		status = -ENOMEM;
+		goto fail_cleanup_ether;
+	}
+
+	f->strings = rmnet_strings;
+	f->bind = frmnet_bind;
+	f->unbind = frmnet_unbind;
+	f->disable = frmnet_disable;
+	f->set_alt = frmnet_set_alt;
+	f->setup = frmnet_setup;
+	f->suspend = frmnet_suspend;
+	f->resume = frmnet_resume;
+	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+	dev->port.disconnect = frmnet_disconnect;
+	dev->port.connect = frmnet_connect;
+	dev->gether_port.cdc_filter = 0;
+
+	status = usb_add_function(c, f);
+	if (status) {
+		pr_err("%s: usb add function failed: %d\n",
+				__func__, status);
+		kfree(f->name);
+		f->name = NULL;
+		goto fail_cleanup_ether;
+	}
+
+	pr_debug("%s: complete\n", __func__);
+
+	return status;
+
+fail_cleanup_ether:
+	/* Release the netdev registered above, if any */
+	if (rmnet_ports[portno].data_xport == USB_GADGET_XPORT_ETHER &&
+			dev->gether_port.ioport) {
+		gether_cleanup(dev->gether_port.ioport);
+		dev->gether_port.ioport = NULL;
+	}
+
+	return status;
+}
+
+/* Release the u_ether netdev of every Ethernet-backed rmnet port. */
+static void frmnet_unbind_config(void)
+{
+	int i;
+
+	for (i = 0; i < nr_rmnet_ports; i++) {
+		if (rmnet_ports[i].data_xport != USB_GADGET_XPORT_ETHER)
+			continue;
+
+		gether_cleanup(rmnet_ports[i].port->gether_port.ioport);
+		rmnet_ports[i].port->gether_port.ioport = NULL;
+	}
+}
+
+/* One-time module init: bring up the QTI control transport. */
+static int rmnet_init(void)
+{
+	return gqti_ctrl_init();
+}
+
+/*
+ * Tear down every rmnet port: shut down the QTI control transport, free
+ * all port instances, clean up BAM, and zero the global port counters.
+ */
+static void frmnet_cleanup(void)
+{
+	int i;
+
+	gqti_ctrl_cleanup();
+
+	for (i = 0; i < nr_rmnet_ports; i++)
+		kfree(rmnet_ports[i].port);
+
+	gbam_cleanup();
+
+	/* Reset all global port bookkeeping */
+	nr_rmnet_ports = 0;
+	no_ctrl_smd_ports = 0;
+	no_ctrl_qti_ports = 0;
+	no_data_bam_ports = 0;
+	no_data_bam2bam_ports = 0;
+	no_ctrl_hsic_ports = 0;
+	no_data_hsic_ports = 0;
+	no_ctrl_hsuart_ports = 0;
+	no_data_hsuart_ports = 0;
+}
+
+/*
+ * frmnet_init_port() - register one rmnet port described by its control
+ * and data transport names.  Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original fail_probe path:
+ *  - the just-allocated port (at index nr_rmnet_ports) was never freed
+ *    because the loop stopped at i < nr_rmnet_ports (memory leak);
+ *  - freed rmnet_ports[i].port pointers were left dangling;
+ *  - no_data_bam2bam_ports was not reset (inconsistent with
+ *    frmnet_cleanup()).
+ */
+static int frmnet_init_port(const char *ctrl_name, const char *data_name,
+		const char *port_name)
+{
+	struct f_rmnet *dev;
+	struct rmnet_ports *rmnet_port;
+	int ret;
+	int i;
+
+	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
+		pr_err("%s: Max-%d instances supported\n",
+				__func__, NR_RMNET_PORTS);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
+		__func__, nr_rmnet_ports, ctrl_name, data_name);
+
+	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+	if (!dev) {
+		pr_err("%s: Unable to allocate rmnet device\n", __func__);
+		return -ENOMEM;
+	}
+
+	dev->port_num = nr_rmnet_ports;
+	spin_lock_init(&dev->lock);
+	INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+	rmnet_port = &rmnet_ports[nr_rmnet_ports];
+	rmnet_port->port = dev;
+	rmnet_port->port_num = nr_rmnet_ports;
+	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
+	rmnet_port->data_xport = str_to_xport(data_name);
+
+	/* Assign a per-transport instance number to the control channel */
+	switch (rmnet_port->ctrl_xport) {
+	case USB_GADGET_XPORT_SMD:
+		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
+		no_ctrl_smd_ports++;
+		break;
+	case USB_GADGET_XPORT_QTI:
+		rmnet_port->ctrl_xport_num = no_ctrl_qti_ports;
+		no_ctrl_qti_ports++;
+		break;
+	case USB_GADGET_XPORT_HSIC:
+		ghsic_ctrl_set_port_name(port_name, ctrl_name);
+		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
+		no_ctrl_hsic_ports++;
+		break;
+	case USB_GADGET_XPORT_HSUART:
+		rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
+		no_ctrl_hsuart_ports++;
+		break;
+	case USB_GADGET_XPORT_NONE:
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %u\n", __func__,
+				rmnet_port->ctrl_xport);
+		ret = -ENODEV;
+		goto fail_probe;
+	}
+
+	/* Assign a per-transport instance number to the data channel */
+	switch (rmnet_port->data_xport) {
+	case USB_GADGET_XPORT_BAM2BAM:
+		/* Override BAM2BAM to BAM_DMUX for old ABI compatibility */
+		rmnet_port->data_xport = USB_GADGET_XPORT_BAM_DMUX;
+		/* fall-through */
+	case USB_GADGET_XPORT_BAM_DMUX:
+		rmnet_port->data_xport_num = no_data_bam_ports;
+		no_data_bam_ports++;
+		break;
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		rmnet_port->data_xport_num = no_data_bam2bam_ports;
+		no_data_bam2bam_ports++;
+		break;
+	case USB_GADGET_XPORT_HSIC:
+		ghsic_data_set_port_name(port_name, data_name);
+		rmnet_port->data_xport_num = no_data_hsic_ports;
+		no_data_hsic_ports++;
+		break;
+	case USB_GADGET_XPORT_HSUART:
+		rmnet_port->data_xport_num = no_data_hsuart_ports;
+		no_data_hsuart_ports++;
+		break;
+	case USB_GADGET_XPORT_ETHER:
+	case USB_GADGET_XPORT_NONE:
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %u\n", __func__,
+				rmnet_port->data_xport);
+		ret = -ENODEV;
+		goto fail_probe;
+	}
+	nr_rmnet_ports++;
+
+	return 0;
+
+fail_probe:
+	/* Free the port allocated above (slot nr_rmnet_ports) as well as
+	 * all previously registered ports, and clear the pointers. */
+	for (i = 0; i <= nr_rmnet_ports; i++) {
+		kfree(rmnet_ports[i].port);
+		rmnet_ports[i].port = NULL;
+	}
+
+	nr_rmnet_ports = 0;
+	no_ctrl_smd_ports = 0;
+	no_ctrl_qti_ports = 0;
+	no_data_bam_ports = 0;
+	no_data_bam2bam_ports = 0;
+	no_ctrl_hsic_ports = 0;
+	no_data_hsic_ports = 0;
+	no_ctrl_hsuart_ports = 0;
+	no_data_hsuart_ports = 0;
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/function/u_bam.c b/drivers/usb/gadget/function/u_bam.c
new file mode 100644
index 000000000000..aef5f1a0783f
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam.c
@@ -0,0 +1,2523 @@
+/* Copyright (c) 2011-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <soc/qcom/smd.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include <soc/qcom/bam_dmux.h>
+
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/usb_bam.h>
+
+#include "usb_gadget_xport.h"
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS 2
+#define BAM2BAM_N_PORTS 4
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static int n_bam2bam_ports;
+static unsigned n_tx_req_queued;
+
+static unsigned bam_ch_ids[BAM_N_PORTS] = {
+ BAM_DMUX_USB_RMNET_0,
+ BAM_DMUX_USB_DPL
+};
+
+static char bam_ch_names[BAM_N_PORTS][BAM_DMUX_CH_NAME_MAX_LEN];
+
+static const enum ipa_client_type usb_prod[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_PROD, IPA_CLIENT_USB2_PROD,
+ IPA_CLIENT_USB3_PROD, IPA_CLIENT_USB4_PROD
+};
+static const enum ipa_client_type usb_cons[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_CONS, IPA_CLIENT_USB2_CONS,
+ IPA_CLIENT_USB3_CONS, IPA_CLIENT_USB4_CONS
+};
+
+#define BAM_PENDING_PKTS_LIMIT 220
+#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
+#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
+#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
+#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
+
+#define BAM_MUX_HDR 8
+
+#define BAM_MUX_RX_Q_SIZE 128
+#define BAM_MUX_TX_Q_SIZE 200
+#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+
+#define DL_INTR_THRESHOLD 20
+#define BAM_PENDING_BYTES_LIMIT (50 * BAM_MUX_RX_REQ_SIZE)
+#define BAM_PENDING_BYTES_FCTRL_EN_TSHOLD (BAM_PENDING_BYTES_LIMIT / 3)
+
+/* Extra buffer size to allocate for tx */
+#define EXTRA_ALLOCATION_SIZE_U_BAM 128
+
+static unsigned int bam_pending_pkts_limit = BAM_PENDING_PKTS_LIMIT;
+module_param(bam_pending_pkts_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_limit = BAM_PENDING_BYTES_LIMIT;
+module_param(bam_pending_bytes_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_fctrl_en_thold =
+ BAM_PENDING_BYTES_FCTRL_EN_TSHOLD;
+module_param(bam_pending_bytes_fctrl_en_thold, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
+module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
+module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
+module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
+module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
+module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned long bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
+module_param(bam_mux_rx_req_size, ulong, S_IRUGO);
+
+static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
+module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
+
+#define BAM_CH_OPENED BIT(0)
+#define BAM_CH_READY BIT(1)
+#define BAM_CH_WRITE_INPROGRESS BIT(2)
+
+/* Port life-cycle events; the last one seen is kept in gbam_port. */
+enum u_bam_event_type {
+	U_BAM_DISCONNECT_E = 0,
+	U_BAM_CONNECT_E,
+	U_BAM_SUSPEND_E,
+	U_BAM_RESUME_E
+};
+
+/*
+ * Software UL (sys-to-IPA) path state: the tethering driver's notify
+ * callback and its private handle, invoked on IPA_RECEIVE events.
+ */
+struct sys2ipa_sw {
+	void *teth_priv;
+	ipa_notify_cb teth_cb;
+};
+
+/*
+ * Per-port BAM data-channel state.
+ * tx_* members carry downlink traffic (modem -> USB host); rx_* members
+ * carry uplink traffic (USB host -> modem), based on how the tohost/tobam
+ * paths use them.  tx_* is guarded by port_lock_dl and rx_* by
+ * port_lock_ul of the owning gbam_port.
+ */
+struct bam_ch_info {
+	unsigned long flags;		/* BAM_CH_OPENED/READY/... bits */
+	unsigned id;			/* BAM DMUX channel id */
+
+	struct list_head tx_idle;	/* free IN usb_requests */
+	struct sk_buff_head tx_skb_q;	/* skbs waiting to go to host */
+
+	struct list_head rx_idle;	/* free OUT usb_requests */
+	struct sk_buff_head rx_skb_q;	/* skbs waiting to go to BAM */
+	struct sk_buff_head rx_skb_idle; /* recycled rx skb pool */
+
+	struct gbam_port *port;
+	struct work_struct write_tobam_w;
+	struct work_struct write_tohost_w;
+
+	/* pre-allocated requests for the endless (BAM2BAM) pipes */
+	struct usb_request *rx_req;
+	struct usb_request *tx_req;
+
+	u32 src_pipe_idx;
+	u32 dst_pipe_idx;
+	u8 src_connection_idx;
+	u8 dst_connection_idx;
+	enum usb_ctrl usb_bam_type;
+
+	enum transport_type trans;
+	struct usb_bam_connect_ipa_params ipa_params;
+
+	/* added to support sys to ipa sw UL path */
+	struct sys2ipa_sw ul_params;
+	enum usb_bam_pipe_type src_pipe_type;
+	enum usb_bam_pipe_type dst_pipe_type;
+
+	/* stats */
+	unsigned int pending_pkts_with_bam;
+	unsigned int pending_bytes_with_bam;
+	unsigned int tohost_drp_cnt;
+	unsigned int tomodem_drp_cnt;
+	unsigned int tx_len;
+	unsigned int rx_len;
+	unsigned long to_modem;
+	unsigned long to_host;
+	unsigned int rx_flow_control_disable;
+	unsigned int rx_flow_control_enable;
+	unsigned int rx_flow_control_triggered;
+	unsigned int max_num_pkts_pending_with_bam;
+	unsigned int max_bytes_pending_with_bam;
+	unsigned int delayed_bam_mux_write_done;
+	unsigned long skb_expand_cnt;
+};
+
+/*
+ * One USB<->BAM port instance.  port_lock_ul guards the uplink (rx_*)
+ * side of data_ch, port_lock_dl the downlink (tx_*) side; port_lock
+ * guards overall port state.  Life-cycle transitions are processed by
+ * the connect/disconnect/suspend/resume work items.
+ */
+struct gbam_port {
+	bool is_connected;
+	enum u_bam_event_type last_event;
+	unsigned port_num;
+	spinlock_t port_lock_ul;
+	spinlock_t port_lock_dl;
+	spinlock_t port_lock;
+
+	struct grmnet *port_usb;	/* NULL while the function is down */
+	struct usb_gadget *gadget;
+
+	struct bam_ch_info data_ch;
+
+	struct work_struct connect_w;
+	struct work_struct disconnect_w;
+	struct work_struct suspend_w;
+	struct work_struct resume_w;
+};
+
+/* One entry per BAM-DMUX port: the port instance plus its platform driver. */
+static struct bam_portmaster {
+	struct gbam_port *port;
+	struct platform_driver pdrv;
+} bam_ports[BAM_N_PORTS];
+
+/* Pipe identifiers describing an established USB-BAM connection. */
+struct u_bam_data_connect_info {
+	u32 usb_bam_pipe_idx;
+	u32 peer_pipe_idx;
+	unsigned long usb_bam_handle;
+};
+
+struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
+static void gbam_start_rx(struct gbam_port *port);
+static void gbam_start_endless_rx(struct gbam_port *port);
+static void gbam_start_endless_tx(struct gbam_port *port);
+static void gbam_notify(void *p, int event, unsigned long data);
+static void gbam_data_write_tobam(struct work_struct *w);
+
+/*---------------misc functions---------------- */
+/* Free every usb_request parked on the given idle list. */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request *req;
+
+	while (!list_empty(head)) {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/*
+ * gbam_alloc_requests() - pre-allocate 'num' usb_requests on 'head' with
+ * completion callback 'cb'.  A partial allocation is considered usable:
+ * returns -ENOMEM only if not a single request could be allocated.
+ *
+ * Fix vs. original: the first pr_debug format string was missing its
+ * trailing newline.
+ */
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
+			ep, head, num, cb);
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			/* partial pool is fine; fail only if empty */
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+/* Read back the DMA address stashed in skb->cb (stored via memcpy). */
+static inline dma_addr_t gbam_get_dma_from_skb(struct sk_buff *skb)
+{
+	dma_addr_t addr;
+
+	memcpy(&addr, skb->cb, sizeof(addr));
+	return addr;
+}
+
+/*
+ * gbam_alloc_skb_from_pool() - get an rx skb, preferring the recycled
+ * idle pool and allocating (and, for BAM2BAM_IPA, DMA-mapping) a fresh
+ * one when the pool is empty.  Must be called with port_lock_ul held.
+ * Returns NULL on allocation failure.
+ *
+ * Fix vs. original: removed the dead '!d' check (&port->data_ch can
+ * never be NULL) and the redundant double parentheses on the transport
+ * comparison.
+ */
+static struct sk_buff *gbam_alloc_skb_from_pool(struct gbam_port *port)
+{
+	struct bam_ch_info *d;
+	struct sk_buff *skb;
+	dma_addr_t skb_buf_dma_addr;
+	struct usb_gadget *gadget;
+
+	if (!port)
+		return NULL;
+
+	d = &port->data_ch;
+
+	if (d->rx_skb_idle.qlen == 0) {
+		/*
+		 * In case skb idle pool is empty, we allow to allocate more
+		 * skbs so we dynamically enlarge the pool size when needed.
+		 * Therefore, in steady state this dynamic allocation will
+		 * stop when the pool will arrive to its optimal size.
+		 */
+		pr_debug("%s: allocate skb\n", __func__);
+		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+
+		if (!skb) {
+			pr_err("%s: alloc skb failed\n", __func__);
+			goto alloc_exit;
+		}
+
+		skb_reserve(skb, BAM_MUX_HDR);
+
+		if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+
+			gadget = port->port_usb->gadget;
+
+			skb_buf_dma_addr =
+				dma_map_single(&gadget->dev, skb->data,
+					bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+			if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+				pr_err("%s: Could not DMA map SKB buffer\n",
+					__func__);
+				skb_buf_dma_addr = DMA_ERROR_CODE;
+			}
+		} else {
+			skb_buf_dma_addr = DMA_ERROR_CODE;
+		}
+
+		/* stash the mapping in skb->cb for later unmap/recycle */
+		memcpy(skb->cb, &skb_buf_dma_addr,
+			sizeof(skb_buf_dma_addr));
+
+	} else {
+		pr_debug("%s: pull skb from pool\n", __func__);
+		skb = __skb_dequeue(&d->rx_skb_idle);
+		if (skb_headroom(skb) < BAM_MUX_HDR)
+			skb_reserve(skb, BAM_MUX_HDR);
+	}
+
+alloc_exit:
+	return skb;
+}
+
+/*
+ * Return an rx skb to the idle pool for reuse.
+ * Must be called with port_lock_ul held.
+ */
+static void gbam_free_skb_to_pool(struct gbam_port *port, struct sk_buff *skb)
+{
+	struct bam_ch_info *d;
+
+	if (!port)
+		return;
+
+	d = &port->data_ch;
+
+	/* reset payload bookkeeping so the skb can be refilled cleanly */
+	skb->len = 0;
+	skb_reset_tail_pointer(skb);
+	__skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
+/*
+ * gbam_free_rx_skb_idle_list() - drain and free the recycled rx skb
+ * pool, unmapping any DMA-mapped buffers.
+ *
+ * Fix vs. original: 'port->port_usb->gadget' was dereferenced
+ * unconditionally, which can oops when port_usb has already been
+ * cleared on disconnect.  The loop below already tolerates a NULL
+ * gadget, so only the dereference needed guarding.
+ */
+static void gbam_free_rx_skb_idle_list(struct gbam_port *port)
+{
+	struct bam_ch_info *d;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	struct usb_gadget *gadget = NULL;
+
+	if (!port)
+		return;
+	d = &port->data_ch;
+
+	if (port->port_usb)
+		gadget = port->port_usb->gadget;
+
+	while (d->rx_skb_idle.qlen > 0) {
+		skb = __skb_dequeue(&d->rx_skb_idle);
+		dma_addr = gbam_get_dma_from_skb(skb);
+
+		if (gadget && dma_addr != DMA_ERROR_CODE) {
+			dma_unmap_single(&gadget->dev, dma_addr,
+				bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+			dma_addr = DMA_ERROR_CODE;
+			memcpy(skb->cb, &dma_addr,
+				sizeof(dma_addr));
+		}
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/*----- sys2bam towards the IPA --------------- */
+/*
+ * IPA event callback for the software UL path: recycles buffers on
+ * write-done and forwards receive events to the tethering driver.
+ */
+static void gbam_ipa_sys2bam_notify_cb(void *priv, enum ipa_dp_evt_type event,
+		unsigned long data)
+{
+	struct sys2ipa_sw *sys2ipa = (struct sys2ipa_sw *)priv;
+	struct bam_ch_info *d;
+	struct gbam_port *port;
+
+	switch (event) {
+	case IPA_WRITE_DONE:
+		/* call into bam_demux functionality that'll recycle the data */
+		d = container_of(sys2ipa, struct bam_ch_info, ul_params);
+		port = container_of(d, struct gbam_port, data_ch);
+		gbam_notify(port, BAM_DMUX_WRITE_DONE, data);
+		break;
+	case IPA_RECEIVE:
+		/* call the callback given by tethering driver init function
+		 * (and was given to ipa_connect)
+		 */
+		if (sys2ipa->teth_cb)
+			sys2ipa->teth_cb(sys2ipa->teth_priv, event, data);
+		break;
+	default:
+		/* unexpected event */
+		pr_err("%s: unexpected event %d\n", __func__, event);
+		break;
+	}
+}
+
+
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
+/*
+ * gbam_write_data_tohost() - drain tx_skb_q toward the USB host, pairing
+ * each skb with an idle IN request.  Interrupts are coalesced: only every
+ * dl_intr_threshold-th request raises a completion interrupt.
+ */
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+	unsigned long flags;
+	struct bam_ch_info *d = &port->data_ch;
+	struct sk_buff *skb;
+	struct sk_buff *new_skb;
+	int ret;
+	int tail_room = 0;
+	int extra_alloc = 0;
+	struct usb_request *req;
+	struct usb_ep *ep;
+
+	spin_lock_irqsave(&port->port_lock_dl, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock_dl, flags);
+		return;
+	}
+
+	ep = port->port_usb->in;
+
+	while (!list_empty(&d->tx_idle)) {
+		skb = __skb_dequeue(&d->tx_skb_q);
+		if (!skb)
+			break;
+
+		/*
+		 * Some UDC requires allocation of some extra bytes for
+		 * TX buffer due to hardware requirement. Check if extra
+		 * bytes are already there, otherwise allocate new buffer
+		 * with extra bytes and do memcpy.
+		 */
+		if (port->gadget->extra_buf_alloc)
+			extra_alloc = EXTRA_ALLOCATION_SIZE_U_BAM;
+		tail_room = skb_tailroom(skb);
+		if (tail_room < extra_alloc) {
+			pr_debug("%s: tail_room  %d less than %d\n", __func__,
+					tail_room, extra_alloc);
+			new_skb = skb_copy_expand(skb, 0, extra_alloc -
+					tail_room, GFP_ATOMIC);
+			if (!new_skb) {
+				pr_err("skb_copy_expand failed\n");
+				break;
+			}
+			dev_kfree_skb_any(skb);
+			skb = new_skb;
+			d->skb_expand_cnt++;
+		}
+
+		req = list_first_entry(&d->tx_idle,
+				struct usb_request,
+				list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+		n_tx_req_queued++;
+		/* interrupt only on every dl_intr_threshold-th request */
+		if (n_tx_req_queued == dl_intr_threshold) {
+			req->no_interrupt = 0;
+			n_tx_req_queued = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+
+		/* Send ZLP in case packet length is multiple of maxpacksize */
+		req->zero = 1;
+
+		list_del(&req->list);
+
+		/*
+		 * Drop only the spinlock (not the irq state) around the
+		 * queue call; interrupts stay disabled for the whole loop.
+		 */
+		spin_unlock(&port->port_lock_dl);
+		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock_dl);
+		if (ret) {
+			pr_err("%s: usb epIn failed with %d\n", __func__, ret);
+			list_add(&req->list, &d->tx_idle);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		d->to_host++;
+	}
+	spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+/* Work item: push queued downlink skbs out to the USB host. */
+static void gbam_write_data_tohost_w(struct work_struct *w)
+{
+	struct bam_ch_info *d = container_of(w, struct bam_ch_info,
+						write_tohost_w);
+
+	gbam_write_data_tohost(d->port);
+}
+
+/*
+ * BAM DMUX receive callback: queue a downlink skb for the host, dropping
+ * it when the port is down or the tx queue exceeds the drop threshold.
+ */
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+	struct gbam_port *port = p;
+	struct bam_ch_info *d = &port->data_ch;
+	unsigned long flags;
+	bool drop = false;
+
+	if (!skb)
+		return;
+
+	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
+			port, port->port_num, d, skb->len);
+
+	spin_lock_irqsave(&port->port_lock_dl, flags);
+	if (!port->port_usb) {
+		drop = true;
+	} else if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
+		d->tohost_drp_cnt++;
+		if (printk_ratelimit())
+			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+					__func__, d->tohost_drp_cnt);
+		drop = true;
+	} else {
+		__skb_queue_tail(&d->tx_skb_q, skb);
+	}
+	spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+	if (drop) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	gbam_write_data_tohost(port);
+}
+
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ gbam_free_skb_to_pool(port, skb);
+
+ pr_debug("%s:port:%p d:%p tom:%lu ppkt:%u pbytes:%u pno:%d\n", __func__,
+ port, d, d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ /*
+ * If BAM doesn't have much pending data then push new data from here:
+ * write_complete notify only to avoid any underruns due to wq latency
+ */
+ if (d->pending_bytes_with_bam <= bam_pending_bytes_fctrl_en_thold) {
+ gbam_data_write_tobam(&d->write_tobam_w);
+ } else {
+ d->delayed_bam_mux_write_done++;
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+}
+
+/* This function should be called with port_lock_ul spinlock acquired */
+static bool gbam_ul_bam_limit_reached(struct bam_ch_info *data_ch)
+{
+ unsigned int curr_pending_pkts = data_ch->pending_pkts_with_bam;
+ unsigned int curr_pending_bytes = data_ch->pending_bytes_with_bam;
+ struct sk_buff *skb;
+
+ if (curr_pending_pkts >= bam_pending_pkts_limit)
+ return true;
+
+ /* check if next skb length doesn't exceed pending_bytes_limit */
+ skb = skb_peek(&data_ch->rx_skb_q);
+ if (!skb)
+ return false;
+
+ if ((curr_pending_bytes + skb->len) > bam_pending_bytes_limit)
+ return true;
+ else
+ return false;
+}
+
/*
 * Drain the UL rx_skb_q towards the modem, either via IPA (SYS2BAM
 * source pipe) or via BAM DMUX.  Runs as a workqueue item and is also
 * called directly from gbam_data_write_done(); BAM_CH_WRITE_INPROGRESS
 * serialises the two, since the lock is dropped around the actual write.
 * Re-enables RX flow control once the backlog falls below
 * bam_mux_rx_fctrl_dis_thld.
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	/* Bail out if already in progress */
	if (test_bit(BAM_CH_WRITE_INPROGRESS, &d->flags)) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	set_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);

	/*
	 * Stop when the pkt/byte limits towards BAM are reached, or (for
	 * BAM2BAM_IPA) while the producer pipe is not yet granted.
	 */
	while (!gbam_ul_bam_limit_reached(d) &&
			(d->trans != USB_GADGET_XPORT_BAM2BAM_IPA ||
			usb_bam_get_prod_granted(d->usb_bam_type,
					d->dst_connection_idx))) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		/* Account optimistically; rolled back below on write error */
		d->pending_pkts_with_bam++;
		d->pending_bytes_with_bam += skb->len;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu ppkts:%u pbytes:%u pno:%d\n",
				__func__, port, d,
				d->to_modem, d->pending_pkts_with_bam,
				d->pending_bytes_with_bam, port->port_num);

		/* Drop the lock for the (potentially slow) write itself */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
			dma_addr_t skb_dma_addr;
			struct ipa_tx_meta ipa_meta = {0x0};

			/* Pass a pre-mapped DMA address to IPA when valid */
			skb_dma_addr = gbam_get_dma_from_skb(skb);
			if (skb_dma_addr != DMA_ERROR_CODE) {
				ipa_meta.dma_address = skb_dma_addr;
				ipa_meta.dma_address_valid = true;
			}

			ret = ipa_tx_dp(usb_prod[port->port_num],
					skb,
					&ipa_meta);
		} else {
			ret = msm_bam_dmux_write(d->id, skb);
		}

		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			/* Undo the accounting and recycle the skb */
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_pkts_with_bam--;
			d->pending_bytes_with_bam -= skb->len;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			gbam_free_skb_to_pool(port, skb);
			break;
		}
		/* Track high-water marks for debugfs/statistics */
		if (d->pending_pkts_with_bam > d->max_num_pkts_pending_with_bam)
			d->max_num_pkts_pending_with_bam =
					d->pending_pkts_with_bam;
		if (d->pending_bytes_with_bam > d->max_bytes_pending_with_bam)
			d->max_bytes_pending_with_bam =
					d->pending_bytes_with_bam;
	}

	qlen = d->rx_skb_q.qlen;

	clear_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* Backlog drained enough: resume queueing OUT requests */
	if (qlen < bam_mux_rx_fctrl_dis_thld) {
		if (d->rx_flow_control_triggered) {
			d->rx_flow_control_disable++;
			d->rx_flow_control_triggered = 0;
		}
		gbam_start_rx(port);
	}
}
+/*-------------------------------------------------------------*/
+
/*
 * Completion handler for the IN (device -> host) data endpoint.
 * Frees the transmitted skb, returns the request to tx_idle and
 * schedules the tohost worker to queue more data.  On endpoint
 * shutdown the request itself is freed instead of being recycled.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		dev_kfree_skb_any(skb);
		usb_ep_free_request(ep, req);
		return;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/* driver_data may already be cleared on disconnect */
	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}
+
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ req->buf = 0;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ if (printk_ratelimit())
+ pr_err("%s: %s response error %d, %d/%d\n",
+ __func__, ep->name, status,
+ req->actual, req->length);
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ break;
+ }
+
+ spin_lock(&port->port_lock_ul);
+
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ if ((d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
+ !usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx)) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ } else
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+
+ /* TODO: Handle flow control gracefully by having
+ * having call back mechanism from bam driver
+ */
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
+ if (!d->rx_flow_control_triggered) {
+ d->rx_flow_control_triggered = 1;
+ d->rx_flow_control_enable++;
+ }
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ spin_unlock(&port->port_lock_ul);
+
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+
+ if (printk_ratelimit())
+ pr_err("%s: data rx enqueue err %d\n",
+ __func__, status);
+
+ spin_lock(&port->port_lock_ul);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ }
+}
+
+static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s status: %d\n", __func__, status);
+}
+
+static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s status: %d\n", __func__, status);
+}
+
/*
 * Arm the OUT endpoint: pair every idle request on rx_idle with a fresh
 * skb from the pool and queue it, stopping early if flow control kicks
 * in or the pool runs dry.  The UL lock is dropped around usb_ep_queue()
 * since it may call back into the completion handler.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb || !port->port_usb->out) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* Flow control active: leave remaining requests idle */
		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = gbam_alloc_skb_from_pool(port);
		if (!skb)
			break;

		list_del(&req->list);
		req->buf = skb->data;
		req->dma = gbam_get_dma_from_skb(skb);
		req->length = bam_mux_rx_req_size;

		/* Skip UDC-side mapping when the skb is already DMA-mapped */
		if (req->dma != DMA_ERROR_CODE)
			req->dma_pre_mapped = true;
		else
			req->dma_pre_mapped = false;

		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			gbam_free_skb_to_pool(port, skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed %d\n",
							__func__, ret);

			/* Port may have disconnected while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}

	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
+
+static void gbam_start_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ struct usb_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_start_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ struct usb_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_stop_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ unsigned long flags;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->rx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_stop_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ unsigned long flags;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->tx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+
+/*
+ * This function configured data fifo based on index passed to get bam2bam
+ * configuration.
+ */
+static void configure_data_fifo(enum usb_ctrl bam_type, u8 idx,
+ struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ if (pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+ }
+}
+
+
/*
 * usb_bam start callback: (re)start data flow in one direction after a
 * pipe resume/grant.  UL (USB_TO_PEER_PERIPHERAL) uses the endless RX
 * request for BAM2BAM pipes or the sys2bam software path otherwise; DL
 * reconfigures the data FIFO first on DWC3 controllers that reset
 * endpoints after LPM.
 */
static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
{
	struct gbam_port *port = param;
	struct usb_gadget *gadget = NULL;
	struct bam_ch_info *d;
	unsigned long flags;

	if (port == NULL) {
		pr_err("%s: port is NULL\n", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb == NULL) {
		pr_err("%s: port_usb is NULL, disconnected\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	gadget = port->port_usb->gadget;
	d = &port->data_ch;
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (gadget == NULL) {
		pr_err("%s: gadget is NULL\n", __func__);
		return;
	}

	if (dir == USB_TO_PEER_PERIPHERAL) {
		if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
			gbam_start_endless_rx(port);
		else {
			/* sys2bam path: rearm OUT reqs and drain to modem */
			gbam_start_rx(port);
			queue_work(gbam_wq, &d->write_tobam_w);
		}
	} else {
		/* DWC3 may need the FIFO reprogrammed after LPM exit */
		if (gadget_is_dwc3(gadget) &&
		    msm_dwc3_reset_ep_after_lpm(gadget)) {
			configure_data_fifo(d->usb_bam_type,
				d->dst_connection_idx,
				port->port_usb->in, d->dst_pipe_type);
		}
		gbam_start_endless_tx(port);
	}
}
+
+static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct gbam_port *port = param;
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ /*
+ * Only handling BAM2BAM, as there is no equivelant to
+ * gbam_stop_endless_rx() for the SYS2BAM use case
+ */
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gbam_stop_endless_rx(port);
+ } else {
+ gbam_stop_endless_tx(port);
+ }
+}
+
/*
 * Allocate the request pool for one direction of the data path:
 * @in selects the IN/tx side (port_lock_dl, tx_idle) versus the
 * OUT/rx side (port_lock_ul, rx_idle).
 *
 * Returns 0 on success, -EBUSY if the cable is gone, or the
 * gbam_alloc_requests() error.  NOTE(review): a NULL OUT endpoint
 * also returns 0 without allocating anything — presumably deliberate
 * for IN-only configurations; confirm against callers.
 */
static int _gbam_start_io(struct gbam_port *port, bool in)
{
	unsigned long flags;
	int ret = 0;
	struct usb_ep *ep;
	struct list_head *idle;
	unsigned queue_size;
	spinlock_t *spinlock;
	void (*ep_complete)(struct usb_ep *, struct usb_request *);

	/* Each direction is guarded by its own lock */
	if (in)
		spinlock = &port->port_lock_dl;
	else
		spinlock = &port->port_lock_ul;

	spin_lock_irqsave(spinlock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(spinlock, flags);
		return -EBUSY;
	}

	if (in) {
		ep = port->port_usb->in;
		idle = &port->data_ch.tx_idle;
		queue_size = bam_mux_tx_q_size;
		ep_complete = gbam_epin_complete;
	} else {
		ep = port->port_usb->out;
		if (!ep)
			goto out;
		idle = &port->data_ch.rx_idle;
		queue_size = bam_mux_rx_q_size;
		ep_complete = gbam_epout_complete;
	}

	ret = gbam_alloc_requests(ep, idle, queue_size, ep_complete,
			GFP_ATOMIC);
out:
	spin_unlock_irqrestore(spinlock, flags);
	if (ret)
		pr_err("%s: allocation failed\n", __func__);

	return ret;
}
+
/*
 * Bring up both data directions: allocate IN-side requests first, then
 * OUT-side.  If the OUT side fails the IN-side allocations are rolled
 * back; on full success the OUT requests are queued via gbam_start_rx().
 */
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long flags;

	pr_debug("%s: port:%p\n", __func__, port);

	if (_gbam_start_io(port, true))
		return;

	if (_gbam_start_io(port, false)) {
		/* Roll back the tx-side request pool allocated above */
		spin_lock_irqsave(&port->port_lock_dl, flags);
		if (port->port_usb)
			gbam_free_requests(port->port_usb->in,
				&port->data_ch.tx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	/* queue out requests */
	gbam_start_rx(port);
}
+
+static void gbam_notify(void *p, int event, unsigned long data)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+
+ if (port == NULL)
+ pr_err("BAM DMUX notifying after channel close\n");
+
+ switch (event) {
+ case BAM_DMUX_RECEIVE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_recv_cb(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+ case BAM_DMUX_WRITE_DONE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_write_done(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+ case BAM_DMUX_TRANSMIT_SIZE:
+ d = &port->data_ch;
+ if (test_bit(BAM_CH_OPENED, &d->flags))
+ pr_warn("%s, BAM channel opened already", __func__);
+ bam_mux_rx_req_size = data;
+ pr_debug("%s rx_req_size: %lu", __func__, bam_mux_rx_req_size);
+ break;
+ }
+}
+
/*
 * Free everything on the UL/RX side: idle OUT requests, any queued
 * receive skbs and the pre-allocated skb pool.  No-op once the cable
 * (or the OUT endpoint) is gone.
 */
static void gbam_free_rx_buffers(struct gbam_port *port)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct bam_ch_info *d;

	spin_lock_irqsave(&port->port_lock_ul, flags);

	if (!port->port_usb || !port->port_usb->out)
		goto free_rx_buf_out;

	d = &port->data_ch;
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

	gbam_free_rx_skb_idle_list(port);

free_rx_buf_out:
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
+
+static void gbam_free_tx_buffers(struct gbam_port *port)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct bam_ch_info *d;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+
+ if (!port->port_usb)
+ goto free_tx_buf_out;
+
+ d = &port->data_ch;
+ gbam_free_requests(port->port_usb->in, &d->tx_idle);
+
+ while ((skb = __skb_dequeue(&d->tx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+free_tx_buf_out:
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
/* Release all buffers held by @port, UL side first, then DL side. */
static void gbam_free_buffers(struct gbam_port *port)
{
	gbam_free_rx_buffers(port);
	gbam_free_tx_buffers(port);
}
+
+static void gbam_disconnect_work(struct work_struct *w)
+{
+ struct gbam_port *port =
+ container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ if (!test_bit(BAM_CH_OPENED, &d->flags)) {
+ pr_err("%s: Bam channel is not opened\n", __func__);
+ goto exit;
+ }
+
+ msm_bam_dmux_close(d->id);
+ clear_bit(BAM_CH_OPENED, &d->flags);
+exit:
+ return;
+}
+
/*
 * Disconnect worker for the BAM2BAM(_IPA) transport: tears down the
 * IPA connection, frees the BAM FIFOs and disconnects the tethering
 * bridge.  Idempotent via the is_connected flag.
 */
static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
		container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->is_connected) {
		pr_debug("%s: Port already disconnected. Bailing out.\n",
			__func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	port->is_connected = false;
	d = &port->data_ch;

	/*
	 * Unlock the port here and not at the end of this work,
	 * because we do not want to activate usb_bam, ipa and
	 * tethe bridge logic in atomic context and wait uneeded time.
	 * Either way other works will not fire until end of this work
	 * and event functions (as bam_data_connect) will not influance
	 * while lower layers connect pipes, etc.
	 */
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
		if (ret)
			pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
				__func__, ret);
		usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
		usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
		teth_bridge_disconnect(d->ipa_params.src_client);
		/*
		 * Decrement usage count which was incremented upon cable
		 * connect or cable disconnect in suspended state
		 */
		usb_gadget_autopm_put_async(port->gadget);
	}
}
+
/*
 * Connect worker for the BAM DMUX transport: once the DMUX channel is
 * READY (signalled by gbam_data_ch_probe()), open it and start the USB
 * data path.  Bails out silently if the cable was pulled meanwhile.
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	/* Both direction locks taken (ul outer, dl inner) to check port_usb */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (!test_bit(BAM_CH_READY, &d->flags)) {
		pr_err("%s: Bam channel is not ready\n", __func__);
		return;
	}

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}

	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
+
/*
 * Connect worker for the BAM2BAM(_IPA) transport.  Sequence:
 *   1. validate port/gadget under the port locks;
 *   2. allocate BAM FIFOs and program the endpoint/SPS parameters
 *      (DWC3 vs legacy CI controllers differ here);
 *   3. init the tethering bridge, connect both IPA pipe directions;
 *   4. start the endless (or sys2bam) data transfers.
 * Error paths unwind via ep_unconfig/free_fifos.
 */
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct usb_gadget *gadget = NULL;
	struct teth_bridge_connect_params connect_params;
	struct teth_bridge_init_params teth_bridge_params;
	struct bam_ch_info *d;
	u32 sps_params;
	int ret;
	unsigned long flags, flags_ul;

	spin_lock_irqsave(&port->port_lock, flags);

	if (port->last_event == U_BAM_DISCONNECT_E) {
		pr_debug("%s: Port is about to disconnected. Bailing out.\n",
			__func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	port->is_connected = true;

	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	gadget = port->port_usb->gadget;
	if (!gadget) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s: port_usb.gadget is NULL, exiting\n", __func__);
		return;
	}
	d = &port->data_ch;

	/*
	 * Unlock the port here and not at the end of this work,
	 * because we do not want to activate usb_bam, ipa and
	 * tethe bridge logic in atomic context and wait uneeded time.
	 * Either way other works will not fire until end of this work
	 * and event functions (as bam_data_connect) will not influance
	 * while lower layers connect pipes, etc.
	 */
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
	spin_unlock_irqrestore(&port->port_lock, flags);

	d->ipa_params.usb_connection_speed = gadget->speed;

	/*
	 * Invalidate prod and cons client handles from previous
	 * disconnect.
	 */
	d->ipa_params.cons_clnt_hdl = -1;
	d->ipa_params.prod_clnt_hdl = -1;

	if (usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.src_idx,
			&d->src_pipe_type) ||
	    usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.dst_idx,
			&d->dst_pipe_type)) {
		pr_err("%s:usb_bam_get_pipe_type() failed\n", __func__);
		return;
	}
	/* DL must be a hardware (bam2bam) pipe; no sys2bam support there */
	if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
		pr_err("%s: no software preparation for DL not using bam2bam\n",
				__func__);
		return;
	}

	usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);
	gadget->bam2bam_func_enabled = true;

	spin_lock_irqsave(&port->port_lock, flags);
	/* check if USB cable is disconnected or not */
	/* NOTE(review): '!port' is redundant here — port was already
	 * dereferenced above; only the port_usb check is meaningful. */
	if (!port || !port->port_usb) {
		pr_debug("%s: cable is disconnected.\n",
						__func__);
		spin_unlock_irqrestore(&port->port_lock,
						flags);
		goto free_fifos;
	}
	if (gadget_is_dwc3(gadget)) {
		/* Configure for RX */
		configure_data_fifo(d->usb_bam_type, d->src_connection_idx,
				port->port_usb->out, d->src_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | MSM_PRODUCER |
				d->src_pipe_idx;
		d->rx_req->length = 32*1024;
		d->rx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->out, d->rx_req, GFP_ATOMIC);

		/* Configure for TX */
		configure_data_fifo(d->usb_bam_type, d->dst_connection_idx,
				port->port_usb->in, d->dst_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | d->dst_pipe_idx;
		d->tx_req->length = 32*1024;
		d->tx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->in, d->tx_req, GFP_ATOMIC);

	} else {
		/* Configure for RX */
		sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
		d->rx_req->udc_priv = sps_params;

		/* Configure for TX */
		sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
		d->tx_req->length = 32*1024;
		d->tx_req->udc_priv = sps_params;

	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	teth_bridge_params.client = d->ipa_params.src_client;
	ret = teth_bridge_init(&teth_bridge_params);
	if (ret) {
		pr_err("%s:teth_bridge_init() failed\n", __func__);
		goto ep_unconfig;
	}

	/* Support for UL using system-to-IPA */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		d->ul_params.teth_priv =
			teth_bridge_params.private_data;
		d->ul_params.teth_cb =
			teth_bridge_params.usb_notify_cb;
		d->ipa_params.notify = gbam_ipa_sys2bam_notify_cb;
		d->ipa_params.priv = &d->ul_params;
		d->ipa_params.reset_pipe_after_lpm = false;

	} else {
		d->ipa_params.notify =
			teth_bridge_params.usb_notify_cb;
		d->ipa_params.priv =
			teth_bridge_params.private_data;
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			msm_dwc3_reset_ep_after_lpm(gadget));
	}
	d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
	d->ipa_params.skip_ep_cfg = teth_bridge_params.skip_ep_cfg;
	d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto ep_unconfig;
	}

	/* Remove support for UL using system-to-IPA towards DL */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		d->ipa_params.notify = d->ul_params.teth_cb;
		d->ipa_params.priv = d->ul_params.teth_priv;
	}
	if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			msm_dwc3_reset_ep_after_lpm(gadget));
	else
		d->ipa_params.reset_pipe_after_lpm = false;
	d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto ep_unconfig;
	}

	/* Publish the IPA endpoint indices to the QTI control channel */
	gqti_ctrl_update_ipa_pipes(port->port_usb, port->port_num,
				d->ipa_params.ipa_prod_ep_idx ,
				d->ipa_params.ipa_cons_ep_idx);

	connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
	connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
	connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
	connect_params.client_type = d->ipa_params.src_client;
	ret = teth_bridge_connect(&connect_params);
	if (ret) {
		pr_err("%s:teth_bridge_connect() failed\n", __func__);
		goto ep_unconfig;
	}

	/* queue in & out requests */
	if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM) {
		gbam_start_endless_rx(port);
	} else {
		/* The use-case of UL (OUT) ports using sys2bam is based on
		 * partial reuse of the system-to-bam_demux code. The following
		 * lines perform the branching out of the standard bam2bam flow
		 * on the USB side of the UL channel
		 */
		if (_gbam_start_io(port, false)) {
			pr_err("%s: _gbam_start_io failed\n", __func__);
			return;
		}
		gbam_start_rx(port);
	}
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
	return;

ep_unconfig:
	if (gadget_is_dwc3(gadget)) {
		msm_ep_unconfig(port->port_usb->in);
		msm_ep_unconfig(port->port_usb->out);
	}
free_fifos:
	usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);

}
+
+static int gbam_wake_cb(void *param)
+{
+ struct gbam_port *port = (struct gbam_port *)param;
+ struct usb_gadget *gadget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENODEV;
+ }
+
+ gadget = port->port_usb->gadget;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ return usb_gadget_wakeup(gadget);
+}
+
/*
 * Suspend worker: registers the remote-wakeup callback and (for
 * BAM2BAM_IPA) hands the pipes to usb_bam_suspend(), then drops the
 * autopm usage count taken at cable connect so the parent may enter
 * LPM.  NOTE(review): usb_gadget_autopm_put_async() is invoked with
 * port_lock held — presumably safe because it is the async variant;
 * confirm against the gadget core locking rules.
 */
static void gbam2bam_suspend_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: suspend work started\n", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	/* A racing disconnect/resume supersedes this suspend */
	if ((port->last_event == U_BAM_DISCONNECT_E) ||
	    (port->last_event == U_BAM_RESUME_E)) {
		pr_debug("%s: Port is about to disconnect/resume. Bail out\n",
			__func__);
		goto exit;
	}

	d = &port->data_ch;

	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
					gbam_wake_cb, port);
	if (ret) {
		pr_err("%s(): Failed to register BAM wake callback.\n",
			__func__);
		goto exit;
	}

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		usb_bam_register_start_stop_cbs(d->usb_bam_type,
			d->dst_connection_idx, gbam_start, gbam_stop, port);

		/*
		 * release lock here because gbam_start() or
		 * gbam_stop() called from usb_bam_suspend()
		 * re-acquires port lock.
		 */
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
		spin_lock_irqsave(&port->port_lock, flags);
	}

exit:
	/*
	 * Decrement usage count after IPA handshake is done to allow gadget
	 * parent to go to lpm. This counter was incremented upon cable connect
	 */
	usb_gadget_autopm_put_async(port->gadget);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
/*
 * Resume worker: unregisters the wake callback and resumes the IPA
 * pipes.  On DWC3 controllers that reset endpoints after LPM, the data
 * FIFOs and the DBM endpoint must be reprogrammed first.
 */
static void gbam2bam_resume_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
	struct bam_ch_info *d;
	struct usb_gadget *gadget = NULL;
	int ret;
	unsigned long flags;

	pr_debug("%s: resume work started\n", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	if (port->last_event == U_BAM_DISCONNECT_E || !port->port_usb) {
		pr_debug("%s: usb cable is disconnected, exiting\n",
			__func__);
		goto exit;
	}

	d = &port->data_ch;
	gadget = port->port_usb->gadget;

	/* Passing NULLs deregisters the wake callback set at suspend */
	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
					NULL, NULL);
	if (ret) {
		pr_err("%s(): Failed to register BAM wake callback.\n",
			__func__);
		goto exit;
	}

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		if (gadget_is_dwc3(gadget) &&
			msm_dwc3_reset_ep_after_lpm(gadget)) {
			configure_data_fifo(d->usb_bam_type,
				d->src_connection_idx,
				port->port_usb->out, d->src_pipe_type);
			configure_data_fifo(d->usb_bam_type,
				d->dst_connection_idx,
				port->port_usb->in, d->dst_pipe_type);
			/* DBM reset may sleep/re-enter: drop the lock */
			spin_unlock_irqrestore(&port->port_lock, flags);
			msm_dwc3_reset_dbm_ep(port->port_usb->in);
			spin_lock_irqsave(&port->port_lock, flags);
		}
		usb_bam_resume(d->usb_bam_type, &d->ipa_params);
	}

exit:
	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
/* BAM data channel ready, allow attempt to open */
/*
 * Platform-driver probe: the BAM DMUX channel named pdev->name has come
 * up.  Marks the matching port READY and, if USB is already connected,
 * schedules the connect worker to open the channel.
 */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;
	bool do_work = false;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				do_work = true;
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (do_work)
				queue_work(gbam_wq, &port->connect_w);
			break;
		}
	}

	return 0;
}
+
+/* BAM data channel went inactive, so close it */
+static int gbam_data_ch_remove(struct platform_device *pdev)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct usb_ep *ep_in = NULL;
+ struct usb_ep *ep_out = NULL;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ for (i = 0; i < n_bam_ports; i++) {
+ if (!strncmp(bam_ch_names[i], pdev->name,
+ BAM_DMUX_CH_NAME_MAX_LEN)) {
+ port = bam_ports[i].port;
+ d = &port->data_ch;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (port->port_usb) {
+ ep_in = port->port_usb->in;
+ ep_out = port->port_usb->out;
+ }
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (ep_in)
+ usb_ep_fifo_flush(ep_in);
+ if (ep_out)
+ usb_ep_fifo_flush(ep_out);
+
+ gbam_free_buffers(port);
+
+ msm_bam_dmux_close(d->id);
+
+ /* bam dmux will free all pending skbs */
+ d->pending_pkts_with_bam = 0;
+ d->pending_bytes_with_bam = 0;
+
+ clear_bit(BAM_CH_READY, &d->flags);
+ clear_bit(BAM_CH_OPENED, &d->flags);
+ }
+ }
+
+ return 0;
+}
+
+static void gbam_port_free(int portno)
+{
+ struct gbam_port *port = bam_ports[portno].port;
+ struct platform_driver *pdrv = &bam_ports[portno].pdrv;
+
+ if (port) {
+ kfree(port);
+ platform_driver_unregister(pdrv);
+ }
+}
+
+static void gbam2bam_port_free(int portno)
+{
+ struct gbam_port *port = bam2bam_ports[portno];
+
+ kfree(port);
+}
+
+static int gbam_port_alloc(int portno)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct platform_driver *pdrv;
+
+ port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ /* port initialization */
+ port->is_connected = false;
+ spin_lock_init(&port->port_lock_ul);
+ spin_lock_init(&port->port_lock_dl);
+ spin_lock_init(&port->port_lock);
+ INIT_WORK(&port->connect_w, gbam_connect_work);
+ INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
+
+ /* data ch */
+ d = &port->data_ch;
+ d->port = port;
+ INIT_LIST_HEAD(&d->tx_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+ INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+ INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
+ skb_queue_head_init(&d->tx_skb_q);
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ d->id = bam_ch_ids[portno];
+
+ bam_ports[portno].port = port;
+
+ scnprintf(bam_ch_names[portno], BAM_DMUX_CH_NAME_MAX_LEN,
+ "bam_dmux_ch_%d", bam_ch_ids[portno]);
+ pdrv = &bam_ports[portno].pdrv;
+ pdrv->probe = gbam_data_ch_probe;
+ pdrv->remove = gbam_data_ch_remove;
+ pdrv->driver.name = bam_ch_names[portno];
+ pdrv->driver.owner = THIS_MODULE;
+
+ platform_driver_register(pdrv);
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+static int gbam2bam_port_alloc(int portno)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+
+ port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ /* port initialization */
+ port->is_connected = false;
+ spin_lock_init(&port->port_lock_ul);
+ spin_lock_init(&port->port_lock_dl);
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, gbam2bam_connect_work);
+ INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+ INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+ INIT_WORK(&port->resume_w, gbam2bam_resume_work);
+
+ /* data ch */
+ d = &port->data_ch;
+ d->port = port;
+ d->ipa_params.src_client = usb_prod[portno];
+ d->ipa_params.dst_client = usb_cons[portno];
+ bam2bam_ports[portno] = port;
+
+ /* UL workaround requirements */
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+ INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
/*
 * debugfs read: dump per-port BAM-DMUX counters into a 1 KB buffer and
 * copy them to userspace via simple_read_from_buffer().
 */
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* snapshot every allocated port's counters under both xfer locks */
	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"dbytes_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"rx_flow_control_disable_count: %u\n"
				"rx_flow_control_enable_count: %u\n"
				"rx_flow_control_triggered: %u\n"
				"max_num_pkts_pending_with_bam: %u\n"
				"max_bytes_pending_with_bam: %u\n"
				"delayed_bam_mux_write_done: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n"
				"skb_expand_cnt: %lu\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_pkts_with_bam,
				d->pending_bytes_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->rx_flow_control_disable,
				d->rx_flow_control_enable,
				d->rx_flow_control_triggered,
				d->max_num_pkts_pending_with_bam,
				d->max_bytes_pending_with_bam,
				d->delayed_bam_mux_write_done,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags),
				d->skb_expand_cnt);

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
+
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+
+ d = &port->data_ch;
+
+ d->to_host = 0;
+ d->to_modem = 0;
+ d->pending_pkts_with_bam = 0;
+ d->pending_bytes_with_bam = 0;
+ d->tohost_drp_cnt = 0;
+ d->tomodem_drp_cnt = 0;
+ d->rx_flow_control_disable = 0;
+ d->rx_flow_control_enable = 0;
+ d->rx_flow_control_triggered = 0;
+ d->max_num_pkts_pending_with_bam = 0;
+ d->max_bytes_pending_with_bam = 0;
+ d->delayed_bam_mux_write_done = 0;
+ d->skb_expand_cnt = 0;
+
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ }
+ return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+ .read = gbam_read_stats,
+ .write = gbam_reset_stats,
+};
+
+struct dentry *gbam_dent;
+static void gbam_debugfs_init(void)
+{
+ struct dentry *dfile;
+
+ if (gbam_dent)
+ return;
+
+ gbam_dent = debugfs_create_dir("usb_rmnet", 0);
+ if (!gbam_dent || IS_ERR(gbam_dent))
+ return;
+
+ dfile = debugfs_create_file("status", 0444, gbam_dent, 0,
+ &gbam_stats_ops);
+ if (!dfile || IS_ERR(dfile)) {
+ debugfs_remove(gbam_dent);
+ gbam_dent = NULL;
+ return;
+ }
+}
+static void gbam_debugfs_remove(void)
+{
+ debugfs_remove_recursive(gbam_dent);
+}
#else
/* debugfs disabled: empty stubs keep the call sites unconditional */
static inline void gbam_debugfs_init(void) {}
static inline void gbam_debugfs_remove(void) {}
#endif
+
/*
 * Tear down an rmnet/DPL data connection on cable disconnect.
 *
 * Validates transport/port, clears port_usb under the UL/DL locks,
 * frees buffered data (and, for IPA, the endless tx/rx requests),
 * disables the endpoints and defers transport teardown to
 * disconnect_w.  Runs with port_lock held for the whole sequence.
 *
 * @gr:       gadget-side rmnet handle carrying the endpoints
 * @port_num: index into bam_ports[] or bam2bam_ports[] per @trans
 * @trans:    transport type; plain BAM2BAM is rejected
 */
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port *port;
	unsigned long flags, flags_ul, flags_dl;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		pr_err("%s: invalid xport#%d\n", __func__, trans);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM_DMUX &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM_DMUX)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	if (!port) {
		pr_err("%s: NULL port", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;
	/* Already disconnected due to suspend with remote wake disabled */
	if (port->last_event == U_BAM_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	/*
	 * Suspend with remote wakeup enabled. Increment usage
	 * count when disconnect happens in suspended state.
	 * Corresponding decrement happens in the end of this
	 * function if IPA handshake is already done or it is done
	 * in disconnect work after finishing IPA handshake.
	 */
	if (port->last_event == U_BAM_SUSPEND_E)
		usb_gadget_autopm_get_noresume(port->gadget);

	/*
	 * NOTE(review): port_usb is re-asserted here and then cleared a few
	 * lines below under the UL/DL locks — presumably so the free-buffer
	 * helpers see a valid port_usb; confirm intent.
	 */
	port->port_usb = gr;

	if (trans == USB_GADGET_XPORT_BAM_DMUX)
		gbam_free_buffers(port);
	else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		gbam_free_rx_buffers(port);

	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	n_tx_req_queued = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);

	/* disable endpoints */
	if (gr->out) {
		usb_ep_disable(gr->out);
		if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
			spin_lock_irqsave(&port->port_lock_ul, flags_ul);
			if (d->rx_req) {
				usb_ep_free_request(gr->out, d->rx_req);
				d->rx_req = NULL;
			}
			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
		}
	}
	usb_ep_disable(gr->in);
	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		spin_lock_irqsave(&port->port_lock_dl, flags_dl);
		if (d->tx_req) {
			usb_ep_free_request(gr->in, d->tx_req);
			d->tx_req = NULL;
		}
		spin_unlock_irqrestore(&port->port_lock_dl, flags_dl);
	}

	/*
	 * Set endless flag to false as USB Endpoint is already
	 * disable.
	 */
	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {

		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
			gr->in->endless = false;

		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM && gr->out)
			gr->out->endless = false;
	}

	gr->in->driver_data = NULL;
	if (gr->out)
		gr->out->driver_data = NULL;

	port->last_event = U_BAM_DISCONNECT_E;
	/* Disable usb irq for CI gadget. It will be enabled in
	 * usb_bam_disconnect_pipe() after disconnecting all pipes
	 * and USB BAM reset is done.
	 */
	if (!gadget_is_dwc3(port->gadget) &&
			(trans == USB_GADGET_XPORT_BAM2BAM_IPA))
		msm_usb_irq_disable(true);

	queue_work(gbam_wq, &port->disconnect_w);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
/*
 * Bring up an rmnet/DPL data connection on cable connect.
 *
 * Validates transport/port, records the grmnet handle, allocates the
 * endless tx/rx requests for the IPA transport, enables the endpoints
 * and defers transport bring-up to connect_w.  Returns 0 on success or
 * a negative errno; runs with port_lock held throughout (hence the
 * GFP_ATOMIC allocations).
 *
 * @gr:                 gadget-side rmnet handle (must carry a gadget)
 * @port_num:           index into bam_ports[] / bam2bam_ports[]
 * @trans:              transport type; plain BAM2BAM is rejected
 * @src_connection_idx: usb_bam source (OUT) pipe index (IPA only)
 * @dst_connection_idx: usb_bam destination (IN) pipe index (IPA only)
 */
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 src_connection_idx,
		u8 dst_connection_idx)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags, flags_ul;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (!gr->gadget) {
		pr_err("%s: gadget handle not passed\n", __func__);
		return -EINVAL;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		pr_err("%s: invalid xport#%d\n", __func__, trans);
		return -EINVAL;
	}

	if (trans == USB_GADGET_XPORT_BAM_DMUX && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA)
			&& port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM_DMUX)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	if (!port) {
		pr_err("%s: NULL port", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;
	d->trans = trans;

	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
	spin_lock(&port->port_lock_dl);
	port->port_usb = gr;
	port->gadget = port->port_usb->gadget;

	/* IPA: allocate the "endless" zero-length requests (atomic: locks held) */
	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		d->rx_req = usb_ep_alloc_request(port->port_usb->out,
				GFP_ATOMIC);
		if (!d->rx_req) {
			pr_err("%s: RX request allocation failed\n", __func__);
			d->rx_req = NULL;
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
			spin_unlock_irqrestore(&port->port_lock, flags);
			return -ENOMEM;
		}

		d->rx_req->context = port;
		d->rx_req->complete = gbam_endless_rx_complete;
		d->rx_req->length = 0;
		d->rx_req->no_interrupt = 1;

		d->tx_req = usb_ep_alloc_request(port->port_usb->in,
				GFP_ATOMIC);
		if (!d->tx_req) {
			pr_err("%s: TX request allocation failed\n", __func__);
			d->tx_req = NULL;
			usb_ep_free_request(port->port_usb->out, d->rx_req);
			d->rx_req = NULL;
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
			spin_unlock_irqrestore(&port->port_lock, flags);
			return -ENOMEM;
		}

		d->tx_req->context = port;
		d->tx_req->complete = gbam_endless_tx_complete;
		d->tx_req->length = 0;
		d->tx_req->no_interrupt = 1;
	}

	/* BAM-DMUX: fresh connection starts with clean debug counters */
	if (d->trans == USB_GADGET_XPORT_BAM_DMUX) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_pkts_with_bam = 0;
		d->pending_bytes_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
		d->rx_flow_control_disable = 0;
		d->rx_flow_control_enable = 0;
		d->rx_flow_control_triggered = 0;
		d->max_num_pkts_pending_with_bam = 0;
		d->max_bytes_pending_with_bam = 0;
		d->delayed_bam_mux_write_done = 0;
	}

	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		d->src_connection_idx = src_connection_idx;
		d->dst_connection_idx = dst_connection_idx;
		d->usb_bam_type = usb_bam_get_bam_type(gr->gadget->name);
		d->ipa_params.src_pipe = &(d->src_pipe_idx);
		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
		d->ipa_params.src_idx = src_connection_idx;
		d->ipa_params.dst_idx = dst_connection_idx;

		/*
		 * Query pipe type using IPA src/dst index with
		 * usbbam driver. It is being set either as
		 * BAM2BAM or SYS2BAM.
		 */
		if (usb_bam_get_pipe_type(d->usb_bam_type,
				d->ipa_params.src_idx, &d->src_pipe_type) ||
			usb_bam_get_pipe_type(d->usb_bam_type,
				d->ipa_params.dst_idx, &d->dst_pipe_type)) {
			pr_err("%s:usb_bam_get_pipe_type() failed\n",
				__func__);
			ret = -EINVAL;
			usb_ep_free_request(port->port_usb->out, d->rx_req);
			d->rx_req = NULL;
			usb_ep_free_request(port->port_usb->in, d->tx_req);
			d->tx_req = NULL;
			goto exit;
		}
		/*
		 * Check for pipe_type. If it is BAM2BAM, then it is required
		 * to disable Xfer complete and Xfer not ready interrupts for
		 * that particular endpoint. Hence it set endless flag based
		 * it which is considered into UDC driver while enabling
		 * USB Endpoint.
		 */
		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
			port->port_usb->in->endless = true;

		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
			port->port_usb->out->endless = true;
	}

	ret = usb_ep_enable(gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
			__func__, gr->in);
		/*
		 * NOTE(review): on the BAM_DMUX path d->rx_req/d->tx_req are
		 * NULL here and gr->out may be NULL for DPL; this assumes
		 * usb_ep_free_request() tolerates NULL ep/req — confirm, or
		 * gate these frees on the IPA transport.
		 */
		usb_ep_free_request(port->port_usb->out, d->rx_req);
		d->rx_req = NULL;
		usb_ep_free_request(port->port_usb->in, d->tx_req);
		d->tx_req = NULL;
		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
			port->port_usb->in->endless = false;

		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
			port->port_usb->out->endless = false;
		goto exit;
	}
	gr->in->driver_data = port;

	/*
	 * DPL traffic is routed through BAM-DMUX on some targets.
	 * DPL function has only 1 IN endpoint. Add out endpoint
	 * checks for BAM-DMUX transport.
	 */
	if (gr->out) {
		ret = usb_ep_enable(gr->out);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
					__func__, gr->out);
			gr->in->driver_data = 0;
			usb_ep_disable(gr->in);
			/* NOTE(review): same NULL-req concern as above */
			usb_ep_free_request(port->port_usb->out, d->rx_req);
			d->rx_req = NULL;
			usb_ep_free_request(port->port_usb->in, d->tx_req);
			d->tx_req = NULL;
			if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->in->endless = false;

			if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->out->endless = false;
			goto exit;
		}
		gr->out->driver_data = port;
	}

	port->last_event = U_BAM_CONNECT_E;
	/*
	 * Increment usage count upon cable connect. Decrement after IPA
	 * handshake is done in disconnect work (due to cable disconnect)
	 * or in suspend work.
	 */
	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		usb_gadget_autopm_get_noresume(port->gadget);
	queue_work(gbam_wq, &port->connect_w);

	ret = 0;
exit:
	spin_unlock_irqrestore(&port->port_lock, flags);
	return ret;
}
+
+int gbam_setup(unsigned int no_bam_port)
+{
+ int i;
+ int ret;
+ int bam_port_start = n_bam_ports;
+ int total_bam_ports = bam_port_start + no_bam_port;
+
+ pr_debug("%s: requested BAM ports:%d\n", __func__, no_bam_port);
+
+ if (!no_bam_port || total_bam_ports > BAM_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, no_bam_port);
+ return -EINVAL;
+ }
+
+ if (!gbam_wq) {
+ gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!gbam_wq) {
+ pr_err("%s: Unable to create workqueue gbam_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ for (i = bam_port_start; i < (bam_port_start + no_bam_port); i++) {
+ n_bam_ports++;
+ pr_debug("gbam_port_alloc called for %d\n", i);
+ ret = gbam_port_alloc(i);
+ if (ret) {
+ n_bam_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_bam_ports;
+ }
+ }
+
+ gbam_debugfs_init();
+
+ return bam_port_start;
+
+free_bam_ports:
+ for (i = 0; i < n_bam_ports; i++)
+ gbam_port_free(i);
+ destroy_workqueue(gbam_wq);
+
+ return ret;
+}
+
+int gbam2bam_setup(unsigned int no_bam2bam_port)
+{
+ int i;
+ int ret;
+ int bam2bam_port_start = n_bam2bam_ports;
+ int total_bam2bam_ports = bam2bam_port_start + no_bam2bam_port;
+
+ pr_debug("%s: requested BAM2BAM ports:%d\n", __func__, no_bam2bam_port);
+
+ if (!no_bam2bam_port || total_bam2bam_ports > BAM2BAM_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, no_bam2bam_port);
+ return -EINVAL;
+ }
+
+ if (!gbam_wq) {
+ gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!gbam_wq) {
+ pr_err("%s: Unable to create workqueue gbam_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ for (i = bam2bam_port_start; i < (bam2bam_port_start +
+ no_bam2bam_port); i++) {
+ n_bam2bam_ports++;
+ ret = gbam2bam_port_alloc(i);
+ if (ret) {
+ n_bam2bam_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_bam2bam_ports;
+ }
+ }
+
+ gbam_debugfs_init();
+
+ return bam2bam_port_start;
+
+free_bam2bam_ports:
+ for (i = 0; i < n_bam2bam_ports; i++)
+ gbam2bam_port_free(i);
+ destroy_workqueue(gbam_wq);
+
+ return ret;
+}
+
/* Tear down the debugfs entries created by gbam_setup()/gbam2bam_setup(). */
void gbam_cleanup(void)
{
	gbam_debugfs_remove();
}
+
+void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return;
+
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ pr_debug("%s: suspended port %d\n", __func__, port_num);
+
+ port->last_event = U_BAM_SUSPEND_E;
+ queue_work(gbam_wq, &port->suspend_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return;
+
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+ port->last_event = U_BAM_RESUME_E;
+ /*
+ * Increment usage count here to disallow gadget parent suspend.
+ * This counter will decrement after IPA handshake is done in
+ * disconnect work (due to cable disconnect) or in bam_disconnect
+ * in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(gbam_wq, &port->resume_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+ struct usb_ep *out)
+{
+ struct grmnet *gr;
+
+ gr = kzalloc(sizeof(*gr), GFP_ATOMIC);
+ if (!gr)
+ return -ENOMEM;
+ gr->in = in;
+ gr->out = out;
+ gr->gadget = g;
+
+ return gbam_connect(gr, 0, USB_GADGET_XPORT_BAM_DMUX, 0, 0);
+}
+
+void gbam_mbim_disconnect(void)
+{
+ struct gbam_port *port = bam_ports[0].port;
+ struct grmnet *gr = port->port_usb;
+
+ if (!gr) {
+ pr_err("%s: port_usb is NULL\n", __func__);
+ return;
+ }
+
+ gbam_disconnect(gr, 0, USB_GADGET_XPORT_BAM_DMUX);
+ kfree(gr);
+}
+
+int gbam_mbim_setup(void)
+{
+ int ret = 0;
+
+ /*
+ * MBIM requires only 1 USB_GADGET_XPORT_BAM_DMUX
+ * port. The port is always 0 and is shared
+ * between RMNET and MBIM.
+ */
+ if (!n_bam_ports)
+ ret = gbam_setup(1);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/function/u_bam_data.c b/drivers/usb/gadget/function/u_bam_data.c
new file mode 100644
index 000000000000..ff32b391a3e9
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.c
@@ -0,0 +1,2113 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/usb/gadget.h>
+
+#include <linux/usb_bam.h>
+
+#include "u_bam_data.h"
+
#define BAM_DATA_RX_Q_SIZE 128
#define BAM_DATA_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
#define BAM_DATA_PENDING_LIMIT 220

/* defaults for the sys2bam RX packet flow-control watermarks */
#define SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT 1
#define SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD 500
#define SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD 300

/* runtime-tunable copies of the watermarks above (module parameters) */
static unsigned int bam_ipa_rx_fctrl_support = SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_ipa_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int bam_ipa_rx_fctrl_en_thld = SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_ipa_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int bam_ipa_rx_fctrl_dis_thld = SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_ipa_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

static struct workqueue_struct *bam_data_wq;	/* all deferred port work */
static int n_bam2bam_data_ports;		/* ports allocated so far */

/* NOTE(review): not static — presumably referenced elsewhere; confirm */
unsigned int bam_data_rx_q_size = BAM_DATA_RX_Q_SIZE;
module_param(bam_data_rx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int bam_data_mux_rx_req_size = BAM_DATA_MUX_RX_REQ_SIZE;
module_param(bam_data_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

/* SPS pipe configuration bits encoded alongside the BAM pipe index */
#define SPS_PARAMS_SPS_MODE BIT(5)
#define SPS_PARAMS_TBE BIT(6)
#define MSM_VENDOR_ID BIT(16)
+
/* RNDIS-over-IPA data channel configuration, cached for pipe connect */
struct rndis_data_ch_info {
	/* this provides downlink (device->host i.e host) side configuration*/
	u32 dl_max_transfer_size;
	/* this provides uplink (host->device i.e device) side configuration */
	u32 ul_max_transfer_size;
	u32 ul_max_packets_number;
	bool ul_aggregation_enable;
	u32 prod_clnt_hdl;	/* IPA producer client handle */
	u32 cons_clnt_hdl;	/* IPA consumer client handle */
	void *priv;		/* opaque handle passed back to rndis_ipa */
};
+
/* context for the sys2bam (UL workaround) software path towards IPA */
struct sys2ipa_sw_data {
	void *teth_priv;	/* tethering driver's private handle */
	ipa_notify_cb teth_cb;	/* tethering driver's IPA event callback */
};
+
/* per-port data-channel state for the bam2bam (IPA) data path */
struct bam_data_ch_info {
	unsigned long flags;
	unsigned id;

	struct bam_data_port *port;	/* back-pointer to owning port */
	struct work_struct write_tobam_w;	/* pushes rx skbs to IPA */

	/* "endless" requests used when both pipes are pure BAM2BAM */
	struct usb_request *rx_req;
	struct usb_request *tx_req;

	u32 src_pipe_idx;
	u32 dst_pipe_idx;
	u8 src_connection_idx;
	u8 dst_connection_idx;
	enum usb_ctrl usb_bam_type;

	enum function_type func_type;
	enum transport_type trans;
	struct usb_bam_connect_ipa_params ipa_params;

	/* UL workaround parameters */
	struct sys2ipa_sw_data ul_params;
	struct list_head rx_idle;	/* unused OUT usb_requests */
	struct sk_buff_head rx_skb_q;	/* skbs received, pending for IPA */
	/* accounting for the dynamically grown skb pool / request pool */
	int total_skb;
	int freed_skb;
	int freed_rx_reqs;
	int alloc_rx_reqs;
	struct sk_buff_head rx_skb_idle;	/* recycled skb pool */
	enum usb_bam_pipe_type src_pipe_type;
	enum usb_bam_pipe_type dst_pipe_type;
	unsigned int pending_with_bam;	/* skbs handed to IPA, not yet done */
	int rx_buffer_size;

	/* RX flow-control statistics */
	unsigned int rx_flow_control_disable;
	unsigned int rx_flow_control_enable;
	unsigned int rx_flow_control_triggered;
	/*
	 * used for RNDIS/ECM network interface based design
	 * to indicate ecm/rndis pipe connect notifiaction is sent
	 * to ecm_ipa/rndis_ipa.
	 */
	atomic_t pipe_connect_notified;
	bool tx_req_dequeued;
	bool rx_req_dequeued;
};
+
/* values recorded in bam_data_port.last_event to serialize state work */
enum u_bam_data_event_type {
	U_BAM_DATA_DISCONNECT_E = 0,
	U_BAM_DATA_CONNECT_E,
	U_BAM_DATA_SUSPEND_E,
	U_BAM_DATA_RESUME_E
};
+
/* one logical bam2bam data port (RNDIS/ECM/MBIM instance) */
struct bam_data_port {
	bool is_ipa_connected;		/* IPA pipes currently up */
	enum u_bam_data_event_type last_event;
	unsigned port_num;
	spinlock_t port_lock;		/* guards all fields below */
	unsigned int ref_count;
	struct data_port *port_usb;	/* gadget-side handle; NULL when down */
	struct usb_gadget *gadget;
	struct bam_data_ch_info data_ch;

	/* deferred state transitions, all run on bam_data_wq */
	struct work_struct connect_w;
	struct work_struct disconnect_w;
	struct work_struct suspend_w;
	struct work_struct resume_w;
};

/* pipe identifiers reported to the function driver after connect */
struct usb_bam_data_connect_info {
	u32 usb_bam_pipe_idx;
	u32 peer_pipe_idx;
	u32 usb_bam_handle;
};
+
/* NOTE(review): not static — confirm no external users before narrowing */
struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
static struct rndis_data_ch_info rndis_data;	/* single RNDIS channel cfg */

static void bam2bam_data_suspend_work(struct work_struct *w);
static void bam2bam_data_resume_work(struct work_struct *w);
static void bam_data_free_reqs(struct bam_data_port *port);
+
+/*----- sys2bam towards the IPA (UL workaround) --------------- */
+
+static int bam_data_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct bam_data_port *port = ep->driver_data;
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
+ ep, head, num, cb);
+
+ if (d->alloc_rx_reqs) {
+ pr_err("%s(): reqs are already allocated.\n", __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ d->alloc_rx_reqs++;
+ req->complete = cb;
+ list_add_tail(&req->list, head);
+ }
+
+ return 0;
+}
+
+static inline dma_addr_t bam_data_get_dma_from_skb(struct sk_buff *skb)
+{
+ return *((dma_addr_t *)(skb->cb));
+}
+
+/* This function should be called with port_lock lock taken */
+static struct sk_buff *bam_data_alloc_skb_from_pool(
+ struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb = NULL;
+ dma_addr_t skb_buf_dma_addr;
+ struct data_port *data_port;
+ struct usb_gadget *gadget;
+
+ if (!port)
+ return NULL;
+ d = &port->data_ch;
+ if (!d)
+ return NULL;
+
+ if (d->rx_skb_idle.qlen == 0) {
+ /*
+ * In case skb idle pool is empty, we allow to allocate more
+ * skbs so we dynamically enlarge the pool size when needed.
+ * Therefore, in steady state this dynamic allocation will
+ * stop when the pool will arrive to its optimal size.
+ */
+ pr_debug("%s: allocate skb\n", __func__);
+ skb = alloc_skb(d->rx_buffer_size + BAM_MUX_HDR, GFP_ATOMIC);
+ if (!skb) {
+ pr_err("%s: alloc skb failed\n", __func__);
+ goto alloc_exit;
+ }
+
+ d->total_skb++;
+ skb_reserve(skb, BAM_MUX_HDR);
+
+ data_port = port->port_usb;
+ if (data_port && data_port->cdev && data_port->cdev->gadget) {
+ gadget = data_port->cdev->gadget;
+
+ skb_buf_dma_addr =
+ dma_map_single(&gadget->dev, skb->data,
+ d->rx_buffer_size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+ pr_err("%s: Could not DMA map SKB buffer\n",
+ __func__);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+ } else {
+ pr_err("%s: Could not DMA map SKB buffer\n", __func__);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+
+ memcpy(skb->cb, &skb_buf_dma_addr,
+ sizeof(skb_buf_dma_addr));
+
+ } else {
+ pr_debug("%s: pull skb from pool\n", __func__);
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ }
+
+alloc_exit:
+ return skb;
+}
+
+static void bam_data_free_skb_to_pool(
+ struct bam_data_port *port,
+ struct sk_buff *skb)
+{
+ struct bam_data_ch_info *d;
+
+ if (!port) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ d = &port->data_ch;
+ if (!d) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
+static void bam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct bam_data_port *port = p;
+ struct bam_data_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ bam_data_free_skb_to_pool(port, skb);
+
+ d->pending_with_bam--;
+
+ pr_debug("%s: port:%p d:%p pbam:%u, pno:%d\n", __func__,
+ port, d, d->pending_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ queue_work(bam_data_wq, &d->write_tobam_w);
+}
+
+static void bam_data_ipa_sys2bam_notify_cb(void *priv,
+ enum ipa_dp_evt_type event, unsigned long data)
+{
+ struct sys2ipa_sw_data *ul = (struct sys2ipa_sw_data *)priv;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ switch (event) {
+ case IPA_WRITE_DONE:
+ d = container_of(ul, struct bam_data_ch_info, ul_params);
+ port = container_of(d, struct bam_data_port, data_ch);
+ /* call into bam_demux functionality that'll recycle the data */
+ bam_data_write_done(port, (struct sk_buff *)(data));
+ break;
+ case IPA_RECEIVE:
+ /* call the callback given by tethering driver init function
+ * (and was given to ipa_connect)
+ */
+ if (ul->teth_cb)
+ ul->teth_cb(ul->teth_priv, event, data);
+ break;
+ default:
+ /* unexpected event */
+ pr_err("%s: unexpected event %d\n", __func__, event);
+ break;
+ }
+}
+
+
/*
 * Prime the OUT endpoint: pair each idle rx request with a pool skb
 * and queue it, until the idle list runs dry, skb allocation fails, or
 * the flow-control high watermark is reached.  The lock is dropped
 * around usb_ep_queue(), so port_usb is re-checked on each iteration.
 */
static void bam_data_start_rx(struct bam_data_port *port)
{
	struct usb_request *req;
	struct bam_data_ch_info *d;
	struct usb_ep *ep;
	int ret;
	struct sk_buff *skb;
	unsigned long flags;

	/* NOTE(review): unlocked peek at port_usb; rechecked under the lock */
	if (!port->port_usb) {
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	spin_lock_irqsave(&port->port_lock, flags);
	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* stop priming when the rx backlog hits the high watermark */
		if (bam_ipa_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);
		skb = bam_data_alloc_skb_from_pool(port);
		if (!skb)
			break;
		list_del(&req->list);
		req->buf = skb->data;
		req->dma = bam_data_get_dma_from_skb(skb);
		req->length = d->rx_buffer_size;

		/* a valid pre-computed mapping lets the UDC skip dma_map */
		if (req->dma != DMA_ERROR_CODE)
			req->dma_pre_mapped = true;
		else
			req->dma_pre_mapped = false;

		req->context = skb;
		/* usb_ep_queue() may sleep/relock in UDCs; drop the lock */
		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			bam_data_free_skb_to_pool(port, skb);

			pr_err("%s: rx queue failed %d\n", __func__, ret);

			/* port went away while unlocked: drop the request */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
/*
 * OUT endpoint completion for the sys2bam UL path: hand the filled skb
 * to write_tobam_w, then re-arm the request with a fresh pool skb.
 * Requests are parked on rx_idle instead of re-queued when the IPA
 * producer is not granted or flow control kicks in.
 *
 * NOTE(review): this path takes port_lock with plain spin_lock() —
 * presumably completions run with IRQs disabled; confirm against the
 * UDC, since other paths use spin_lock_irqsave() on the same lock.
 */
static void bam_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct bam_data_port *port = ep->driver_data;
	struct bam_data_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;
	unsigned long flags;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		spin_lock_irqsave(&port->port_lock, flags);
		bam_data_free_skb_to_pool(port, skb);
		d->freed_rx_reqs++;
		spin_unlock_irqrestore(&port->port_lock, flags);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		pr_err("%s: %s response error %d, %d/%d\n", __func__,
			ep->name, status, req->actual, req->length);
		spin_lock_irqsave(&port->port_lock, flags);
		bam_data_free_skb_to_pool(port, skb);
		spin_unlock_irqrestore(&port->port_lock, flags);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		/* park the request until IPA grants the producer pipe */
		if (!usb_bam_get_prod_granted(d->usb_bam_type,
					d->dst_connection_idx)) {
			list_add_tail(&req->list, &d->rx_idle);
			spin_unlock(&port->port_lock);
			pr_err_ratelimited("usb bam prod is not granted.\n");
			return;
		} else
			queue_work(bam_data_wq, &d->write_tobam_w);
	}

	/*
	 * NOTE(review): this tests bam_mux_rx_fctrl_support (defined in
	 * u_bam.c) while the matching threshold is the local
	 * bam_ipa_rx_fctrl_en_thld — possibly meant to be
	 * bam_ipa_rx_fctrl_support; confirm.
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld) {
		if (!d->rx_flow_control_triggered) {
			d->rx_flow_control_triggered = 1;
			d->rx_flow_control_enable++;
		}
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}

	/* re-arm the request with a fresh skb from the pool */
	skb = bam_data_alloc_skb_from_pool(port);
	if (!skb) {
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	req->buf = skb->data;
	req->dma = bam_data_get_dma_from_skb(skb);
	req->length = d->rx_buffer_size;

	/* a valid pre-computed mapping lets the UDC skip dma_map */
	if (req->dma != DMA_ERROR_CODE)
		req->dma_pre_mapped = true;
	else
		req->dma_pre_mapped = false;

	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		pr_err_ratelimited("%s: data rx enqueue err %d\n",
				__func__, status);
		spin_lock(&port->port_lock);
		bam_data_free_skb_to_pool(port, skb);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}
+/* It should be called with port_lock acquire. */
+static int bam_data_sys2bam_alloc_req(struct bam_data_port *port, bool in)
+{
+ int ret;
+ struct usb_ep *ep;
+ struct list_head *idle;
+ unsigned queue_size;
+ void (*ep_complete)(struct usb_ep *, struct usb_request *);
+
+ if (!port->port_usb)
+ return -EBUSY;
+ if (in)
+ return -ENODEV;
+
+ ep = port->port_usb->out;
+ idle = &port->data_ch.rx_idle;
+ queue_size = bam_data_rx_q_size;
+ ep_complete = bam_data_epout_complete;
+
+ ret = bam_data_alloc_requests(ep, idle, queue_size, ep_complete,
+ GFP_ATOMIC);
+ if (ret) {
+ pr_err("%s: allocation failed\n", __func__);
+ }
+
+ return ret;
+}
+
/*
 * Worker that drains rx_skb_q into IPA via ipa_tx_dp().  Scheduled from
 * bam_data_epout_complete() once the USB BAM producer is granted.
 * Caps in-flight packets at BAM_PENDING_PKTS_LIMIT and re-enables RX
 * once the backlog falls below the flow-control disable threshold.
 */
static void bam_data_write_toipa(struct work_struct *w)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	struct sk_buff *skb;
	int ret;
	int qlen;
	unsigned long flags;
	dma_addr_t skb_dma_addr;
	struct ipa_tx_meta ipa_meta = {0x0};

	d = container_of(w, struct bam_data_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		/* cable already disconnected */
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_PKTS_LIMIT &&
		usb_bam_get_prod_granted(d->usb_bam_type,
					d->dst_connection_idx)) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;

		pr_debug("%s: port:%p d:%p pbam:%u pno:%d\n", __func__,
				port, d, d->pending_with_bam, port->port_num);

		/* drop the lock for the (potentially slow) IPA submit */
		spin_unlock_irqrestore(&port->port_lock, flags);

		skb_dma_addr = bam_data_get_dma_from_skb(skb);
		if (skb_dma_addr != DMA_ERROR_CODE) {
			ipa_meta.dma_address = skb_dma_addr;
			ipa_meta.dma_address_valid = true;
		}

		ret = ipa_tx_dp(IPA_CLIENT_USB_PROD, skb, &ipa_meta);

		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug_ratelimited("%s: write error:%d\n",
							__func__, ret);
			d->pending_with_bam--;
			bam_data_free_skb_to_pool(port, skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/*
	 * NOTE(review): the flow-control counters below are updated after
	 * dropping port_lock — presumably safe because only this worker
	 * toggles them; confirm there is no concurrent writer.
	 */
	if (qlen < bam_ipa_rx_fctrl_dis_thld) {
		if (d->rx_flow_control_triggered) {
			d->rx_flow_control_disable++;
			d->rx_flow_control_triggered = 0;
		}
		bam_data_start_rx(port);
	}

}
+
+/*------------data_path----------------------------*/
+
+static void bam_data_endless_rx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_endless_tx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_start_endless_rx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->rx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
+static void bam_data_start_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->tx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
/*
 * Dequeue the endless RX request from the OUT endpoint and mark it
 * dequeued so resume/connect paths know it must be requeued.
 *
 * NOTE(review): unlike bam_data_stop_endless_tx(), this calls
 * usb_ep_dequeue() while still holding port_lock — confirm the UDC's
 * dequeue path cannot call back into code that takes the same lock.
 */
static void bam_data_stop_endless_rx(struct bam_data_port *port)
{
	struct bam_data_ch_info *d = &port->data_ch;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d->rx_req_dequeued = true;

	pr_debug("%s: dequeue\n", __func__);
	status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
	if (status)
		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+static void bam_data_stop_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ d->tx_req_dequeued = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->tx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+static void bam2bam_free_rx_skb_idle_list(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+ if (!d) {
+ pr_err("%s(): port->data_ch is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb) {
+ pr_err("%s(): port->port_usb is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb->cdev) {
+ pr_err("port->port_usb->cdev is NULL");
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (!gadget) {
+ pr_err("%s(): gadget is NULL.\n", __func__);
+ return;
+ }
+
+ while (d->rx_skb_idle.qlen > 0) {
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ dma_addr = gbam_get_dma_from_skb(skb);
+
+ if (gadget && dma_addr != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, dma_addr,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+ dma_addr = DMA_ERROR_CODE;
+ memcpy(skb->cb, &dma_addr, sizeof(dma_addr));
+ }
+ dev_kfree_skb_any(skb);
+ d->freed_skb++;
+ }
+
+ pr_debug("%s(): Freed %d SKBs from rx_skb_idle queue\n", __func__,
+ d->freed_skb);
+}
+
+/*
+ * bam_data_ipa_disconnect()- Perform USB IPA function level disconnect
+ * struct bam_data_ch_info - Per USB IPA port data structure
+ *
+ * Make sure to call IPA rndis/ecm/mbim related disconnect APIs() only
+ * if those APIs init counterpart is already performed.
+ * MBIM: teth_bridge_connect() is NO_OPS and teth_bridge_init() is
+ * being called with atomic context on cable connect, hence there is no
+ * need to consider for this check. pipe_connect_notified is being used
+ * for RNDIS/ECM driver due to its different design with usage of
+ * network interface created by IPA driver.
+ */
+static void bam_data_ipa_disconnect(struct bam_data_ch_info *d)
+{
+ pr_debug("%s(): pipe_connect_notified:%d\n",
+ __func__, atomic_read(&d->pipe_connect_notified));
+ /*
+ * Check if pipe_connect_notified is set to 1, then perform disconnect
+ * part and set pipe_connect_notified to zero.
+ */
+ if (atomic_xchg(&d->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ if (d->func_type == USB_FUNC_ECM) {
+ priv = ecm_qc_get_ipa_priv();
+ ecm_ipa_disconnect(priv);
+ } else if (d->func_type == USB_FUNC_RNDIS) {
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ pr_debug("%s(): net interface is disconnected.\n", __func__);
+ }
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ pr_debug("%s(): teth_bridge() disconnected\n", __func__);
+ teth_bridge_disconnect(d->ipa_params.src_client);
+ }
+}
+
/*
 * Worker that tears down the IPA side of the connection: disconnects
 * the USB<->IPA BAM pipes, frees their FIFOs, and then performs the
 * function-level (ECM/RNDIS/MBIM) IPA disconnect.  Also drops the
 * runtime-PM usage count taken at connect time.
 */
static void bam2bam_data_disconnect_work(struct work_struct *w)
{
	struct bam_data_port *port =
			container_of(w, struct bam_data_port, disconnect_w);
	struct bam_data_ch_info *d;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->is_ipa_connected) {
		pr_debug("%s: Already disconnected. Bailing out.\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;

	/*
	 * Unlock the port here and not at the end of this work,
	 * because we do not want to activate usb_bam, ipa and
	 * tethe bridge logic in atomic context and wait uneeded time.
	 * Either way other works will not fire until end of this work
	 * and event functions (as bam_data_connect) will not influance
	 * while lower layers connect pipes, etc.
	 */
	spin_unlock_irqrestore(&port->port_lock, flags);

	ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret)
		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
	usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);

	/*
	 * NOTE: it is required to disconnect USB and IPA BAM related pipes
	 * before calling IPA tethered function related disconnect API. IPA
	 * tethered function related disconnect API delete depedency graph
	 * with IPA RM which would results into IPA not pulling data although
	 * there is pending data on USB BAM producer pipe.
	 */
	bam_data_ipa_disconnect(d);
	spin_lock_irqsave(&port->port_lock, flags);
	port->is_ipa_connected = false;

	/*
	 * Decrement usage count which was incremented
	 * upon cable connect or cable disconnect in suspended state.
	 */
	usb_gadget_autopm_put_async(port->gadget);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_debug("Disconnect workqueue done (port %p)\n", port);
}
+/*
+ * This function configured data fifo based on index passed to get bam2bam
+ * configuration.
+ */
+static void configure_usb_data_fifo(enum usb_ctrl bam_type,
+ u8 idx, struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ if (pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+ }
+}
+
+/* Start RX transfers according to pipe_type */
+static inline void bam_data_start_rx_transfers(struct bam_data_ch_info *d,
+ struct bam_data_port *port)
+{
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ bam_data_start_endless_rx(port);
+ else
+ bam_data_start_rx(port);
+}
+
/*
 * Worker that brings up the USB<->IPA data path: allocates BAM FIFOs,
 * programs the endless rx/tx requests (dwc3 vs CI variants), connects
 * the UL and DL IPA pipes, performs per-function (MBIM/ECM/RNDIS) IPA
 * connect, and finally starts the transfers (except RNDIS/ECM, which
 * wait for a network-up notification).  Repeatedly re-checks for a
 * racing disconnect each time port_lock is re-taken.
 */
static void bam2bam_data_connect_work(struct work_struct *w)
{
	struct bam_data_port *port = container_of(w, struct bam_data_port,
						  connect_w);
	struct teth_bridge_connect_params connect_params;
	struct teth_bridge_init_params teth_bridge_params;
	struct bam_data_ch_info *d;
	struct data_port *d_port;
	struct usb_gadget *gadget = NULL;
	u32 sps_params;
	int ret;
	unsigned long flags;

	pr_debug("%s: Connect workqueue started", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;
	d_port = port->port_usb;

	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		pr_debug("%s: Port is about to disconnect. Bail out.\n",
			__func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (d_port && d_port->cdev)
		gadget = d_port->cdev->gadget;

	if (!gadget) {
		pr_err("%s: NULL gadget\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (!port->port_usb) {
		pr_err("port_usb is NULL");
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (!port->port_usb->out) {
		pr_err("port_usb->out (bulk out ep) is NULL");
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	/*
	 * check if connect_w got called two times during RNDIS resume as
	 * explicit flow control is called to start data transfers after
	 * bam_data_connect()
	 */
	if (port->is_ipa_connected) {
		pr_debug("IPA connect is already done & Transfers started\n");
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_gadget_autopm_put_async(port->gadget);
		return;
	}

	d->ipa_params.usb_connection_speed = gadget->speed;
	/* invalidate client handles so failure paths are detectable */
	d->ipa_params.cons_clnt_hdl = -1;
	d->ipa_params.prod_clnt_hdl = -1;

	if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s: no software preparation for DL not using bam2bam\n",
				__func__);
		return;
	}

	/* FIFO allocation may sleep; drop the lock around it. */
	spin_unlock_irqrestore(&port->port_lock, flags);

	usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		pr_err("Disconnected.port_usb is NULL\n");
		spin_unlock_irqrestore(&port->port_lock, flags);
		goto free_fifos;
	}

	if (gadget_is_dwc3(gadget)) {
		/* Configure RX */
		configure_usb_data_fifo(d->usb_bam_type,
				d->src_connection_idx,
				port->port_usb->out, d->src_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
					| MSM_PRODUCER | d->src_pipe_idx;
		d->rx_req->length = 32*1024;
		d->rx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->out, d->rx_req, GFP_ATOMIC);

		/* Configure TX */
		configure_usb_data_fifo(d->usb_bam_type,
				d->dst_connection_idx,
				port->port_usb->in, d->dst_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
					| d->dst_pipe_idx;
		d->tx_req->length = 32*1024;
		d->tx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->in, d->tx_req, GFP_ATOMIC);

	} else {
		/* CI controller: Configure RX */
		sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
		d->rx_req->udc_priv = sps_params;

		/* Configure TX */
		sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
		d->tx_req->udc_priv = sps_params;
	}

	/* MBIM routes data through the tethering bridge. */
	if (d->func_type == USB_FUNC_MBIM) {
		teth_bridge_params.client = d->ipa_params.src_client;
		ret = teth_bridge_init(&teth_bridge_params);
		if (ret) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			pr_err("%s:teth_bridge_init() failed\n",
			      __func__);
			goto free_fifos;
		}
		d->ipa_params.notify =
			teth_bridge_params.usb_notify_cb;
		d->ipa_params.priv =
			teth_bridge_params.private_data;
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
		d->ipa_params.skip_ep_cfg =
			teth_bridge_params.skip_ep_cfg;
	}
	/* First leg: UL pipe (USB -> IPA). */
	d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
	if (d->func_type == USB_FUNC_ECM) {
		d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
		d->ipa_params.priv = ecm_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
	}

	if (d->func_type == USB_FUNC_RNDIS) {
		d->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
		d->ipa_params.priv = rndis_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg =
			rndis_qc_get_skip_ep_config();
	}

	/* Support for UL using system-to-IPA */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		d->ul_params.teth_cb = d->ipa_params.notify;
		d->ipa_params.notify =
			bam_data_ipa_sys2bam_notify_cb;
		d->ul_params.teth_priv = d->ipa_params.priv;
		d->ipa_params.priv = &d->ul_params;
		d->ipa_params.reset_pipe_after_lpm = false;
	} else {
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			 msm_dwc3_reset_ep_after_lpm(gadget));
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto free_fifos;
	}
	gadget->bam2bam_func_enabled = true;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s:%d: Port is being disconnected.\n",
					__func__, __LINE__);
		goto disconnect_ipa;
	}

	d_port->ipa_consumer_ep = d->ipa_params.ipa_cons_ep_idx;

	/* Remove support for UL using system-to-IPA towards DL */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		d->ipa_params.notify = d->ul_params.teth_cb;
		d->ipa_params.priv = d->ul_params.teth_priv;
	}

	/* Second leg: DL pipe (IPA -> USB). */
	d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
	if (d->func_type == USB_FUNC_ECM) {
		d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
		d->ipa_params.priv = ecm_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
	}
	if (d->func_type == USB_FUNC_RNDIS) {
		d->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
		d->ipa_params.priv = rndis_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg =
			rndis_qc_get_skip_ep_config();
	}

	if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM) {
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			 msm_dwc3_reset_ep_after_lpm(gadget));
	} else {
		d->ipa_params.reset_pipe_after_lpm = false;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto disconnect_ipa;
	}

	/*
	 * Cable might have been disconnected after releasing the
	 * spinlock and re-enabling IRQs. Hence check again.
	 */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s:%d: port is beind disconnected.\n",
					__func__, __LINE__);
		goto disconnect_ipa;
	}

	port->is_ipa_connected = true;

	d_port->ipa_producer_ep = d->ipa_params.ipa_prod_ep_idx;
	pr_debug("%s(): ipa_producer_ep:%d ipa_consumer_ep:%d\n",
			__func__, d_port->ipa_producer_ep,
			d_port->ipa_consumer_ep);
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (d->func_type == USB_FUNC_MBIM) {
		connect_params.ipa_usb_pipe_hdl =
			d->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl =
			d->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode =
			TETH_TETHERING_MODE_MBIM;
		connect_params.client_type = d->ipa_params.src_client;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n",
			      __func__);
			return;
		}
	}

	if (d->func_type == USB_FUNC_ECM) {
		ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
			d->ipa_params.prod_clnt_hdl,
			d->ipa_params.priv);
		if (ret) {
			pr_err("%s: failed to connect IPA: err:%d\n",
				__func__, ret);
			return;
		}
	}

	if (d->func_type == USB_FUNC_RNDIS) {
		rndis_data.prod_clnt_hdl =
			d->ipa_params.prod_clnt_hdl;
		rndis_data.cons_clnt_hdl =
			d->ipa_params.cons_clnt_hdl;
		rndis_data.priv = d->ipa_params.priv;

		pr_debug("ul_max_transfer_size:%d\n",
				rndis_data.ul_max_transfer_size);
		pr_debug("ul_max_packets_number:%d\n",
				rndis_data.ul_max_packets_number);
		pr_debug("dl_max_transfer_size:%d\n",
				rndis_data.dl_max_transfer_size);

		ret = rndis_ipa_pipe_connect_notify(
				rndis_data.cons_clnt_hdl,
				rndis_data.prod_clnt_hdl,
				rndis_data.ul_max_transfer_size,
				rndis_data.ul_max_packets_number,
				rndis_data.dl_max_transfer_size,
				rndis_data.priv);
		if (ret) {
			pr_err("%s: failed to connect IPA: err:%d\n",
				__func__, ret);
			return;
		}
	}
	atomic_set(&d->pipe_connect_notified, 1);

	/* Don't queue the transfers yet, only after network stack is up */
	if (d->func_type == USB_FUNC_RNDIS || d->func_type == USB_FUNC_ECM) {
		pr_debug("%s: Not starting now, waiting for network notify",
			__func__);
		return;
	}

	/* queue in & out requests */
	bam_data_start_rx_transfers(d, port);
	bam_data_start_endless_tx(port);

	pr_debug("Connect workqueue done (port %p)", port);
	return;

disconnect_ipa:
	/* let disconnect work take care of ipa disconnect */
	port->is_ipa_connected = true;
	return;

free_fifos:
	usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
}
+
+/*
+ * Called when IPA triggers us that the network interface is up.
+ * Starts the transfers on bulk endpoints.
+ * (optimization reasons, the pipes and bam with IPA are already connected)
+ */
void bam_data_start_rx_tx(u8 port_num)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	unsigned long flags;

	pr_debug("%s: Triggered: starting tx, rx", __func__);

	/* queue in & out requests */
	port = bam2bam_data_ports[port_num];
	if (!port) {
		pr_err("%s: port is NULL, can't start tx, rx", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);
	d = &port->data_ch;

	/* both endpoints must be enabled before starting transfers */
	if (!port->port_usb || !port->port_usb->in->driver_data
		|| !port->port_usb->out->driver_data) {
		pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
		goto out;
	}

	if (!d->rx_req || !d->tx_req) {
		pr_err("%s: No request d->rx_req=%p, d->tx_req=%p", __func__,
			d->rx_req, d->tx_req);
		goto out;
	}
	if (!port->is_ipa_connected) {
		pr_debug("%s: pipes are disconnected", __func__);
		goto out;
	}

	/*
	 * NOTE(review): 'd' and the requests are used below after the lock
	 * is dropped; presumably safe because the start helpers re-validate
	 * under port_lock — confirm against a racing disconnect.
	 */
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue in & out requests */
	pr_debug("%s: Starting rx", __func__);
	bam_data_start_rx_transfers(d, port);

	pr_debug("%s: Starting tx", __func__);
	bam_data_start_endless_tx(port);

	return;
out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+inline int u_bam_data_func_to_port(enum function_type func, u8 func_port)
+{
+ if (func >= USB_NUM_FUNCS || func_port >= PORTS_PER_FUNC) {
+ pr_err("func=%d and func_port=%d are an illegal combination\n",
+ func, func_port);
+ return -EINVAL;
+ }
+ return (PORTS_PER_FUNC * func) + func_port;
+}
+
+static int bam2bam_data_port_alloc(int portno)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ if (bam2bam_data_ports[portno] != NULL) {
+ pr_debug("port %d already allocated.\n", portno);
+ return 0;
+ }
+
+ port = kzalloc(sizeof(struct bam_data_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("no memory to allocate port %d\n", portno);
+ return -ENOMEM;
+ }
+
+ bam2bam_data_ports[portno] = port;
+ d = &port->data_ch;
+ d->port = port;
+
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+ INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+ INIT_WORK(&d->write_tobam_w, bam_data_write_toipa);
+ return 0;
+}
+
+void u_bam_data_start_rndis_ipa(void)
+{
+ int port_num;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ pr_debug("%s\n", __func__);
+
+ port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
+ RNDIS_QC_ACTIVE_PORT);
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+
+ if (!atomic_read(&d->pipe_connect_notified)) {
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work due to cable disconnect
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(bam_data_wq, &port->connect_w);
+ } else {
+ pr_debug("%s: Transfers already started?\n", __func__);
+ }
+}
+
+void u_bam_data_stop_rndis_ipa(void)
+{
+ int port_num;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ pr_debug("%s\n", __func__);
+
+ port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
+ RNDIS_QC_ACTIVE_PORT);
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+
+ if (atomic_read(&d->pipe_connect_notified)) {
+ rndis_ipa_reset_trigger();
+ bam_data_stop_endless_tx(port);
+ bam_data_stop_endless_rx(port);
+ queue_work(bam_data_wq, &port->disconnect_w);
+ }
+}
+
+void bam_data_flow_control_enable(bool enable)
+{
+ if (enable)
+ u_bam_data_stop_rndis_ipa();
+ else
+ u_bam_data_start_rndis_ipa();
+}
+
+static void bam_data_free_reqs(struct bam_data_port *port)
+{
+
+ struct list_head *head;
+ struct usb_request *req;
+
+ if (port->data_ch.src_pipe_type != USB_BAM_PIPE_SYS2BAM)
+ return;
+
+ head = &port->data_ch.rx_idle;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(port->port_usb->out, req);
+ port->data_ch.freed_rx_reqs++;
+ }
+}
+
/*
 * bam_data_disconnect() - gadget-side disconnect entry point
 * @gr:		gadget data port being torn down
 * @func:	owning function type (RNDIS/ECM/MBIM)
 * @dev_port_num: function-local port index
 *
 * Disables the endpoints, frees the endless rx/tx requests and the
 * SYS2BAM skb/request pools, records the disconnect event and queues
 * disconnect_w to perform the IPA teardown outside atomic context.
 */
void bam_data_disconnect(struct data_port *gr, enum function_type func,
		u8 dev_port_num)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int port_num;

	port_num = u_bam_data_func_to_port(func, dev_port_num);
	if (port_num < 0) {
		pr_err("invalid bam2bam portno#%d\n", port_num);
		return;
	}

	pr_debug("dev:%p port number:%d\n", gr, port_num);

	if (!gr) {
		pr_err("data port is null\n");
		return;
	}

	port = bam2bam_data_ports[port_num];

	if (!port) {
		pr_err("port %u is NULL", port_num);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;

	/* Already disconnected due to suspend with remote wake disabled */
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	/*
	 * Suspend with remote wakeup enabled. Increment usage
	 * count when disconnect happens in suspended state.
	 * Corresponding decrement happens in the end of this
	 * function if IPA handshake is already done or it is done
	 * in disconnect work after finishing IPA handshake.
	 * In case of RNDIS, if connect_w by rndis_flow_control is not triggered
	 * yet then don't perform pm_runtime_get as suspend_w would have bailed
	 * w/o runtime_get.
	 * And restrict check to only RNDIS to handle cases where connect_w is
	 * already scheduled but execution is pending which must be rare though.
	 */
	if (port->last_event == U_BAM_DATA_SUSPEND_E &&
		(d->func_type != USB_FUNC_RNDIS || port->is_ipa_connected))
		usb_gadget_autopm_get_noresume(port->gadget);

	if (port->port_usb) {
		/* invalidate the cached IPA endpoint indices */
		port->port_usb->ipa_consumer_ep = -1;
		port->port_usb->ipa_producer_ep = -1;

		if (port->port_usb->in && port->port_usb->in->driver_data) {

			/*
			 * Disable endpoints.
			 * Unlocking is needed since disabling the eps might
			 * stop active transfers and therefore the request
			 * complete function will be called, where we try
			 * to obtain the spinlock as well.
			 */
			spin_unlock_irqrestore(&port->port_lock, flags);
			usb_ep_disable(port->port_usb->out);
			if (d->rx_req) {
				usb_ep_free_request(port->port_usb->out,
								d->rx_req);
				d->rx_req = NULL;
			}

			usb_ep_disable(port->port_usb->in);
			if (d->tx_req) {
				usb_ep_free_request(port->port_usb->in,
								d->tx_req);
				d->tx_req = NULL;
			}
			spin_lock_irqsave(&port->port_lock, flags);

			/* Only for SYS2BAM mode related UL workaround */
			if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {

				pr_debug("SKBs_RX_Q: freed:%d\n",
							d->rx_skb_q.qlen);
				while ((skb = __skb_dequeue(&d->rx_skb_q)))
					dev_kfree_skb_any(skb);

				bam2bam_free_rx_skb_idle_list(port);
				pr_debug("SKBs: allocated:%d freed:%d\n",
						d->total_skb, d->freed_skb);
				pr_debug("rx_reqs: allocated:%d freed:%d\n",
					d->alloc_rx_reqs, d->freed_rx_reqs);

				/* reset all skb/reqs related statistics */
				d->total_skb = 0;
				d->freed_skb = 0;
				d->freed_rx_reqs = 0;
				d->alloc_rx_reqs = 0;
			}

			/*
			 * Set endless flag to false as USB Endpoint
			 * is already disable.
			 */
			if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->in->endless = false;

			if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->out->endless = false;

			port->port_usb->in->driver_data = NULL;
			port->port_usb->out->driver_data = NULL;

			port->port_usb = NULL;
		}
	}

	port->last_event = U_BAM_DATA_DISCONNECT_E;
	/* Disable usb irq for CI gadget. It will be enabled in
	 * usb_bam_disconnect_pipe() after disconnecting all pipes
	 * and USB BAM reset is done.
	 */
	if (!gadget_is_dwc3(port->gadget))
		msm_usb_irq_disable(true);

	queue_work(bam_data_wq, &port->disconnect_w);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+ u8 dev_port_num, enum function_type func)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+ int ret, port_num;
+ unsigned long flags;
+ u8 src_connection_idx, dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ if (!gr) {
+ pr_err("data port is null\n");
+ return -ENODEV;
+ }
+
+ port_num = u_bam_data_func_to_port(func, dev_port_num);
+ if (port_num < 0) {
+ pr_err("invalid portno#%d\n", port_num);
+ return -EINVAL;
+ }
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ pr_err("invalid xport#%d\n", trans);
+ return -EINVAL;
+ }
+
+ pr_debug("dev:%p port#%d\n", gr, port_num);
+
+ usb_bam_type = usb_bam_get_bam_type(gr->cdev->gadget->name);
+
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ dev_port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ dev_port_num);
+ if (src_connection_idx < 0 || dst_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
+ return ret;
+ }
+
+ port = bam2bam_data_ports[port_num];
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ port->port_usb = gr;
+ port->gadget = gr->cdev->gadget;
+ d = &port->data_ch;
+ d->src_connection_idx = src_connection_idx;
+ d->dst_connection_idx = dst_connection_idx;
+ d->usb_bam_type = usb_bam_type;
+
+ d->trans = trans;
+ d->func_type = func;
+ d->rx_buffer_size = (gr->rx_buffer_size ? gr->rx_buffer_size :
+ bam_mux_rx_req_size);
+
+ if (usb_bam_type == HSIC_CTRL) {
+ d->ipa_params.src_client = IPA_CLIENT_HSIC1_PROD;
+ d->ipa_params.dst_client = IPA_CLIENT_HSIC1_CONS;
+ } else {
+ d->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+ d->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+ }
+
+ pr_debug("%s(): rx_buffer_size:%d\n", __func__, d->rx_buffer_size);
+ d->ipa_params.src_pipe = &(d->src_pipe_idx);
+ d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params.src_idx = src_connection_idx;
+ d->ipa_params.dst_idx = dst_connection_idx;
+ d->rx_flow_control_disable = 0;
+ d->rx_flow_control_enable = 0;
+ d->rx_flow_control_triggered = 0;
+
+ /*
+ * Query pipe type using IPA src/dst index with
+ * usbbam driver. It is being set either as
+ * BAM2BAM or SYS2BAM.
+ */
+ if (usb_bam_get_pipe_type(usb_bam_type, d->ipa_params.src_idx,
+ &d->src_pipe_type) ||
+ usb_bam_get_pipe_type(usb_bam_type, d->ipa_params.dst_idx,
+ &d->dst_pipe_type)) {
+ pr_err("usb_bam_get_pipe_type() failed\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+ * Check for pipe_type. If it is BAM2BAM, then it is required
+ * to disable Xfer complete and Xfer not ready interrupts for
+ * that particular endpoint. Hence it set endless flag based
+ * it which is considered into UDC driver while enabling
+ * USB Endpoint.
+ */
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = true;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = true;
+
+ ret = usb_ep_enable(gr->in);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:IN ep:%p", gr->in);
+ goto exit;
+ }
+
+ gr->in->driver_data = port;
+
+ ret = usb_ep_enable(gr->out);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:OUT ep:%p", gr->out);
+ goto disable_in_ep;
+ }
+
+ gr->out->driver_data = port;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+
+ /* UL workaround requirements */
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+
+ ret = bam_data_sys2bam_alloc_req(port, false);
+ if (ret) {
+ pr_err("%s: sys2bam_alloc_req failed(%d)",
+ __func__, ret);
+ goto disable_out_ep;
+ }
+ }
+
+ d->rx_req = usb_ep_alloc_request(port->port_usb->out,
+ GFP_ATOMIC);
+ if (!d->rx_req) {
+ pr_err("%s: failed to allocate rx_req\n", __func__);
+ goto bam_data_free;
+ }
+ d->rx_req->context = port;
+ d->rx_req->complete = bam_data_endless_rx_complete;
+ d->rx_req->length = 0;
+ d->rx_req->no_interrupt = 1;
+
+ d->tx_req = usb_ep_alloc_request(port->port_usb->in,
+ GFP_ATOMIC);
+ if (!d->tx_req) {
+ pr_err("%s: failed to allocate tx_req\n", __func__);
+ goto ep_out_req_free;
+ }
+
+ d->tx_req->context = port;
+ d->tx_req->complete = bam_data_endless_tx_complete;
+ d->tx_req->length = 0;
+ d->tx_req->no_interrupt = 1;
+
+ gr->out->driver_data = port;
+
+ port->last_event = U_BAM_DATA_CONNECT_E;
+
+ /* Wait for host to enable flow_control */
+ if (d->func_type == USB_FUNC_RNDIS) {
+ ret = 0;
+ goto exit;
+ }
+
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ queue_work(bam_data_wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return 0;
+
+ep_out_req_free:
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+bam_data_free:
+ bam_data_free_reqs(port);
+disable_out_ep:
+ gr->out->driver_data = 0;
+ usb_ep_disable(gr->out);
+disable_in_ep:
+ gr->in->driver_data = 0;
+ usb_ep_disable(gr->in);
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return ret;
+}
+
+/*
+ * Allocate the BAM2BAM data ports for @func and, on first use, create the
+ * shared single-threaded workqueue that runs all connect/disconnect/
+ * suspend/resume work items.
+ *
+ * Returns 0 on success or a negative errno. On failure all ports counted
+ * in n_bam2bam_data_ports are freed and the workqueue (if it existed) is
+ * destroyed.
+ *
+ * Fix: destroy_workqueue() used to be called from inside the per-port
+ * free loop; it is hoisted out with an equivalent guard (the loop body
+ * runs iff n_bam2bam_data_ports != 0), so cleanup reads as two distinct
+ * steps instead of a destroy hidden in the first loop iteration.
+ *
+ * NOTE(review): the error path frees ports but does not reset
+ * n_bam2bam_data_ports, and may free ports allocated by an earlier
+ * successful call — preserved as-is; confirm against callers before
+ * changing.
+ */
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port)
+{
+	int i;
+	int ret;
+
+	pr_debug("requested %d BAM2BAM ports", no_bam2bam_port);
+
+	if (!no_bam2bam_port || no_bam2bam_port > PORTS_PER_FUNC ||
+		func >= USB_NUM_FUNCS) {
+		pr_err("Invalid num of ports count:%d or function type:%d\n",
+			no_bam2bam_port, func);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < no_bam2bam_port; i++) {
+		n_bam2bam_data_ports++;
+		ret = bam2bam_data_port_alloc(u_bam_data_func_to_port(func, i));
+		if (ret) {
+			n_bam2bam_data_ports--;
+			pr_err("Failed to alloc port:%d\n", i);
+			goto free_bam_ports;
+		}
+	}
+
+	pr_debug("n_bam2bam_data_ports:%d\n", n_bam2bam_data_ports);
+
+	if (bam_data_wq) {
+		pr_debug("bam_data is already setup.");
+		return 0;
+	}
+
+	bam_data_wq = alloc_workqueue("k_bam_data",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!bam_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_bam_ports;
+	}
+
+	return 0;
+
+free_bam_ports:
+	for (i = 0; i < n_bam2bam_data_ports; i++) {
+		kfree(bam2bam_data_ports[i]);
+		bam2bam_data_ports[i] = NULL;
+	}
+	/* Equivalent to the old in-loop destroy: only if any port was freed. */
+	if (n_bam2bam_data_ports && bam_data_wq) {
+		destroy_workqueue(bam_data_wq);
+		bam_data_wq = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * Wake callback registered with the usb_bam driver, invoked when the peer
+ * has data to move while the bus is suspended. Issues function remote
+ * wakeup for super-speed functions that the host armed, otherwise plain
+ * gadget remote wakeup.
+ *
+ * Returns 0 on success or a negative errno; -EBUSY/-EAGAIN only mean the
+ * wakeup is delayed by an LPM exit in progress.
+ */
+static int bam_data_wake_cb(void *param)
+{
+	int ret;
+	struct bam_data_port *port = (struct bam_data_port *)param;
+	struct data_port *d_port = port->port_usb;
+	struct usb_gadget *gadget;
+	struct usb_function *func;
+
+	pr_debug("%s: woken up by peer\n", __func__);
+
+	if (!d_port) {
+		pr_err("FAILED: d_port == NULL");
+		return -ENODEV;
+	}
+
+	if (!d_port->cdev) {
+		pr_err("FAILED: d_port->cdev == NULL");
+		return -ENODEV;
+	}
+
+	gadget = d_port->cdev->gadget;
+	if (!gadget) {
+		pr_err("FAILED: d_port->cdev->gadget == NULL");
+		return -ENODEV;
+	}
+
+	func = d_port->func;
+	/*
+	 * Fix: func is dereferenced below on the super-speed path; guard it
+	 * like the other pointers. Non-SS behavior is unchanged.
+	 */
+	if ((gadget->speed == USB_SPEED_SUPER) && !func) {
+		pr_err("FAILED: d_port->func == NULL");
+		return -ENODEV;
+	}
+
+	/*
+	 * In Super-Speed mode, remote wakeup is not allowed for suspended
+	 * functions which have been disallowed by the host to issue Function
+	 * Remote Wakeup.
+	 * Note - We deviate here from the USB 3.0 spec and allow
+	 * non-suspended functions to issue remote-wakeup even if they were not
+	 * allowed to do so by the host. This is done in order to support non
+	 * fully USB 3.0 compatible hosts.
+	 */
+	if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended))
+		ret = usb_func_wakeup(func);
+	else
+		ret = usb_gadget_wakeup(gadget);
+
+	if ((ret == -EBUSY) || (ret == -EAGAIN))
+		pr_debug("Remote wakeup is delayed due to LPM exit.\n");
+	else if (ret)
+		pr_err("Failed to wake up the USB core. ret=%d.\n", ret);
+
+	return ret;
+}
+
+/*
+ * usb_bam "start" callback, run as part of the resume handshake.
+ * Restarts data flow for the given pipe direction: the endless BAM2BAM
+ * transfers, or the software RX path for a SYS2BAM source pipe.
+ */
+static void bam_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct bam_data_port *port = param;
+	struct data_port *d_port = port->port_usb;
+	struct bam_data_ch_info *d = &port->data_ch;
+	struct usb_gadget *gadget;
+
+	if (!d_port || !d_port->cdev || !d_port->cdev->gadget) {
+		pr_err("%s:d_port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+	/* Only meaningful while a resume is in flight; bail on any change. */
+	if (port->last_event != U_BAM_DATA_RESUME_E) {
+		pr_err("%s: Port state changed since resume. Bail out.\n",
+			__func__);
+		return;
+	}
+
+	gadget = d_port->cdev->gadget;
+
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			bam_data_start_endless_rx(port);
+		else {
+			/* SYS2BAM: restart skb RX and the UL worker. */
+			bam_data_start_rx(port);
+			queue_work(bam_data_wq, &d->write_tobam_w);
+		}
+	} else {
+		/*
+		 * On dwc3 the data FIFO may need to be reprogrammed after an
+		 * LPM exit before the endless TX transfer restarts.
+		 */
+		if (gadget_is_dwc3(gadget) &&
+			msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_data_fifo(d->usb_bam_type,
+				d->dst_connection_idx,
+				port->port_usb->in, d->dst_pipe_type);
+		}
+		bam_data_start_endless_tx(port);
+	}
+
+}
+
+/*
+ * usb_bam "stop" callback, run during the suspend handshake. Halts the
+ * endless transfer for the requested pipe direction.
+ */
+static void bam_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct bam_data_port *port = param;
+
+	if (dir != USB_TO_PEER_PERIPHERAL) {
+		bam_data_stop_endless_tx(port);
+		return;
+	}
+
+	/*
+	 * Only the BAM2BAM case is handled: there is no equivalent to
+	 * bam_data_stop_endless_rx() for the SYS2BAM use case.
+	 */
+	if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+		bam_data_stop_endless_rx(port);
+}
+
+/*
+ * Bus-suspend entry point for a BAM data port. With remote wakeup armed
+ * the actual IPA handshake is deferred to the suspend worker; without it
+ * the whole connection is torn down after saving the endpoint descriptors
+ * for restoration at resume.
+ */
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled)
+{
+	unsigned long flags;
+	struct bam_data_port *port;
+	int portno;
+
+	portno = u_bam_data_func_to_port(func, dev_port_num);
+	if (portno < 0) {
+		pr_err("invalid bam2bam portno#%d\n", portno);
+		return;
+	}
+
+	pr_debug("%s: suspended port %d\n", __func__, portno);
+
+	port = bam2bam_data_ports[portno];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	if (!remote_wakeup_enabled) {
+		/*
+		 * With remote wakeup off, IPA BAM is disconnected because no
+		 * new data can move until the bus resumes. The endpoint
+		 * descriptors are saved first since the BAM disconnect path
+		 * resets them; resume restores them from these backups.
+		 */
+		port_usb->in_ep_desc_backup = port_usb->in->desc;
+		port_usb->out_ep_desc_backup = port_usb->out->desc;
+
+		pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
+			port_usb->in_ep_desc_backup,
+			port_usb->out_ep_desc_backup);
+
+		bam_data_disconnect(port_usb, func, dev_port_num);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->last_event = U_BAM_DATA_SUSPEND_E;
+	queue_work(bam_data_wq, &port->suspend_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Bus-resume entry point for a BAM data port. With remote wakeup armed
+ * the IPA resume handshake is deferred to the resume worker; without it
+ * the connection was fully torn down at suspend, so the saved endpoint
+ * descriptors are restored and the port reconnected from scratch.
+ */
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled)
+{
+	unsigned long flags;
+	struct bam_data_port *port;
+	int portno;
+
+	portno = u_bam_data_func_to_port(func, dev_port_num);
+	if (portno < 0) {
+		pr_err("invalid bam2bam portno#%d\n", portno);
+		return;
+	}
+
+	pr_debug("%s: resumed port %d\n", __func__, portno);
+
+	port = bam2bam_data_ports[portno];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	if (!remote_wakeup_enabled) {
+		/* Put back the descriptors saved by bam_data_suspend(). */
+		port_usb->in->desc = port_usb->in_ep_desc_backup;
+		port_usb->out->desc = port_usb->out_ep_desc_backup;
+
+		pr_debug("in_ep_desc_backup = %p, out_ep_desc_backup = %p",
+			port_usb->in_ep_desc_backup,
+			port_usb->out_ep_desc_backup);
+
+		bam_data_connect(port_usb, port->data_ch.trans,
+			dev_port_num, func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->last_event = U_BAM_DATA_RESUME_E;
+
+	/*
+	 * Take a PM usage reference so the gadget parent cannot suspend.
+	 * It is dropped once the IPA handshake completes: in the disconnect
+	 * work on cable removal, or in bam_data_disconnect while suspended.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(bam_data_wq, &port->resume_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Drain all pending connect/disconnect/suspend/resume work items. */
+void bam_data_flush_workqueue(void)
+{
+	pr_debug("%s(): Flushing workqueue\n", __func__);
+	flush_workqueue(bam_data_wq);
+}
+
+/*
+ * Suspend worker (remote-wakeup-enabled path). Registers the BAM wake and
+ * start/stop callbacks, performs the usb_bam/IPA suspend handshake, and
+ * finally drops the PM usage count taken at connect (or at resume).
+ */
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, suspend_w);
+	struct bam_data_ch_info *d;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("%s: suspend work started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+
+	/* In case of RNDIS, host enables flow_control invoking connect_w. If it
+	 * is delayed then we may end up having suspend_w run before connect_w.
+	 * In this scenario, connect_w may or may not at all start if cable gets
+	 * disconnected or if host changes configuration e.g. RNDIS --> MBIM
+	 * For these cases don't do runtime_put as there was no _get yet, and
+	 * detect this condition on disconnect to not do an extra
+	 * pm_runtime_get for the SUSPEND --> DISCONNECT scenario.
+	 */
+	if (!port->is_ipa_connected) {
+		pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	/* A disconnect or resume queued meanwhile supersedes this suspend. */
+	if ((port->last_event == U_BAM_DATA_DISCONNECT_E) ||
+	    (port->last_event == U_BAM_DATA_RESUME_E)) {
+		pr_debug("%s: Port is about to disconnect/resume. Bail out.\n",
+			__func__);
+		goto exit;
+	}
+
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					bam_data_wake_cb, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	usb_bam_register_start_stop_cbs(d->usb_bam_type, d->dst_connection_idx,
+					bam_data_start, bam_data_stop,
+					port);
+
+	/*
+	 * release lock here because bam_data_start() or
+	 * bam_data_stop() called from usb_bam_suspend()
+	 * re-acquires port lock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+exit:
+	/*
+	 * Decrement usage count after IPA handshake is done
+	 * to allow gadget parent to go to lpm. This counter was
+	 * incremented upon cable connect.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Resume worker (remote-wakeup-enabled path). Unregisters the BAM wake
+ * callback, reprograms the dwc3 DBM endpoints if their requests were
+ * dequeued during suspend, and kicks the usb_bam/IPA resume handshake.
+ */
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, resume_w);
+	struct bam_data_ch_info *d;
+	struct data_port *d_port;
+	struct usb_gadget *gadget;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		pr_err("port->port_usb is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev) {
+		pr_err("!port->port_usb->cdev is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev->gadget) {
+		pr_err("!port->port_usb->cdev->gadget is NULL");
+		goto exit;
+	}
+
+	d = &port->data_ch;
+	d_port = port->port_usb;
+	gadget = d_port->cdev->gadget;
+
+	pr_debug("%s: resume work started\n", __func__);
+
+	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+		pr_debug("%s: Port is about to disconnect. Bail out.\n",
+			__func__);
+		goto exit;
+	}
+
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					NULL, NULL);
+	if (ret) {
+		pr_err("%s(): Failed to un-register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	/*
+	 * If usb_req was dequeued as part of bus suspend then
+	 * corresponding DBM IN and OUT EPs should also be reset.
+	 * There is a possibility that usb_bam may not have dequeued the
+	 * request in case of quick back to back usb bus suspend resume.
+	 * The lock is dropped around msm_dwc3_reset_dbm_ep() since that
+	 * call may sleep/reschedule.
+	 */
+	if (gadget_is_dwc3(gadget) &&
+		msm_dwc3_reset_ep_after_lpm(gadget)) {
+		if (d->tx_req_dequeued) {
+			configure_usb_data_fifo(d->usb_bam_type,
+				d->dst_connection_idx,
+				port->port_usb->in, d->dst_pipe_type);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			msm_dwc3_reset_dbm_ep(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+		}
+		if (d->rx_req_dequeued) {
+			configure_usb_data_fifo(d->usb_bam_type,
+				d->src_connection_idx,
+				port->port_usb->out, d->src_pipe_type);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			msm_dwc3_reset_dbm_ep(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+		}
+	}
+	d->tx_req_dequeued = false;
+	d->rx_req_dequeued = false;
+	usb_bam_resume(d->usb_bam_type, &d->ipa_params);
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Record the downlink (DL) max aggregated transfer size for RNDIS. */
+void u_bam_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+	/* A zero size is meaningless; reject it outright. */
+	if (max_transfer_size == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.dl_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/*
+ * Record the uplink (UL) max packets-per-transfer for RNDIS and derive
+ * whether UL aggregation should be enabled.
+ */
+void u_bam_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+	if (max_packets_number == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.ul_max_packets_number = max_packets_number;
+	/* Aggregation only pays off when more than one packet can batch. */
+	rndis_data.ul_aggregation_enable = (max_packets_number > 1);
+
+	pr_debug("%s(): ul_aggregation enable:%d\n", __func__,
+			rndis_data.ul_aggregation_enable);
+	pr_debug("%s(): ul_max_packets_number:%d\n", __func__,
+			max_packets_number);
+}
+
+/* Record the uplink (UL) max aggregated transfer size for RNDIS. */
+void u_bam_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+	/* A zero size is meaningless; reject it outright. */
+	if (max_transfer_size == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.ul_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
diff --git a/drivers/usb/gadget/function/u_bam_data.h b/drivers/usb/gadget/function/u_bam_data.h
new file mode 100644
index 000000000000..e3acbd0c56a0
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include "usb_gadget_xport.h"
+
+/* USB functions whose data path can run over BAM2BAM/IPA. */
+enum function_type {
+	USB_FUNC_ECM,
+	USB_FUNC_MBIM,
+	USB_FUNC_RNDIS,
+	USB_NUM_FUNCS,	/* count of real functions above */
+};
+
+/* Each function type owns this many BAM2BAM data ports. */
+#define PORTS_PER_FUNC 1
+#define BAM2BAM_DATA_N_PORTS (USB_NUM_FUNCS * PORTS_PER_FUNC)
+
+/*
+ * Glue handed by a function driver (ECM/MBIM/RNDIS) to the BAM data layer.
+ * The descriptor backups are filled by bam_data_suspend() when remote
+ * wakeup is disabled and restored by bam_data_resume().
+ */
+struct data_port {
+	struct usb_composite_dev *cdev;
+	struct usb_function *func;
+	struct usb_ep *in;
+	int rx_buffer_size;	/* presumably OUT buffer size — confirm at caller */
+	struct usb_ep *out;
+	int ipa_consumer_ep;
+	int ipa_producer_ep;
+	const struct usb_endpoint_descriptor *in_ep_desc_backup;
+	const struct usb_endpoint_descriptor *out_ep_desc_backup;
+};
+
+void bam_data_disconnect(struct data_port *gr, enum function_type func,
+	u8 dev_port_num);
+
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+	u8 dev_port_num, enum function_type func);
+
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port);
+
+void bam_data_flush_workqueue(void);
+
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_flow_control_enable(bool enable);
+
+void u_bam_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void u_bam_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void u_bam_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void u_bam_data_start_rndis_ipa(void);
+
+void u_bam_data_stop_rndis_ipa(void);
+
+void bam_data_start_rx_tx(u8 port_num);
+
+/* Map (function type, per-function port index) to a global port number. */
+int u_bam_data_func_to_port(enum function_type func, u8 func_port);
+#endif /* __U_BAM_DATA_H */
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
new file mode 100644
index 000000000000..a9a8ab4618e8
--- /dev/null
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/usb/usb_ctrl_qti.h>
+
+#include <soc/qcom/bam_dmux.h>
+
+#include "u_rmnet.h"
+#include "usb_gadget_xport.h"
+
+#define RMNET_CTRL_QTI_NAME "rmnet_ctrl"
+#define DPL_CTRL_QTI_NAME "dpl_ctrl"
+/*
+ * Use size of gadget's qti control name. Here currently RMNET and DPL
+ * gadget is using QTI as control transport. Hence using RMNET ctrl name
+ * (as it is bigger in size) for QTI_CTRL_NAME_LEN.
+ */
+#define QTI_CTRL_NAME_LEN (sizeof(RMNET_CTRL_QTI_NAME)+2)
+
+/*
+ * Per-instance state for one QTI control port, exposed to user space as a
+ * misc character device (rmnet_ctrl / dpl_ctrl).
+ */
+struct qti_ctrl_port {
+	void *port_usb;			/* grmnet or gqdss, depending on gtype */
+	char name[QTI_CTRL_NAME_LEN];
+	struct miscdevice ctrl_device;
+
+	bool is_open;			/* device node currently open */
+	int index;
+	unsigned intf;			/* interface id, or BAM-DMUX channel */
+	int ipa_prod_idx;		/* -1 until gqti_ctrl_update_ipa_pipes() */
+	int ipa_cons_idx;
+	enum peripheral_ep_type ep_type;
+
+	atomic_t connected;		/* function connected over the cable */
+	atomic_t line_state;
+
+	atomic_t open_excl;		/* single-user guard per file operation */
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t ioctl_excl;
+
+	wait_queue_head_t read_wq;	/* readers block here for packets */
+
+	struct list_head cpkt_req_q;	/* pending host->modem ctrl packets */
+
+	spinlock_t lock;		/* protects cpkt_req_q and the counters */
+	enum gadget_type gtype;
+	unsigned host_to_modem;		/* stats shown in debugfs "status" */
+	unsigned copied_to_modem;
+	unsigned copied_from_modem;
+	unsigned modem_to_host;
+	unsigned drp_cpkt_cnt;
+};
+static struct qti_ctrl_port *ctrl_port[NR_QTI_PORTS];
+
+/*
+ * Try to take a single-user lock implemented as an atomic counter.
+ * Returns 0 on success, -EBUSY if it is already held.
+ */
+static inline int qti_ctrl_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/* Release a lock taken with qti_ctrl_lock(). */
+static inline void qti_ctrl_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*
+ * Queue a zero-length control packet and wake readers, used to signal a
+ * state change (e.g. line state or disconnect) to the QTI daemon. Silently
+ * does nothing if the device node is not open.
+ */
+static void qti_ctrl_queue_notify(struct qti_ctrl_port *port)
+{
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt = NULL;
+
+	pr_debug("%s: Queue empty packet for QTI for port%d",
+		 __func__, port->index);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!port->is_open) {
+		pr_err("%s: rmnet ctrl file handler %p is not open",
+		       __func__, port);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	/* GFP_ATOMIC: allocation happens under the port spinlock. */
+	cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+}
+
+/*
+ * Queue a control packet received from the USB host towards the modem
+ * (the QTI user-space reader). The packet is dropped (and counted) if the
+ * device node is not open. Returns 0 on success or a negative errno.
+ */
+static int gqti_ctrl_send_cpkt_tomodem(u8 portno, void *buf, size_t len)
+{
+	unsigned long flags;
+	struct qti_ctrl_port *port;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	if (len > MAX_QTI_PKT_SIZE) {
+		pr_err("given pkt size too big:%zu > max_pkt_size:%d\n",
+				len, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (portno >= NR_QTI_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, portno);
+		return -ENODEV;
+	}
+	port = ctrl_port[portno];
+	/* Fix: guard against an unallocated port, as the siblings do. */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	pr_debug("%s: gtype:%d: Add to cpkt_req_q packet with len = %zu\n",
+			__func__, port->gtype, len);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* drop cpkt if port is not open */
+	if (!port->is_open) {
+		pr_debug("rmnet file handler %p(index=%d) is not open",
+		       port, port->index);
+		port->drp_cpkt_cnt++;
+		spin_unlock_irqrestore(&port->lock, flags);
+		free_rmnet_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	port->host_to_modem++;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* wakeup read thread */
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+
+	return 0;
+}
+
+/*
+ * Record a line-state change and queue a zero-length packet so the QTI
+ * daemon wakes up and re-reads the state via QTI_CTRL_GET_LINE_STATE.
+ */
+static void
+gqti_ctrl_notify_modem(void *gptr, u8 portno, int val)
+{
+	struct qti_ctrl_port *port;
+
+	if (portno >= NR_QTI_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, portno);
+		return;
+	}
+	port = ctrl_port[portno];
+	/* Fix: guard against an unallocated port, as the siblings do. */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+
+	atomic_set(&port->line_state, val);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+/*
+ * Bind a gadget function (RMNET or DPL) to a QTI control port: record the
+ * data-transport endpoint info for the EP_LOOKUP ioctl, install the
+ * send/notify callbacks on the function, reset the stats, and mark the
+ * port connected. Returns 0 on success or a negative errno.
+ */
+int gqti_ctrl_connect(void *gr, u8 port_num, unsigned intf,
+			enum transport_type dxport, enum gadget_type gtype)
+{
+	struct qti_ctrl_port *port;
+	struct grmnet *g_rmnet = NULL;
+	struct gqdss *g_dpl = NULL;
+	unsigned long flags;
+
+	pr_debug("%s: gtype:%d gadget:%p\n", __func__, gtype, gr);
+	if (port_num >= NR_QTI_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	port = ctrl_port[port_num];
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->gtype = gtype;
+	if (dxport == USB_GADGET_XPORT_BAM_DMUX) {
+		/*
+		 * BAM-DMUX data transport is used for RMNET and DPL
+		 * on some targets where IPA is not available.
+		 * Set endpoint type as BAM-DMUX and interface
+		 * id as channel number. This information is
+		 * sent to user space via EP_LOOKUP ioctl.
+		 *
+		 */
+
+		port->ep_type = DATA_EP_TYPE_BAM_DMUX;
+		port->intf = (gtype == USB_GADGET_RMNET) ?
+			BAM_DMUX_USB_RMNET_0 :
+			BAM_DMUX_USB_DPL;
+		port->ipa_prod_idx = 0;
+		port->ipa_cons_idx = 0;
+	} else {
+		port->ep_type = DATA_EP_TYPE_HSUSB;
+		port->intf = intf;
+	}
+
+	if (gr && port->gtype == USB_GADGET_RMNET) {
+		port->port_usb = gr;
+		g_rmnet = (struct grmnet *)gr;
+		g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
+		g_rmnet->notify_modem = gqti_ctrl_notify_modem;
+	} else if (gr && port->gtype == USB_GADGET_DPL) {
+		port->port_usb = gr;
+		g_dpl = (struct gqdss *)gr;
+		g_dpl->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
+		g_dpl->notify_modem = gqti_ctrl_notify_modem;
+		/* DPL is unidirectional; line state is always "up". */
+		atomic_set(&port->line_state, 1);
+	} else {
+		spin_unlock_irqrestore(&port->lock, flags);
+		pr_err("%s(): Port is used without gtype.\n", __func__);
+		return -ENODEV;
+	}
+
+	port->host_to_modem = 0;
+	port->copied_to_modem = 0;
+	port->copied_from_modem = 0;
+	port->modem_to_host = 0;
+	port->drp_cpkt_cnt = 0;
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	atomic_set(&port->connected, 1);
+	wake_up(&port->read_wq);
+	/*
+	 * NOTE(review): port->port_usb is read here after the lock is
+	 * dropped — presumably safe because connect/disconnect are
+	 * serialized by the composite core; confirm at the call sites.
+	 */
+	if (port->port_usb && g_rmnet && g_rmnet->connect)
+		g_rmnet->connect(port->port_usb);
+
+	return 0;
+}
+
+/*
+ * Unbind a gadget function from its QTI control port: clear connection and
+ * line state, detach the callbacks, drain any queued control packets, and
+ * queue a zero-length packet so the daemon observes the state change.
+ */
+void gqti_ctrl_disconnect(void *gr, u8 port_num)
+{
+	struct qti_ctrl_port *port;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+	struct grmnet *g_rmnet = NULL;
+	struct gqdss *g_dpl = NULL;
+
+	pr_debug("%s: gadget:%p\n", __func__, gr);
+
+	if (port_num >= NR_QTI_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, port_num);
+		return;
+	}
+
+	port = ctrl_port[port_num];
+
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+
+	atomic_set(&port->connected, 0);
+	atomic_set(&port->line_state, 0);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* reset ipa eps to -1 */
+	port->ipa_prod_idx = -1;
+	port->ipa_cons_idx = -1;
+	port->port_usb = NULL;
+
+	if (gr && port->gtype == USB_GADGET_RMNET) {
+		g_rmnet = (struct grmnet *)gr;
+		g_rmnet->send_encap_cmd = NULL;
+		g_rmnet->notify_modem = NULL;
+	} else if (gr && port->gtype == USB_GADGET_DPL) {
+		g_dpl = (struct gqdss *)gr;
+		g_dpl->send_encap_cmd = NULL;
+		g_dpl->notify_modem = NULL;
+	} else {
+		pr_err("%s(): unrecognized gadget type(%d).\n",
+					__func__, port->gtype);
+	}
+
+	/* Free every control packet still queued for the daemon. */
+	while (!list_empty(&port->cpkt_req_q)) {
+		cpkt = list_first_entry(&port->cpkt_req_q,
+					struct rmnet_ctrl_pkt, list);
+
+		list_del(&cpkt->list);
+		free_rmnet_ctrl_pkt(cpkt);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+/*
+ * Record the IPA producer/consumer pipe indices for @port_num so a later
+ * QTI_CTRL_EP_LOOKUP ioctl can report them to user space. @gr is unused
+ * but kept for interface compatibility with the callers.
+ */
+void gqti_ctrl_update_ipa_pipes(void *gr, u8 port_num, u32 ipa_prod,
+				u32 ipa_cons)
+{
+	struct qti_ctrl_port *port;
+
+	if (port_num >= NR_QTI_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, port_num);
+		return;
+	}
+
+	port = ctrl_port[port_num];
+	/* Fix: guard against an unallocated port before dereferencing. */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+
+	port->ipa_prod_idx = ipa_prod;
+	port->ipa_cons_idx = ipa_cons;
+}
+
+
+/* open(): enforce a single opener and mark the port readable. */
+static int qti_ctrl_open(struct inode *ip, struct file *fp)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	unsigned long flags;
+
+	pr_debug("Open rmnet_ctrl_qti device file name=%s(index=%d)\n",
+		port->name, port->index);
+
+	if (qti_ctrl_lock(&port->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = true;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+/* release(): mark the port closed and drop the single-opener guard. */
+static int qti_ctrl_release(struct inode *ip, struct file *fp)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	unsigned long flags;
+
+	pr_debug("Close rmnet control file");
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = false;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	qti_ctrl_unlock(&port->open_excl);
+
+	return 0;
+}
+
+/*
+ * read() handler: block until a control packet is queued, then copy it to
+ * user space. Returns the packet length, or a negative errno (-ENOMEM if
+ * the user buffer is smaller than the packet).
+ */
+static ssize_t
+qti_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	struct rmnet_ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("%s: Enter(%zu)\n", __func__, count);
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_err("Buffer size is too big %zu, should be at most %d\n",
+			count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->read_excl)) {
+		pr_err("Previous reading is not finished yet\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Block until a packet is available. Note: the break exits the loop
+	 * with port->lock still held; it is released after the packet is
+	 * unlinked below.
+	 */
+	do {
+		spin_lock_irqsave(&port->lock, flags);
+		if (!list_empty(&port->cpkt_req_q))
+			break;
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		pr_debug("%s: Requests list is empty. Wait.\n", __func__);
+		ret = wait_event_interruptible(port->read_wq,
+					!list_empty(&port->cpkt_req_q));
+		if (ret < 0) {
+			pr_debug("Waiting failed\n");
+			qti_ctrl_unlock(&port->read_excl);
+			return -ERESTARTSYS;
+		}
+	} while (1);
+
+	cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
+							list);
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	if (cpkt->len > count) {
+		pr_err("cpkt size too big:%d > buf size:%zu\n",
+				cpkt->len, count);
+		qti_ctrl_unlock(&port->read_excl);
+		free_rmnet_ctrl_pkt(cpkt);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
+
+
+	qti_ctrl_unlock(&port->read_excl);
+
+	/* cpkt is already unlinked, so the copy runs without the lock. */
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		pr_err("copy_to_user failed: err %d\n", ret);
+		ret = -EFAULT;
+	} else {
+		pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
+		ret = cpkt->len;
+		port->copied_to_modem++;
+	}
+
+	free_rmnet_ctrl_pkt(cpkt);
+
+	return ret;
+}
+
+/*
+ * write() handler: forward a control packet from the QTI daemon to the
+ * USB host via the function's send_cpkt_response() callback. Returns the
+ * number of bytes consumed or a negative errno.
+ */
+static ssize_t
+qti_ctrl_write(struct file *fp, const char __user *buf, size_t count,
+		   loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	void *kbuf;
+	unsigned long flags;
+	int ret = 0;
+	struct grmnet *g_rmnet = NULL;
+
+	pr_debug("%s: Enter(%zu) port_index=%d", __func__, count, port->index);
+
+	if (!count) {
+		pr_debug("zero length ctrl pkt\n");
+		return -EINVAL;
+	}
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_debug("given pkt size too big:%zu > max_pkt_size:%d\n",
+				count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->write_excl)) {
+		pr_err("Previous writing not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&port->connected)) {
+		pr_debug("USB cable not connected\n");
+		qti_ctrl_unlock(&port->write_excl);
+		return -EPIPE;
+	}
+
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf) {
+		pr_err("failed to allocate ctrl pkt\n");
+		qti_ctrl_unlock(&port->write_excl);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed err:%d\n", ret);
+		kfree(kbuf);
+		qti_ctrl_unlock(&port->write_excl);
+		return -EFAULT;
+	}
+	port->copied_from_modem++;
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (port && port->port_usb) {
+		if (port->gtype == USB_GADGET_RMNET) {
+			g_rmnet = (struct grmnet *)port->port_usb;
+		} else {
+			spin_unlock_irqrestore(&port->lock, flags);
+			pr_err("%s(): unrecognized gadget type(%d).\n",
+					__func__, port->gtype);
+			/*
+			 * Fix: this path used to return without freeing kbuf
+			 * or releasing write_excl, leaking the buffer and
+			 * making every later write() fail with -EBUSY.
+			 */
+			kfree(kbuf);
+			qti_ctrl_unlock(&port->write_excl);
+			return -EINVAL;
+		}
+
+		if (g_rmnet && g_rmnet->send_cpkt_response) {
+			ret = g_rmnet->send_cpkt_response(port->port_usb,
+							kbuf, count);
+			if (ret)
+				pr_err("%d failed to send ctrl packet.\n", ret);
+			port->modem_to_host++;
+		} else {
+			pr_err("send_cpkt_response callback is NULL\n");
+			ret = -EINVAL;
+		}
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+	kfree(kbuf);
+	qti_ctrl_unlock(&port->write_excl);
+
+	pr_debug("%s: Exit(%zu)", __func__, count);
+	return (ret) ? ret : count;
+}
+
+/*
+ * ioctl() handler for the QTI daemon:
+ *  - QTI_CTRL_MODEM_OFFLINE/ONLINE: forward to the RMNET function's
+ *    disconnect/connect callbacks (not handled for DPL);
+ *  - QTI_CTRL_GET_LINE_STATE: copy the current line state to user space;
+ *  - QTI_CTRL_EP_LOOKUP: report the data endpoint/IPA pipe pairing.
+ * Serialized by the ioctl_excl single-user guard.
+ */
+static long qti_ctrl_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	struct grmnet *gr = NULL;
+	struct ep_info info;
+	int val, ret = 0;
+
+	pr_debug("%s: Received command %d for gtype:%d\n",
+				__func__, cmd, port->gtype);
+
+	if (qti_ctrl_lock(&port->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case QTI_CTRL_MODEM_OFFLINE:
+		if (port && (port->gtype == USB_GADGET_DPL)) {
+			pr_err("%s(): Modem Offline not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->disconnect)
+			gr->disconnect(gr);
+		break;
+	case QTI_CTRL_MODEM_ONLINE:
+		if (port && (port->gtype == USB_GADGET_DPL)) {
+			pr_err("%s(): Modem Online not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->connect)
+			gr->connect(gr);
+		break;
+	case QTI_CTRL_GET_LINE_STATE:
+		val = atomic_read(&port->line_state);
+		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		pr_debug("%s: Sent line_state: %d for gtype:%d\n", __func__,
+			atomic_read(&port->line_state), port->gtype);
+		break;
+	case QTI_CTRL_EP_LOOKUP:
+
+		pr_debug("%s(): EP_LOOKUP for gtype:%d\n", __func__,
+			port->gtype);
+		val = atomic_read(&port->connected);
+		if (!val) {
+			pr_err_ratelimited("EP_LOOKUP failed: not connected\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		/* -1/-1 means gqti_ctrl_update_ipa_pipes() has not run yet. */
+		if (port->ipa_prod_idx == -1 && port->ipa_cons_idx == -1) {
+			pr_err_ratelimited("EP_LOOKUP ipa pipes not updated\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		info.ph_ep_info.ep_type = port->ep_type;
+		info.ph_ep_info.peripheral_iface_id = port->intf;
+		info.ipa_ep_pair.cons_pipe_num = port->ipa_cons_idx;
+		info.ipa_ep_pair.prod_pipe_num = port->ipa_prod_idx;
+
+		pr_debug("%s(): gtype:%d ep_type:%d intf:%d\n",
+			__func__, port->gtype, info.ph_ep_info.ep_type,
+			info.ph_ep_info.peripheral_iface_id);
+
+		pr_debug("%s(): ipa_cons_idx:%d ipa_prod_idx:%d\n",
+			__func__, info.ipa_ep_pair.cons_pipe_num,
+			info.ipa_ep_pair.prod_pipe_num);
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		break;
+	default:
+		pr_err("wrong parameter");
+		ret = -EINVAL;
+	}
+
+exit_ioctl:
+	qti_ctrl_unlock(&port->ioctl_excl);
+
+	return ret;
+}
+
+/* poll(): report readability whenever a control packet is queued. */
+static unsigned int qti_ctrl_poll(struct file *file, poll_table *wait)
+{
+	struct qti_ctrl_port *port = container_of(file->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	if (!port) {
+		pr_err("%s on a NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	poll_wait(file, &port->read_wq, wait);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!list_empty(&port->cpkt_req_q)) {
+		pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
+		mask = POLLIN | POLLRDNORM;
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return mask;
+}
+
+/*
+ * qti_ctrl_read_stats() - debugfs "status" show callback
+ *
+ * Walks every allocated QTI control port and dumps its traffic
+ * counters under the port spinlock.  The seq_file private data is not
+ * used; all entries of ctrl_port[] are reported.
+ */
+static int qti_ctrl_read_stats(struct seq_file *s, void *unused)
+{
+	struct qti_ctrl_port *port;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < NR_QTI_PORTS; i++) {
+		port = ctrl_port[i];
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->lock, flags);
+
+		/* %pK: don't expose raw kernel addresses to debugfs readers */
+		seq_printf(s, "\n#PORT:%d port: %pK\n", i, port);
+		seq_printf(s, "name: %s\n", port->name);
+		seq_printf(s, "host_to_modem: %d\n",
+				port->host_to_modem);
+		seq_printf(s, "copied_to_modem: %d\n",
+				port->copied_to_modem);
+		seq_printf(s, "copied_from_modem: %d\n",
+				port->copied_from_modem);
+		seq_printf(s, "modem_to_host: %d\n",
+				port->modem_to_host);
+		seq_printf(s, "cpkt_drp_cnt: %d\n",
+				port->drp_cpkt_cnt);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind the "status" file to the seq_file show callback */
+static int qti_ctrl_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qti_ctrl_read_stats, inode->i_private);
+}
+
+/*
+ * qti_ctrl_reset_stats() - debugfs "status" write callback
+ *
+ * Any write to the status file clears the traffic counters of every
+ * allocated QTI control port; the written data itself is ignored.
+ */
+static ssize_t qti_ctrl_reset_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct qti_ctrl_port *port;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < NR_QTI_PORTS; i++) {
+		port = ctrl_port[i];
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->lock, flags);
+		port->host_to_modem = 0;
+		port->copied_to_modem = 0;
+		port->copied_from_modem = 0;
+		port->modem_to_host = 0;
+		port->drp_cpkt_cnt = 0;
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+	return count;
+}
+
+/*
+ * File operations for the debugfs "status" node.  The file is opened
+ * with single_open(), so it must be released with single_release() to
+ * free the seq_file state — the original table had no .release and
+ * leaked that state on every close.
+ */
+const struct file_operations qti_ctrl_stats_ops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_stats_open,
+	.read = seq_read,
+	.write = qti_ctrl_reset_stats,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *qti_ctrl_dent;
+
+/*
+ * qti_ctrl_debugfs_init() - create the "usb_qti" debugfs directory and
+ * its "status" file.  Failure is not fatal; the driver just runs
+ * without debugfs support.
+ */
+static void qti_ctrl_debugfs_init(void)
+{
+	struct dentry *qti_ctrl_dfile;
+
+	qti_ctrl_dent = debugfs_create_dir("usb_qti", NULL);
+	/*
+	 * debugfs_create_dir() returns NULL when debugfs is disabled and
+	 * an ERR_PTR on failure; the original only checked IS_ERR and
+	 * could later pass a bad dentry to debugfs_create_file().
+	 */
+	if (IS_ERR_OR_NULL(qti_ctrl_dent)) {
+		qti_ctrl_dent = NULL;
+		return;
+	}
+
+	qti_ctrl_dfile = debugfs_create_file("status", 0444, qti_ctrl_dent,
+					NULL, &qti_ctrl_stats_ops);
+	if (IS_ERR_OR_NULL(qti_ctrl_dfile)) {
+		debugfs_remove(qti_ctrl_dent);
+		/* reset so qti_ctrl_debugfs_exit() doesn't remove twice */
+		qti_ctrl_dent = NULL;
+	}
+}
+
+/* tear down the usb_qti debugfs tree (safe no-op if init failed) */
+static void qti_ctrl_debugfs_exit(void)
+{
+	debugfs_remove_recursive(qti_ctrl_dent);
+}
+
+/* file operations for rmnet device /dev/rmnet_ctrl */
+static const struct file_operations qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	.write = qti_ctrl_write,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	/*
+	 * NOTE(review): the compat path reuses the native ioctl handler —
+	 * confirm every ioctl payload has the same layout for 32-bit
+	 * userspace (no pointers/longs in the structures).
+	 */
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+/* file operations for DPL device /dev/dpl_ctrl */
+static const struct file_operations dpl_qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	/* DPL is read-only from userspace: no write handler on purpose */
+	.write = NULL,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+
+/*
+ * gqti_ctrl_init() - allocate and register all QTI control ports
+ *
+ * Creates NR_QTI_PORTS misc character devices: /dev/rmnet_ctrl for
+ * port 0, /dev/dpl_ctrl for the DPL port, and rmnet_ctrl<n> for the
+ * remaining ports.  On any failure, everything allocated so far is
+ * torn down again.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int gqti_ctrl_init(void)
+{
+	int ret, i, sz = QTI_CTRL_NAME_LEN;
+	struct qti_ctrl_port *port = NULL;
+
+	for (i = 0; i < NR_QTI_PORTS; i++) {
+		port = kzalloc(sizeof(struct qti_ctrl_port), GFP_KERNEL);
+		if (!port) {
+			pr_err("Failed to allocate rmnet control device\n");
+			ret = -ENOMEM;
+			goto fail_init;
+		}
+
+		INIT_LIST_HEAD(&port->cpkt_req_q);
+		spin_lock_init(&port->lock);
+
+		atomic_set(&port->open_excl, 0);
+		atomic_set(&port->read_excl, 0);
+		atomic_set(&port->write_excl, 0);
+		atomic_set(&port->ioctl_excl, 0);
+		atomic_set(&port->connected, 0);
+		atomic_set(&port->line_state, 0);
+
+		init_waitqueue_head(&port->read_wq);
+
+		ctrl_port[i] = port;
+		port->index = i;
+		/* -1 marks "IPA pipe indices not yet known" */
+		port->ipa_prod_idx = -1;
+		port->ipa_cons_idx = -1;
+
+		if (i == 0)
+			strlcat(port->name, RMNET_CTRL_QTI_NAME, sz);
+		else if (i == DPL_QTI_CTRL_PORT_NO)
+			strlcat(port->name, DPL_CTRL_QTI_NAME, sz);
+		else
+			snprintf(port->name, sz, "%s%d",
+				RMNET_CTRL_QTI_NAME, i);
+
+		port->ctrl_device.name = port->name;
+		if (i == DPL_QTI_CTRL_PORT_NO)
+			port->ctrl_device.fops = &dpl_qti_ctrl_fops;
+		else
+			port->ctrl_device.fops = &qti_ctrl_fops;
+		port->ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+		ret = misc_register(&port->ctrl_device);
+		if (ret) {
+			pr_err("rmnet control driver failed to register");
+			/*
+			 * This port was never registered, so the unwind
+			 * loop below (which starts at i - 1) doesn't cover
+			 * it; free it here.  The original leaked it.
+			 */
+			ctrl_port[i] = NULL;
+			kfree(port);
+			goto fail_init;
+		}
+	}
+	qti_ctrl_debugfs_init();
+
+	return 0;
+
+fail_init:
+	/* unwind every port that was fully registered before the failure */
+	for (i--; i >= 0; i--) {
+		misc_deregister(&ctrl_port[i]->ctrl_device);
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+	return ret;
+}
+
+/*
+ * gqti_ctrl_cleanup() - deregister and free every QTI control port,
+ * then remove the debugfs directory.
+ */
+void gqti_ctrl_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < NR_QTI_PORTS; i++) {
+		/* slots may be NULL if gqti_ctrl_init() failed part-way */
+		if (!ctrl_port[i])
+			continue;
+		misc_deregister(&ctrl_port[i]->ctrl_device);
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+	qti_ctrl_debugfs_exit();
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 000000000000..1be03f83249b
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,877 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "usb_gadget_xport.h"
+
+#define IPA_N_PORTS 4
+
+/*
+ * struct ipa_data_ch_info - per-port state for a USB BAM2BAM IPA data
+ * channel.  One instance exists per accelerated data port; the port is
+ * wired to a gadget function via port_usb and to the IPA hardware via
+ * ipa_params and the src/dst pipe indices.
+ */
+struct ipa_data_ch_info {
+	struct usb_request *rx_req;	/* endless OUT (device RX) request */
+	struct usb_request *tx_req;	/* endless IN (device TX) request */
+	unsigned long flags;
+	unsigned id;
+	enum transport_type trans;
+	enum gadget_type gtype;		/* function type set by port_select */
+	bool is_connected;		/* guarded by port_lock */
+	unsigned port_num;
+	spinlock_t port_lock;		/* protects port_usb/is_connected */
+
+	struct work_struct connect_w;
+	struct work_struct disconnect_w;
+	/*
+	 * NOTE(review): suspend_w/resume_w are declared but never
+	 * INIT_WORK'ed in the code visible here — confirm they are
+	 * initialized before use elsewhere, or unused.
+	 */
+	struct work_struct suspend_w;
+	struct work_struct resume_w;
+
+	u32 src_pipe_idx;		/* filled in by usb_bam_connect_ipa() */
+	u32 dst_pipe_idx;		/* filled in by usb_bam_connect_ipa() */
+	u8 src_connection_idx;		/* USB BAM pipe index, OUT side */
+	u8 dst_connection_idx;		/* USB BAM pipe index, IN side */
+	enum usb_ctrl usb_bam_type;
+	struct gadget_ipa_port *port_usb; /* NULL while disconnected */
+	struct usb_bam_connect_ipa_params ipa_params;
+};
+
+/* number of ports actually allocated by ipa_data_setup() */
+static int n_ipa_ports;
+static struct workqueue_struct *ipa_data_wq;
+struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happen
+ * @req: USB endless request
+ *
+ * This completion is being called when endless (TX/RX) transfer is terminated
+ * i.e. disconnect or suspend case.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	/*
+	 * Deliberately log-only: endless requests are queued/dequeued
+	 * explicitly by ipa_data_start/stop_endless_xfer(), so nothing is
+	 * resubmitted from the completion path.
+	 */
+	pr_debug("%s: endless complete for(%s) with status: %d\n",
+				__func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - queue the endless TX or RX request
+ * @port: USB IPA data channel information
+ * @in: true for the IN (device TX) side, false for OUT (device RX)
+ *
+ * Hands the pre-allocated endless request for the selected direction to
+ * the UDC driver, starting the accelerated transfer.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int ret;
+
+	if (!port->port_usb) {
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	if (in) {
+		pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+		ep = port->port_usb->in;
+		req = port->tx_req;
+	} else {
+		pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+		ep = port->port_usb->out;
+		req = port->rx_req;
+	}
+
+	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (ret) {
+		if (in)
+			pr_err("error enqueuing endless TX_REQ, %d\n", ret);
+		else
+			pr_err("error enqueuing endless RX_REQ, %d\n", ret);
+	}
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - dequeue the endless TX or RX request
+ * @port: USB IPA data channel information
+ * @in: true for the IN (device TX) side, false for OUT (device RX)
+ *
+ * Removes the previously queued endless request from the UDC driver,
+ * terminating the accelerated transfer (disconnect/suspend path).
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int ret;
+
+	if (!port->port_usb) {
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	if (in) {
+		pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+		ep = port->port_usb->in;
+		req = port->tx_req;
+	} else {
+		pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+		ep = port->port_usb->out;
+		req = port->rx_req;
+	}
+
+	ret = usb_ep_dequeue(ep, req);
+	if (ret) {
+		if (in)
+			pr_err("error dequeueing endless TX_REQ, %d\n", ret);
+		else
+			pr_err("error dequeueing endless RX_REQ, %d\n", ret);
+	}
+}
+
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * It is being scheduled from ipa_data_disconnect() API when a particular
+ * function is being disabled due to USB disconnect or a USB composition
+ * switch being triggered. This API performs disconnect of USB BAM pipe,
+ * IPA BAM pipe and also initiates USB IPA BAM pipe handshake for the USB
+ * disconnect sequence. Due to the handshake operation and involvement of
+ * SPS related APIs, this functionality can't be used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								disconnect_w);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* is_connected is the single gate against double disconnect */
+	if (!port->is_connected) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_debug("Already disconnected.\n");
+		return;
+	}
+	port->is_connected = false;
+	pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+			port->ipa_params.prod_clnt_hdl,
+			port->ipa_params.cons_clnt_hdl);
+
+	/*
+	 * Drop the lock before calling into the BAM/SPS layer: these calls
+	 * may block (see function comment) and must not run under a
+	 * spinlock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+	if (ret)
+		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+	/* free FIFOs only for directions that were actually connected */
+	if (port->ipa_params.prod_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	if (port->ipa_params.cons_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+
+	pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @port_num: Port num used by function driver which needs to be disabled
+ *
+ * It is being called from atomic context from the gadget driver when a
+ * particular function is being disabled due to USB cable disconnect or a
+ * USB composition switch being triggered. This API restores USB endpoint
+ * operation and disables the USB endpoints used for the accelerated path.
+ * The blocking BAM/IPA teardown itself is deferred to disconnect_w.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+
+	pr_debug("dev:%p port number:%d\n", gp, port_num);
+	if (port_num >= n_ipa_ports) {
+		pr_err("invalid ipa portno#%d\n", port_num);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[port_num];
+	if (!port) {
+		pr_err("port %u is NULL", port_num);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		gadget = port->port_usb->cdev->gadget;
+		/* -1 = "no IPA pipe"; consumed by gqti_ctrl_update_ipa_pipes */
+		port->port_usb->ipa_consumer_ep = -1;
+		port->port_usb->ipa_producer_ep = -1;
+
+		if (port->port_usb->in) {
+			/*
+			 * Disable endpoints.
+			 * Unlocking is needed since disabling the eps might
+			 * stop active transfers and therefore the request
+			 * complete function will be called, where we try
+			 * to obtain the spinlock as well.
+			 */
+			if (gadget_is_dwc3(gadget))
+				msm_ep_unconfig(port->port_usb->in);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+			port->port_usb->in->endless = false;
+		}
+
+		if (port->port_usb->out) {
+			/* same unlock/relock dance as the IN side above */
+			if (gadget_is_dwc3(gadget))
+				msm_ep_unconfig(port->port_usb->out);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+			port->port_usb->out->endless = false;
+		}
+
+		/* mark disconnected; resume/connect paths check this */
+		port->port_usb = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	/* sleeping BAM/IPA teardown runs in workqueue context */
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @bam_type: USB BAM controller type
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures the USB BAM data FIFO using the pipe
+ * configuration fetched for the provided index value. It needs to be
+ * used before starting an endless transfer.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+	struct u_bam_data_connect_info bam_info;
+	struct sps_mem_buffer data_fifo = {0};
+
+	/* only pipe index and data FIFO are needed; desc FIFO is skipped */
+	get_bam2bam_connection_info(bam_type, idx,
+				&bam_info.usb_bam_pipe_idx,
+				NULL, &data_fifo, NULL);
+	msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+			bam_info.usb_bam_pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * It is being scheduled from the ipa_data_connect() API when a particular
+ * function is using the USB IPA accelerated path. This API allocates
+ * requests for the USB endpoints (tx/rx) for endless purpose, configures
+ * the USB endpoints to be used in the accelerated path, connects the USB
+ * BAM pipe and IPA BAM pipe, and also initiates the USB IPA BAM pipe
+ * handshake for the connect sequence.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								connect_w);
+	struct gadget_ipa_port	*gport;
+	struct usb_gadget	*gadget = NULL;
+	u32			sps_params;
+	int			ret;
+	unsigned long		flags;
+	bool			is_ipa_disconnected = true;
+
+	pr_debug("%s: Connect workqueue started", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* port may already have been disconnected before this work ran */
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	gport = port->port_usb;
+	if (gport && gport->cdev)
+		gadget = gport->cdev->gadget;
+
+	if (!gadget) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s: gport is NULL.\n", __func__);
+		return;
+	}
+
+	/* reset pipe bookkeeping; filled in after usb_bam_connect_ipa() */
+	gport->ipa_consumer_ep = -1;
+	gport->ipa_producer_ep = -1;
+	if (gport->out) {
+		/* length 0 + no_interrupt: request never "completes" (endless) */
+		port->rx_req = usb_ep_alloc_request(gport->out, GFP_ATOMIC);
+		if (!port->rx_req) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: failed to allocate rx_req\n", __func__);
+			return;
+		}
+		port->rx_req->context = port;
+		port->rx_req->complete = ipa_data_endless_complete;
+		port->rx_req->length = 0;
+		port->rx_req->no_interrupt = 1;
+	}
+
+	if (gport->in) {
+		port->tx_req = usb_ep_alloc_request(gport->in, GFP_ATOMIC);
+		if (!port->tx_req) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: failed to allocate tx_req\n", __func__);
+			goto free_rx_req;
+		}
+		port->tx_req->context = port;
+		port->tx_req->complete = ipa_data_endless_complete;
+		port->tx_req->length = 0;
+		port->tx_req->no_interrupt = 1;
+	}
+
+	port->is_connected = true;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* update IPA Parameteres here. */
+	port->ipa_params.usb_connection_speed = gadget->speed;
+	if (gadget_is_dwc3(gadget))
+		port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+	port->ipa_params.skip_ep_cfg = true;
+	port->ipa_params.keep_ipa_awake = true;
+	port->ipa_params.cons_clnt_hdl = -1;
+	port->ipa_params.prod_clnt_hdl = -1;
+
+
+	if (gport->out) {
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+
+		if (gadget_is_dwc3(gadget)) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+				| MSM_PRODUCER | port->src_pipe_idx;
+			port->rx_req->length = 32*1024;
+			port->rx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->src_connection_idx,
+					port->port_usb->out);
+			ret = msm_ep_config(gport->out, port->rx_req,
+					GFP_ATOMIC);
+			if (ret) {
+				pr_err("msm_ep_config() failed for OUT EP\n");
+				usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+				goto free_rx_tx_req;
+			}
+		} else {
+			sps_params = (MSM_SPS_MODE | port->src_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->rx_req->udc_priv = sps_params;
+		}
+	}
+
+	if (gport->in) {
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+		if (gadget_is_dwc3(gadget)) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+						port->dst_pipe_idx;
+			port->tx_req->length = 32*1024;
+			port->tx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->dst_connection_idx, gport->in);
+			ret = msm_ep_config(gport->in, port->tx_req,
+					GFP_ATOMIC);
+			if (ret) {
+				pr_err("msm_ep_config() failed for IN EP\n");
+				goto unconfig_msm_ep_out;
+			}
+		} else {
+			sps_params = (MSM_SPS_MODE | port->dst_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->tx_req->udc_priv = sps_params;
+		}
+	}
+
+	/*
+	 * Perform below operations for Tx from Device (OUT transfer)
+	 * 1. Connect with pipe of USB BAM with IPA BAM pipe
+	 * 2. Update USB Endpoint related information using SPS Param.
+	 * 3. Configure USB Endpoint/DBM for the same.
+	 * 4. Override USB ep queue functionality for endless transfer.
+	 */
+	if (gport->out) {
+		pr_debug("configure bam ipa connect for USB OUT\n");
+		port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+			goto unconfig_msm_ep_in;
+		}
+		gadget->bam2bam_func_enabled = true;
+
+		gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+		is_ipa_disconnected = false;
+	}
+
+	if (gport->in) {
+		pr_debug("configure bam ipa connect for USB IN\n");
+		port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+		/*
+		 * NOTE(review): dst_client is forced to the DPL consumer
+		 * whenever an IN endpoint exists — confirm this connect
+		 * path is used only for the DPL function.
+		 */
+		port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		gadget->bam2bam_func_enabled = true;
+
+		gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+		is_ipa_disconnected = false;
+	}
+
+	pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+			gport->ipa_producer_ep,
+			gport->ipa_consumer_ep);
+
+	/* publish pipe indices so the QTI control EP_LOOKUP ioctl works */
+	gqti_ctrl_update_ipa_pipes(NULL, DPL_QTI_CTRL_PORT_NO,
+				gport->ipa_producer_ep,
+				gport->ipa_consumer_ep);
+
+	pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+			port->src_connection_idx, port->dst_connection_idx);
+
+	if (gport->out)
+		ipa_data_start_endless_xfer(port, false);
+	if (gport->in)
+		ipa_data_start_endless_xfer(port, true);
+
+	pr_debug("Connect workqueue done (port %p)", port);
+	return;
+
+disconnect_usb_bam_ipa_out:
+	if (!is_ipa_disconnected) {
+		usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+		is_ipa_disconnected = true;
+	}
+unconfig_msm_ep_in:
+	/*
+	 * NOTE(review): the forward path only calls msm_ep_config() when
+	 * gadget_is_dwc3(); here msm_ep_unconfig() runs unconditionally —
+	 * confirm it is a safe no-op on non-dwc3 controllers.
+	 */
+	if (gport->in)
+		msm_ep_unconfig(port->port_usb->in);
+unconfig_msm_ep_out:
+	if (gport->in)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	if (gport->out) {
+		msm_ep_unconfig(port->port_usb->out);
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+	}
+free_rx_tx_req:
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->is_connected = false;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	if (gport->in && port->tx_req)
+		usb_ep_free_request(gport->in, port->tx_req);
+free_rx_req:
+	if (gport->out && port->rx_req)
+		usb_ep_free_request(gport->out, port->rx_req);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @port_num: port number used by accelerated function
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * It is being called from the accelerated function driver (from set_alt())
+ * to initiate the USB BAM IPA connection. This API enables the accelerated
+ * endpoints and schedules connect_w which establishes the USB IPA BAM
+ * communication.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
+		u8 src_connection_idx, u8 dst_connection_idx)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	int ret;
+
+	pr_debug("dev:%p port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+			gp, port_num, src_connection_idx, dst_connection_idx);
+
+	if (port_num >= n_ipa_ports) {
+		pr_err("invalid portno#%d\n", port_num);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	if (!gp) {
+		pr_err("gadget port is null\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	port = ipa_data_ports[port_num];
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gp;
+	port->src_connection_idx = src_connection_idx;
+	port->dst_connection_idx = dst_connection_idx;
+	port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+	port->ipa_params.src_pipe = &(port->src_pipe_idx);
+	port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+	port->ipa_params.src_idx = src_connection_idx;
+	port->ipa_params.dst_idx = dst_connection_idx;
+
+	/*
+	 * Reject a port with no endpoints up front, before anything is
+	 * enabled (the original checked only after the enable calls).
+	 */
+	if (!gp->in && !gp->out) {
+		pr_err("%s(): No USB endpoint enabled.\n", __func__);
+		ret = -EINVAL;
+		goto err_usb_in;
+	}
+
+	/*
+	 * Disable Xfer complete and Xfer not ready interrupts by
+	 * marking endless flag which is used in UDC driver to enable
+	 * these interrupts. with this set, these interrupts for selected
+	 * endpoints won't be enabled.
+	 */
+	if (gp->in) {
+		gp->in->endless = true;
+		ret = usb_ep_enable(gp->in);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:IN ep:%p", gp->in);
+			gp->in->endless = false;
+			goto err_usb_in;
+		}
+	}
+
+	if (gp->out) {
+		gp->out->endless = true;
+		ret = usb_ep_enable(gp->out);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:OUT ep:%p",
+					gp->out);
+			gp->out->endless = false;
+			goto err_usb_out;
+		}
+	}
+
+	queue_work(ipa_data_wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+
+err_usb_out:
+	if (gp->in) {
+		/*
+		 * The IN endpoint was successfully enabled above; undo it
+		 * (the original left it enabled and only cleared endless).
+		 * No endless request has been queued yet, so no completion
+		 * can race for the port lock here.
+		 */
+		usb_ep_disable(gp->in);
+		gp->in->endless = false;
+	}
+err_usb_in:
+	/* don't leave a stale gadget port pointer behind on failure */
+	port->port_usb = NULL;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+	pr_debug("%s(): failed with error:%d\n", __func__, ret);
+	return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to restart USB endless transfer for USB bus resume.
+ * For USB consumer case, it restarts USB endless RX transfer, whereas
+ * for USB producer case, it resets DBM endpoint and restart USB endless
+ * TX transfer.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+	struct usb_gadget *gadget = NULL;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	gadget = port->port_usb->cdev->gadget;
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): start endless RX\n", __func__);
+		ipa_data_start_endless_xfer(port, false);
+	} else {
+		pr_debug("%s(): start endless TX\n", __func__);
+		/* after LPM the data FIFO must be reprogrammed before TX */
+		if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_fifo(port->usb_bam_type,
+				 port->dst_connection_idx, port->port_usb->in);
+		}
+		ipa_data_start_endless_xfer(port, true);
+	}
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * Dequeues the endless transfer for the given direction; used on USB
+ * bus suspend.
+ */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+	bool is_tx;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	is_tx = (dir != USB_TO_PEER_PERIPHERAL);
+	if (is_tx)
+		pr_debug("%s(): stop endless TX transfer\n", __func__);
+	else
+		pr_debug("%s(): stop endless RX transfer\n", __func__);
+
+	ipa_data_stop_endless_xfer(port, is_tx);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @port_num: port number used by function
+ *
+ * It is being used to initiate USB BAM IPA suspend functionality
+ * for USB bus suspend functionality.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num)
+{
+	struct ipa_data_ch_info *port;
+	int ret;
+
+	pr_debug("dev:%p port number:%d\n", gp, port_num);
+
+	if (port_num >= n_ipa_ports) {
+		pr_err("invalid ipa portno#%d\n", port_num);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[port_num];
+	if (!port) {
+		pr_err("port %u is NULL", port_num);
+		return;
+	}
+
+	pr_debug("%s: suspend started\n", __func__);
+	/* let the BAM wake the link on remote activity while suspended */
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+			port->dst_connection_idx, NULL, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		return;
+	}
+
+	/* BAM calls back into ipa_data_start/stop around suspend/resume */
+	usb_bam_register_start_stop_cbs(port->usb_bam_type,
+			port->dst_connection_idx, ipa_data_start,
+			ipa_data_stop, port);
+	usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @port_num: port number used by function
+ *
+ * It is being used to initiate USB resume functionality
+ * for USB bus resume case.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+	int ret;
+
+	pr_debug("dev:%p port number:%d\n", gp, port_num);
+
+	if (port_num >= n_ipa_ports) {
+		pr_err("invalid ipa portno#%d\n", port_num);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[port_num];
+	if (!port) {
+		pr_err("port %u is NULL", port_num);
+		return;
+	}
+
+	pr_debug("%s: resume started\n", __func__);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/*
+	 * port_usb is cleared by ipa_data_disconnect(); bail out if the
+	 * cable was disconnected before resume ran.  The original
+	 * dereferenced it unconditionally and could crash here.
+	 */
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	gadget = port->port_usb->cdev->gadget;
+	if (!gadget) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): Gadget is NULL.\n", __func__);
+		return;
+	}
+
+	/* drop the suspend-time wake callback registration */
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+				port->dst_connection_idx, NULL, NULL);
+	if (ret) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		return;
+	}
+
+	if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+		/* FIFOs must be reprogrammed after low-power mode exit */
+		configure_fifo(port->usb_bam_type, port->src_connection_idx,
+				port->port_usb->out);
+		configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+				port->port_usb->in);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		msm_dwc3_reset_dbm_ep(port->port_usb->in);
+		spin_lock_irqsave(&port->port_lock, flags);
+		usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate IPA USB Port structure
+ * @portno: port number to be used by particular USB function
+ *
+ * It is being used by USB function driver to allocate IPA data port
+ * for USB IPA data accelerated path.  Already-allocated ports are
+ * left untouched.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+static int ipa_data_port_alloc(int portno)
+{
+	struct ipa_data_ch_info *port;
+
+	if (ipa_data_ports[portno]) {
+		pr_debug("port %d already allocated.\n", portno);
+		return 0;
+	}
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		pr_err("no memory to allocate port %d\n", portno);
+		return -ENOMEM;
+	}
+
+	ipa_data_ports[portno] = port;
+	pr_debug("port:%p with portno:%d allocated\n", port, portno);
+
+	return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @portno: port number to be used by particular USB function
+ * @gtype: USB gadget function type
+ *
+ * It is being used by USB function driver to select which BAM2BAM IPA
+ * port particular USB function wants to use.
+ */
+void ipa_data_port_select(int portno, enum gadget_type gtype)
+{
+	struct ipa_data_ch_info *port = NULL;
+
+	pr_debug("portno:%d\n", portno);
+
+	port = ipa_data_ports[portno];
+	port->port_num  = portno;
+	port->is_connected = false;
+
+	spin_lock_init(&port->port_lock);
+
+	/*
+	 * NOTE(review): work_pending() is queried before INIT_WORK() has
+	 * ever run; this relies on the kzalloc'ed work_struct reading as
+	 * "not pending".  Re-selecting a port skips re-initialisation
+	 * while its work is still queued.
+	 */
+	if (!work_pending(&port->connect_w))
+		INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+	if (!work_pending(&port->disconnect_w))
+		INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+	port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+	port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+	port->gtype = gtype;
+}
+
+/**
+ * ipa_data_setup() - setup BAM2BAM IPA port
+ * @no_ipa_port: total number of BAM2BAM IPA port to support
+ *
+ * Each USB function who wants to use BAM2BAM IPA port would
+ * be counting number of IPA port to use and initialize those
+ * ports at time of bind_config() in android gadget driver.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+int ipa_data_setup(unsigned int no_ipa_port)
+{
+	int i, ret;
+
+	pr_debug("requested %d IPA BAM ports", no_ipa_port);
+
+	if (!no_ipa_port || no_ipa_port > IPA_N_PORTS) {
+		pr_err("Invalid num of ports count:%d\n", no_ipa_port);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < no_ipa_port; i++) {
+		n_ipa_ports++;
+		ret = ipa_data_port_alloc(i);
+		if (ret) {
+			n_ipa_ports--;
+			pr_err("Failed to alloc port:%d\n", i);
+			goto free_ipa_ports;
+		}
+	}
+
+	pr_debug("n_ipa_ports:%d\n", n_ipa_ports);
+
+	if (ipa_data_wq) {
+		pr_debug("ipa_data_wq is already setup.");
+		return 0;
+	}
+
+	ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!ipa_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_ipa_ports;
+	}
+
+	return 0;
+
+free_ipa_ports:
+	for (i = 0; i < n_ipa_ports; i++) {
+		kfree(ipa_data_ports[i]);
+		ipa_data_ports[i] = NULL;
+	}
+	/* reset the count so a failed setup doesn't leave stale state */
+	n_ipa_ports = 0;
+
+	/*
+	 * The original destroyed the workqueue inside the port-free loop
+	 * (on its first iteration, and never when no port was allocated).
+	 * Tear it down once, after all ports are freed.
+	 */
+	if (ipa_data_wq) {
+		destroy_workqueue(ipa_data_wq);
+		ipa_data_wq = NULL;
+	}
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 000000000000..b7d47ab1bb04
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include "usb_gadget_xport.h"
+
+/*
+ * struct gadget_ipa_port - gadget-function-side view of an accelerated
+ * USB IPA data port.  Filled in by the function driver and handed to
+ * ipa_data_connect()/ipa_data_disconnect().
+ */
+struct gadget_ipa_port {
+	struct usb_composite_dev	*cdev;	/* owning composite device */
+	struct usb_function		*func;	/* function using this port */
+	struct usb_ep			*in;	/* device TX (IN) endpoint */
+	struct usb_ep			*out;	/* device RX (OUT) endpoint */
+	int				ipa_consumer_ep; /* -1 until connected */
+	int				ipa_producer_ep; /* -1 until connected */
+
+};
+
+/* bind a BAM2BAM IPA port number to a gadget function type */
+void ipa_data_port_select(int portno, enum gadget_type gtype);
+/* atomic-context teardown; schedules the blocking disconnect work */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, u8 port_num);
+/* enable endpoints and schedule the USB BAM IPA connect work */
+int ipa_data_connect(struct gadget_ipa_port *gp, u8 port_num,
+			u8 src_connection_idx, u8 dst_connection_idx);
+/* allocate @no_ipa_port ports and the shared workqueue */
+int ipa_data_setup(unsigned int no_ipa_port);
+/* bus resume/suspend hooks */
+void ipa_data_resume(struct gadget_ipa_port *gp, u8 port_num);
+void ipa_data_suspend(struct gadget_ipa_port *gp, u8 port_num);
+
+#endif
diff --git a/drivers/usb/gadget/function/u_qc_ether.c b/drivers/usb/gadget/function/u_qc_ether.c
new file mode 100644
index 000000000000..118bab4477ce
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.c
@@ -0,0 +1,454 @@
+/*
+ * u_qc_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used. Each end of the link uses one address. The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link. (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ *
+ * These utilities are based on the Ethernet-over-USB link layer
+ * utilities and contain an MSM-specific implementation.
+ */
+
+#define UETH__VERSION	"29-May-2008"
+
+/* Per-link device state; one instance lives in the netdev's private
+ * area (accessed everywhere below via netdev_priv()).
+ */
+struct eth_qc_dev {
+	/* lock is held while accessing port_usb
+	 * or updating its backlink port_usb->ioport
+	 */
+	spinlock_t lock;
+	struct qc_gether *port_usb;	/* active USB link, NULL when idle */
+
+	struct net_device *net;		/* backpointer to our netdev */
+	struct usb_gadget *gadget;	/* UDC this link is bound to */
+
+	unsigned header_len;	/* copied from qc_gether->header_len */
+
+	bool zlp;		/* copied from qc_gether->is_zlp_ok */
+	u8 host_mac[ETH_ALEN];	/* MAC address presented to the host */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Per-device logging helpers; every message is prefixed with the
+ * netdev name.  Undefine first to avoid clashing with the macros of
+ * the same name from other gadget headers.
+ */
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG	DBG
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+/*
+ * ueth_qc_change_mtu - ndo_change_mtu hook for the gadget netdev.
+ *
+ * Refuses to change the MTU while the USB link is connected (the
+ * host side would not learn of the change) and bounds-checks the
+ * requested value against standard Ethernet frame sizes.
+ */
+static int ueth_qc_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_qc_dev *edev = netdev_priv(net);
+	unsigned long irqflags;
+	int ret;
+
+	/* MTU must stay fixed while the link is "live" */
+	spin_lock_irqsave(&edev->lock, irqflags);
+	if (edev->port_usb) {
+		ret = -EBUSY;
+	} else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN) {
+		ret = -ERANGE;
+	} else {
+		net->mtu = new_mtu;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&edev->lock, irqflags);
+
+	return ret;
+}
+
+/* ethtool .get_drvinfo: report driver name/version plus the UDC this
+ * link is bound to (the gadget name doubles as the "fw_version").
+ */
+static void eth_qc_get_drvinfo(struct net_device *net,
+		struct ethtool_drvinfo *p)
+{
+	struct eth_qc_dev *dev = netdev_priv(net);
+
+	strlcpy(p->driver, "g_qc_ether", sizeof p->driver);
+	strlcpy(p->version, UETH__VERSION, sizeof p->version);
+	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+/* Minimal ethtool support: driver info and carrier state only. */
+static const struct ethtool_ops qc_ethtool_ops = {
+	.get_drvinfo = eth_qc_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+/*
+ * eth_qc_start_xmit - ndo_start_xmit hook.
+ *
+ * This link does not transmit through the kernel network stack, so
+ * any skb that reaches this function cannot be sent.  Returning
+ * NETDEV_TX_OK tells the stack we consumed the skb, so we must free
+ * it ourselves here instead of silently leaking it; account it as a
+ * dropped packet so the loss is visible in interface statistics.
+ */
+static netdev_tx_t eth_qc_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	dev_kfree_skb_any(skb);
+	net->stats.tx_dropped++;
+	return NETDEV_TX_OK;
+}
+
+/*
+ * eth_qc_open - ndo_open hook; bring the interface up.
+ *
+ * When the carrier is already up, bounce it off/on so userspace gets
+ * a fresh RTM_NEWLINK event reflecting the USB cable status, then
+ * notify the bound USB function (if any) through its open callback.
+ */
+static int eth_qc_open(struct net_device *net)
+{
+	struct eth_qc_dev *dev = netdev_priv(net);
+	struct qc_gether *port;
+
+	DBG(dev, "%s\n", __func__);
+
+	if (netif_carrier_ok(dev->net)) {
+		/* toggle carrier to force an RTM_NEWLINK notification */
+		netif_carrier_off(dev->net);
+		netif_carrier_on(dev->net);
+		netif_wake_queue(dev->net);
+	}
+
+	spin_lock_irq(&dev->lock);
+	port = dev->port_usb;
+	if (port && port->open)
+		port->open(port);
+	spin_unlock_irq(&dev->lock);
+
+	return 0;
+}
+
+/*
+ * eth_qc_stop - ndo_stop hook; take the interface down.
+ *
+ * Stops the TX queue and notifies the bound USB function through its
+ * close callback.  The port_usb pointer is sampled under dev->lock:
+ * the original code read it before taking the lock and then only
+ * re-checked dev->port_usb, so a concurrent disconnect could leave
+ * close() being invoked through a stale link pointer.
+ */
+static int eth_qc_stop(struct net_device *net)
+{
+	struct eth_qc_dev *dev = netdev_priv(net);
+	unsigned long flags;
+	struct qc_gether *link;
+
+	VDBG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	link = dev->port_usb;	/* read under the lock, not before it */
+	if (link && link->close)
+		link->close(link);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Optional fixed MAC addresses supplied at module load time.  When a
+ * parameter is absent or does not parse, get_qc_ether_addr() falls
+ * back to a randomly generated address.
+ */
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *qc_dev_addr;
+module_param(qc_dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_dev_addr, "QC Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *qc_host_addr;
+module_param(qc_host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_host_addr, "QC Host Ethernet Address");
+
+/*
+ * get_qc_ether_addr - parse a MAC address string, or generate one.
+ * @str: "xx:xx:xx:xx:xx:xx" style string ('.' or ':' separators
+ *	accepted), or NULL
+ * @dev_addr: ETH_ALEN byte buffer that receives the address
+ *
+ * Returns 0 when @str held a valid address, 1 when a random address
+ * was generated instead (missing/malformed string, or the parsed
+ * address failed is_valid_ether_addr()).
+ *
+ * Unlike the original, a non-hex digit aborts parsing immediately:
+ * hex_to_bin() returns -1 on bad input, and the old code both stored
+ * the resulting garbage byte and kept advancing @str, which could
+ * read past the terminating NUL of a short string.
+ */
+static int get_qc_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned i;
+
+		for (i = 0; i < ETH_ALEN; i++) {
+			int hi, lo;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			hi = hex_to_bin(*str);
+			if (hi < 0)
+				goto random;
+			str++;
+			lo = hex_to_bin(*str);
+			if (lo < 0)
+				goto random;
+			str++;
+			dev_addr[i] = (hi << 4) | lo;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+random:
+	random_ether_addr(dev_addr);
+	return 1;
+}
+
+/* net_device_ops: MTU changes go through ueth_qc_change_mtu() so they
+ * can be refused while the USB link is live; transmit is effectively
+ * a stub (see eth_qc_start_xmit()); MAC handling uses the generic
+ * ethernet helpers.
+ */
+static const struct net_device_ops eth_qc_netdev_ops = {
+	.ndo_open		= eth_qc_open,
+	.ndo_stop		= eth_qc_stop,
+	.ndo_start_xmit		= eth_qc_start_xmit,
+	.ndo_change_mtu		= ueth_qc_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/* sysfs device type tag for the gadget-backed network interface */
+static struct device_type qc_gadget_type = {
+	.name	= "gadget",
+};
+
+/**
+ * gether_qc_get_macs - fetch the device- and host-side MAC addresses
+ * @dev_mac: buffer for the device-side address
+ * @host_mac: buffer for the host-side address
+ *
+ * Fills both buffers from the qc_dev_addr/qc_host_addr module
+ * parameters, generating random addresses when a parameter is absent
+ * or invalid (logged at debug level).
+ */
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
+{
+	if (get_qc_ether_addr(qc_dev_addr, dev_mac))
+		pr_debug("using random dev_mac ethernet address\n");
+	if (get_qc_ether_addr(qc_host_addr, host_mac))
+		pr_debug("using random host_mac ethernet address\n");
+}
+
+/**
+ * gether_qc_setup - initialize one ethernet-over-usb link
+ * @g: gadget to associate with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+	/* convenience wrapper: default network device prefix "usb" */
+	return gether_qc_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_qc_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	struct net_device *net;
+	struct eth_qc_dev *dev;
+	int status;
+
+	net = alloc_etherdev(sizeof(*dev));
+	if (!net)
+		return -ENOMEM;
+
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+
+	/* network device setup */
+	dev->net = net;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+	/* MAC addresses come from module parameters when provided,
+	 * otherwise they are randomly generated
+	 */
+	if (get_qc_ether_addr(qc_dev_addr, net->dev_addr))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+	if (get_qc_ether_addr(qc_host_addr, dev->host_mac))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "host");
+
+	/* export the host-side address to the caller if requested */
+	if (ethaddr)
+		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_qc_netdev_ops;
+	net->ethtool_ops = &qc_ethtool_ops;
+
+	/* carrier stays down until the USB link is connected */
+	netif_carrier_off(net);
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &qc_gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+		return status;
+	}
+
+	INFO(dev, "MAC %pM\n", net->dev_addr);
+	INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+	return status;
+}
+
+/**
+ * gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_qc_setup().
+ */
+void gether_qc_cleanup_name(const char *netname)
+{
+	struct net_device *net_dev;
+
+	/* look up the netdev registered by gether_qc_setup_name() */
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return;
+
+	/* drop the lookup reference taken by dev_get_by_name() */
+	dev_put(net_dev);
+	unregister_netdev(net_dev);
+	free_netdev(net_dev);
+}
+
+/*
+ * gether_qc_get_net - look up the gadget network device by name.
+ *
+ * Returns the net_device registered under @netname, or
+ * ERR_PTR(-EINVAL) when no such device exists.  The reference taken
+ * by dev_get_by_name() is dropped before returning, so the caller
+ * does not own an extra refcount on the returned device.
+ */
+struct net_device *gether_qc_get_net(const char *netname)
+{
+	struct net_device *net_dev = dev_get_by_name(&init_net, netname);
+
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/* balance the refcount taken by dev_get_by_name() */
+	dev_put(net_dev);
+	return net_dev;
+}
+/**
+ * gether_qc_connect_name - notify network layer that USB link
+ * is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ * current device speed, and any framing wrapper(s) set up.
+ * @netname: name for network device (for example, "usb")
+ * Context: irqs blocked
+ * @netif_enable: if true, net interface will be turned on
+ *
+ * This is called to let the network layer know the connection
+ * is active ("carrier detect").
+ */
+struct net_device *gether_qc_connect_name(struct qc_gether *link,
+		const char *netname, bool netif_enable)
+{
+	struct net_device *net_dev;
+	struct eth_qc_dev *dev;
+
+	/* Extract the eth_qc_dev from the net device */
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/* drop the lookup reference; the netdev outlives this call */
+	dev_put(net_dev);
+	dev = netdev_priv(net_dev);
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	/* cache per-connection framing parameters from the USB side */
+	dev->zlp = link->is_zlp_ok;
+	dev->header_len = link->header_len;
+
+	/* establish the bidirectional link<->netdev association and
+	 * tell the function driver the current interface state.
+	 * NOTE(review): close() is invoked when the interface is down,
+	 * presumably to sync the function's notion of link state --
+	 * confirm against the function drivers' callbacks.
+	 */
+	spin_lock(&dev->lock);
+	dev->port_usb = link;
+	link->ioport = dev;
+	if (netif_running(dev->net)) {
+		if (link->open)
+			link->open(link);
+	} else {
+		if (link->close)
+			link->close(link);
+	}
+	spin_unlock(&dev->lock);
+
+	/* announce carrier only when the caller wants traffic flowing */
+	if (netif_enable) {
+		netif_carrier_on(dev->net);
+		if (netif_running(dev->net))
+			netif_wake_queue(dev->net);
+	}
+
+	return dev->net;
+}
+
+/**
+ * gether_qc_disconnect_name - notify network layer that USB
+ * link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ */
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname)
+{
+	struct net_device *net_dev;
+	struct eth_qc_dev *dev;
+
+	/* Extract the eth_qc_dev from the net device */
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return;
+
+	/* drop the lookup reference taken by dev_get_by_name() */
+	dev_put(net_dev);
+	dev = netdev_priv(net_dev);
+
+	if (!dev)
+		return;
+
+	DBG(dev, "%s\n", __func__);
+
+	/* quiesce the interface before severing the USB association */
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	/* clear both directions of the backlink under the port lock */
+	spin_lock(&dev->lock);
+	dev->port_usb = NULL;
+	link->ioport = NULL;
+	spin_unlock(&dev->lock);
+}
diff --git a/drivers/usb/gadget/function/u_qc_ether.h b/drivers/usb/gadget/function/u_qc_ether.h
new file mode 100644
index 000000000000..c5706edf8d2f
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.h
@@ -0,0 +1,101 @@
+/*
+ * u_qc_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __U_QC_ETHER_H
+#define __U_QC_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration. When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ *
+ * This interface is based on the Ethernet-over-USB link layer
+ * utilities and contains an MSM-specific implementation.
+ */
+
+/* USB-side view of one "ethernet" link; embedded in the function
+ * driver's own state and passed to gether_qc_connect_name().
+ */
+struct qc_gether {
+	struct usb_function func;
+
+	/* updated by gether_{connect,disconnect} */
+	struct eth_qc_dev *ioport;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep *in_ep;
+	struct usb_ep *out_ep;
+
+	/* copied into the netdev state at connect time */
+	bool is_zlp_ok;
+
+	/* NOTE(review): presumably CDC packet-filter bits -- confirm */
+	u16 cdc_filter;
+
+	/* hooks for added framing, as needed for RNDIS and EEM. */
+	u32 header_len;
+
+	struct sk_buff *(*wrap)(struct qc_gether *port,
+				struct sk_buff *skb);
+	int (*unwrap)(struct qc_gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list);
+
+	/* called on network open/close */
+	void (*open)(struct qc_gether *);
+	void (*close)(struct qc_gether *);
+};
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_qc_cleanup_name(const char *netname);
+/* variant of gether_setup that allows customizing network device name */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+	const char *netname);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_qc_connect_name(struct qc_gether *link,
+	const char *netname, bool netif_enable);
+struct net_device *gether_qc_get_net(const char *netname);
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
+
+/* each configuration may bind one instance of an ethernet link */
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+	char *xport_name);
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+	u32 vendorID, const char *manufacturer,
+	u8 maxPktPerXfer, u8 pkt_alignment_factor,
+	char *xport_name);
+
+/* fetch (or randomly generate) the device/host MAC address pair */
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
+#endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 000000000000..4336dbf26274
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* One queued control packet exchanged over the rmnet control channel. */
+struct rmnet_ctrl_pkt {
+	void *buf;	/* payload */
+	int len;	/* payload length, bytes */
+	struct list_head list;
+};
+
+/* USB-side representation of one rmnet link; callbacks are split by
+ * direction as documented on each group below.
+ */
+struct grmnet {
+	struct usb_function func;
+
+	struct usb_gadget *gadget;
+	struct usb_ep *in;
+	struct usb_ep *out;
+
+	/* to usb host, aka laptop, windows pc etc. Will
+	 * be filled by usb driver of rmnet functionality
+	 */
+	int (*send_cpkt_response)(void *g, void *buf, size_t len);
+
+	/* to modem, and to be filled by driver implementing
+	 * control function
+	 */
+	int (*send_encap_cmd)(u8 port_num, void *buf, size_t len);
+
+	void (*notify_modem)(void *g, u8 port_num, int cbits);
+
+	void (*disconnect)(struct grmnet *g);
+	void (*connect)(struct grmnet *g);
+};
+
+/* total QTI ports = rmnet ports + DPL ports (both defined below) */
+#define NR_QTI_PORTS	(NR_RMNET_PORTS + NR_DPL_PORTS)
+#define NR_RMNET_PORTS	4
+#define NR_DPL_PORTS	1
+
+/* index of the control client using a shared control transport */
+enum ctrl_client {
+	FRMNET_CTRL_CLIENT,
+	GPS_CTRL_CLIENT,
+
+	NR_CTRL_CLIENTS
+};
+
+/* BAM data-path setup/connect/teardown and power-management hooks.
+ * NOTE(review): enum transport_type is not declared in this header;
+ * it presumably comes from usb_gadget_xport.h, which callers must
+ * include first -- confirm.
+ */
+int gbam_setup(unsigned int no_bam_port);
+int gbam2bam_setup(unsigned int no_bam2bam_port);
+void gbam_cleanup(void);
+int gbam_connect(struct grmnet *gr, u8 port_num,
+	enum transport_type trans, u8 src_connection_idx,
+	u8 dst_connection_idx);
+void gbam_disconnect(struct grmnet *gr, u8 port_num,
+	enum transport_type trans);
+void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans);
+void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans);
+/* MBIM variant of the BAM data path */
+int gbam_mbim_setup(void);
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+	struct usb_ep *out);
+void gbam_mbim_disconnect(void);
+/* SMD-based control channel */
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
+int gsmd_ctrl_setup(enum ctrl_client client_num, unsigned int count,
+	u8 *first_port_idx);
+#endif /* __U_RMNET_H */