Diffstat (limited to 'drivers/usb/gadget/function')
-rw-r--r--  drivers/usb/gadget/function/Makefile | 19
-rw-r--r--  drivers/usb/gadget/function/f_accessory.c | 168
-rw-r--r--  drivers/usb/gadget/function/f_acm.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_audio_source.c | 75
-rw-r--r--  drivers/usb/gadget/function/f_ccid.c | 1176
-rw-r--r--  drivers/usb/gadget/function/f_ccid.h | 83
-rw-r--r--  drivers/usb/gadget/function/f_cdev.c | 1847
-rw-r--r--  drivers/usb/gadget/function/f_diag.c | 1116
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 892
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 3302
-rw-r--r--  drivers/usb/gadget/function/f_gsi.h | 1374
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 304
-rw-r--r--  drivers/usb/gadget/function/f_loopback.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 49
-rw-r--r--  drivers/usb/gadget/function/f_mbim.c | 2147
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 62
-rw-r--r--  drivers/usb/gadget/function/f_mtp.c | 664
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 141
-rw-r--r--  drivers/usb/gadget/function/f_obex.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_qc_ecm.c | 1166
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 1552
-rw-r--r--  drivers/usb/gadget/function/f_qdss.c | 1187
-rw-r--r--  drivers/usb/gadget/function/f_qdss.h | 77
-rw-r--r--  drivers/usb/gadget/function/f_rmnet.c | 1271
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_serial.c | 451
-rw-r--r--  drivers/usb/gadget/function/f_sourcesink.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 967
-rw-r--r--  drivers/usb/gadget/function/f_uac1_legacy.c | 1022
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 808
-rw-r--r--  drivers/usb/gadget/function/rndis.c | 114
-rw-r--r--  drivers/usb/gadget/function/rndis.h | 16
-rw-r--r--  drivers/usb/gadget/function/u_audio.c | 645
-rw-r--r--  drivers/usb/gadget/function/u_audio.h | 95
-rw-r--r--  drivers/usb/gadget/function/u_bam.c | 2521
-rw-r--r--  drivers/usb/gadget/function/u_bam_data.c | 2109
-rw-r--r--  drivers/usb/gadget/function/u_bam_data.h | 71
-rw-r--r--  drivers/usb/gadget/function/u_ctrl_qti.c | 826
-rw-r--r--  drivers/usb/gadget/function/u_data_ipa.c | 1401
-rw-r--r--  drivers/usb/gadget/function/u_data_ipa.h | 119
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 21
-rw-r--r--  drivers/usb/gadget/function/u_ether_configfs.h | 35
-rw-r--r--  drivers/usb/gadget/function/u_fs.h | 3
-rw-r--r--  drivers/usb/gadget/function/u_qc_ether.c | 454
-rw-r--r--  drivers/usb/gadget/function/u_qc_ether.h | 101
-rw-r--r--  drivers/usb/gadget/function/u_qdss.c | 128
-rw-r--r--  drivers/usb/gadget/function/u_rmnet.h | 61
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 419
-rw-r--r--  drivers/usb/gadget/function/u_serial.h | 10
-rw-r--r--  drivers/usb/gadget/function/u_uac1.h | 87
-rw-r--r--  drivers/usb/gadget/function/u_uac1_legacy.c (renamed from drivers/usb/gadget/function/u_uac1.c) | 19
-rw-r--r--  drivers/usb/gadget/function/u_uac1_legacy.h | 82
-rw-r--r--  drivers/usb/gadget/function/u_uac2.h | 2
54 files changed, 29553 insertions(+), 1736 deletions(-)
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 1cd544beef63..e04ca4b97c8e 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -32,8 +32,11 @@ usb_f_mass_storage-y := f_mass_storage.o storage_common.o
obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
usb_f_fs-y := f_fs.o
obj-$(CONFIG_USB_F_FS) += usb_f_fs.o
-usb_f_uac1-y := f_uac1.o u_uac1.o
+obj-$(CONFIG_USB_U_AUDIO) += u_audio.o
+usb_f_uac1-y := f_uac1.o
obj-$(CONFIG_USB_F_UAC1) += usb_f_uac1.o
+usb_f_uac1_legacy-y := f_uac1_legacy.o u_uac1_legacy.o
+obj-$(CONFIG_USB_F_UAC1_LEGACY) += usb_f_uac1_legacy.o
usb_f_uac2-y := f_uac2.o
obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o
usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o
@@ -52,3 +55,17 @@ usb_f_audio_source-y := f_audio_source.o
obj-$(CONFIG_USB_F_AUDIO_SRC) += usb_f_audio_source.o
usb_f_accessory-y := f_accessory.o
obj-$(CONFIG_USB_F_ACC) += usb_f_accessory.o
+usb_f_diag-y := f_diag.o
+obj-$(CONFIG_USB_F_DIAG) += usb_f_diag.o
+usb_f_gsi-y := f_gsi.o rndis.o
+obj-$(CONFIG_USB_F_GSI) += usb_f_gsi.o
+usb_f_cdev-y := f_cdev.o
+obj-$(CONFIG_USB_F_CDEV) += usb_f_cdev.o
+usb_f_qdss-y := f_qdss.o u_qdss.o
+obj-$(CONFIG_USB_F_QDSS) += usb_f_qdss.o
+usb_f_qcrndis-y := f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
+usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o
+obj-$(CONFIG_USB_F_RMNET_BAM) += usb_f_rmnet_bam.o
+usb_f_ccid-y := f_ccid.o
+obj-$(CONFIG_USB_F_CCID) += usb_f_ccid.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 925688505967..b5c1ad06f8be 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -45,6 +45,7 @@
#define MAX_INST_NAME_LEN 40
#define BULK_BUFFER_SIZE 16384
+#define BULK_BUFFER_INIT_SIZE 131072
#define ACC_STRING_SIZE 256
#define PROTOCOL_VERSION 2
@@ -56,6 +57,9 @@
#define TX_REQ_MAX 4
#define RX_REQ_MAX 2
+unsigned int acc_rx_req_len = BULK_BUFFER_INIT_SIZE;
+unsigned int acc_tx_req_len = BULK_BUFFER_INIT_SIZE;
+
struct acc_hid_dev {
struct list_head list;
struct hid_device *hid;
@@ -142,12 +146,47 @@ static struct usb_interface_descriptor acc_interface_desc = {
.bInterfaceProtocol = 0,
};
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_in_comp_desc = {
+ .bLength = sizeof(acc_superspeed_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 8,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_out_comp_desc = {
+ .bLength = sizeof(acc_superspeed_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 8,
+ /* .bmAttributes = 0, */
+};
+
+
static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
@@ -155,7 +194,7 @@ static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
@@ -186,6 +225,15 @@ static struct usb_descriptor_header *hs_acc_descs[] = {
NULL,
};
+static struct usb_descriptor_header *ss_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_in_comp_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_out_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_out_comp_desc,
+ NULL,
+};
+
static struct usb_string acc_string_defs[] = {
[INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
{ }, /* end of list */
@@ -346,6 +394,7 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
struct acc_dev *dev = ep->driver_data;
char *string_dest = NULL;
int length = req->actual;
+ unsigned long flags;
if (req->status != 0) {
pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +420,26 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
case ACCESSORY_STRING_SERIAL:
string_dest = dev->serial;
break;
+ default:
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ return;
}
- if (string_dest) {
- unsigned long flags;
- if (length >= ACC_STRING_SIZE)
- length = ACC_STRING_SIZE - 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- memcpy(string_dest, req->buf, length);
- /* ensure zero termination */
- string_dest[length] = 0;
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
- pr_err("unknown accessory string index %d\n",
- dev->string_index);
+ if (!length) {
+ pr_debug("zero length for accessory string index %d\n",
+ dev->string_index);
+ return;
}
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
@@ -559,7 +612,7 @@ static int create_bulk_endpoints(struct acc_dev *dev,
struct usb_ep *ep;
int i;
- DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+ DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
ep = usb_ep_autoconfig(cdev->gadget, in_desc);
if (!ep) {
@@ -579,18 +632,36 @@ static int create_bulk_endpoints(struct acc_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_out = ep;
+retry_tx_alloc:
/* now allocate requests for our endpoints */
for (i = 0; i < TX_REQ_MAX; i++) {
- req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = acc_request_new(dev->ep_in, acc_tx_req_len);
+ if (!req) {
+ if (acc_tx_req_len <= BULK_BUFFER_SIZE)
+ goto fail;
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ acc_tx_req_len /= 2;
+ goto retry_tx_alloc;
+ }
req->complete = acc_complete_in;
req_put(dev, &dev->tx_idle, req);
}
+
+retry_rx_alloc:
for (i = 0; i < RX_REQ_MAX; i++) {
- req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = acc_request_new(dev->ep_out, acc_rx_req_len);
+ if (!req) {
+ if (acc_rx_req_len <= BULK_BUFFER_SIZE)
+ goto fail;
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ acc_request_free(dev->rx_req[i],
+ dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
+ acc_rx_req_len /= 2;
+ goto retry_rx_alloc;
+ }
req->complete = acc_complete_out;
dev->rx_req[i] = req;
}
@@ -601,8 +672,10 @@ fail:
pr_err("acc_bind() could not allocate requests\n");
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
- for (i = 0; i < RX_REQ_MAX; i++)
+ for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
return -1;
}
@@ -611,9 +684,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
{
struct acc_dev *dev = fp->private_data;
struct usb_request *req;
- ssize_t r = count;
- ssize_t data_length;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
pr_debug("acc_read(%zu)\n", count);
@@ -623,8 +694,8 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
return -ENODEV;
}
- if (count > BULK_BUFFER_SIZE)
- count = BULK_BUFFER_SIZE;
+ if (count > acc_rx_req_len)
+ count = acc_rx_req_len;
/* we will block until we're online */
pr_debug("acc_read: waiting for online\n");
@@ -634,14 +705,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
goto done;
}
- /*
- * Calculate the data length by considering termination character.
- * Then compansite the difference of rounding up to
- * integer multiple of maxpacket size.
- */
- data_length = count;
- data_length += dev->ep_out->maxpacket - 1;
- data_length -= data_length % dev->ep_out->maxpacket;
+ len = ALIGN(count, dev->ep_out->maxpacket);
if (dev->rx_done) {
// last req cancelled. try to get it.
@@ -652,14 +716,14 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
requeue_req:
/* queue a request */
req = dev->rx_req[0];
- req->length = data_length;
+ req->length = len;
dev->rx_done = 0;
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
goto done;
} else {
- pr_debug("rx %p queue\n", req);
+ pr_debug("rx %pK queue\n", req);
}
/* wait for a request to complete */
@@ -682,7 +746,7 @@ copy_data:
if (req->actual == 0)
goto requeue_req;
- pr_debug("rx %p %u\n", req, req->actual);
+ pr_debug("rx %pK %u\n", req, req->actual);
xfer = (req->actual < count) ? req->actual : count;
r = xfer;
if (copy_to_user(buf, req->buf, xfer))
@@ -727,8 +791,8 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
break;
}
- if (count > BULK_BUFFER_SIZE) {
- xfer = BULK_BUFFER_SIZE;
+ if (count > acc_tx_req_len) {
+ xfer = acc_tx_req_len;
/* ZLP, They will be more TX requests so not yet. */
req->zero = 0;
} else {
@@ -846,6 +910,9 @@ static const struct file_operations acc_fops = {
.read = acc_read,
.write = acc_write,
.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = acc_ioctl,
+#endif
.open = acc_open,
.release = acc_release,
};
@@ -973,6 +1040,8 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
dev->audio_mode = 0;
+ strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+ strlcpy(dev->model, "Android", ACC_STRING_SIZE);
}
}
@@ -1006,7 +1075,7 @@ __acc_function_bind(struct usb_configuration *c,
int id;
int ret;
- DBG(cdev, "acc_function_bind dev: %p\n", dev);
+ DBG(cdev, "acc_function_bind dev: %pK\n", dev);
if (configfs) {
if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
@@ -1044,6 +1113,14 @@ __acc_function_bind(struct usb_configuration *c,
acc_fullspeed_out_desc.bEndpointAddress;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ acc_superspeed_in_desc.bEndpointAddress =
+ acc_fullspeed_in_desc.bEndpointAddress;
+ acc_superspeed_out_desc.bEndpointAddress =
+ acc_fullspeed_out_desc.bEndpointAddress;
+ }
+
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1099,8 +1176,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
- for (i = 0; i < RX_REQ_MAX; i++)
+ for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
acc_hid_unbind(dev);
}
@@ -1184,7 +1263,7 @@ static void acc_hid_work(struct work_struct *data)
list_for_each_safe(entry, temp, &new_list) {
hid = list_entry(entry, struct acc_hid_dev, list);
if (acc_hid_init(hid)) {
- pr_err("can't add HID device %p\n", hid);
+ pr_err("can't add HID device %pK\n", hid);
acc_hid_delete(hid);
} else {
spin_lock_irqsave(&dev->lock, flags);
@@ -1423,6 +1502,7 @@ static struct usb_function *acc_alloc(struct usb_function_instance *fi)
dev->function.strings = acc_strings,
dev->function.fs_descriptors = fs_acc_descs;
dev->function.hs_descriptors = hs_acc_descs;
+ dev->function.ss_descriptors = ss_acc_descs;
dev->function.bind = acc_function_bind_configfs;
dev->function.unbind = acc_function_unbind;
dev->function.set_alt = acc_function_set_alt;
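
The f_accessory changes above replace the fixed 16 KiB request buffers with acc_tx_req_len/acc_rx_req_len, which start at 128 KiB and are halved on allocation failure until the old 16 KiB floor is reached. A minimal userspace sketch of that halve-and-retry strategy, with malloc() standing in for acc_request_new(); the function and variable names are illustrative, not from the driver:

#include <stdio.h>
#include <stdlib.h>

#define BULK_BUFFER_SIZE      16384    /* floor, as in the driver */
#define BULK_BUFFER_INIT_SIZE 131072   /* first attempt, as in the driver */

static void *alloc_with_fallback(size_t *try_len)
{
	void *buf;

	while (!(buf = malloc(*try_len))) {
		if (*try_len <= BULK_BUFFER_SIZE)
			return NULL;    /* give up once the floor size fails */
		*try_len /= 2;          /* retry with half the size */
	}
	return buf;
}

int main(void)
{
	size_t len = BULK_BUFFER_INIT_SIZE;
	void *buf = alloc_with_fallback(&len);

	if (buf)
		printf("allocated %zu bytes\n", len);
	else
		printf("allocation failed even at %d bytes\n", BULK_BUFFER_SIZE);
	free(buf);
	return 0;
}
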
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 670a89f197cd..5819f6503f75 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -704,7 +704,7 @@ fail:
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+ ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
return status;
}
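
The accessory read path above now rounds the OUT request length up to a whole number of packets with len = ALIGN(count, dev->ep_out->maxpacket), replacing the hand-rolled rounding it deletes; the CCID bulk read later in this patch does the same. A minimal sketch of the equivalent arithmetic, assuming a power-of-two packet size (which bulk maxpacket always is); align_up is an illustrative name:

#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of a power-of-two a, as ALIGN() does. */
static inline size_t align_up(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* e.g. a 1000-byte read on a 512-byte high-speed bulk endpoint */
	assert(align_up(1000, 512) == 1024);
	/* already-aligned lengths are left untouched */
	assert(align_up(1024, 512) == 1024);
	return 0;
}
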
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 8124af33b738..7d8bfe62b148 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -369,15 +369,22 @@ static void audio_send(struct audio_dev *audio)
s64 msecs;
s64 frames;
ktime_t now;
+ unsigned long flags;
+ spin_lock_irqsave(&audio->lock, flags);
/* audio->substream will be null if we have been closed */
- if (!audio->substream)
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
/* audio->buffer_pos will be null if we have been stopped */
- if (!audio->buffer_pos)
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
runtime = audio->substream->runtime;
+ spin_unlock_irqrestore(&audio->lock, flags);
/* compute number of frames to send */
now = ktime_get();
@@ -400,8 +407,21 @@ static void audio_send(struct audio_dev *audio)
while (frames > 0) {
req = audio_req_get(audio);
- if (!req)
+ spin_lock_irqsave(&audio->lock, flags);
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&audio->lock, flags);
break;
+ }
length = frames_to_bytes(runtime, frames);
if (length > IN_EP_MAX_PACKET_SIZE)
@@ -427,6 +447,7 @@ static void audio_send(struct audio_dev *audio)
}
req->length = length;
+ spin_unlock_irqrestore(&audio->lock, flags);
ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
if (ret < 0) {
pr_err("usb_ep_queue failed ret: %d\n", ret);
@@ -570,14 +591,38 @@ static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+ if (!alt) {
+ usb_ep_disable(audio->in_ep);
+ return 0;
+ }
+
ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
- if (ret)
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("config_ep fail for audio ep ret %d\n", ret);
return ret;
+ }
+ ret = usb_ep_enable(audio->in_ep);
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("failed to enable audio ret %d\n", ret);
+ return ret;
+ }
- usb_ep_enable(audio->in_ep);
return 0;
}
+/*
+ * Because the data interface supports multiple altsettings,
+ * this audio_source function *MUST* implement a get_alt() method.
+ */
+static int audio_get_alt(struct usb_function *f, unsigned int intf)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ return audio->in_ep->enabled ? 1 : 0;
+}
+
static void audio_disable(struct usb_function *f)
{
struct audio_dev *audio = func_to_audio(f);
@@ -755,11 +800,11 @@ static int audio_pcm_close(struct snd_pcm_substream *substream)
struct audio_dev *audio = substream->private_data;
unsigned long flags;
- spin_lock_irqsave(&audio->lock, flags);
-
/* Remove the QoS request */
pm_qos_remove_request(&audio->pm_qos);
+ spin_lock_irqsave(&audio->lock, flags);
+
audio->substream = NULL;
spin_unlock_irqrestore(&audio->lock, flags);
@@ -841,6 +886,7 @@ static struct audio_dev _audio_dev = {
.bind = audio_bind,
.unbind = audio_unbind,
.set_alt = audio_set_alt,
+ .get_alt = audio_get_alt,
.setup = audio_setup,
.disable = audio_disable,
.free_func = audio_free_func,
@@ -1000,6 +1046,7 @@ static ssize_t audio_source_pcm_show(struct device *dev,
struct device *create_function_device(char *name);
+#define AUDIO_SOURCE_DEV_NAME_LENGTH 20
static struct usb_function_instance *audio_source_alloc_inst(void)
{
struct audio_source_instance *fi_audio;
@@ -1008,6 +1055,8 @@ static struct usb_function_instance *audio_source_alloc_inst(void)
struct device *dev;
void *err_ptr;
int err = 0;
+ char device_name[AUDIO_SOURCE_DEV_NAME_LENGTH];
+ static u8 count;
fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
if (!fi_audio)
@@ -1025,7 +1074,17 @@ static struct usb_function_instance *audio_source_alloc_inst(void)
config_group_init_type_name(&fi_audio->func_inst.group, "",
&audio_source_func_type);
- dev = create_function_device("f_audio_source");
+
+ if (!count) {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source");
+ count++;
+ } else {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source%d", count++);
+ }
+
+ dev = create_function_device(device_name);
if (IS_ERR(dev)) {
err_ptr = dev;
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
new file mode 100644
index 000000000000..0b335575f245
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -0,0 +1,1176 @@
+/*
+ * f_ccid.c -- CCID function Driver
+ *
+ * Copyright (c) 2011, 2013, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/usb/ccid_desc.h>
+#include <linux/usb/composite.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include "f_ccid.h"
+
+#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
+#define BULK_OUT_BUFFER_SIZE 1024
+#define CTRL_BUF_SIZE 4
+#define FUNCTION_NAME "ccid"
+#define MAX_INST_NAME_LEN 40
+#define CCID_CTRL_DEV_NAME "ccid_ctrl"
+#define CCID_BULK_DEV_NAME "ccid_bulk"
+#define CCID_NOTIFY_INTERVAL 5
+#define CCID_NOTIFY_MAXPACKET 4
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+struct ccid_ctrl_dev {
+ atomic_t opened;
+ struct list_head tx_q;
+ wait_queue_head_t tx_wait_q;
+ unsigned char buf[CTRL_BUF_SIZE];
+ int tx_ctrl_done;
+ struct miscdevice ccid_ctrl_device;
+};
+
+struct ccid_bulk_dev {
+ atomic_t error;
+ atomic_t opened;
+ atomic_t rx_req_busy;
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req;
+ int rx_done;
+ struct list_head tx_idle;
+ struct miscdevice ccid_bulk_device;
+};
+
+struct ccid_opts {
+ struct usb_function_instance func_inst;
+ struct f_ccid *ccid;
+};
+
+struct f_ccid {
+ struct usb_function function;
+ int ifc_id;
+ spinlock_t lock;
+ atomic_t online;
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ struct usb_request *notify_req;
+ struct ccid_ctrl_dev ctrl_dev;
+ struct ccid_bulk_dev bulk_dev;
+ int dtr_state;
+};
+
+static inline struct f_ccid *ctrl_dev_to_ccid(struct ccid_ctrl_dev *d)
+{
+ return container_of(d, struct f_ccid, ctrl_dev);
+}
+
+static inline struct f_ccid *bulk_dev_to_ccid(struct ccid_bulk_dev *d)
+{
+ return container_of(d, struct f_ccid, bulk_dev);
+}
+
+/* Interface Descriptor: */
+static struct usb_interface_descriptor ccid_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_CSCID,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+};
+/* CCID Class Descriptor */
+static struct usb_ccid_class_descriptor ccid_class_desc = {
+ .bLength = sizeof(ccid_class_desc),
+ .bDescriptorType = CCID_DECRIPTOR_TYPE,
+ .bcdCCID = CCID1_10,
+ .bMaxSlotIndex = 0,
+ /* This value indicates what voltages the CCID can supply to slots */
+ .bVoltageSupport = VOLTS_3_0,
+ .dwProtocols = PROTOCOL_TO,
+ /* Default ICC clock frequency in KHz */
+ .dwDefaultClock = 3580,
+ /* Maximum supported ICC clock frequency in KHz */
+ .dwMaximumClock = 3580,
+ .bNumClockSupported = 0,
+ /* Default ICC I/O data rate in bps */
+ .dwDataRate = 9600,
+ /* Maximum supported ICC I/O data rate in bps */
+ .dwMaxDataRate = 9600,
+ .bNumDataRatesSupported = 0,
+ .dwMaxIFSD = 0,
+ .dwSynchProtocols = 0,
+ .dwMechanical = 0,
+ /* This value indicates what intelligent features the CCID has */
+ .dwFeatures = CCID_FEATURES_EXC_TPDU |
+ CCID_FEATURES_AUTO_PNEGO |
+ CCID_FEATURES_AUTO_BAUD |
+ CCID_FEATURES_AUTO_CLOCK |
+ CCID_FEATURES_AUTO_VOLT |
+ CCID_FEATURES_AUTO_ACTIV |
+ CCID_FEATURES_AUTO_PCONF,
+ /* extended APDU level Message Length */
+ .dwMaxCCIDMessageLength = 0x200,
+ .bClassGetResponse = 0x0,
+ .bClassEnvelope = 0x0,
+ .wLcdLayout = 0,
+ .bPINSupport = 0,
+ .bMaxCCIDBusySlots = 1
+};
+/* Full speed support: */
+static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = 1 << CCID_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor ccid_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor ccid_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *ccid_fs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_fs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_fs_in_desc,
+ (struct usb_descriptor_header *) &ccid_fs_out_desc,
+ NULL,
+};
+
+/* High speed support: */
+static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor ccid_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ccid_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ccid_hs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_hs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_hs_in_desc,
+ (struct usb_descriptor_header *) &ccid_hs_out_desc,
+ NULL,
+};
+
+/* Super speed support: */
+static struct usb_endpoint_descriptor ccid_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_notify_comp_desc = {
+ .bLength = sizeof(ccid_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_in_comp_desc = {
+ .bLength = sizeof(ccid_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_out_comp_desc = {
+ .bLength = sizeof(ccid_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ccid_ss_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_comp_desc,
+ NULL,
+};
+
+static inline struct f_ccid *func_to_ccid(struct usb_function *f)
+{
+ return container_of(f, struct f_ccid, function);
+}
+
+static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+}
+
+static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
+ struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!list_empty(head)) {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ return req;
+}
+
+static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ switch (req->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case 0:
+ break;
+ default:
+ pr_err("CCID notify ep error %d\n", req->status);
+ }
+}
+
+static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ wake_up(&bulk_dev->write_wq);
+}
+
+static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ bulk_dev->rx_done = 1;
+ wake_up(&bulk_dev->read_wq);
+}
+
+static struct usb_request *
+ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int
+ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ if (!atomic_read(&ccid_dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_ABORT:
+ if (w_length != 0)
+ goto invalid;
+ ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
+ ctrl_dev->buf[1] = w_value & 0xFF;
+ ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
+ ctrl_dev->buf[3] = 0x00;
+ ctrl_dev->tx_ctrl_done = 1;
+ wake_up(&ctrl_dev->tx_wait_q);
+ ret = 0;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
+ *(u32 *) req->buf =
+ cpu_to_le32(ccid_class_desc.dwDefaultClock);
+ ret = min_t(u32, w_length,
+ sizeof(ccid_class_desc.dwDefaultClock));
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_DATA_RATES:
+ *(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
+ ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
+ break;
+
+ default:
+invalid:
+ pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ pr_err("ccid ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static void ccid_function_disable(struct usb_function *f)
+{
+ struct f_ccid *ccid_dev = func_to_ccid(f);
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ struct usb_request *req;
+
+ /* Disable endpoints */
+ usb_ep_disable(ccid_dev->notify);
+ usb_ep_disable(ccid_dev->in);
+ usb_ep_disable(ccid_dev->out);
+ /* Free endpoint related requests */
+ ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+ if (!atomic_read(&bulk_dev->rx_req_busy))
+ ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+ while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+ ccid_request_free(req, ccid_dev->in);
+
+ ccid_dev->dtr_state = 0;
+ atomic_set(&ccid_dev->online, 0);
+ /* Wake up threads */
+ wake_up(&bulk_dev->write_wq);
+ wake_up(&bulk_dev->read_wq);
+ wake_up(&ctrl_dev->tx_wait_q);
+
+}
+
+static int
+ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_ccid *ccid_dev = func_to_ccid(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ struct usb_request *req;
+ int ret = 0;
+ int i;
+
+ ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
+ sizeof(struct usb_ccid_notification), GFP_ATOMIC);
+ if (IS_ERR(ccid_dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
+ __func__);
+ return PTR_ERR(ccid_dev->notify_req);
+ }
+ ccid_dev->notify_req->complete = ccid_notify_complete;
+ ccid_dev->notify_req->context = ccid_dev;
+
+ /* now allocate requests for our endpoints */
+ req = ccid_request_alloc(ccid_dev->out, (unsigned)BULK_OUT_BUFFER_SIZE,
+ GFP_ATOMIC);
+ if (IS_ERR(req)) {
+ pr_err("%s: unable to allocate memory for out req\n",
+ __func__);
+ ret = PTR_ERR(req);
+ goto free_notify;
+ }
+ req->complete = ccid_bulk_complete_out;
+ req->context = ccid_dev;
+ bulk_dev->rx_req = req;
+
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = ccid_request_alloc(ccid_dev->in,
+ (unsigned)BULK_IN_BUFFER_SIZE,
+ GFP_ATOMIC);
+ if (IS_ERR(req)) {
+ pr_err("%s: unable to allocate memory for in req\n",
+ __func__);
+ ret = PTR_ERR(req);
+ goto free_bulk_out;
+ }
+ req->complete = ccid_bulk_complete_in;
+ req->context = ccid_dev;
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ }
+
+ /* choose the descriptors and enable endpoints */
+ ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
+ if (ret) {
+ ccid_dev->notify->desc = NULL;
+ pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+ __func__, ccid_dev->notify->name, ret);
+ goto free_bulk_in;
+ }
+ ret = usb_ep_enable(ccid_dev->notify);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, ccid_dev->notify->name, ret);
+ goto free_bulk_in;
+ }
+ ccid_dev->notify->driver_data = ccid_dev;
+
+ ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
+ if (ret) {
+ ccid_dev->in->desc = NULL;
+ pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+ __func__, ccid_dev->in->name, ret);
+ goto disable_ep_notify;
+ }
+ ret = usb_ep_enable(ccid_dev->in);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, ccid_dev->in->name, ret);
+ goto disable_ep_notify;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
+ if (ret) {
+ ccid_dev->out->desc = NULL;
+ pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+ __func__, ccid_dev->out->name, ret);
+ goto disable_ep_in;
+ }
+ ret = usb_ep_enable(ccid_dev->out);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, ccid_dev->out->name, ret);
+ goto disable_ep_in;
+ }
+ ccid_dev->dtr_state = 1;
+ atomic_set(&ccid_dev->online, 1);
+ return ret;
+
+disable_ep_in:
+ usb_ep_disable(ccid_dev->in);
+disable_ep_notify:
+ usb_ep_disable(ccid_dev->notify);
+ ccid_dev->notify->driver_data = NULL;
+free_bulk_in:
+ while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+ ccid_request_free(req, ccid_dev->in);
+free_bulk_out:
+ ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+free_notify:
+ ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+ return ret;
+}
+
+static void ccid_function_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ usb_free_all_descriptors(f);
+}
+
+static int ccid_function_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_ccid *ccid_dev = func_to_ccid(f);
+ struct usb_ep *ep;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+
+ ccid_dev->ifc_id = usb_interface_id(c, f);
+ if (ccid_dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d",
+ __func__, ccid_dev->ifc_id);
+ return ccid_dev->ifc_id;
+ }
+ ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+ return -ENODEV;
+ }
+ ccid_dev->notify = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
+ if (!ep) {
+ pr_err("%s: usb epin autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_in_fail;
+ }
+ ccid_dev->in = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
+ if (!ep) {
+ pr_err("%s: usb epout autoconfig failed\n", __func__);
+ ret = -ENODEV;
+ goto ep_auto_out_fail;
+ }
+ ccid_dev->out = ep;
+ ep->driver_data = cdev;
+
+ /*
+ * support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ ccid_hs_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_hs_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_hs_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
+
+
+ ccid_ss_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_ss_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_ss_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
+ ccid_ss_descs);
+ if (ret)
+ goto ep_auto_out_fail;
+
+ pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
+ gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+ ccid_dev->in->name, ccid_dev->out->name);
+
+ return 0;
+
+ep_auto_out_fail:
+ ccid_dev->out->driver_data = NULL;
+ ccid_dev->out = NULL;
+ep_auto_in_fail:
+ ccid_dev->in->driver_data = NULL;
+ ccid_dev->in = NULL;
+
+ return ret;
+}
+
+static int ccid_bulk_open(struct inode *ip, struct file *fp)
+{
+ struct ccid_bulk_dev *bulk_dev = container_of(fp->private_data,
+ struct ccid_bulk_dev,
+ ccid_bulk_device);
+ struct f_ccid *ccid_dev = bulk_dev_to_ccid(bulk_dev);
+ unsigned long flags;
+
+ pr_debug("ccid_bulk_open\n");
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+
+ if (atomic_read(&bulk_dev->opened)) {
+ pr_debug("%s: bulk device is already opened\n", __func__);
+ return -EBUSY;
+ }
+ atomic_set(&bulk_dev->opened, 1);
+ /* clear the error latch */
+ atomic_set(&bulk_dev->error, 0);
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ fp->private_data = ccid_dev;
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+ return 0;
+}
+
+static int ccid_bulk_release(struct inode *ip, struct file *fp)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+ pr_debug("ccid_bulk_release\n");
+ atomic_set(&bulk_dev->opened, 0);
+ return 0;
+}
+
+static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ struct usb_request *req;
+ int r = count, xfer, len;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("ccid_bulk_read(%zu)\n", count);
+
+ if (count > BULK_OUT_BUFFER_SIZE) {
+ pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n",
+ __func__, BULK_OUT_BUFFER_SIZE, count);
+ return -ENOMEM;
+ }
+
+ if (atomic_read(&bulk_dev->error)) {
+ r = -EIO;
+ pr_err("%s bulk_dev_error\n", __func__);
+ goto done;
+ }
+
+ len = ALIGN(count, ccid_dev->out->maxpacket);
+requeue_req:
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+ /* queue a request */
+ req = bulk_dev->rx_req;
+ req->length = len;
+ bulk_dev->rx_done = 0;
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ pr_err("%s usb ep queue failed\n", __func__);
+ atomic_set(&bulk_dev->error, 1);
+ goto done;
+ }
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
+ atomic_read(&bulk_dev->error) ||
+ !atomic_read(&ccid_dev->online));
+ if (ret < 0) {
+ atomic_set(&bulk_dev->error, 1);
+ r = ret;
+ usb_ep_dequeue(ccid_dev->out, req);
+ goto done;
+ }
+ if (!atomic_read(&bulk_dev->error)) {
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!atomic_read(&ccid_dev->online)) {
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ pr_debug("%s: USB cable not connected\n", __func__);
+ r = -ENODEV;
+ goto done;
+ }
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0) {
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ goto requeue_req;
+ }
+ if (req->actual > count)
+ pr_err("%s More data received(%d) than required(%zu)\n",
+ __func__, req->actual, count);
+ xfer = (req->actual < count) ? req->actual : count;
+ atomic_set(&bulk_dev->rx_req_busy, 1);
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ atomic_set(&bulk_dev->rx_req_busy, 0);
+ if (!atomic_read(&ccid_dev->online)) {
+ ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ pr_debug("%s: USB cable not connected\n", __func__);
+ r = -ENODEV;
+ goto done;
+ } else {
+ r = xfer;
+ }
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ } else {
+ r = -EIO;
+ }
+done:
+ pr_debug("ccid_bulk_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ struct usb_request *req = 0;
+ int r = count;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("ccid_bulk_write(%zu)\n", count);
+
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!count) {
+ pr_err("%s: zero length ctrl pkt\n", __func__);
+ return -ENODEV;
+ }
+ if (count > BULK_IN_BUFFER_SIZE) {
+ pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+ __func__, BULK_IN_BUFFER_SIZE, count);
+ return -ENOMEM;
+ }
+
+
+ /* get an idle tx request to use */
+ ret = wait_event_interruptible(bulk_dev->write_wq,
+ ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
+ atomic_read(&bulk_dev->error)));
+
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+
+ if (atomic_read(&bulk_dev->error)) {
+ pr_err(" %s dev->error\n", __func__);
+ r = -EIO;
+ goto done;
+ }
+ if (copy_from_user(req->buf, buf, count)) {
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n",
+ __func__);
+ ccid_request_free(req, ccid_dev->in);
+ r = -ENODEV;
+ } else {
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ r = -EFAULT;
+ }
+ goto done;
+ }
+ req->length = count;
+ ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
+ if (ret < 0) {
+ pr_debug("ccid_bulk_write: xfer error %d\n", ret);
+ atomic_set(&bulk_dev->error, 1);
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ r = -EIO;
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!atomic_read(&ccid_dev->online)) {
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ pr_debug("%s: USB cable not connected\n",
+ __func__);
+ while ((req = ccid_req_get(ccid_dev,
+ &bulk_dev->tx_idle)))
+ ccid_request_free(req, ccid_dev->in);
+ r = -ENODEV;
+ goto done;
+ }
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ }
+done:
+ pr_debug("ccid_bulk_write returning %d\n", r);
+ return r;
+}
+
+static const struct file_operations ccid_bulk_fops = {
+ .owner = THIS_MODULE,
+ .read = ccid_bulk_read,
+ .write = ccid_bulk_write,
+ .open = ccid_bulk_open,
+ .release = ccid_bulk_release,
+};
+
+static int ccid_bulk_device_init(struct f_ccid *dev)
+{
+ int ret;
+ struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
+
+ init_waitqueue_head(&bulk_dev->read_wq);
+ init_waitqueue_head(&bulk_dev->write_wq);
+ INIT_LIST_HEAD(&bulk_dev->tx_idle);
+
+ bulk_dev->ccid_bulk_device.name = CCID_BULK_DEV_NAME;
+ bulk_dev->ccid_bulk_device.fops = &ccid_bulk_fops;
+ bulk_dev->ccid_bulk_device.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&bulk_dev->ccid_bulk_device);
+ if (ret) {
+ pr_err("%s: failed to register misc device\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccid_ctrl_open(struct inode *inode, struct file *fp)
+{
+ struct ccid_ctrl_dev *ctrl_dev = container_of(fp->private_data,
+ struct ccid_ctrl_dev,
+ ccid_ctrl_device);
+ struct f_ccid *ccid_dev = ctrl_dev_to_ccid(ctrl_dev);
+ unsigned long flags;
+
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+ if (atomic_read(&ctrl_dev->opened)) {
+ pr_debug("%s: ctrl device is already opened\n", __func__);
+ return -EBUSY;
+ }
+ atomic_set(&ctrl_dev->opened, 1);
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ fp->private_data = ccid_dev;
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+ return 0;
+}
+
+
+static int ccid_ctrl_release(struct inode *inode, struct file *fp)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+
+ atomic_set(&ctrl_dev->opened, 0);
+
+ return 0;
+}
+
+static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ int ret = 0;
+
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+ if (count > CTRL_BUF_SIZE)
+ count = CTRL_BUF_SIZE;
+
+ ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
+ ctrl_dev->tx_ctrl_done ||
+ !atomic_read(&ccid_dev->online));
+ if (ret < 0)
+ return ret;
+ ctrl_dev->tx_ctrl_done = 0;
+
+ if (!atomic_read(&ccid_dev->online)) {
+ pr_debug("%s: USB cable not connected\n", __func__);
+ return -ENODEV;
+ }
+ ret = copy_to_user(buf, ctrl_dev->buf, count);
+ if (ret)
+ return -EFAULT;
+
+ return count;
+}
+
+static long
+ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
+{
+ struct f_ccid *ccid_dev = fp->private_data;
+ struct usb_request *req = ccid_dev->notify_req;
+ struct usb_ccid_notification *ccid_notify = req->buf;
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ switch (cmd) {
+ case CCID_NOTIFY_CARD:
+ if (copy_from_user(ccid_notify, argp,
+ sizeof(struct usb_ccid_notification)))
+ return -EFAULT;
+ req->length = 2;
+ break;
+ case CCID_NOTIFY_HWERROR:
+ if (copy_from_user(ccid_notify, argp,
+ sizeof(struct usb_ccid_notification)))
+ return -EFAULT;
+ req->length = 4;
+ break;
+ case CCID_READ_DTR:
+ if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
+ return -EFAULT;
+ return 0;
+ }
+ ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("ccid notify ep enqueue error %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct file_operations ccid_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = ccid_ctrl_open,
+ .release = ccid_ctrl_release,
+ .read = ccid_ctrl_read,
+ .unlocked_ioctl = ccid_ctrl_ioctl,
+};
+
+static int ccid_ctrl_device_init(struct f_ccid *dev)
+{
+ int ret;
+ struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+ INIT_LIST_HEAD(&ctrl_dev->tx_q);
+ init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+ ctrl_dev->ccid_ctrl_device.name = CCID_CTRL_DEV_NAME;
+ ctrl_dev->ccid_ctrl_device.fops = &ccid_ctrl_fops;
+ ctrl_dev->ccid_ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&ctrl_dev->ccid_ctrl_device);
+ if (ret) {
+ pr_err("%s: failed to register misc device\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ccid_free_func(struct usb_function *f)
+{
+ pr_debug("%s\n", __func__);
+}
+
+static int ccid_bind_config(struct f_ccid *ccid_dev)
+{
+ pr_debug("ccid_bind_config\n");
+
+ ccid_dev->function.name = FUNCTION_NAME;
+ ccid_dev->function.fs_descriptors = ccid_fs_descs;
+ ccid_dev->function.hs_descriptors = ccid_hs_descs;
+ ccid_dev->function.ss_descriptors = ccid_ss_descs;
+ ccid_dev->function.bind = ccid_function_bind;
+ ccid_dev->function.unbind = ccid_function_unbind;
+ ccid_dev->function.set_alt = ccid_function_set_alt;
+ ccid_dev->function.setup = ccid_function_setup;
+ ccid_dev->function.disable = ccid_function_disable;
+ ccid_dev->function.free_func = ccid_free_func;
+
+ return 0;
+}
+
+static struct f_ccid *ccid_setup(void)
+{
+ struct f_ccid *ccid_dev;
+ int ret;
+
+ ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
+ if (!ccid_dev) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ spin_lock_init(&ccid_dev->lock);
+
+ ret = ccid_ctrl_device_init(ccid_dev);
+ if (ret) {
+ pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
+ __func__, ret);
+ goto err_ctrl_init;
+ }
+ ret = ccid_bulk_device_init(ccid_dev);
+ if (ret) {
+ pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
+ __func__, ret);
+ goto err_bulk_init;
+ }
+
+ return ccid_dev;
+err_bulk_init:
+ misc_deregister(&ccid_dev->ctrl_dev.ccid_ctrl_device);
+err_ctrl_init:
+ kfree(ccid_dev);
+error:
+ pr_err("ccid gadget driver failed to initialize\n");
+ return ERR_PTR(ret);
+}
+
+static inline struct ccid_opts *to_ccid_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct ccid_opts,
+ func_inst.group);
+}
+
+static void ccid_attr_release(struct config_item *item)
+{
+ struct ccid_opts *opts = to_ccid_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations ccid_item_ops = {
+ .release = ccid_attr_release,
+};
+
+static struct config_item_type ccid_func_type = {
+ .ct_item_ops = &ccid_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int ccid_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ int name_len;
+ struct f_ccid *ccid;
+ struct ccid_opts *opts = container_of(fi, struct ccid_opts, func_inst);
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ ccid = ccid_setup();
+ if (IS_ERR(ccid))
+ return PTR_ERR(ccid);
+
+ opts->ccid = ccid;
+
+ return 0;
+}
+
+static void ccid_free_inst(struct usb_function_instance *f)
+{
+ struct ccid_opts *opts = container_of(f, struct ccid_opts, func_inst);
+
+ if (!opts->ccid)
+ return;
+
+ misc_deregister(&opts->ccid->ctrl_dev.ccid_ctrl_device);
+ misc_deregister(&opts->ccid->bulk_dev.ccid_bulk_device);
+
+ kfree(opts->ccid);
+ kfree(opts);
+}
+
+
+static struct usb_function_instance *ccid_alloc_inst(void)
+{
+ struct ccid_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = ccid_set_inst_name;
+ opts->func_inst.free_func_inst = ccid_free_inst;
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &ccid_func_type);
+
+ return &opts->func_inst;
+}
+
+static struct usb_function *ccid_alloc(struct usb_function_instance *fi)
+{
+ struct ccid_opts *opts;
+ int ret;
+
+ opts = container_of(fi, struct ccid_opts, func_inst);
+
+ ret = ccid_bind_config(opts->ccid);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &opts->ccid->function;
+}
+
+DECLARE_USB_FUNCTION_INIT(ccid, ccid_alloc_inst, ccid_alloc);
+MODULE_DESCRIPTION("USB CCID function Driver");
+MODULE_LICENSE("GPL v2");
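
The notify-endpoint descriptors in f_ccid.c above encode the same 32 ms polling period at every speed: full speed expresses bInterval directly in 1 ms frames (1 << CCID_NOTIFY_INTERVAL), while high/super speed use 2^(bInterval-1) microframes of 125 us, hence CCID_NOTIFY_INTERVAL + 4. A small check of that arithmetic, assuming the standard USB interval encoding:

#include <stdio.h>

#define CCID_NOTIFY_INTERVAL 5

int main(void)
{
	unsigned int fs_ms = 1u << CCID_NOTIFY_INTERVAL;        /* 32 frames = 32 ms */
	unsigned int hs_bi = CCID_NOTIFY_INTERVAL + 4;          /* bInterval = 9 */
	unsigned int hs_us = (1u << (hs_bi - 1)) * 125;         /* 256 uframes * 125 us */

	printf("full speed: %u ms, high/super speed: %u ms\n", fs_ms, hs_us / 1000);
	return 0;
}
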
diff --git a/drivers/usb/gadget/function/f_ccid.h b/drivers/usb/gadget/function/f_ccid.h
new file mode 100644
index 000000000000..935308cff0bc
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2011, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef __F_CCID_H
+#define __F_CCID_H
+
+#define PROTOCOL_TO 0x01
+#define PROTOCOL_T1 0x02
+#define ABDATA_SIZE 512
+
+/* define for dwFeatures for Smart Card Device Class Descriptors */
+/* No special characteristics */
+#define CCID_FEATURES_NADA 0x00000000
+/* Automatic parameter configuration based on ATR data */
+#define CCID_FEATURES_AUTO_PCONF 0x00000002
+/* Automatic activation of ICC on inserting */
+#define CCID_FEATURES_AUTO_ACTIV 0x00000004
+/* Automatic ICC voltage selection */
+#define CCID_FEATURES_AUTO_VOLT 0x00000008
+/* Automatic ICC clock frequency change */
+#define CCID_FEATURES_AUTO_CLOCK 0x00000010
+/* Automatic baud rate change */
+#define CCID_FEATURES_AUTO_BAUD 0x00000020
+/*Automatic parameters negotiation made by the CCID */
+#define CCID_FEATURES_AUTO_PNEGO 0x00000040
+/* Automatic PPS made by the CCID according to the active parameters */
+#define CCID_FEATURES_AUTO_PPS 0x00000080
+/* CCID can set ICC in clock stop mode */
+#define CCID_FEATURES_ICCSTOP 0x00000100
+/* NAD value other than 00 accepted (T=1 protocol in use) */
+#define CCID_FEATURES_NAD 0x00000200
+/* Automatic IFSD exchange as first exchange (T=1 protocol in use) */
+#define CCID_FEATURES_AUTO_IFSD 0x00000400
+/* TPDU level exchanges with CCID */
+#define CCID_FEATURES_EXC_TPDU 0x00010000
+/* Short APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_SAPDU 0x00020000
+/* Short and Extended APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_APDU 0x00040000
+/* USB Wake up signaling supported on card insertion and removal */
+#define CCID_FEATURES_WAKEUP 0x00100000
+
+#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification)
+#define CCID_NOTIFY_HWERROR _IOW('C', 2, struct usb_ccid_notification)
+#define CCID_READ_DTR _IOR('C', 3, int)
+
+struct usb_ccid_notification {
+ __u8 buf[4];
+} __packed;
+
+struct ccid_bulk_in_header {
+ __u8 bMessageType;
+ __u32 wLength;
+ __u8 bSlot;
+ __u8 bSeq;
+ __u8 bStatus;
+ __u8 bError;
+ __u8 bSpecific;
+ __u8 abData[ABDATA_SIZE];
+ __u8 bSizeToSend;
+} __packed;
+
+struct ccid_bulk_out_header {
+ __u8 bMessageType;
+ __u32 wLength;
+ __u8 bSlot;
+ __u8 bSeq;
+ __u8 bSpecific_0;
+ __u8 bSpecific_1;
+ __u8 bSpecific_2;
+ __u8 APDU[ABDATA_SIZE];
+} __packed;
+#endif
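
For context, a hypothetical userspace sketch of driving the CCID control node this patch adds. It assumes the misc device is exposed as /dev/ccid_ctrl; the ioctl numbers and structure mirror f_ccid.h, and the two notification bytes (a CCID RDR_to_PC_NotifySlotChange message) are illustrative only, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Local copies of the f_ccid.h definitions for userspace use. */
struct usb_ccid_notification {
	unsigned char buf[4];
};

#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification)
#define CCID_READ_DTR    _IOR('C', 3, int)

int main(void)
{
	/* illustrative slot-change payload: slot 0 present + changed */
	struct usb_ccid_notification notify = { .buf = { 0x50, 0x03 } };
	int fd, dtr = 0;

	fd = open("/dev/ccid_ctrl", O_RDWR);    /* assumed device path */
	if (fd < 0) {
		perror("open /dev/ccid_ctrl");
		return 1;
	}
	if (ioctl(fd, CCID_READ_DTR, &dtr) == 0)
		printf("DTR state: %d\n", dtr);
	if (ioctl(fd, CCID_NOTIFY_CARD, &notify) < 0)
		perror("CCID_NOTIFY_CARD");
	close(fd);
	return 0;
}
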
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
new file mode 100644
index 000000000000..233221fed424
--- /dev/null
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -0,0 +1,1847 @@
+/*
+ * Copyright (c) 2011, 2013-2018, The Linux Foundation. All rights reserved.
+ * Linux Foundation chooses to take subject only to the GPLv2 license terms,
+ * and distributes only under these terms.
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ *
+ * f_cdev_read() API implementation is using borrowed code from
+ * drivers/usb/gadget/legacy/printer.c, which is
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2006 Craig W. Nadler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <asm/ioctls.h>
+#include <asm-generic/termios.h>
+
+#define DEVICE_NAME "at_usb"
+#define MODULE_NAME "msm_usb_bridge"
+#define NUM_INSTANCE 3
+
+#define MAX_CDEV_INST_NAME 15
+#define MAX_CDEV_FUNC_NAME 5
+
+#define BRIDGE_RX_QUEUE_SIZE 8
+#define BRIDGE_RX_BUF_SIZE 2048
+#define BRIDGE_TX_QUEUE_SIZE 8
+#define BRIDGE_TX_BUF_SIZE 2048
+
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
+
+struct cserial {
+ struct usb_function func;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ struct usb_cdc_line_coding port_line_coding;
+ u8 pending;
+ u8 q_again;
+ u8 data_id;
+ u16 serial_state;
+ u16 port_handshake_bits;
+ /* control signal callbacks*/
+ unsigned int (*get_dtr)(struct cserial *p);
+ unsigned int (*get_rts)(struct cserial *p);
+
+ /* notification callbacks */
+ void (*connect)(struct cserial *p);
+ void (*disconnect)(struct cserial *p);
+ int (*send_break)(struct cserial *p, int duration);
+ unsigned int (*send_carrier_detect)(struct cserial *p, unsigned int);
+ unsigned int (*send_ring_indicator)(struct cserial *p, unsigned int);
+ int (*send_modem_ctrl_bits)(struct cserial *p, int ctrl_bits);
+
+ /* notification changes to modem */
+ void (*notify_modem)(void *port, int ctrl_bits);
+};
+
+struct f_cdev {
+ struct cdev fcdev_cdev;
+ struct device *dev;
+ unsigned port_num;
+ char name[sizeof(DEVICE_NAME) + 2];
+ int minor;
+
+ spinlock_t port_lock;
+
+ wait_queue_head_t open_wq;
+ wait_queue_head_t read_wq;
+
+ struct list_head read_pool;
+ struct list_head read_queued;
+ struct list_head write_pool;
+
+ /* current active USB RX request */
+ struct usb_request *current_rx_req;
+ /* number of pending bytes */
+ size_t pending_rx_bytes;
+ /* current USB RX buffer */
+ u8 *current_rx_buf;
+
+ struct cserial port_usb;
+
+#define ACM_CTRL_DTR 0x01
+#define ACM_CTRL_RTS 0x02
+#define ACM_CTRL_DCD 0x01
+#define ACM_CTRL_DSR 0x02
+#define ACM_CTRL_BRK 0x04
+#define ACM_CTRL_RI 0x08
+
+ unsigned cbits_to_modem;
+ bool cbits_updated;
+
+ struct workqueue_struct *fcdev_wq;
+ bool is_connected;
+ bool port_open;
+
+ unsigned long nbytes_from_host;
+ unsigned long nbytes_to_host;
+ unsigned long nbytes_to_port_bridge;
+ unsigned long nbytes_from_port_bridge;
+};
+
+struct f_cdev_opts {
+ struct usb_function_instance func_inst;
+ struct f_cdev *port;
+ char *func_name;
+ u8 port_num;
+};
+
+static int major, minors;
+struct class *fcdev_classp;
+static DEFINE_IDA(chardev_ida);
+static DEFINE_MUTEX(chardev_ida_lock);
+
+static int usb_cser_alloc_chardev_region(void);
+static void usb_cser_chardev_deinit(void);
+static void usb_cser_read_complete(struct usb_ep *ep, struct usb_request *req);
+static int usb_cser_connect(struct f_cdev *port);
+static void usb_cser_disconnect(struct f_cdev *port);
+static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
+static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
+
+static struct usb_interface_descriptor cser_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc cser_header_desc = {
+ .bLength = sizeof(cser_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+cser_call_mgmt_descriptor = {
+ .bLength = sizeof(cser_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+ .bmCapabilities = 0,
+ /* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor cser_descriptor = {
+ .bLength = sizeof(cser_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+ .bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc cser_union_desc = {
+ .bLength = sizeof(cser_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor cser_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor cser_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor cser_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *cser_fs_function[] = {
+ (struct usb_descriptor_header *) &cser_interface_desc,
+ (struct usb_descriptor_header *) &cser_header_desc,
+ (struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &cser_descriptor,
+ (struct usb_descriptor_header *) &cser_union_desc,
+ (struct usb_descriptor_header *) &cser_fs_notify_desc,
+ (struct usb_descriptor_header *) &cser_fs_in_desc,
+ (struct usb_descriptor_header *) &cser_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor cser_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_endpoint_descriptor cser_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor cser_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *cser_hs_function[] = {
+ (struct usb_descriptor_header *) &cser_interface_desc,
+ (struct usb_descriptor_header *) &cser_header_desc,
+ (struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &cser_descriptor,
+ (struct usb_descriptor_header *) &cser_union_desc,
+ (struct usb_descriptor_header *) &cser_hs_notify_desc,
+ (struct usb_descriptor_header *) &cser_hs_in_desc,
+ (struct usb_descriptor_header *) &cser_hs_out_desc,
+ NULL,
+};
+
+static struct usb_endpoint_descriptor cser_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor cser_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor cser_ss_bulk_comp_desc = {
+ .bLength = sizeof(cser_ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor cser_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_ss_ep_comp_descriptor cser_ss_notify_comp_desc = {
+ .bLength = sizeof(cser_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+
+static struct usb_descriptor_header *cser_ss_function[] = {
+ (struct usb_descriptor_header *) &cser_interface_desc,
+ (struct usb_descriptor_header *) &cser_header_desc,
+ (struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &cser_descriptor,
+ (struct usb_descriptor_header *) &cser_union_desc,
+ (struct usb_descriptor_header *) &cser_ss_notify_desc,
+ (struct usb_descriptor_header *) &cser_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &cser_ss_in_desc,
+ (struct usb_descriptor_header *) &cser_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &cser_ss_out_desc,
+ (struct usb_descriptor_header *) &cser_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string cser_string_defs[] = {
+ [0].s = "CDEV Serial",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings cser_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = cser_string_defs,
+};
+
+static struct usb_gadget_strings *usb_cser_strings[] = {
+ &cser_string_table,
+ NULL,
+};
+
+static inline struct f_cdev *func_to_port(struct usb_function *f)
+{
+ return container_of(f, struct f_cdev, port_usb.func);
+}
+
+static inline struct f_cdev *cser_to_port(struct cserial *cser)
+{
+ return container_of(cser, struct f_cdev, port_usb);
+}
+
+static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig)
+{
+ unsigned int uart_sig = 0;
+
+ acm_sig &= (ACM_CTRL_DTR | ACM_CTRL_RTS);
+ if (acm_sig & ACM_CTRL_DTR)
+ uart_sig |= TIOCM_DTR;
+
+ if (acm_sig & ACM_CTRL_RTS)
+ uart_sig |= TIOCM_RTS;
+
+ return uart_sig;
+}
+
+static void port_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_cdev *port = ep->driver_data;
+ struct usb_composite_dev *cdev = port->port_usb.func.config->cdev;
+
+ if (req->status != 0) {
+ dev_dbg(&cdev->gadget->dev, "port(%s) completion, err %d\n",
+ port->name, req->status);
+ return;
+ }
+
+ /* normal completion */
+ if (req->actual != sizeof(port->port_usb.port_line_coding)) {
+ dev_dbg(&cdev->gadget->dev, "port(%s) short resp, len %d\n",
+ port->name, req->actual);
+ usb_ep_set_halt(ep);
+ } else {
+ struct usb_cdc_line_coding *value = req->buf;
+
+ port->port_usb.port_line_coding = *value;
+ }
+}
+
+static void usb_cser_free_func(struct usb_function *f)
+{
+ /* Do nothing as cser_alloc() doesn't alloc anything. */
+}
+
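+/*
+ * ep0 class-request handler. Although the data interface is advertised as
+ * vendor-specific, the control plane reuses the CDC ACM requests:
+ * SET_LINE_CODING is stored in port_line_coding, GET_LINE_CODING echoes it
+ * back, and SET_CONTROL_LINE_STATE latches the host's DTR/RTS bits and
+ * forwards them through notify_modem().
+ */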
+static int
+usb_cser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* SET_LINE_CODING ... just read and save what the host sends */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_LINE_CODING:
+ if (w_length != sizeof(struct usb_cdc_line_coding))
+ goto invalid;
+
+ value = w_length;
+ cdev->gadget->ep0->driver_data = port;
+ req->complete = port_complete_set_line_coding;
+ break;
+
+ /* GET_LINE_CODING ... return what host sent, or initial value */
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_GET_LINE_CODING:
+ value = min_t(unsigned, w_length,
+ sizeof(struct usb_cdc_line_coding));
+ memcpy(req->buf, &port->port_usb.port_line_coding, value);
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+ value = 0;
+ port->port_usb.port_handshake_bits = w_value;
+		pr_debug("USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d RTS:%d\n",
+ w_value & ACM_CTRL_DTR ? 1 : 0,
+ w_value & ACM_CTRL_RTS ? 1 : 0);
+ if (port->port_usb.notify_modem)
+ port->port_usb.notify_modem(port, w_value);
+
+ break;
+
+ default:
+invalid:
+ dev_dbg(&cdev->gadget->dev,
+ "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ dev_dbg(&cdev->gadget->dev,
+ "port(%s) req%02x.%02x v%04x i%04x l%d\n",
+ port->name, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("port response on (%s), err %d\n",
+ port->name, value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int usb_cser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
+
+ if (port->port_usb.notify->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset port(%s)\n", port->name);
+ usb_ep_disable(port->port_usb.notify);
+ }
+
+ if (!port->port_usb.notify->desc) {
+ if (config_ep_by_speed(cdev->gadget, f,
+ port->port_usb.notify)) {
+ port->port_usb.notify->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ rc = usb_ep_enable(port->port_usb.notify);
+ if (rc) {
+ dev_err(&cdev->gadget->dev, "can't enable %s, result %d\n",
+ port->port_usb.notify->name, rc);
+ return rc;
+ }
+ port->port_usb.notify->driver_data = port;
+
+ if (port->port_usb.in->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset port(%s)\n", port->name);
+ usb_cser_disconnect(port);
+ }
+ if (!port->port_usb.in->desc || !port->port_usb.out->desc) {
+ dev_dbg(&cdev->gadget->dev,
+ "activate port(%s)\n", port->name);
+ if (config_ep_by_speed(cdev->gadget, f, port->port_usb.in) ||
+ config_ep_by_speed(cdev->gadget, f,
+ port->port_usb.out)) {
+ port->port_usb.in->desc = NULL;
+ port->port_usb.out->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ usb_cser_connect(port);
+ return rc;
+}
+
+static void usb_cser_disable(struct usb_function *f)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ dev_dbg(&cdev->gadget->dev,
+ "port(%s) deactivated\n", port->name);
+
+ usb_cser_disconnect(port);
+ usb_ep_disable(port->port_usb.notify);
+ port->port_usb.notify->driver_data = NULL;
+}
+
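+/*
+ * Queue a CDC notification on the interrupt IN endpoint. The caller-supplied
+ * payload (at most the 2 extra bytes reserved in notify_req) is appended
+ * after the usb_cdc_notification header; -ENODEV is returned once the port
+ * has been disconnected.
+ */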
+static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = port->port_usb.notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: port disconnected\n", __func__);
+ return -ENODEV;
+ }
+
+ req = port->port_usb.notify_req;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(port->port_usb.data_id);
+ notify->wLength = cpu_to_le16(length);
+ /* 2 byte data copy */
+ memcpy(buf, data, length);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ pr_err("port %s can't notify serial state, %d\n",
+ port->name, status);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb.pending = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ return status;
+}
+
+static int port_notify_serial_state(struct cserial *cser)
+{
+ struct f_cdev *port = cser_to_port(cser);
+ int status;
+ unsigned long flags;
+ struct usb_composite_dev *cdev = port->port_usb.func.config->cdev;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb.pending) {
+ port->port_usb.pending = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ dev_dbg(&cdev->gadget->dev, "port %d serial state %04x\n",
+ port->port_num, port->port_usb.serial_state);
+ status = usb_cser_notify(port, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &port->port_usb.serial_state,
+ sizeof(port->port_usb.serial_state));
+ spin_lock_irqsave(&port->port_lock, flags);
+ } else {
+ port->port_usb.q_again = true;
+ status = 0;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return status;
+}
+
+static void usb_cser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_cdev *port = req->context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb.pending = false;
+ if (req->status != -ESHUTDOWN && port->port_usb.q_again) {
+ port->port_usb.q_again = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ port_notify_serial_state(&port->port_usb);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void dun_cser_connect(struct cserial *cser)
+{
+ cser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ port_notify_serial_state(cser);
+}
+
+unsigned int dun_cser_get_dtr(struct cserial *cser)
+{
+ if (cser->port_handshake_bits & ACM_CTRL_DTR)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int dun_cser_get_rts(struct cserial *cser)
+{
+ if (cser->port_handshake_bits & ACM_CTRL_RTS)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int dun_cser_send_carrier_detect(struct cserial *cser,
+ unsigned int yes)
+{
+ u16 state;
+
+ state = cser->serial_state;
+ state &= ~ACM_CTRL_DCD;
+ if (yes)
+ state |= ACM_CTRL_DCD;
+
+ cser->serial_state = state;
+ return port_notify_serial_state(cser);
+}
+
+unsigned int dun_cser_send_ring_indicator(struct cserial *cser,
+ unsigned int yes)
+{
+ u16 state;
+
+ state = cser->serial_state;
+ state &= ~ACM_CTRL_RI;
+ if (yes)
+ state |= ACM_CTRL_RI;
+
+ cser->serial_state = state;
+ return port_notify_serial_state(cser);
+}
+
+static void dun_cser_disconnect(struct cserial *cser)
+{
+ cser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ port_notify_serial_state(cser);
+}
+
+static int dun_cser_send_break(struct cserial *cser, int duration)
+{
+ u16 state;
+
+ state = cser->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ cser->serial_state = state;
+ return port_notify_serial_state(cser);
+}
+
+static int dun_cser_send_ctrl_bits(struct cserial *cser, int ctrl_bits)
+{
+ cser->serial_state = ctrl_bits;
+ return port_notify_serial_state(cser);
+}
+
+static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+}
+
+static void usb_cser_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del_init(&req->list);
+ usb_cser_free_req(ep, req);
+ }
+}
+
+static struct usb_request *
+usb_cser_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("usb alloc request failed\n");
+		return NULL;
+ }
+
+ req->length = len;
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ pr_err("request buf allocation failed\n");
+ usb_ep_free_request(ep, req);
+		return NULL;
+ }
+
+ return req;
+}
+
+static int usb_cser_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_cdev *port = func_to_port(f);
+ int status;
+ struct usb_ep *ep;
+
+ if (cser_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ cser_string_defs[0].id = status;
+ }
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ port->port_usb.data_id = status;
+ cser_interface_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_in_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.in = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_out_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.out = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.notify = ep;
+ ep->driver_data = cdev;
+ /* allocate notification */
+ port->port_usb.notify_req = usb_cser_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2, GFP_KERNEL);
+ if (!port->port_usb.notify_req)
+ goto fail;
+
+ port->port_usb.notify_req->complete = usb_cser_notify_complete;
+ port->port_usb.notify_req->context = port;
+
+ cser_hs_in_desc.bEndpointAddress = cser_fs_in_desc.bEndpointAddress;
+ cser_hs_out_desc.bEndpointAddress = cser_fs_out_desc.bEndpointAddress;
+
+ cser_ss_in_desc.bEndpointAddress = cser_fs_in_desc.bEndpointAddress;
+ cser_ss_out_desc.bEndpointAddress = cser_fs_out_desc.bEndpointAddress;
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ cser_hs_notify_desc.bEndpointAddress =
+ cser_fs_notify_desc.bEndpointAddress;
+ }
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ cser_ss_notify_desc.bEndpointAddress =
+ cser_fs_notify_desc.bEndpointAddress;
+ }
+
+ status = usb_assign_descriptors(f, cser_fs_function, cser_hs_function,
+ cser_ss_function);
+ if (status)
+ goto fail;
+
+ dev_dbg(&cdev->gadget->dev, "usb serial port(%d): %s speed IN/%s OUT/%s\n",
+ port->port_num,
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ port->port_usb.in->name, port->port_usb.out->name);
+ return 0;
+
+fail:
+ if (port->port_usb.notify_req)
+ usb_cser_free_req(port->port_usb.notify,
+ port->port_usb.notify_req);
+
+ if (port->port_usb.notify)
+ port->port_usb.notify->driver_data = NULL;
+ if (port->port_usb.out)
+ port->port_usb.out->driver_data = NULL;
+ if (port->port_usb.in)
+ port->port_usb.in->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+ return status;
+}
+
+static void cser_free_inst(struct usb_function_instance *fi)
+{
+ struct f_cdev_opts *opts;
+
+ opts = container_of(fi, struct f_cdev_opts, func_inst);
+
+ if (opts->port) {
+ device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
+ cdev_del(&opts->port->fcdev_cdev);
+ }
+ usb_cser_chardev_deinit();
+ kfree(opts->func_name);
+ kfree(opts->port);
+ kfree(opts);
+}
+
+static void usb_cser_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_cdev *port = func_to_port(f);
+
+ usb_free_all_descriptors(f);
+ usb_cser_free_req(port->port_usb.notify, port->port_usb.notify_req);
+}
+
+static int usb_cser_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num, int size,
+ void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("ep:%pK head:%pK num:%d size:%d cb:%pK",
+ ep, head, num, size, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_cser_alloc_req(ep, size, GFP_ATOMIC);
+ if (!req) {
+ pr_debug("req allocated:%d\n", i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add_tail(&req->list, head);
+ }
+
+ return 0;
+}
+
+static void usb_cser_start_rx(struct f_cdev *port)
+{
+ struct list_head *pool;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("start RX(USB OUT)\n");
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!(port->is_connected && port->port_open)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("can't start rx.\n");
+ return;
+ }
+
+ pool = &port->read_pool;
+ ep = port->port_usb.out;
+
+ while (!list_empty(pool)) {
+ struct usb_request *req;
+
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del_init(&req->list);
+ req->length = BRIDGE_RX_BUF_SIZE;
+ req->complete = usb_cser_read_complete;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_KERNEL);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ pr_err("port(%d):%pK usb ep(%s) queue failed\n",
+ port->port_num, port, ep->name);
+ list_add(&req->list, pool);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void usb_cser_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_cdev *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("ep:(%pK)(%s) port:%pK req_status:%d req->actual:%u\n",
+ ep, ep->name, port, req->status, req->actual);
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_open || req->status || !req->actual) {
+ list_add_tail(&req->list, &port->read_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->nbytes_from_host += req->actual;
+ list_add_tail(&req->list, &port->read_queued);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ wake_up(&port->read_wq);
+ return;
+}
+
+static void usb_cser_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ unsigned long flags;
+ struct f_cdev *port = ep->driver_data;
+
+ pr_debug("ep:(%pK)(%s) port:%pK req_stats:%d\n",
+ ep, ep->name, port, req->status);
+
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host += req->actual;
+ list_add_tail(&req->list, &port->write_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ switch (req->status) {
+ default:
+ pr_debug("unexpected %s status %d\n", ep->name, req->status);
+ /* FALL THROUGH */
+ case 0:
+ /* normal completion */
+ break;
+
+ case -ESHUTDOWN:
+ /* disconnect */
+ pr_debug("%s shutdown\n", ep->name);
+ break;
+ }
+
+ return;
+}
+
+static void usb_cser_start_io(struct f_cdev *port)
+{
+ int ret = -ENODEV;
+ unsigned long flags;
+
+ pr_debug("port: %pK\n", port);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected)
+ goto start_io_out;
+
+ port->current_rx_req = NULL;
+ port->pending_rx_bytes = 0;
+ port->current_rx_buf = NULL;
+
+ ret = usb_cser_alloc_requests(port->port_usb.out,
+ &port->read_pool,
+ BRIDGE_RX_QUEUE_SIZE, BRIDGE_RX_BUF_SIZE,
+ usb_cser_read_complete);
+ if (ret) {
+ pr_err("unable to allocate out requests\n");
+ goto start_io_out;
+ }
+
+ ret = usb_cser_alloc_requests(port->port_usb.in,
+ &port->write_pool,
+ BRIDGE_TX_QUEUE_SIZE, BRIDGE_TX_BUF_SIZE,
+ usb_cser_write_complete);
+ if (ret) {
+ usb_cser_free_requests(port->port_usb.out, &port->read_pool);
+ pr_err("unable to allocate IN requests\n");
+ goto start_io_out;
+ }
+
+start_io_out:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (ret)
+ return;
+
+ usb_cser_start_rx(port);
+}
+
+static void usb_cser_stop_io(struct f_cdev *port)
+{
+ struct usb_ep *in;
+ struct usb_ep *out;
+ unsigned long flags;
+
+ pr_debug("port:%pK\n", port);
+
+ in = port->port_usb.in;
+ out = port->port_usb.out;
+
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(out);
+ out->driver_data = NULL;
+ usb_ep_disable(in);
+ in->driver_data = NULL;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->current_rx_req != NULL) {
+ kfree(port->current_rx_req->buf);
+ usb_ep_free_request(out, port->current_rx_req);
+ }
+
+ port->pending_rx_bytes = 0;
+ port->current_rx_buf = NULL;
+ usb_cser_free_requests(out, &port->read_queued);
+ usb_cser_free_requests(out, &port->read_pool);
+ usb_cser_free_requests(in, &port->write_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
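+/*
+ * Character device open. Only one opener is allowed per port; the call blocks
+ * (interruptibly) until the USB side is connected, then marks the port open
+ * and queues the OUT requests so data can start flowing.
+ */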
+int f_cdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ unsigned long flags;
+ struct f_cdev *port;
+
+ port = container_of(inode->i_cdev, struct f_cdev, fcdev_cdev);
+ if (!port) {
+ pr_err("Port is NULL.\n");
+ return -EINVAL;
+ }
+
+ if (port && port->port_open) {
+ pr_err("port is already opened.\n");
+ return -EBUSY;
+ }
+
+ file->private_data = port;
+ pr_debug("opening port(%s)(%pK)\n", port->name, port);
+ ret = wait_event_interruptible(port->open_wq,
+ port->is_connected);
+ if (ret) {
+ pr_debug("open interrupted.\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_open = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_cser_start_rx(port);
+
+	pr_debug("port(%s)(%pK) opened successfully\n", port->name, port);
+
+ return 0;
+}
+
+int f_cdev_release(struct inode *inode, struct file *file)
+{
+ unsigned long flags;
+ struct f_cdev *port;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_open = false;
+ port->cbits_updated = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("port(%s)(%pK) is closed.\n", port->name, port);
+
+ return 0;
+}
+
+ssize_t f_cdev_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long flags;
+ struct f_cdev *port;
+ struct usb_request *req;
+ struct list_head *pool;
+ struct usb_request *current_rx_req;
+ size_t pending_rx_bytes, bytes_copied = 0, size;
+ u8 *current_rx_buf;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("read on port(%s)(%pK) count:%zu\n", port->name, port, count);
+ spin_lock_irqsave(&port->port_lock, flags);
+ current_rx_req = port->current_rx_req;
+ pending_rx_bytes = port->pending_rx_bytes;
+ current_rx_buf = port->current_rx_buf;
+ port->current_rx_req = NULL;
+ port->current_rx_buf = NULL;
+ port->pending_rx_bytes = 0;
+ bytes_copied = 0;
+
+ if (list_empty(&port->read_queued) && !pending_rx_bytes) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s(): read_queued list is empty.\n", __func__);
+ goto start_rx;
+ }
+
+ /*
+ * Consider below cases:
+ * 1. If available read buffer size (i.e. count value) is greater than
+ * available data as part of one USB OUT request buffer, then consider
+ * copying multiple USB OUT request buffers until read buffer is filled.
+ * 2. If available read buffer size (i.e. count value) is smaller than
+ * available data as part of one USB OUT request buffer, then copy this
+ * buffer data across multiple read() call until whole USB OUT request
+ * buffer is copied.
+ */
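+	/*
+	 * Illustrative numbers (hypothetical sizes): with count = 4096 and
+	 * two queued 2048-byte OUT requests, case 1 applies and both buffers
+	 * are drained by this single read(). With count = 512 against one
+	 * 2048-byte request, case 2 applies: 512 bytes are returned now and
+	 * the remaining 1536 bytes stay in current_rx_buf for the next
+	 * read() call.
+	 */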
+ while ((pending_rx_bytes || !list_empty(&port->read_queued)) && count) {
+ if (pending_rx_bytes == 0) {
+ pool = &port->read_queued;
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del_init(&req->list);
+ current_rx_req = req;
+ pending_rx_bytes = req->actual;
+ current_rx_buf = req->buf;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ size = count;
+ if (size > pending_rx_bytes)
+ size = pending_rx_bytes;
+
+ pr_debug("pending_rx_bytes:%zu count:%zu size:%zu\n",
+ pending_rx_bytes, count, size);
+ size -= copy_to_user(buf, current_rx_buf, size);
+ port->nbytes_to_port_bridge += size;
+ bytes_copied += size;
+ count -= size;
+ buf += size;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ list_add_tail(&current_rx_req->list, &port->read_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -EAGAIN;
+ }
+
+ /*
+ * partial data available, then update pending_rx_bytes,
+ * otherwise add USB request back to read_pool for next data.
+ */
+ if (size < pending_rx_bytes) {
+ pending_rx_bytes -= size;
+ current_rx_buf += size;
+ } else {
+ list_add_tail(&current_rx_req->list, &port->read_pool);
+ pending_rx_bytes = 0;
+ current_rx_req = NULL;
+ current_rx_buf = NULL;
+ }
+ }
+
+ port->pending_rx_bytes = pending_rx_bytes;
+ port->current_rx_buf = current_rx_buf;
+ port->current_rx_req = current_rx_req;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+start_rx:
+ usb_cser_start_rx(port);
+ return bytes_copied;
+}
+
+ssize_t f_cdev_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned long flags;
+ struct f_cdev *port;
+ struct usb_request *req;
+ struct list_head *pool;
+ unsigned xfer_size;
+ struct usb_ep *in;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ pr_debug("write on port(%s)(%pK)\n", port->name, port);
+
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: cable is disconnected.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (list_empty(&port->write_pool)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: Request list is empty.\n", __func__);
+ return 0;
+ }
+
+ in = port->port_usb.in;
+ pool = &port->write_pool;
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del_init(&req->list);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: write buf size:%zu\n", __func__, count);
+ if (count > BRIDGE_TX_BUF_SIZE)
+ xfer_size = BRIDGE_TX_BUF_SIZE;
+ else
+ xfer_size = count;
+
+ ret = copy_from_user(req->buf, buf, xfer_size);
+ if (ret) {
+ pr_err("copy_from_user failed: err %d\n", ret);
+ ret = -EFAULT;
+ } else {
+ req->length = xfer_size;
+ req->zero = 1;
+ ret = usb_ep_queue(in, req, GFP_KERNEL);
+ if (ret) {
+ pr_err("EP QUEUE failed:%d\n", ret);
+ ret = -EIO;
+ goto err_exit;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_from_port_bridge += req->length;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+err_exit:
+ if (ret) {
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* USB cable is connected, add it back otherwise free request */
+ if (port->is_connected)
+ list_add(&req->list, &port->write_pool);
+ else
+ usb_cser_free_req(in, req);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return ret;
+ }
+
+ return xfer_size;
+}
+
+static unsigned int f_cdev_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct f_cdev *port;
+ unsigned long flags;
+
+ port = file->private_data;
+ if (port && port->is_connected) {
+ poll_wait(file, &port->read_wq, wait);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!list_empty(&port->read_queued)) {
+ mask |= POLLIN | POLLRDNORM;
+ pr_debug("sets POLLIN for %s\n", port->name);
+ }
+
+ if (port->cbits_updated) {
+ mask |= POLLPRI;
+ pr_debug("sets POLLPRI for %s\n", port->name);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ } else {
+		pr_err("port is NULL or disconnected\n");
+ mask = POLLERR;
+ }
+
+ return mask;
+}
+
+static int f_cdev_tiocmget(struct f_cdev *port)
+{
+ struct cserial *cser;
+ unsigned int result = 0;
+
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -ENODEV;
+ }
+
+ cser = &port->port_usb;
+ if (cser->get_dtr)
+ result |= (cser->get_dtr(cser) ? TIOCM_DTR : 0);
+
+ if (cser->get_rts)
+ result |= (cser->get_rts(cser) ? TIOCM_RTS : 0);
+
+ if (cser->serial_state & TIOCM_CD)
+ result |= TIOCM_CD;
+
+ if (cser->serial_state & TIOCM_RI)
+ result |= TIOCM_RI;
+ return result;
+}
+
+static int f_cdev_tiocmset(struct f_cdev *port,
+ unsigned int set, unsigned int clear)
+{
+ struct cserial *cser;
+ int status = 0;
+
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -ENODEV;
+ }
+
+ cser = &port->port_usb;
+ if (set & TIOCM_RI) {
+ if (cser->send_ring_indicator) {
+ cser->serial_state |= TIOCM_RI;
+ status = cser->send_ring_indicator(cser, 1);
+ }
+ }
+ if (clear & TIOCM_RI) {
+ if (cser->send_ring_indicator) {
+ cser->serial_state &= ~TIOCM_RI;
+ status = cser->send_ring_indicator(cser, 0);
+ }
+ }
+ if (set & TIOCM_CD) {
+ if (cser->send_carrier_detect) {
+ cser->serial_state |= TIOCM_CD;
+ status = cser->send_carrier_detect(cser, 1);
+ }
+ }
+ if (clear & TIOCM_CD) {
+ if (cser->send_carrier_detect) {
+ cser->serial_state &= ~TIOCM_CD;
+ status = cser->send_carrier_detect(cser, 0);
+ }
+ }
+
+ return status;
+}
+
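+/*
+ * ioctl handling: TIOCMGET reports DTR/RTS as driven by the host (via the
+ * get_dtr/get_rts callbacks) together with the locally driven CD/RI bits,
+ * while TIOCMSET/TIOCMBIS/TIOCMBIC only let userspace drive CD and RI, which
+ * are pushed to the host as CDC SERIAL_STATE notifications.
+ */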
+static long f_cdev_ioctl(struct file *fp, unsigned cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ int i = 0;
+ uint32_t val;
+ struct f_cdev *port;
+
+ port = fp->private_data;
+ if (!port) {
+ pr_err("port is null.\n");
+ return POLLERR;
+ }
+
+ switch (cmd) {
+ case TIOCMBIC:
+ case TIOCMBIS:
+ case TIOCMSET:
+ pr_debug("TIOCMSET on port(%s)%pK\n", port->name, port);
+ i = get_user(val, (uint32_t *)arg);
+ if (i) {
+ pr_err("Error getting TIOCMSET value\n");
+ return i;
+ }
+ ret = f_cdev_tiocmset(port, val, ~val);
+ break;
+ case TIOCMGET:
+ pr_debug("TIOCMGET on port(%s)%pK\n", port->name, port);
+ ret = f_cdev_tiocmget(port);
+ if (ret >= 0) {
+ ret = put_user(ret, (uint32_t *)arg);
+ port->cbits_updated = false;
+ }
+ break;
+ default:
+ pr_err("Received cmd:%d not supported\n", cmd);
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+static void usb_cser_notify_modem(void *fport, int ctrl_bits)
+{
+ int temp;
+ struct f_cdev *port = fport;
+
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ pr_debug("port(%s): ctrl_bits:%x\n", port->name, ctrl_bits);
+
+ temp = convert_acm_sigs_to_uart(ctrl_bits);
+
+ if (temp == port->cbits_to_modem)
+ return;
+
+ port->cbits_to_modem = temp;
+ port->cbits_updated = true;
+
+ wake_up(&port->read_wq);
+}
+
+int usb_cser_connect(struct f_cdev *port)
+{
+ unsigned long flags;
+ int ret;
+ struct cserial *cser;
+
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -ENODEV;
+ }
+
+ pr_debug("port(%s) (%pK)\n", port->name, port);
+
+ cser = &port->port_usb;
+ cser->notify_modem = usb_cser_notify_modem;
+
+ ret = usb_ep_enable(cser->in);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:IN ep:%pK, err:%d",
+ cser->in, ret);
+ return ret;
+ }
+ cser->in->driver_data = port;
+
+ ret = usb_ep_enable(cser->out);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:OUT ep:%pK, err: %d",
+ cser->out, ret);
+ cser->in->driver_data = 0;
+ return ret;
+ }
+ cser->out->driver_data = port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ cser->pending = false;
+ cser->q_again = false;
+ port->is_connected = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ usb_cser_start_io(port);
+ wake_up(&port->open_wq);
+ return 0;
+}
+
+void usb_cser_disconnect(struct f_cdev *port)
+{
+ unsigned long flags;
+
+ usb_cser_stop_io(port);
+
+ /* lower DTR to modem */
+ usb_cser_notify_modem(port, 0);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->is_connected = false;
+ port->nbytes_from_host = port->nbytes_to_host = 0;
+ port->nbytes_to_port_bridge = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static const struct file_operations f_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = f_cdev_open,
+ .release = f_cdev_release,
+ .read = f_cdev_read,
+ .write = f_cdev_write,
+ .poll = f_cdev_poll,
+ .unlocked_ioctl = f_cdev_ioctl,
+ .compat_ioctl = f_cdev_ioctl,
+};
+
+static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
+{
+ int ret;
+ dev_t dev;
+ struct device *device;
+ struct f_cdev *port;
+
+ port = kzalloc(sizeof(struct f_cdev), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&chardev_ida_lock);
+ if (idr_is_empty(&chardev_ida.idr)) {
+ ret = usb_cser_alloc_chardev_region();
+ if (ret) {
+ mutex_unlock(&chardev_ida_lock);
+ pr_err("alloc chardev failed\n");
+ goto err_alloc_chardev;
+ }
+ }
+
+ ret = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
+ if (ret >= NUM_INSTANCE) {
+ ida_simple_remove(&chardev_ida, ret);
+ mutex_unlock(&chardev_ida_lock);
+ ret = -ENODEV;
+ goto err_get_ida;
+ }
+
+ port->port_num = portno;
+ port->minor = ret;
+ mutex_unlock(&chardev_ida_lock);
+
+ snprintf(port->name, sizeof(port->name), "%s%d", DEVICE_NAME, portno);
+ spin_lock_init(&port->port_lock);
+
+ init_waitqueue_head(&port->open_wq);
+ init_waitqueue_head(&port->read_wq);
+ INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queued);
+ INIT_LIST_HEAD(&port->write_pool);
+
+ port->fcdev_wq = create_singlethread_workqueue(port->name);
+ if (!port->fcdev_wq) {
+ pr_err("Unable to create workqueue fcdev_wq for port:%s\n",
+ port->name);
+ ret = -ENOMEM;
+ goto err_get_ida;
+ }
+
+ /* create char device */
+ cdev_init(&port->fcdev_cdev, &f_cdev_fops);
+ dev = MKDEV(major, port->minor);
+ ret = cdev_add(&port->fcdev_cdev, dev, 1);
+ if (ret) {
+ pr_err("Failed to add cdev for port(%s)\n", port->name);
+ goto err_cdev_add;
+ }
+
+ device = device_create(fcdev_classp, NULL, dev, NULL, port->name);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ goto err_create_dev;
+ }
+
+ pr_info("port_name:%s (%pK) portno:(%d)\n",
+ port->name, port, port->port_num);
+ return port;
+
+err_create_dev:
+ cdev_del(&port->fcdev_cdev);
+err_cdev_add:
+ destroy_workqueue(port->fcdev_wq);
+err_get_ida:
+ usb_cser_chardev_deinit();
+err_alloc_chardev:
+ kfree(port);
+
+ return ERR_PTR(ret);
+}
+
+static void usb_cser_chardev_deinit(void)
+{
+	if (idr_is_empty(&chardev_ida.idr)) {
+ if (major) {
+ unregister_chrdev_region(MKDEV(major, 0), minors);
+ major = minors = 0;
+ }
+
+ if (!IS_ERR_OR_NULL(fcdev_classp))
+ class_destroy(fcdev_classp);
+ }
+}
+
+static int usb_cser_alloc_chardev_region(void)
+{
+ int ret;
+ dev_t dev;
+
+ ret = alloc_chrdev_region(&dev,
+ 0,
+ NUM_INSTANCE,
+ MODULE_NAME);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("alloc_chrdev_region() failed ret:%i\n", ret);
+ return ret;
+ }
+
+ major = MAJOR(dev);
+ minors = NUM_INSTANCE;
+
+	fcdev_classp = class_create(THIS_MODULE, MODULE_NAME);
+	if (IS_ERR(fcdev_classp)) {
+		ret = PTR_ERR(fcdev_classp);
+		pr_err("class_create() failed ret:%d\n", ret);
+		unregister_chrdev_region(dev, NUM_INSTANCE);
+		major = minors = 0;
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline struct f_cdev_opts *to_f_cdev_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_cdev_opts,
+ func_inst.group);
+}
+
+static struct f_cdev_opts *to_fi_cdev_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_cdev_opts, func_inst);
+}
+
+static void cserial_attr_release(struct config_item *item)
+{
+ struct f_cdev_opts *opts = to_f_cdev_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations cserial_item_ops = {
+ .release = cserial_attr_release,
+};
+
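+/*
+ * configfs "status" attribute: reading dumps the per-port byte counters and
+ * the open/closed state, writing 0 clears the counters. Illustrative shell
+ * usage; the configfs mount point and instance path are assumptions:
+ *
+ *	cat /config/usb_gadget/g1/functions/cser.dun.0/status
+ *	echo 0 > /config/usb_gadget/g1/functions/cser.dun.0/status
+ */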
+static ssize_t usb_cser_status_show(struct config_item *item, char *page)
+{
+ struct f_cdev *port = to_f_cdev_opts(item)->port;
+ char *buf;
+ unsigned long flags;
+ int temp = 0;
+ int ret;
+
+ buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ temp += scnprintf(buf + temp, 512 - temp,
+ "###PORT:%s###\n"
+ "port_no:%d\n"
+ "func:%s\n"
+ "nbytes_to_host: %lu\n"
+ "nbytes_from_host: %lu\n"
+ "nbytes_to_port_bridge: %lu\n"
+ "nbytes_from_port_bridge: %lu\n"
+ "cbits_to_modem: %u\n"
+ "Port Opened: %s\n",
+ port->name,
+ port->port_num,
+ to_f_cdev_opts(item)->func_name,
+ port->nbytes_to_host,
+ port->nbytes_from_host,
+ port->nbytes_to_port_bridge,
+ port->nbytes_from_port_bridge,
+ port->cbits_to_modem,
+ (port->port_open ? "Opened" : "Closed"));
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+	ret = scnprintf(page, PAGE_SIZE, "%s", buf);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t usb_cser_status_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_cdev *port = to_f_cdev_opts(item)->port;
+ unsigned long flags;
+ u8 stats;
+
+ if (page == NULL) {
+ pr_err("Invalid buffer");
+ return len;
+ }
+
+ if (kstrtou8(page, 0, &stats) != 0 || stats != 0) {
+		pr_err("Wrong value (%u); enter 0 to clear\n", stats);
+ return len;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host = port->nbytes_from_host = 0;
+ port->nbytes_to_port_bridge = port->nbytes_from_port_bridge = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return len;
+}
+
+CONFIGFS_ATTR(usb_cser_, status);
+static struct configfs_attribute *cserial_attrs[] = {
+ &usb_cser_attr_status,
+ NULL,
+};
+
+static struct config_item_type cserial_func_type = {
+ .ct_item_ops = &cserial_item_ops,
+ .ct_attrs = cserial_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
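+/*
+ * Parse the configfs instance name: the text before the first '.' selects
+ * the logical function (for example "dun") and the digits after the last '.'
+ * select the port number, so an instance name such as "dun.0" registers a
+ * character device named at_usb0 (DEVICE_NAME plus the port number). How the
+ * full configfs directory name is composed depends on the gadget configfs
+ * layout and is not assumed here.
+ */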
+static int cser_set_inst_name(struct usb_function_instance *f, const char *name)
+{
+ struct f_cdev_opts *opts =
+ container_of(f, struct f_cdev_opts, func_inst);
+ char *ptr, *str;
+ size_t name_len, str_size;
+ int ret;
+ struct f_cdev *port;
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_CDEV_INST_NAME)
+ return -ENAMETOOLONG;
+
+ /* expect name as cdev.<func>.<port_num> */
+ str = strnchr(name, strlen(name), '.');
+ if (!str) {
+ pr_err("invalid input (%s)\n", name);
+ return -EINVAL;
+ }
+
+ /* get function name */
+ str_size = name_len - strlen(str);
+ if (str_size > MAX_CDEV_FUNC_NAME)
+ return -ENAMETOOLONG;
+
+ ptr = kstrndup(name, str_size - 1, GFP_KERNEL);
+ if (!ptr) {
+		pr_err("memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ opts->func_name = ptr;
+
+ /* get port number */
+ str = strrchr(name, '.');
+ if (!str) {
+ pr_err("err: port number not found\n");
+ return -EINVAL;
+ }
+ pr_debug("str:%s\n", str);
+
+ *str = '\0';
+ str++;
+
+ ret = kstrtou8(str, 0, &opts->port_num);
+ if (ret) {
+		pr_err("error: unable to get port number\n");
+ return -EINVAL;
+ }
+
+ pr_debug("gser: port_num:%d func_name:%s\n",
+ opts->port_num, opts->func_name);
+
+ port = f_cdev_alloc(opts->func_name, opts->port_num);
+ if (IS_ERR(port)) {
+ pr_err("Failed to create cdev port(%d)\n", opts->port_num);
+ return -ENOMEM;
+ }
+
+ opts->port = port;
+
+	/* control-signal handling is set up only for the DUN function */
+ if (!strcmp(opts->func_name, "dun")) {
+ port->port_usb.connect = dun_cser_connect;
+ port->port_usb.get_dtr = dun_cser_get_dtr;
+ port->port_usb.get_rts = dun_cser_get_rts;
+ port->port_usb.send_carrier_detect =
+ dun_cser_send_carrier_detect;
+ port->port_usb.send_ring_indicator =
+ dun_cser_send_ring_indicator;
+ port->port_usb.send_modem_ctrl_bits = dun_cser_send_ctrl_bits;
+ port->port_usb.disconnect = dun_cser_disconnect;
+ port->port_usb.send_break = dun_cser_send_break;
+ }
+
+ return 0;
+}
+
+static struct usb_function_instance *cser_alloc_inst(void)
+{
+ struct f_cdev_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.free_func_inst = cser_free_inst;
+ opts->func_inst.set_inst_name = cser_set_inst_name;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &cserial_func_type);
+ return &opts->func_inst;
+}
+
+static struct usb_function *cser_alloc(struct usb_function_instance *fi)
+{
+ struct f_cdev_opts *opts = to_fi_cdev_opts(fi);
+ struct f_cdev *port = opts->port;
+
+ port->port_usb.func.name = "cser";
+ port->port_usb.func.strings = usb_cser_strings;
+ port->port_usb.func.bind = usb_cser_bind;
+ port->port_usb.func.unbind = usb_cser_unbind;
+ port->port_usb.func.set_alt = usb_cser_set_alt;
+ port->port_usb.func.disable = usb_cser_disable;
+ port->port_usb.func.setup = usb_cser_setup;
+ port->port_usb.func.free_func = usb_cser_free_func;
+
+ return &port->port_usb.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(cser, cser_alloc_inst, cser_alloc);
+MODULE_DESCRIPTION("USB Serial Character Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
new file mode 100644
index 000000000000..72f22a469ff1
--- /dev/null
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -0,0 +1,1116 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+
+#include <linux/usb/usbdiag.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
+#include <linux/qcom/diag_dload.h>
+
+#define MAX_INST_NAME_LEN 40
+
+/* for configfs support */
+struct diag_opts {
+ struct usb_function_instance func_inst;
+ char *name;
+};
+
+static inline struct diag_opts *to_diag_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct diag_opts,
+ func_inst.group);
+}
+
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+static struct dload_struct __iomem *diag_dload;
+
+static struct usb_interface_descriptor intf_desc = {
+ .bLength = sizeof intf_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0xFF,
+ .bInterfaceSubClass = 0xFF,
+ .bInterfaceProtocol = 0x30,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .bInterval = 0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+ .bInterval = 0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .bInterval = 0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(64),
+ .bInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
+ .bLength = sizeof ss_bulk_in_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ss_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
+ .bLength = sizeof ss_bulk_out_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *fs_diag_desc[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &fs_bulk_in_desc,
+ (struct usb_descriptor_header *) &fs_bulk_out_desc,
+ NULL,
+};
+static struct usb_descriptor_header *hs_diag_desc[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &hs_bulk_in_desc,
+ (struct usb_descriptor_header *) &hs_bulk_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *ss_diag_desc[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &ss_bulk_in_desc,
+ (struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
+ (struct usb_descriptor_header *) &ss_bulk_out_desc,
+ (struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
+ NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @lock: Spinlock to protect the read_pool and write_pool lists
+ * @cdev: USB composite device struct
+ * @ch: USB diag channel
+ *
+ */
+struct diag_context {
+ struct usb_function function;
+ struct usb_ep *out;
+ struct usb_ep *in;
+ struct list_head read_pool;
+ struct list_head write_pool;
+ spinlock_t lock;
+ unsigned configured;
+ struct usb_composite_dev *cdev;
+ struct usb_diag_ch *ch;
+ struct kref kref;
+
+ /* pkt counters */
+ unsigned long dpkts_tolaptop;
+ unsigned long dpkts_tomodem;
+ unsigned dpkts_tolaptop_pending;
+
+ /* A list node inside the diag_dev_list */
+ struct list_head list_item;
+};
+
+static struct list_head diag_dev_list;
+
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+ return container_of(f, struct diag_context, function);
+}
+
+/* Called with ctxt->lock held; i.e. only use with kref_put_spinlock_irqsave */
+static void diag_context_release(struct kref *kref)
+{
+ struct diag_context *ctxt =
+ container_of(kref, struct diag_context, kref);
+
+ spin_unlock(&ctxt->lock);
+ kfree(ctxt);
+}
+
+static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
+{
+ struct usb_composite_dev *cdev = ctxt->cdev;
+ struct usb_gadget_strings **table;
+ struct usb_string *s;
+ struct usb_gadget_string_container *uc;
+ struct dload_struct local_diag_dload = { 0 };
+
+ /*
+ * update pid and serial number to dload only if diag
+ * interface is zeroth interface.
+ */
+ if (intf_desc.bInterfaceNumber)
+ return;
+
+ if (!diag_dload) {
+ pr_debug("%s: unable to update PID and serial_no\n", __func__);
+ return;
+ }
+
+ /* update pid */
+ local_diag_dload.magic_struct.pid = PID_MAGIC_ID;
+ local_diag_dload.pid = cdev->desc.idProduct;
+ local_diag_dload.magic_struct.serial_num = SERIAL_NUM_MAGIC_ID;
+
+ list_for_each_entry(uc, &cdev->gstrings, list) {
+ table = (struct usb_gadget_strings **)uc->stash;
+ if (!table) {
+ pr_err("%s: can't update dload cookie\n", __func__);
+ break;
+ }
+
+ for (s = (*table)->strings; s && s->s; s++) {
+ if (s->id == cdev->desc.iSerialNumber) {
+ strlcpy(local_diag_dload.serial_number, s->s,
+ SERIAL_NUMBER_LENGTH);
+ goto update_dload;
+ }
+ }
+
+ }
+
+update_dload:
+ pr_debug("%s: dload:%pK pid:%x serial_num:%s\n",
+ __func__, diag_dload, local_diag_dload.pid,
+ local_diag_dload.serial_number);
+
+ memcpy_toio(diag_dload, &local_diag_dload, sizeof(local_diag_dload));
+}
+
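+/*
+ * IN (device-to-host) completion handler. When the completed transfer is a
+ * non-zero multiple of the endpoint's wMaxPacketSize, a zero-length packet
+ * is queued first so the host can detect the end of the transfer; only then
+ * is the request returned to the write pool and the diag client notified
+ * with USB_DIAG_WRITE_DONE.
+ */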
+static void diag_write_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct diag_context *ctxt = ep->driver_data;
+ struct diag_request *d_req = req->context;
+ unsigned long flags;
+
+ ctxt->dpkts_tolaptop_pending--;
+
+ if (!req->status) {
+ if ((req->length >= ep->maxpacket) &&
+ ((req->length % ep->maxpacket) == 0)) {
+ ctxt->dpkts_tolaptop_pending++;
+ req->length = 0;
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ /* Queue zero length packet */
+ if (!usb_ep_queue(ctxt->in, req, GFP_ATOMIC))
+ return;
+ ctxt->dpkts_tolaptop_pending--;
+ } else {
+ ctxt->dpkts_tolaptop++;
+ }
+ }
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->write_pool);
+ if (req->length != 0) {
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ }
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ if (ctxt->ch && ctxt->ch->notify)
+ ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_WRITE_DONE, d_req);
+
+ kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+ &ctxt->lock);
+}
+
+static void diag_read_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct diag_context *ctxt = ep->driver_data;
+ struct diag_request *d_req = req->context;
+ unsigned long flags;
+
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->read_pool);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ ctxt->dpkts_tomodem++;
+
+ if (ctxt->ch && ctxt->ch->notify)
+ ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_READ_DONE, d_req);
+
+ kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+ &ctxt->lock);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates over the available channels and returns the
+ * channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ *
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+ void (*notify)(void *, unsigned, struct diag_request *))
+{
+ struct usb_diag_ch *ch;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ /* Check if we already have a channel with this name */
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ if (!strcmp(name, ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch_lock, flags);
+
+ if (!found) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ch->name = name;
+ ch->priv = priv;
+ ch->notify = notify;
+
+ if (!found) {
+ spin_lock_irqsave(&ch_lock, flags);
+ list_add_tail(&ch->list, &usb_diag_ch_list);
+ spin_unlock_irqrestore(&ch_lock, flags);
+ }
+
+ return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
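+
+/*
+ * Illustrative sketch only, not part of this driver: a client such as the
+ * diag char driver would typically open the channel once and supply a
+ * notify callback. The names diag_usb_notify and drv_priv below are
+ * hypothetical client-side examples.
+ *
+ *	static void diag_usb_notify(void *priv, unsigned event,
+ *				    struct diag_request *d_req)
+ *	{
+ *		switch (event) {
+ *		case USB_DIAG_CONNECT:
+ *		case USB_DIAG_DISCONNECT:
+ *		case USB_DIAG_READ_DONE:
+ *		case USB_DIAG_WRITE_DONE:
+ *			break;
+ *		}
+ *	}
+ *
+ *	ch = usb_diag_open("diag", drv_priv, diag_usb_notify);
+ *	if (IS_ERR(ch))
+ *		return PTR_ERR(ch);
+ */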
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+ struct diag_context *dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ ch->priv = NULL;
+ ch->notify = NULL;
+	/* Free up the resources if the channel is no longer active */
+ list_del(&ch->list);
+ list_for_each_entry(dev, &diag_dev_list, list_item)
+ if (dev->ch == ch)
+ dev->ch = NULL;
+ kfree(ch);
+
+ spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+static void free_reqs(struct diag_context *ctxt)
+{
+ struct list_head *act, *tmp;
+ struct usb_request *req;
+
+ list_for_each_safe(act, tmp, &ctxt->write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ctxt->in, req);
+ }
+
+ list_for_each_safe(act, tmp, &ctxt->read_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ctxt->out, req);
+ }
+}
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * This function allocates read and write USB requests for the interface
+ * associated with this channel. The actual buffers are not allocated here;
+ * they are passed in by the diag char driver.
+ *
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ struct usb_request *req;
+ int i;
+ unsigned long flags;
+
+ if (!ctxt)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ /* Free previous session's stale requests */
+ free_reqs(ctxt);
+ for (i = 0; i < n_write; i++) {
+ req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+ if (!req)
+ goto fail;
+ kmemleak_not_leak(req);
+ req->complete = diag_write_complete;
+ list_add_tail(&req->list, &ctxt->write_pool);
+ }
+
+ for (i = 0; i < n_read; i++) {
+ req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+ if (!req)
+ goto fail;
+ kmemleak_not_leak(req);
+ req->complete = diag_read_complete;
+ list_add_tail(&req->list, &ctxt->read_pool);
+ }
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return 0;
+fail:
+ free_reqs(ctxt);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -ENOMEM;
+
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
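+
+/*
+ * Illustrative sketch only, not part of this driver: requests are typically
+ * allocated after the USB_DIAG_CONNECT notification and before any read or
+ * write is queued. The counts below are arbitrary example values.
+ *
+ *	err = usb_diag_alloc_req(ch, 16, 4);
+ *	if (err)
+ *		pr_err("diag: failed to allocate USB requests: %d\n", err);
+ */
+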
+#define DWC3_MAX_REQUEST_SIZE (16 * 1024 * 1024)
+/**
+ * usb_diag_request_size - Max request size for controller
+ * @ch: Channel handler
+ *
+ * Inform the diag driver of the maximum request size so that it can split
+ * packets into chunks the controller can handle.
+ */
+int usb_diag_request_size(struct usb_diag_ch *ch)
+{
+ return DWC3_MAX_REQUEST_SIZE;
+}
+EXPORT_SYMBOL(usb_diag_request_size);
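+
+/*
+ * Illustrative sketch only, not part of this driver: a client can use the
+ * reported limit to bound the length of each queued request. max_len and
+ * pkt_len are hypothetical client-side variables; each chunk would then be
+ * sent with usb_diag_write() once the previous WRITE_DONE has been notified.
+ *
+ *	int max_len = usb_diag_request_size(ch);
+ *
+ *	if (pkt_len > max_len)
+ *		pkt_len = max_len;
+ */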
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on the OUT endpoint of the interface corresponding to
+ * this channel. This function returns a proper error code when the interface
+ * is not in the configured state, no Rx requests are available, or the
+ * endpoint queue operation fails.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req;
+ struct usb_ep *out;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+ if (!ctxt)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+
+ if (!ctxt->configured || !ctxt->out) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ out = ctxt->out;
+
+ if (list_empty(&ctxt->read_pool)) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+ list_del(&req->list);
+ kref_get(&ctxt->kref); /* put called in complete callback */
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+
+ /* make sure context is still valid after releasing lock */
+ if (ctxt != ch->priv_usb) {
+ usb_ep_free_request(out, req);
+ kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+ &ctxt->lock);
+ return -EIO;
+ }
+
+ if (usb_ep_queue(out, req, GFP_ATOMIC)) {
+		/* On error, add the request back to the read pool */
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->read_pool);
+		/* At most one error message every 10 seconds */
+		if (__ratelimit(&rl))
+			ERROR(ctxt->cdev, "%s: cannot queue read request\n",
+				__func__);
+
+ if (kref_put(&ctxt->kref, diag_context_release))
+ /* diag_context_release called spin_unlock already */
+ local_irq_restore(flags);
+ else
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
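+
+/*
+ * Illustrative sketch only, not part of this driver: the caller owns d_req
+ * and its buffer, which must stay valid until the USB_DIAG_READ_DONE
+ * notification. read_buf and READ_BUF_SIZE are hypothetical names.
+ *
+ *	d_req->buf = read_buf;
+ *	d_req->length = READ_BUF_SIZE;
+ *	err = usb_diag_read(ch, d_req);
+ *	if (err)
+ *		pr_err("diag: usb_diag_read failed: %d\n", err);
+ */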
+
+/**
+ * usb_diag_write() - Write data to the USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on the IN endpoint of the interface corresponding to
+ * this channel. This function returns a proper error code when the interface
+ * is not in the configured state, no Tx requests are available, or the
+ * endpoint queue operation fails.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+ struct usb_ep *in;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+ if (!ctxt)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+
+ if (!ctxt->configured || !ctxt->in) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ in = ctxt->in;
+
+ if (list_empty(&ctxt->write_pool)) {
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+ list_del(&req->list);
+ kref_get(&ctxt->kref); /* put called in complete callback */
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+
+ /* make sure context is still valid after releasing lock */
+ if (ctxt != ch->priv_usb) {
+ usb_ep_free_request(in, req);
+ kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+ &ctxt->lock);
+ return -EIO;
+ }
+
+ ctxt->dpkts_tolaptop_pending++;
+ if (usb_ep_queue(in, req, GFP_ATOMIC)) {
+		/* On error, add the request back to the write pool */
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_add_tail(&req->list, &ctxt->write_pool);
+ ctxt->dpkts_tolaptop_pending--;
+		/* At most one error message every 10 seconds */
+		if (__ratelimit(&rl))
+			ERROR(ctxt->cdev, "%s: cannot queue write request\n",
+				__func__);
+
+ if (kref_put(&ctxt->kref, diag_context_release))
+ /* diag_context_release called spin_unlock already */
+ local_irq_restore(flags);
+ else
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ return -EIO;
+ }
+
+ /*
+ * It's possible that both write completion AND unbind could have been
+ * completed asynchronously by this point. Since they both release the
+ * kref, ctxt is _NOT_ guaranteed to be valid here.
+ */
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
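+
+/*
+ * Illustrative sketch only, not part of this driver: writes follow the same
+ * asynchronous pattern; the buffer must stay valid until USB_DIAG_WRITE_DONE
+ * is notified. pkt and pkt_len are hypothetical client-side names.
+ *
+ *	d_req->buf = pkt;
+ *	d_req->length = pkt_len;
+ *	err = usb_diag_write(ch, d_req);
+ *	if (err)
+ *		pr_err("diag: usb_diag_write failed: %d\n", err);
+ */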
+
+static void diag_function_disable(struct usb_function *f)
+{
+ struct diag_context *dev = func_to_diag(f);
+ unsigned long flags;
+
+ DBG(dev->cdev, "diag_function_disable\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->configured = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (dev->ch && dev->ch->notify)
+ dev->ch->notify(dev->ch->priv, USB_DIAG_DISCONNECT, NULL);
+
+ usb_ep_disable(dev->in);
+ dev->in->driver_data = NULL;
+
+ usb_ep_disable(dev->out);
+ dev->out->driver_data = NULL;
+ if (dev->ch)
+ dev->ch->priv_usb = NULL;
+}
+
+static void diag_free_func(struct usb_function *f)
+{
+ struct diag_context *ctxt = func_to_diag(f);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ list_del(&ctxt->list_item);
+ if (kref_put(&ctxt->kref, diag_context_release))
+ /* diag_context_release called spin_unlock already */
+ local_irq_restore(flags);
+ else
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+static int diag_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct diag_context *dev = func_to_diag(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned long flags;
+ int rc = 0;
+
+ if (config_ep_by_speed(cdev->gadget, f, dev->in) ||
+ config_ep_by_speed(cdev->gadget, f, dev->out)) {
+ dev->in->desc = NULL;
+ dev->out->desc = NULL;
+ return -EINVAL;
+ }
+
+ if (!dev->ch)
+ return -ENODEV;
+
+ /*
+	 * Indicate to the diag channel that the active diag device is dev,
+	 * since several diag devices can point to the same channel.
+ */
+ dev->ch->priv_usb = dev;
+
+ dev->in->driver_data = dev;
+ rc = usb_ep_enable(dev->in);
+ if (rc) {
+ ERROR(dev->cdev, "can't enable %s, result %d\n",
+ dev->in->name, rc);
+ return rc;
+ }
+ dev->out->driver_data = dev;
+ rc = usb_ep_enable(dev->out);
+ if (rc) {
+ ERROR(dev->cdev, "can't enable %s, result %d\n",
+ dev->out->name, rc);
+ usb_ep_disable(dev->in);
+ return rc;
+ }
+
+ dev->dpkts_tolaptop = 0;
+ dev->dpkts_tomodem = 0;
+ dev->dpkts_tolaptop_pending = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->configured = 1;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (dev->ch->notify)
+ dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);
+
+ return rc;
+}
+
+static void diag_function_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct diag_context *ctxt = func_to_diag(f);
+ unsigned long flags;
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+
+ usb_free_descriptors(f->fs_descriptors);
+
+ /*
+	 * Channel priv_usb may point to another diag function.
+	 * Clear priv_usb only if the channel is used by the
+	 * diag device we are unbinding here.
+ */
+ if (ctxt->ch && ctxt->ch->priv_usb == ctxt)
+ ctxt->ch->priv_usb = NULL;
+
+ spin_lock_irqsave(&ctxt->lock, flags);
+ /* Free any pending USB requests from last session */
+ free_reqs(ctxt);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+static int diag_function_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct diag_context *ctxt = func_to_diag(f);
+ struct usb_ep *ep;
+ int status = -ENODEV;
+
+ ctxt->cdev = c->cdev;
+
+ intf_desc.bInterfaceNumber = usb_interface_id(c, f);
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+ if (!ep)
+ goto fail;
+ ctxt->in = ep;
+ ep->driver_data = ctxt;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+ if (!ep)
+ goto fail;
+ ctxt->out = ep;
+ ep->driver_data = ctxt;
+
+ status = -ENOMEM;
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(fs_diag_desc);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ hs_bulk_in_desc.bEndpointAddress =
+ fs_bulk_in_desc.bEndpointAddress;
+ hs_bulk_out_desc.bEndpointAddress =
+ fs_bulk_out_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_bulk_in_desc.bEndpointAddress =
+ fs_bulk_in_desc.bEndpointAddress;
+ ss_bulk_out_desc.bEndpointAddress =
+ fs_bulk_out_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(ss_diag_desc);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ /* Allow only first diag channel to update pid and serial no */
+ if (ctxt == list_first_entry(&diag_dev_list,
+ struct diag_context, list_item))
+ diag_update_pid_and_serial_num(ctxt);
+
+ return 0;
+fail:
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+ if (ctxt->out)
+ ctxt->out->driver_data = NULL;
+ if (ctxt->in)
+ ctxt->in->driver_data = NULL;
+ return status;
+
+}
+
+static struct diag_context *diag_context_init(const char *name)
+{
+ struct diag_context *dev;
+ struct usb_diag_ch *_ch;
+ int found = 0;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+ if (!strcmp(name, _ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_warn("%s: unable to get diag usb channel\n", __func__);
+
+ _ch = kzalloc(sizeof(*_ch), GFP_KERNEL);
+ if (_ch == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ _ch->name = name;
+
+ spin_lock_irqsave(&ch_lock, flags);
+ list_add_tail(&_ch->list, &usb_diag_ch_list);
+ spin_unlock_irqrestore(&ch_lock, flags);
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ list_add_tail(&dev->list_item, &diag_dev_list);
+
+ /*
+	 * Several diag devices can point to the same channel when they
+	 * belong to different configurations; only the active diag device
+	 * claims the channel by setting ch->priv_usb
+	 * (see diag_function_set_alt).
+ */
+ dev->ch = _ch;
+
+ dev->function.name = _ch->name;
+ dev->function.fs_descriptors = fs_diag_desc;
+ dev->function.hs_descriptors = hs_diag_desc;
+ dev->function.bind = diag_function_bind;
+ dev->function.unbind = diag_function_unbind;
+ dev->function.set_alt = diag_function_set_alt;
+ dev->function.disable = diag_function_disable;
+ dev->function.free_func = diag_free_func;
+ kref_init(&dev->kref);
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->read_pool);
+ INIT_LIST_HEAD(&dev->write_pool);
+
+ return dev;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = debug_buffer;
+ int temp = 0;
+ struct usb_diag_ch *ch;
+
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+
+ if (ctxt) {
+ spin_lock_irqsave(&ctxt->lock, flags);
+ temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+ "---Name: %s---\n"
+ "endpoints: %s, %s\n"
+ "dpkts_tolaptop: %lu\n"
+ "dpkts_tomodem: %lu\n"
+ "pkts_tolaptop_pending: %u\n",
+ ch->name,
+ ctxt->in->name, ctxt->out->name,
+ ctxt->dpkts_tolaptop,
+ ctxt->dpkts_tomodem,
+ ctxt->dpkts_tolaptop_pending);
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ }
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_diag_ch *ch;
+
+ list_for_each_entry(ch, &usb_diag_ch_list, list) {
+ struct diag_context *ctxt = ch->priv_usb;
+ unsigned long flags;
+
+ if (ctxt) {
+ spin_lock_irqsave(&ctxt->lock, flags);
+ ctxt->dpkts_tolaptop = 0;
+ ctxt->dpkts_tomodem = 0;
+ ctxt->dpkts_tolaptop_pending = 0;
+ spin_unlock_irqrestore(&ctxt->lock, flags);
+ }
+ }
+
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations debug_fdiag_ops = {
+ .open = debug_open,
+ .read = debug_read_stats,
+ .write = debug_reset_stats,
+};
+
+struct dentry *dent_diag;
+static void fdiag_debugfs_init(void)
+{
+ struct dentry *dent_diag_status;
+ dent_diag = debugfs_create_dir("usb_diag", 0);
+ if (!dent_diag || IS_ERR(dent_diag))
+ return;
+
+ dent_diag_status = debugfs_create_file("status", 0444, dent_diag, 0,
+ &debug_fdiag_ops);
+
+ if (!dent_diag_status || IS_ERR(dent_diag_status)) {
+ debugfs_remove(dent_diag);
+ dent_diag = NULL;
+ return;
+ }
+}
+
+static void fdiag_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dent_diag);
+}
+#else
+static inline void fdiag_debugfs_init(void) {}
+static inline void fdiag_debugfs_remove(void) {}
+#endif
+
+static void diag_opts_release(struct config_item *item)
+{
+ struct diag_opts *opts = to_diag_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations diag_item_ops = {
+ .release = diag_opts_release,
+};
+
+static struct config_item_type diag_func_type = {
+ .ct_item_ops = &diag_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int diag_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ struct diag_opts *opts = container_of(fi, struct diag_opts, func_inst);
+ char *ptr;
+ int name_len;
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ ptr = kstrndup(name, name_len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ opts->name = ptr;
+
+ return 0;
+}
+
+static void diag_free_inst(struct usb_function_instance *f)
+{
+ struct diag_opts *opts;
+
+ opts = container_of(f, struct diag_opts, func_inst);
+ kfree(opts->name);
+ kfree(opts);
+}
+
+static struct usb_function_instance *diag_alloc_inst(void)
+{
+ struct diag_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = diag_set_inst_name;
+ opts->func_inst.free_func_inst = diag_free_inst;
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &diag_func_type);
+
+ return &opts->func_inst;
+}
+
+static struct usb_function *diag_alloc(struct usb_function_instance *fi)
+{
+ struct diag_opts *opts;
+ struct diag_context *dev;
+
+ opts = container_of(fi, struct diag_opts, func_inst);
+
+ dev = diag_context_init(opts->name);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ return &dev->function;
+}
+
+DECLARE_USB_FUNCTION(diag, diag_alloc_inst, diag_alloc);
+
+static int __init diag_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ INIT_LIST_HEAD(&diag_dev_list);
+
+ fdiag_debugfs_init();
+
+ ret = usb_function_register(&diagusb_func);
+ if (ret) {
+ pr_err("%s: failed to register diag %d\n", __func__, ret);
+ return ret;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-diag-dload");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL, "qcom,android-usb");
+
+ if (!np)
+ pr_warn("diag: failed to find diag_dload imem node\n");
+
+ diag_dload = np ? of_iomap(np, 0) : NULL;
+
+ return ret;
+}
+
+static void __exit diag_exit(void)
+{
+ struct list_head *act, *tmp;
+ struct usb_diag_ch *_ch;
+ unsigned long flags;
+
+ if (diag_dload)
+ iounmap(diag_dload);
+
+ usb_function_unregister(&diagusb_func);
+
+ fdiag_debugfs_remove();
+
+ list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+ _ch = list_entry(act, struct usb_diag_ch, list);
+
+ spin_lock_irqsave(&ch_lock, flags);
+ /* Free if diagchar is not using the channel anymore */
+ if (!_ch->priv) {
+ list_del(&_ch->list);
+ kfree(_ch);
+ }
+ spin_unlock_irqrestore(&ch_lock, flags);
+ }
+
+}
+
+module_init(diag_init);
+module_exit(diag_exit);
+
+MODULE_DESCRIPTION("Diag function driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index c045d4176a9c..cd6441e8354c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -24,6 +24,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/uio.h>
+#include <linux/ipc_logging.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
@@ -41,6 +42,16 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define NUM_PAGES 10 /* # of pages for ipc logging */
+
+static void *ffs_ipc_log;
+#define ffs_log(fmt, ...) do { \
+ if (ffs_ipc_log) \
+ ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -57,10 +68,32 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+static LIST_HEAD(inst_list);
+
+/* ffs instance status */
+#define INST_NAME_SIZE 16
+
+struct ffs_inst_status {
+ char inst_name[INST_NAME_SIZE];
+ struct list_head list;
+ struct mutex ffs_lock;
+ bool inst_exist;
+ struct f_fs_opts *opts;
+ struct ffs_data *ffs_data;
+};
+
+/* Free instance structures */
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name);
+static void ffs_inst_clean_delay(const char *inst_name);
+static int ffs_inst_exist_check(const char *inst_name);
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst);
/* The function structure ***************************************************/
struct ffs_ep;
+static bool first_read_done;
struct ffs_function {
struct usb_configuration *conf;
@@ -118,12 +151,14 @@ struct ffs_ep {
u8 num;
int status; /* P: epfile->mutex */
+ bool is_busy;
};
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
wait_queue_head_t wait;
+ atomic_t error;
struct ffs_data *ffs;
struct ffs_ep *ep; /* P: ffs->eps_lock */
@@ -136,6 +171,7 @@ struct ffs_epfile {
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+ atomic_t opened;
};
/* ffs_io_data structure ***************************************************/
@@ -211,6 +247,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
spin_unlock_irq(&ffs->ev.waitq.lock);
+ ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
req->buf = data;
req->length = len;
@@ -235,11 +274,18 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
+
+ ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
+ ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+ ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
if (ffs->ev.can_stall) {
pr_vdebug("ep0 stall\n");
usb_ep_set_halt(ffs->gadget->ep0);
@@ -260,6 +306,13 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -388,6 +441,9 @@ done_spin:
break;
}
+ ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
return ret;
}
@@ -421,6 +477,10 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
ffs->ev.count * sizeof *ffs->ev.types);
spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+ ffs->setup_state, ffs->flags, n);
+
mutex_unlock(&ffs->mutex);
return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -436,6 +496,13 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -524,20 +591,36 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
+ ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
kfree(data);
+
return ret;
}
static int ffs_ep0_open(struct inode *inode, struct file *file)
{
struct ffs_data *ffs = inode->i_private;
+ int ret;
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (unlikely(ffs->state == FFS_CLOSING))
return -EBUSY;
+ smp_mb__before_atomic();
+ if (atomic_read(&ffs->opened))
+ return -EBUSY;
+
file->private_data = ffs;
ffs_data_opened(ffs);
@@ -550,6 +633,9 @@ static int ffs_ep0_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
ffs_data_closed(ffs);
return 0;
@@ -563,6 +649,13 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -581,6 +674,13 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
unsigned int mask = POLLWRNORM;
int ret;
+ ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
poll_wait(file, &ffs->ev.waitq, wait);
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -611,6 +711,8 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
break;
}
+ ffs_log("exit: mask %u", mask);
+
mutex_unlock(&ffs->mutex);
return mask;
@@ -632,10 +734,16 @@ static const struct file_operations ffs_ep0_operations = {
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
+ struct ffs_ep *ep = _ep->driver_data;
ENTER();
- if (likely(req->context)) {
+
+ /* req may be freed during unbind */
+ if (ep && ep->req && likely(req->context)) {
struct ffs_ep *ep = _ep->driver_data;
ep->status = req->status ? req->status : req->actual;
+ /* Set is_busy false to indicate completion of last request */
+ ep->is_busy = false;
+ ffs_log("ep status %d for req %pK", ep->status, req);
complete(req->context);
}
}
@@ -648,6 +756,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->req->actual;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ ffs_log("enter: ret %d", ret);
+
if (io_data->read && ret > 0) {
mm_segment_t oldfs = get_fs();
@@ -671,6 +781,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
+
+ ffs_log("exit");
}
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -680,18 +792,35 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ ffs_log("enter");
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
schedule_work(&io_data->work);
+
+ ffs_log("exit");
}
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
struct ffs_ep *ep;
+ struct ffs_data *ffs = epfile->ffs;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
int halt;
+ ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name,
+ atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE");
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
+ smp_mb__before_atomic();
+retry:
+ if (atomic_read(&epfile->error))
+ return -ENODEV;
+
/* Are we still active? */
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
ret = -ENODEV;
@@ -706,11 +835,28 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
goto error;
}
- ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
- if (ret) {
+ /* Don't wait on write if device is offline */
+ if (!io_data->read) {
ret = -EINTR;
goto error;
}
+
+ /*
+		 * If the ep is disabled, this fails all current I/Os
+		 * and waits for the next epfile open to happen.
+ */
+ smp_mb__before_atomic();
+ if (!atomic_read(&epfile->error)) {
+ ret = wait_event_interruptible(epfile->wait,
+ (ep = epfile->ep));
+ if (ret < 0)
+ goto error;
+ }
+
+ if (!ep) {
+ ret = -ENODEV;
+ goto error;
+ }
}
/* Do we halt? */
@@ -820,25 +966,50 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
} else {
- DECLARE_COMPLETION_ONSTACK(done);
+ struct completion *done;
req = ep->req;
req->buf = data;
req->length = data_len;
+ ret = 0;
- req->context = &done;
req->complete = ffs_epfile_io_complete;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (io_data->read) {
+ reinit_completion(&epfile->ffs->epout_completion);
+ done = &epfile->ffs->epout_completion;
+ } else {
+ reinit_completion(&epfile->ffs->epin_completion);
+ done = &epfile->ffs->epin_completion;
+ }
+ req->context = done;
+
+ /*
+		 * Don't queue another read request if the previous
+		 * one is still busy.
+ */
+ if (!(io_data->read && ep->is_busy)) {
+ ep->is_busy = true;
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ }
spin_unlock_irq(&epfile->ffs->eps_lock);
if (unlikely(ret < 0)) {
- /* nop */
+ ep->is_busy = false;
+ ret = -EIO;
} else if (unlikely(
- wait_for_completion_interruptible(&done))) {
+ wait_for_completion_interruptible(done))) {
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /*
+			 * While we were acquiring the lock, did the
+			 * endpoint get disabled (disconnect) or changed
+			 * (composition switch)?
+ */
+ if (epfile->ep == ep)
+ usb_ep_dequeue(ep->ep, req);
+ spin_unlock_irq(&epfile->ffs->eps_lock);
ret = -EINTR;
- usb_ep_dequeue(ep->ep, req);
} else {
/*
 			 * XXX We may end up silently dropping data
@@ -847,11 +1018,58 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* to maxpacketsize), we may end up with more
 			 * data than user space has space for.
*/
- ret = ep->status;
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /*
+			 * While we were acquiring the lock, did the
+			 * endpoint get disabled (disconnect) or changed
+			 * (composition switch)?
+ */
+ if (epfile->ep == ep) {
+ ret = ep->status;
+ if (ret >= 0)
+ first_read_done = true;
+ } else {
+ ret = -ENODEV;
+ }
+
+ /* do wait again if func eps are not enabled */
+ if (io_data->read && !first_read_done
+ && ret < 0) {
+ unsigned short count = ffs->eps_count;
+
+ pr_debug("%s: waiting for the online state\n",
+ __func__);
+ ret = 0;
+ kfree(data);
+ data = NULL;
+ data_len = -EINVAL;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
+ epfile = ffs->epfiles;
+ do {
+ atomic_set(&epfile->error, 0);
+ ++epfile;
+ } while (--count);
+ epfile = file->private_data;
+ goto retry;
+ }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
- if (!ret)
- ret = -EFAULT;
+
+ if (ret > data_len) {
+ ret = -EOVERFLOW;
+ pr_err("More data(%zd) received than intended length(%zu)\n",
+ ret, data_len);
+
+ } else {
+ ret = copy_to_iter(data, ret, &io_data->data);
+ pr_debug("copied (%zd) bytes to user space\n", ret);
+ if (!ret) {
+ pr_err("Fail to copy to user\n");
+ ret = -EFAULT;
+ }
+ }
}
}
kfree(data);
@@ -859,6 +1077,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
}
mutex_unlock(&epfile->mutex);
+
+ ffs_log("exit:ret %zu", ret);
+
return ret;
error_lock:
@@ -866,6 +1087,9 @@ error_lock:
mutex_unlock(&epfile->mutex);
error:
kfree(data);
+
+ ffs_log("exit: ret %zu", ret);
+
return ret;
}
@@ -873,15 +1097,39 @@ static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ int ret;
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
+ smp_mb__before_atomic();
+ if (atomic_read(&epfile->opened)) {
+ pr_err("%s(): ep(%s) is already opened.\n",
+ __func__, epfile->name);
+ return -EBUSY;
+ }
+
+ smp_mb__before_atomic();
+ atomic_set(&epfile->opened, 1);
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
+ smp_mb__before_atomic();
+ atomic_set(&epfile->error, 0);
+ first_read_done = false;
+
+ ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
return 0;
}
@@ -894,6 +1142,9 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
if (likely(io_data && io_data->ep && io_data->req))
@@ -903,6 +1154,8 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
+ ffs_log("exit: value %d", value);
+
return value;
}
@@ -913,6 +1166,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -940,6 +1195,9 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kfree(p);
else
*from = p->data;
+
+ ffs_log("exit");
+
return res;
}
@@ -950,6 +1208,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -989,6 +1249,9 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
} else {
*to = p->data;
}
+
+ ffs_log("exit");
+
return res;
}
@@ -999,7 +1262,16 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ smp_mb__before_atomic();
+ atomic_set(&epfile->opened, 0);
+ atomic_set(&epfile->error, 1);
ffs_data_closed(epfile->ffs);
+ file->private_data = NULL;
+
+ ffs_log("exit");
return 0;
}
@@ -1012,6 +1284,13 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1064,6 +1343,8 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit:ret %d", ret);
+
return ret;
}
@@ -1095,6 +1376,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
ENTER();
+ ffs_log("enter");
+
inode = new_inode(sb);
if (likely(inode)) {
@@ -1114,6 +1397,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_op = iops;
}
+ ffs_log("exit");
+
return inode;
}
@@ -1128,6 +1413,8 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
ENTER();
+ ffs_log("enter");
+
dentry = d_alloc_name(sb->s_root, name);
if (unlikely(!dentry))
return NULL;
@@ -1139,6 +1426,9 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
}
d_add(dentry, inode);
+
+ ffs_log("exit");
+
return dentry;
}
@@ -1164,6 +1454,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ENTER();
+ ffs_log("enter");
+
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
@@ -1188,6 +1480,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&ffs_ep0_operations)))
return -ENOMEM;
+ ffs_log("exit");
+
return 0;
}
@@ -1195,6 +1489,8 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
ENTER();
+ ffs_log("enter");
+
if (!opts || !*opts)
return 0;
@@ -1277,6 +1573,8 @@ invalid:
opts = comma + 1;
}
+ ffs_log("exit");
+
return 0;
}
@@ -1299,9 +1597,12 @@ ffs_fs_mount(struct file_system_type *t, int flags,
int ret;
void *ffs_dev;
struct ffs_data *ffs;
+ struct ffs_inst_status *inst_status;
ENTER();
+ ffs_log("enter");
+
ret = ffs_fs_parse_opts(&data, opts);
if (unlikely(ret < 0))
return ERR_PTR(ret);
@@ -1326,11 +1627,26 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs->private_data = ffs_dev;
data.ffs_data = ffs;
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find instance (%s)\n",
+ ffs->dev_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Store ffs to global status structure */
+ ffs_dev_lock();
+ inst_status->ffs_data = ffs;
+ ffs_dev_unlock();
+
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
if (IS_ERR(rv) && data.ffs_data) {
ffs_release_dev(data.ffs_data);
ffs_data_put(data.ffs_data);
}
+
+ ffs_log("exit");
+
return rv;
}
@@ -1339,11 +1655,15 @@ ffs_fs_kill_sb(struct super_block *sb)
{
ENTER();
+ ffs_log("enter");
+
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
ffs_data_closed(sb->s_fs_info);
}
+
+ ffs_log("exit");
}
static struct file_system_type ffs_fs_type = {
@@ -1380,7 +1700,6 @@ static void functionfs_cleanup(void)
unregister_filesystem(&ffs_fs_type);
}
-
/* ffs_data and ffs_function construction and destruction code **************/
static void ffs_data_clear(struct ffs_data *ffs);
@@ -1390,39 +1709,72 @@ static void ffs_data_get(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter");
+
+ smp_mb__before_atomic();
atomic_inc(&ffs->ref);
+
+ ffs_log("exit");
}
static void ffs_data_opened(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ smp_mb__before_atomic();
atomic_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_put(struct ffs_data *ffs)
{
+ struct ffs_inst_status *inst_status;
+ const char *dev_name;
+
ENTER();
+ ffs_log("enter");
+
+ smp_mb__before_atomic();
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
+ /* Clear ffs from global structure */
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (!IS_ERR(inst_status)) {
+ ffs_dev_lock();
+ inst_status->ffs_data = NULL;
+ ffs_dev_unlock();
+ }
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
+ dev_name = ffs->dev_name;
kfree(ffs);
+ ffs_inst_clean_delay(dev_name);
+ kfree(dev_name);
}
+
+ ffs_log("exit");
}
static void ffs_data_closed(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ smp_mb__before_atomic();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
@@ -1438,11 +1790,16 @@ static void ffs_data_closed(struct ffs_data *ffs)
ffs_data_reset(ffs);
}
}
+
+ smp_mb__before_atomic();
if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_put(ffs);
}
@@ -1454,6 +1811,8 @@ static struct ffs_data *ffs_data_new(void)
ENTER();
+ ffs_log("enter");
+
atomic_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
@@ -1461,10 +1820,14 @@ static struct ffs_data *ffs_data_new(void)
spin_lock_init(&ffs->eps_lock);
init_waitqueue_head(&ffs->ev.waitq);
init_completion(&ffs->ep0req_completion);
+ init_completion(&ffs->epout_completion);
+ init_completion(&ffs->epin_completion);
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
+ ffs_log("exit");
+
return ffs;
}
@@ -1472,8 +1835,16 @@ static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
+ pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
+ __func__, ffs->gadget, ffs->flags);
ffs_closed(ffs);
+ if (ffs->gadget)
+ pr_err("%s: ffs:%pK ffs->gadget= %pK, ffs->flags= %lu\n",
+ __func__, ffs, ffs->gadget, ffs->flags);
BUG_ON(ffs->gadget);
if (ffs->epfiles)
@@ -1485,12 +1856,18 @@ static void ffs_data_clear(struct ffs_data *ffs)
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_reset(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_clear(ffs);
ffs->epfiles = NULL;
@@ -1517,6 +1894,9 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->ms_os_descs_ext_prop_count = 0;
ffs->ms_os_descs_ext_prop_name_len = 0;
ffs->ms_os_descs_ext_prop_data_len = 0;
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
@@ -1527,6 +1907,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (WARN_ON(ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
@@ -1552,6 +1935,10 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
}
ffs->gadget = cdev->gadget;
+
+ ffs_log("exit: state %d setup_state %d flag %lu gadget %pK\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
+
ffs_data_get(ffs);
return 0;
}
@@ -1565,6 +1952,8 @@ static void functionfs_unbind(struct ffs_data *ffs)
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
ffs_data_put(ffs);
}
}
@@ -1576,6 +1965,9 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
count = ffs->eps_count;
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
@@ -1586,6 +1978,7 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
init_waitqueue_head(&epfile->wait);
+ atomic_set(&epfile->opened, 0);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
@@ -1600,6 +1993,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
}
ffs->epfiles = epfiles;
+
+ ffs_log("exit: eps_count %u state %d setup_state %d flag %lu",
+ count, ffs->state, ffs->setup_state, ffs->flags);
+
return 0;
}
@@ -1609,6 +2006,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
ENTER();
+ ffs_log("enter: count %u", count);
+
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex) ||
waitqueue_active(&epfile->wait));
@@ -1620,6 +2019,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
}
kfree(epfiles);
+
+ ffs_log("exit");
}
static void ffs_func_eps_disable(struct ffs_function *func)
@@ -1629,19 +2030,30 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
+
+ smp_mb__before_atomic();
+ if (epfile)
+ atomic_set(&epfile->error, 1);
+
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
if (epfile) {
+ atomic_set(&epfile->error, 1);
epfile->ep = NULL;
++epfile;
}
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+ ffs_log("exit");
}
static int ffs_func_eps_enable(struct ffs_function *func)
@@ -1653,17 +2065,17 @@ static int ffs_func_eps_enable(struct ffs_function *func)
unsigned long flags;
int ret = 0;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
- struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
- int needs_comp_desc = false;
int desc_idx;
- if (ffs->gadget->speed == USB_SPEED_SUPER) {
+ if (ffs->gadget->speed == USB_SPEED_SUPER)
desc_idx = 2;
- needs_comp_desc = true;
- } else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ else if (ffs->gadget->speed == USB_SPEED_HIGH)
desc_idx = 1;
else
desc_idx = 0;
@@ -1681,11 +2093,11 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ep->ep->driver_data = ep;
ep->ep->desc = ds;
- if (needs_comp_desc) {
- comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
- USB_DT_ENDPOINT_SIZE);
- ep->ep->maxburst = comp_desc->bMaxBurst + 1;
- ep->ep->comp_desc = comp_desc;
+ ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
+ if (ret) {
+ pr_err("%s(): config_ep_by_speed(%d) err for %s\n",
+ __func__, ret, ep->ep->name);
+ break;
}
ret = usb_ep_enable(ep->ep);
@@ -1693,6 +2105,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ds);
epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ ffs_log("usb_ep_enable %s", ep->ep->name);
} else {
break;
}
@@ -1704,6 +2117,8 @@ static int ffs_func_eps_enable(struct ffs_function *func)
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -1744,6 +2159,8 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
/* At least two bytes are required: length and type */
if (len < 2) {
pr_vdebug("descriptor too short\n");
@@ -1860,6 +2277,8 @@ inv_length:
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
+ ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
return length;
}
@@ -1871,6 +2290,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (;;) {
int ret;
@@ -1898,6 +2319,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
data += ret;
++num;
}
+
+ ffs_log("exit: len %u", len);
}
static int __ffs_data_do_entity(enum ffs_entity_type type,
@@ -1909,6 +2332,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
ENTER();
+ ffs_log("enter: type %u", type);
+
switch (type) {
case FFS_DESCRIPTOR:
break;
@@ -1947,6 +2372,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
break;
}
+ ffs_log("exit");
+
return 0;
}
@@ -1956,6 +2383,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
u16 w_index = le16_to_cpu(desc->wIndex);
+ ffs_log("enter");
+
if (bcd_version != 1) {
pr_vdebug("unsupported os descriptors version: %d",
bcd_version);
@@ -1973,6 +2402,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
return -EINVAL;
}
+ ffs_log("exit: size of desc %zu", sizeof(*desc));
+
return sizeof(*desc);
}
@@ -1992,6 +2423,8 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u os desc type %d", len, type);
+
/* loop over all ext compat/ext prop descriptors */
while (feature_count--) {
ret = entity(type, h, data, len, priv);
@@ -2002,6 +2435,9 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
data += ret;
len -= ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2015,6 +2451,8 @@ static int __must_check ffs_do_os_descs(unsigned count,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (num = 0; num < count; ++num) {
int ret;
enum ffs_os_desc_type type;
@@ -2064,6 +2502,9 @@ static int __must_check ffs_do_os_descs(unsigned count,
len -= ret;
data += ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2079,6 +2520,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
ENTER();
+ ffs_log("enter: len %u", len);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *d = data;
@@ -2086,11 +2529,17 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ d->Reserved1 != 1) {
+ pr_err("%s(): Invalid os_desct_ext_compat\n",
+ __func__);
return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
- if (d->Reserved2[i])
+ if (d->Reserved2[i]) {
+ pr_err("%s(): Invalid Reserved2 of ext_compat\n",
+ __func__);
return -EINVAL;
+ }
length = sizeof(struct usb_ext_compat_desc);
}
@@ -2134,6 +2583,9 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
return -EINVAL;
}
+
+ ffs_log("exit");
+
return length;
}
@@ -2147,6 +2599,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (get_unaligned_le32(data + 4) != len)
goto error;
@@ -2260,10 +2714,13 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ffs->ss_descs_count = counts[2];
ffs->ms_os_descs_count = os_descs_count;
+ ffs_log("exit");
+
return 0;
error:
kfree(_data);
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -2277,6 +2734,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (unlikely(len < 16 ||
get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
@@ -2393,12 +2852,14 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ffs->stringtabs = stringtabs;
ffs->raw_strings = _data;
+ ffs_log("exit");
return 0;
error_free:
kfree(stringtabs);
error:
kfree(_data);
+ ffs_log("exit: -EINVAL");
return -EINVAL;
}
@@ -2411,6 +2872,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type rem_type1, rem_type2 = type;
int neg = 0;
+ ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/*
* Abort any unhandled setup
*
@@ -2470,6 +2934,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
eventfd_signal(ffs->ffs_eventfd, 1);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_event_add(struct ffs_data *ffs,
@@ -2504,6 +2971,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
int idx;
static const char *speed_names[] = { "full", "high", "super" };
+ ffs_log("enter");
+
if (type != FFS_DESCRIPTOR)
return 0;
@@ -2579,6 +3048,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
}
ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+ ffs_log("exit");
+
return 0;
}
@@ -2590,6 +3061,8 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
unsigned idx;
u8 newValue;
+ ffs_log("enter: type %d", type);
+
switch (type) {
default:
case FFS_DESCRIPTOR:
@@ -2634,6 +3107,9 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
pr_vdebug("%02x -> %02x\n", *valuep, newValue);
*valuep = newValue;
+
+ ffs_log("exit: newValue %d", newValue);
+
return 0;
}
@@ -2644,6 +3120,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
struct ffs_function *func = priv;
u8 length = 0;
+ ffs_log("enter: type %d", type);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *desc = data;
@@ -2713,6 +3191,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
}
+ ffs_log("exit");
+
return length;
}
@@ -2726,6 +3206,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
/*
* Legacy gadget triggers binding in functionfs_ready_callback,
* which already uses locking; taking the same lock here would
@@ -2760,6 +3242,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ffs_opts->refcnt++;
func->function.strings = func->ffs->stringtabs;
+ ffs_log("exit");
+
return ffs_opts;
}
@@ -2803,6 +3287,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
/* Has descriptors only for speeds gadget does not support */
if (unlikely(!(full | high | super)))
return -ENOTSUPP;
@@ -2894,7 +3381,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
- if (c->cdev->use_os_string)
+ if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
@@ -2905,22 +3392,29 @@ static int _ffs_func_bind(struct usb_configuration *c,
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
- vla_ptr(vlabuf, d, raw_descs) +
- fs_len + hs_len + ss_len,
- d_raw_descs__sz - fs_len - hs_len - ss_len,
- __ffs_func_bind_do_os_desc, func);
- if (unlikely(ret < 0))
- goto error;
+ ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) +
+ fs_len + hs_len + ss_len,
+ d_raw_descs__sz - fs_len - hs_len -
+ ss_len,
+ __ffs_func_bind_do_os_desc, func);
+ if (unlikely(ret < 0))
+ goto error;
+ }
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
/* And we're done */
ffs_event_add(ffs, FUNCTIONFS_BIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return 0;
error:
/* XXX Do we need to release all claimed endpoints here? */
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -2931,6 +3425,8 @@ static int ffs_func_bind(struct usb_configuration *c,
struct ffs_function *func = ffs_func_from_usb(f);
int ret;
+ ffs_log("enter");
+
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
@@ -2938,6 +3434,8 @@ static int ffs_func_bind(struct usb_configuration *c,
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -2948,7 +3446,12 @@ static void ffs_reset_work(struct work_struct *work)
{
struct ffs_data *ffs = container_of(work,
struct ffs_data, reset_work);
+
+ ffs_log("enter");
+
ffs_data_reset(ffs);
+
+ ffs_log("exit");
}
static int ffs_func_set_alt(struct usb_function *f,
@@ -2958,14 +3461,20 @@ static int ffs_func_set_alt(struct usb_function *f,
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
+ ffs_log("enter");
+
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (unlikely(intf < 0))
return intf;
}
- if (ffs->func)
+ if (ffs->func) {
ffs_func_eps_disable(ffs->func);
+ ffs->func = NULL;
+ /* matching put to allow LPM on disconnect */
+ usb_gadget_autopm_put_async(ffs->gadget);
+ }
if (ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
@@ -2985,14 +3494,24 @@ static int ffs_func_set_alt(struct usb_function *f,
ffs->func = func;
ret = ffs_func_eps_enable(func);
- if (likely(ret >= 0))
+ if (likely(ret >= 0)) {
ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+ /* Disable USB LPM later on bus_suspend */
+ usb_gadget_autopm_get_async(ffs->gadget);
+ }
+
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
static void ffs_func_disable(struct usb_function *f)
{
+ ffs_log("enter");
+
ffs_func_set_alt(f, 0, (unsigned)-1);
+
+ ffs_log("exit");
}
static int ffs_func_setup(struct usb_function *f,
@@ -3005,6 +3524,8 @@ static int ffs_func_setup(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
@@ -3048,19 +3569,31 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+ ffs_log("exit");
+
return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static void ffs_func_suspend(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+
+ ffs_log("exit");
}
static void ffs_func_resume(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+
+ ffs_log("exit");
}
@@ -3077,11 +3610,15 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
short *nums = func->interfaces_nums;
unsigned count = func->ffs->interfaces_count;
+ ffs_log("enter");
+
for (; count; --count, ++nums) {
if (*nums >= 0 && *nums == intf)
return nums - func->interfaces_nums;
}
+ ffs_log("exit");
+
return -EDOM;
}
@@ -3094,6 +3631,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
list_for_each_entry(dev, &ffs_devices, entry) {
if (!dev->name || !name)
continue;
@@ -3101,6 +3640,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3111,12 +3652,16 @@ static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
if (list_is_singular(&ffs_devices)) {
dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
if (dev->single)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3127,11 +3672,17 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
dev = _ffs_get_single_dev();
if (dev)
return dev;
- return _ffs_do_find_dev(name);
+ dev = _ffs_do_find_dev(name);
+
+ ffs_log("exit");
+
+ return dev;
}
/* Configfs support *********************************************************/
@@ -3161,25 +3712,146 @@ static struct config_item_type ffs_func_type = {
/* Function registration interface ******************************************/
-static void ffs_free_inst(struct usb_function_instance *f)
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst)
{
- struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
+
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (!strncasecmp(inst_status->inst_name,
+ inst_name, strlen(inst_name)))
+ return inst_status;
+ }
+
+ if (!create_inst)
+ return ERR_PTR(-ENODEV);
+
+ inst_status = kzalloc(sizeof(struct ffs_inst_status),
+ GFP_KERNEL);
+ if (!inst_status)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&inst_status->ffs_lock);
+	snprintf(inst_status->inst_name, INST_NAME_SIZE, "%s", inst_name);
+ list_add_tail(&inst_status->list, &inst_list);
+
+ return inst_status;
+}
+
+static int ffs_inst_exist_check(const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ mutex_unlock(&inst_status->ffs_lock);
+ pr_err_ratelimited(
+ "%s: f_fs instance (%s) has been freed already.\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_unlock(&inst_status->ffs_lock);
+
+ return 0;
+}
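
A minimal sketch, assuming the existing f_fs structures (ffs_epfile, ffs_data->dev_name), of how the guard above is meant to be used from an epfile entry point; the function name is hypothetical:

static long example_epfile_op(struct ffs_epfile *epfile)
{
	int ret;

	/* refuse to touch ffs_data if the instance has already been freed */
	ret = ffs_inst_exist_check(epfile->ffs->dev_name);
	if (ret < 0)
		return ret;

	/* safe to dereference epfile->ffs from here on */
	return 0;
}
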
+
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return;
+ }
+
+ inst_status->opts = NULL;
- opts = to_f_fs_opts(f);
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
+static void ffs_inst_clean_delay(const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find (%s) instance\n",
+ __func__, inst_name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ if (inst_status->opts) {
+ ffs_inst_clean(inst_status->opts, inst_name);
+			pr_err_ratelimited("%s: completed delayed free of instance memory\n",
+ __func__);
+ }
+ mutex_unlock(&inst_status->ffs_lock);
+ return;
+ }
+
+ mutex_unlock(&inst_status->ffs_lock);
+}
+
+static void ffs_free_inst(struct usb_function_instance *f)
+{
+ struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
+
+ opts = to_f_fs_opts(f);
+
+ inst_status = name_to_inst_status(opts->dev->name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find (%s) instance\n",
+ opts->dev->name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+ if (opts->dev->ffs_data
+ && atomic_read(&opts->dev->ffs_data->opened)) {
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+		ffs_log("dev (%s) is open, memory will be freed when it is closed\n",
+			opts->dev->name);
+ return;
+ }
+
+ ffs_inst_clean(opts, opts->dev->name);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+}
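
/*
 * Note (editorial): when the free is deferred above because the device is
 * still open, ffs_inst_clean_delay() is the counterpart that performs the
 * actual ffs_inst_clean() once the dev node is finally closed; its call
 * site is presumably in the f_fs release path earlier in this diff.
 */
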
+
#define MAX_INST_NAME_LEN 40
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- struct f_fs_opts *opts;
+ struct f_fs_opts *opts, *opts_prev;
+ struct ffs_data *ffs_data_tmp;
char *ptr;
const char *tmp;
int name_len, ret;
+ struct ffs_inst_status *inst_status;
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
@@ -3189,6 +3861,23 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
if (!ptr)
return -ENOMEM;
+ inst_status = name_to_inst_status(ptr, true);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to create status struct for (%s) instance\n",
+ ptr);
+ return -EINVAL;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+ opts_prev = inst_status->opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status->ffs_lock);
+		ffs_log("instance (%s): previous instance not freed yet\n",
+			inst_status->inst_name);
+ return -EBUSY;
+ }
+ mutex_unlock(&inst_status->ffs_lock);
+
opts = to_f_fs_opts(fi);
tmp = NULL;
@@ -3203,10 +3892,28 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
}
opts->dev->name_allocated = true;
+	/*
+	 * If this ffs instance was freed and is being created again, the
+	 * newly allocated opts->dev must have its ffs_data restored, and
+	 * ffs_data->private_data must be updated to point at the newly
+	 * allocated opts->dev.
+	 */
+ ffs_data_tmp = inst_status->ffs_data;
+ if (ffs_data_tmp)
+ opts->dev->ffs_data = ffs_data_tmp;
+
+ if (opts->dev->ffs_data)
+ opts->dev->ffs_data->private_data = opts->dev;
+
ffs_dev_unlock();
kfree(tmp);
+ mutex_lock(&inst_status->ffs_lock);
+ inst_status->inst_exist = true;
+ inst_status->opts = opts;
+ mutex_unlock(&inst_status->ffs_lock);
+
return 0;
}
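
/*
 * Usage note (editorial): with a configfs-built gadget, creating
 * functions/ffs.<name> under the gadget directory is what ends up calling
 * ffs_set_inst_name() with <name>; the inst_status bookkeeping above is
 * what allows the same <name> to be created again after a previous
 * instance was freed while its device node was still open.
 */
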
@@ -3253,6 +3960,10 @@ static void ffs_func_unbind(struct usb_configuration *c,
unsigned long flags;
ENTER();
+
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (ffs->func == func) {
ffs_func_eps_disable(func);
ffs->func = NULL;
@@ -3267,6 +3978,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
+ ep->ep = NULL;
++ep;
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
@@ -3282,6 +3994,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3344,12 +4059,16 @@ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
+ ffs_log("enter");
+
existing = _ffs_do_find_dev(name);
if (existing)
return -EBUSY;
dev->name = name;
+ ffs_log("exit");
+
return 0;
}
@@ -3360,10 +4079,14 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
int ret;
+ ffs_log("enter");
+
ffs_dev_lock();
ret = _ffs_name_dev(dev, name);
ffs_dev_unlock();
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
@@ -3372,6 +4095,8 @@ int ffs_single_dev(struct ffs_dev *dev)
{
int ret;
+ ffs_log("enter");
+
ret = 0;
ffs_dev_lock();
@@ -3381,6 +4106,9 @@ int ffs_single_dev(struct ffs_dev *dev)
dev->single = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3390,12 +4118,17 @@ EXPORT_SYMBOL_GPL(ffs_single_dev);
*/
static void _ffs_free_dev(struct ffs_dev *dev)
{
+
+ ffs_log("enter");
+
list_del(&dev->entry);
if (dev->name_allocated)
kfree(dev->name);
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
+
+ ffs_log("exit");
}
static void *ffs_acquire_dev(const char *dev_name)
@@ -3403,6 +4136,9 @@ static void *ffs_acquire_dev(const char *dev_name)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
@@ -3417,6 +4153,9 @@ static void *ffs_acquire_dev(const char *dev_name)
ffs_dev->mounted = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ffs_dev;
}
@@ -3425,6 +4164,9 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
@@ -3436,6 +4178,8 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
}
ffs_dev_unlock();
+
+ ffs_log("exit");
}
static int ffs_ready(struct ffs_data *ffs)
@@ -3444,6 +4188,9 @@ static int ffs_ready(struct ffs_data *ffs)
int ret = 0;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
@@ -3468,6 +4215,9 @@ static int ffs_ready(struct ffs_data *ffs)
set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
@@ -3478,11 +4228,16 @@ static void ffs_closed(struct ffs_data *ffs)
struct config_item *ci;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
- if (!ffs_obj)
+ if (!ffs_obj) {
+ ffs_dev_unlock();
goto done;
+ }
ffs_obj->desc_ready = false;
@@ -3490,23 +4245,30 @@ static void ffs_closed(struct ffs_data *ffs)
ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
- if (ffs_obj->opts)
+ if (ffs_obj->opts) {
opts = ffs_obj->opts;
- else
+ } else {
+ ffs_dev_unlock();
goto done;
+ }
+ smp_mb__before_atomic();
if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
- || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) {
+ ffs_dev_unlock();
goto done;
+ }
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
- if (test_bit(FFS_FL_BOUND, &ffs->flags))
+ if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
unregister_gadget_item(ci);
+ ffs_log("unreg gadget done");
+ }
return;
done:
- ffs_dev_unlock();
+ ffs_log("exit");
}
/* Misc helper functions ****************************************************/
@@ -3541,5 +4303,39 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
}
DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
+
+static int __init ffs_init(void)
+{
+ ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0);
+ if (IS_ERR_OR_NULL(ffs_ipc_log))
+ ffs_ipc_log = NULL;
+
+ return 0;
+}
+module_init(ffs_init);
+
+static void __exit ffs_exit(void)
+{
+	struct ffs_inst_status *inst_status, *inst_status_tmp;
+
+	list_for_each_entry_safe(inst_status, inst_status_tmp,
+				 &inst_list, list) {
+		list_del(&inst_status->list);
+		kfree(inst_status);
+	}
+
+ if (ffs_ipc_log) {
+ ipc_log_context_destroy(ffs_ipc_log);
+ ffs_ipc_log = NULL;
+ }
+}
+module_exit(ffs_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
new file mode 100644
index 000000000000..266d19049986
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -0,0 +1,3302 @@
+/* Copyright (c) 2015-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "f_gsi.h"
+#include "rndis.h"
+#include "debug.h"
+
+static unsigned int gsi_in_aggr_size;
+module_param(gsi_in_aggr_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gsi_in_aggr_size,
+ "Aggr size of bus transfer to host");
+
+static unsigned int gsi_out_aggr_size;
+module_param(gsi_out_aggr_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gsi_out_aggr_size,
+ "Aggr size of bus transfer to device");
+
+static unsigned int num_in_bufs = GSI_NUM_IN_BUFFERS;
+module_param(num_in_bufs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_in_bufs,
+ "Number of IN buffers");
+
+static unsigned int num_out_bufs = GSI_NUM_OUT_BUFFERS;
+module_param(num_out_bufs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_out_bufs,
+ "Number of OUT buffers");
+
+static bool qti_packet_debug;
+module_param(qti_packet_debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
+
+static struct workqueue_struct *ipa_usb_wq;
+
+static struct gsi_inst_status {
+ struct mutex gsi_lock;
+ bool inst_exist;
+ struct gsi_opts *opts;
+} inst_status[IPA_USB_MAX_TETH_PROT_SIZE];
+
+/* Deregister misc device and free instance structures */
+static void gsi_inst_clean(struct gsi_opts *opts);
+
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port);
+static void ipa_disconnect_handler(struct gsi_data_port *d_port);
+static int gsi_ctrl_send_notification(struct f_gsi *gsi);
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
+static void gsi_free_trb_buffer(struct f_gsi *gsi);
+static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned len, gfp_t flags);
+static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
+
+static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
+{
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ log_event_dbg("%s: remote_wakeup_allowed:%s", __func__,
+ remote_wakeup_allowed ? "true" : "false");
+ return remote_wakeup_allowed;
+}
+
+void post_event(struct gsi_data_port *port, u8 event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+
+ port->evt_q.tail++;
+ /* Check for wraparound and make room */
+ port->evt_q.tail = port->evt_q.tail % MAXQUEUELEN;
+
+	/* Check for overflow */
+	if (port->evt_q.tail == port->evt_q.head) {
+		log_event_err("%s: event queue overflow error", __func__);
+		/* undo the increment so already-queued events are not lost */
+		port->evt_q.tail = (port->evt_q.tail + MAXQUEUELEN - 1) %
+							MAXQUEUELEN;
+		spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+		return;
+	}
+ /* Add event to queue */
+ port->evt_q.event[port->evt_q.tail] = event;
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
+
+void post_event_to_evt_queue(struct gsi_data_port *port, u8 event)
+{
+ post_event(port, event);
+ queue_work(port->ipa_usb_wq, &port->usb_ipa_w);
+}
+
+u8 read_event(struct gsi_data_port *port)
+{
+ u8 event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ if (port->evt_q.head == port->evt_q.tail) {
+ log_event_dbg("%s: event queue empty", __func__);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+ return EVT_NONE;
+ }
+
+ port->evt_q.head++;
+ /* Check for wraparound and make room */
+ port->evt_q.head = port->evt_q.head % MAXQUEUELEN;
+
+ event = port->evt_q.event[port->evt_q.head];
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+ return event;
+}
+
+u8 peek_event(struct gsi_data_port *port)
+{
+ u8 event;
+ unsigned long flags;
+ u8 peek_index = 0;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ if (port->evt_q.head == port->evt_q.tail) {
+ log_event_dbg("%s: event queue empty", __func__);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+ return EVT_NONE;
+ }
+
+ peek_index = (port->evt_q.head + 1) % MAXQUEUELEN;
+ event = port->evt_q.event[peek_index];
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+ return event;
+}
+
+void reset_event_queue(struct gsi_data_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ port->evt_q.head = port->evt_q.tail = MAXQUEUELEN - 1;
+ memset(&port->evt_q.event[0], EVT_NONE, MAXQUEUELEN);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
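
A small editorial sketch of the occupancy implied by the ring above (empty is head == tail, so the usable capacity is MAXQUEUELEN - 1); the helper name is hypothetical and the evt_q field types are assumed from f_gsi.h:

/* caller is expected to hold evt_q.q_lock */
static inline unsigned int gsi_evt_q_count(struct gsi_data_port *port)
{
	return (port->evt_q.tail - port->evt_q.head + MAXQUEUELEN) %
							MAXQUEUELEN;
}
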
+
+int gsi_wakeup_host(struct f_gsi *gsi)
+{
+ int ret;
+ struct usb_gadget *gadget;
+ struct usb_function *func;
+
+ func = &gsi->function;
+ gadget = gsi->gadget;
+
+ log_event_dbg("Entering %s", __func__);
+
+ /*
+	 * In Super-Speed mode, remote wakeup is not allowed for suspended
+	 * functions which the host has not permitted to issue Function
+	 * Remote Wakeup.
+	 * Note - We deviate from the USB 3.0 spec here and let non-suspended
+	 * functions issue remote wakeup even if the host has not allowed it,
+	 * in order to support hosts that are not fully USB 3.0 compliant.
+ */
+ if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended)) {
+ log_event_dbg("%s: Calling usb_func_wakeup", __func__);
+ ret = usb_func_wakeup(func);
+ } else {
+ log_event_dbg("%s: Calling usb_gadget_wakeup", __func__);
+ ret = usb_gadget_wakeup(gadget);
+ }
+
+ if ((ret == -EBUSY) || (ret == -EAGAIN))
+ log_event_dbg("RW delayed due to LPM exit.");
+ else if (ret)
+ log_event_err("wakeup failed. ret=%d.", ret);
+
+ return ret;
+}
+
+/*
+ * Callback invoked when the network interface is up and userspace is
+ * ready to answer DHCP requests, or on remote wakeup
+ */
+int ipa_usb_notify_cb(enum ipa_usb_notify_event event,
+ void *driver_data)
+{
+ struct f_gsi *gsi = driver_data;
+ unsigned long flags;
+ struct gsi_ctrl_pkt *cpkt_notify_connect, *cpkt_notify_speed;
+
+ if (!gsi) {
+ log_event_err("%s: invalid driver data", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&gsi->d_port.lock, flags);
+
+ switch (event) {
+ case IPA_USB_DEVICE_READY:
+
+ if (gsi->d_port.net_ready_trigger) {
+ spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+ log_event_dbg("%s: Already triggered", __func__);
+ return 1;
+ }
+
+ log_event_err("%s: Set net_ready_trigger", __func__);
+ gsi->d_port.net_ready_trigger = true;
+
+ if (gsi->prot_id == IPA_USB_ECM) {
+ cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt_notify_connect)) {
+ spin_unlock_irqrestore(&gsi->d_port.lock,
+ flags);
+ log_event_dbg("%s: err cpkt_notify_connect\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cpkt_notify_connect->type = GSI_CTRL_NOTIFY_CONNECT;
+
+ cpkt_notify_speed = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt_notify_speed)) {
+ spin_unlock_irqrestore(&gsi->d_port.lock,
+ flags);
+ gsi_ctrl_pkt_free(cpkt_notify_connect);
+ log_event_dbg("%s: err cpkt_notify_speed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cpkt_notify_speed->type = GSI_CTRL_NOTIFY_SPEED;
+			/* d_port.lock already disabled IRQs; do not reuse flags */
+			spin_lock(&gsi->c_port.lock);
+ list_add_tail(&cpkt_notify_connect->list,
+ &gsi->c_port.cpkt_resp_q);
+ list_add_tail(&cpkt_notify_speed->list,
+ &gsi->c_port.cpkt_resp_q);
+			spin_unlock(&gsi->c_port.lock);
+ gsi_ctrl_send_notification(gsi);
+ }
+
+		/*
+		 * Do not post EVT_CONNECTED for RNDIS.
+		 * Data path for RNDIS is enabled on EVT_HOST_READY.
+		 */
+ if (gsi->prot_id != IPA_USB_RNDIS) {
+ post_event(&gsi->d_port, EVT_CONNECTED);
+ queue_work(gsi->d_port.ipa_usb_wq,
+ &gsi->d_port.usb_ipa_w);
+ }
+ break;
+
+ case IPA_USB_REMOTE_WAKEUP:
+ gsi_wakeup_host(gsi);
+ break;
+
+ case IPA_USB_SUSPEND_COMPLETED:
+ post_event(&gsi->d_port, EVT_IPA_SUSPEND);
+ queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+ break;
+ }
+
+ spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+ return 1;
+}
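
/*
 * Summary (editorial) of the callback above:
 *  - IPA_USB_DEVICE_READY: latch net_ready_trigger; for ECM also queue
 *    NETWORK_CONNECTION and SPEED_CHANGE notifications; for every
 *    protocol except RNDIS post EVT_CONNECTED (RNDIS waits for
 *    EVT_HOST_READY instead).
 *  - IPA_USB_REMOTE_WAKEUP: wake the host via gsi_wakeup_host().
 *  - IPA_USB_SUSPEND_COMPLETED: post EVT_IPA_SUSPEND to finish a suspend
 *    that completed asynchronously in the IPA driver.
 */
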
+
+static int ipa_connect_channels(struct gsi_data_port *d_port)
+{
+ int ret;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ struct ipa_usb_xdci_chan_params *in_params =
+ &d_port->ipa_in_channel_params;
+ struct ipa_usb_xdci_chan_params *out_params =
+ &d_port->ipa_out_channel_params;
+ struct ipa_usb_xdci_connect_params *conn_params =
+ &d_port->ipa_conn_pms;
+ struct usb_gadget *gadget = gsi->gadget;
+ struct gsi_channel_info gsi_channel_info;
+ struct ipa_req_chan_out_params ipa_in_channel_out_params;
+ struct ipa_req_chan_out_params ipa_out_channel_out_params;
+
+ log_event_dbg("%s: USB GSI IN OPS", __func__);
+ usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+ GSI_EP_OP_PREPARE_TRBS);
+ usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+ GSI_EP_OP_STARTXFER);
+ d_port->in_xfer_rsc_index = usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_GET_XFER_IDX);
+
+ memset(in_params, 0x0, sizeof(*in_params));
+ gsi_channel_info.ch_req = &d_port->in_request;
+ usb_gsi_ep_op(d_port->in_ep, (void *)&gsi_channel_info,
+ GSI_EP_OP_GET_CH_INFO);
+
+ log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
+ in_params->client =
+ (gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+ IPA_CLIENT_USB_DPL_CONS;
+ in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+ in_params->teth_prot = gsi->prot_id;
+ in_params->gevntcount_low_addr =
+ gsi_channel_info.gevntcount_low_addr;
+ in_params->gevntcount_hi_addr =
+ gsi_channel_info.gevntcount_hi_addr;
+ in_params->dir = GSI_CHAN_DIR_FROM_GSI;
+ in_params->xfer_ring_len = gsi_channel_info.xfer_ring_len;
+ in_params->xfer_ring_base_addr = gsi_channel_info.xfer_ring_base_addr;
+ in_params->xfer_scratch.last_trb_addr_iova =
+ gsi_channel_info.last_trb_addr;
+ in_params->xfer_ring_base_addr = in_params->xfer_ring_base_addr_iova =
+ gsi_channel_info.xfer_ring_base_addr;
+ in_params->data_buff_base_len = d_port->in_request.buf_len *
+ d_port->in_request.num_bufs;
+ in_params->data_buff_base_addr = in_params->data_buff_base_addr_iova =
+ d_port->in_request.dma;
+ in_params->xfer_scratch.const_buffer_size =
+ gsi_channel_info.const_buffer_size;
+ in_params->xfer_scratch.depcmd_low_addr =
+ gsi_channel_info.depcmd_low_addr;
+ in_params->xfer_scratch.depcmd_hi_addr =
+ gsi_channel_info.depcmd_hi_addr;
+
+ if (d_port->out_ep) {
+ log_event_dbg("%s: USB GSI OUT OPS", __func__);
+ usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+ GSI_EP_OP_PREPARE_TRBS);
+ usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+ GSI_EP_OP_STARTXFER);
+ d_port->out_xfer_rsc_index =
+ usb_gsi_ep_op(d_port->out_ep,
+ NULL, GSI_EP_OP_GET_XFER_IDX);
+ memset(out_params, 0x0, sizeof(*out_params));
+ gsi_channel_info.ch_req = &d_port->out_request;
+ usb_gsi_ep_op(d_port->out_ep, (void *)&gsi_channel_info,
+ GSI_EP_OP_GET_CH_INFO);
+ log_event_dbg("%s: USB GSI OUT OPS Completed", __func__);
+ out_params->client = IPA_CLIENT_USB_PROD;
+ out_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+ out_params->teth_prot = gsi->prot_id;
+ out_params->gevntcount_low_addr =
+ gsi_channel_info.gevntcount_low_addr;
+ out_params->gevntcount_hi_addr =
+ gsi_channel_info.gevntcount_hi_addr;
+ out_params->dir = GSI_CHAN_DIR_TO_GSI;
+ out_params->xfer_ring_len =
+ gsi_channel_info.xfer_ring_len;
+ out_params->xfer_ring_base_addr =
+ out_params->xfer_ring_base_addr_iova =
+ gsi_channel_info.xfer_ring_base_addr;
+ out_params->data_buff_base_len = d_port->out_request.buf_len *
+ d_port->out_request.num_bufs;
+ out_params->data_buff_base_addr =
+ out_params->data_buff_base_addr_iova =
+ d_port->out_request.dma;
+ out_params->xfer_scratch.last_trb_addr_iova =
+ gsi_channel_info.last_trb_addr;
+ out_params->xfer_scratch.const_buffer_size =
+ gsi_channel_info.const_buffer_size;
+ out_params->xfer_scratch.depcmd_low_addr =
+ gsi_channel_info.depcmd_low_addr;
+ out_params->xfer_scratch.depcmd_hi_addr =
+ gsi_channel_info.depcmd_hi_addr;
+ }
+
+ /* Populate connection params */
+ conn_params->max_pkt_size =
+ (gadget->speed == USB_SPEED_SUPER) ?
+ IPA_USB_SUPER_SPEED_1024B : IPA_USB_HIGH_SPEED_512B;
+ conn_params->ipa_to_usb_xferrscidx =
+ d_port->in_xfer_rsc_index;
+ conn_params->usb_to_ipa_xferrscidx =
+ d_port->out_xfer_rsc_index;
+ conn_params->usb_to_ipa_xferrscidx_valid =
+ (gsi->prot_id != IPA_USB_DIAG) ? true : false;
+ conn_params->ipa_to_usb_xferrscidx_valid = true;
+ conn_params->teth_prot = gsi->prot_id;
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
+ if (gsi_out_aggr_size)
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+ = gsi_out_aggr_size;
+ else
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+ = d_port->out_aggr_size;
+ if (gsi_in_aggr_size)
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+ = gsi_in_aggr_size;
+ else
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+ = d_port->in_aggr_size;
+ conn_params->teth_prot_params.max_packet_number_to_dev =
+ DEFAULT_MAX_PKT_PER_XFER;
+ conn_params->max_supported_bandwidth_mbps =
+ (gadget->speed == USB_SPEED_SUPER) ? 3600 : 400;
+
+ memset(&ipa_in_channel_out_params, 0x0,
+ sizeof(ipa_in_channel_out_params));
+ memset(&ipa_out_channel_out_params, 0x0,
+ sizeof(ipa_out_channel_out_params));
+
+ log_event_dbg("%s: Calling xdci_connect", __func__);
+ ret = ipa_usb_xdci_connect(out_params, in_params,
+ &ipa_out_channel_out_params,
+ &ipa_in_channel_out_params,
+ conn_params);
+ if (ret) {
+ log_event_err("%s: IPA connect failed %d", __func__, ret);
+ return ret;
+ }
+ log_event_dbg("%s: xdci_connect done", __func__);
+
+ log_event_dbg("%s: IN CH HDL %x", __func__,
+ ipa_in_channel_out_params.clnt_hdl);
+ log_event_dbg("%s: IN CH DBL addr %x", __func__,
+ ipa_in_channel_out_params.db_reg_phs_addr_lsb);
+
+ log_event_dbg("%s: OUT CH HDL %x", __func__,
+ ipa_out_channel_out_params.clnt_hdl);
+ log_event_dbg("%s: OUT CH DBL addr %x", __func__,
+ ipa_out_channel_out_params.db_reg_phs_addr_lsb);
+
+ d_port->in_channel_handle = ipa_in_channel_out_params.clnt_hdl;
+ d_port->in_db_reg_phs_addr_lsb =
+ ipa_in_channel_out_params.db_reg_phs_addr_lsb;
+ d_port->in_db_reg_phs_addr_msb =
+ ipa_in_channel_out_params.db_reg_phs_addr_msb;
+
+ if (gsi->prot_id != IPA_USB_DIAG) {
+ d_port->out_channel_handle =
+ ipa_out_channel_out_params.clnt_hdl;
+ d_port->out_db_reg_phs_addr_lsb =
+ ipa_out_channel_out_params.db_reg_phs_addr_lsb;
+ d_port->out_db_reg_phs_addr_msb =
+ ipa_out_channel_out_params.db_reg_phs_addr_msb;
+ }
+ return ret;
+}
+
+static void ipa_data_path_enable(struct gsi_data_port *d_port)
+{
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ struct usb_gsi_request req;
+ u64 dbl_register_addr;
+ bool block_db = false;
+
+ log_event_dbg("in_db_reg_phs_addr_lsb = %x",
+ gsi->d_port.in_db_reg_phs_addr_lsb);
+ usb_gsi_ep_op(gsi->d_port.in_ep,
+ (void *)&gsi->d_port.in_db_reg_phs_addr_lsb,
+ GSI_EP_OP_STORE_DBL_INFO);
+
+ if (gsi->d_port.out_ep) {
+ log_event_dbg("out_db_reg_phs_addr_lsb = %x",
+ gsi->d_port.out_db_reg_phs_addr_lsb);
+ usb_gsi_ep_op(gsi->d_port.out_ep,
+ (void *)&gsi->d_port.out_db_reg_phs_addr_lsb,
+ GSI_EP_OP_STORE_DBL_INFO);
+
+ usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+ GSI_EP_OP_ENABLE_GSI);
+ }
+
+ /* Unblock doorbell to GSI */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+
+ dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
+ dbl_register_addr = dbl_register_addr << 32;
+ dbl_register_addr =
+ dbl_register_addr | gsi->d_port.in_db_reg_phs_addr_lsb;
+
+ /* use temp gsi request to pass 64 bit dbl reg addr and num_bufs */
+ req.buf_base_addr = &dbl_register_addr;
+
+ req.num_bufs = gsi->d_port.in_request.num_bufs;
+ usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_IN_DB);
+
+ if (gsi->d_port.out_ep) {
+ usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+ GSI_EP_OP_UPDATEXFER);
+ }
+}
+
+static void ipa_disconnect_handler(struct gsi_data_port *d_port)
+{
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ bool block_db = true;
+
+ log_event_dbg("%s: EP Disable for data", __func__);
+
+ if (gsi->d_port.in_ep) {
+ /*
+ * Block doorbell to GSI to avoid USB wrapper from
+ * ringing doorbell in case IPA clocks are OFF.
+ */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
+ }
+
+ if (gsi->d_port.out_ep)
+ usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
+
+ gsi->d_port.net_ready_trigger = false;
+}
+
+static void ipa_disconnect_work_handler(struct gsi_data_port *d_port)
+{
+ int ret;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+
+ log_event_dbg("%s: Calling xdci_disconnect", __func__);
+
+ ret = ipa_usb_xdci_disconnect(gsi->d_port.out_channel_handle,
+ gsi->d_port.in_channel_handle, gsi->prot_id);
+ if (ret)
+ log_event_err("%s: IPA disconnect failed %d",
+ __func__, ret);
+
+ log_event_dbg("%s: xdci_disconnect done", __func__);
+
+	/* invalidate channel handles */
+ gsi->d_port.in_channel_handle = -EINVAL;
+ gsi->d_port.out_channel_handle = -EINVAL;
+
+ usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+ if (gsi->d_port.out_ep)
+ usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+ /* free buffers allocated with each TRB */
+ gsi_free_trb_buffer(gsi);
+}
+
+static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
+{
+ int ret = 0;
+ bool block_db, f_suspend;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ struct usb_function *f = &gsi->function;
+
+ f_suspend = f->func_wakeup_allowed;
+ log_event_dbg("%s: f_suspend:%d", __func__, f_suspend);
+
+ if (!usb_gsi_ep_op(gsi->d_port.in_ep, (void *) &f_suspend,
+ GSI_EP_OP_CHECK_FOR_SUSPEND)) {
+ ret = -EFAULT;
+ block_db = false;
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ goto done;
+ }
+
+ log_event_dbg("%s: Calling xdci_suspend", __func__);
+ ret = ipa_usb_xdci_suspend(gsi->d_port.out_channel_handle,
+ gsi->d_port.in_channel_handle, gsi->prot_id,
+ usb_gsi_remote_wakeup_allowed(f));
+ if (!ret) {
+ d_port->sm_state = STATE_SUSPENDED;
+ log_event_dbg("%s: STATE SUSPENDED", __func__);
+ goto done;
+ }
+
+ if (ret == -EFAULT) {
+ block_db = false;
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ gsi_wakeup_host(gsi);
+ } else if (ret == -EINPROGRESS) {
+ d_port->sm_state = STATE_SUSPEND_IN_PROGRESS;
+ } else {
+ log_event_err("%s: Error %d for %d", __func__, ret,
+ gsi->prot_id);
+ }
+done:
+ log_event_dbg("%s: xdci_suspend ret %d", __func__, ret);
+ return ret;
+}
+
+static void ipa_resume_work_handler(struct gsi_data_port *d_port)
+{
+ bool block_db;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ int ret;
+
+ log_event_dbg("%s: Calling xdci_resume", __func__);
+
+ ret = ipa_usb_xdci_resume(gsi->d_port.out_channel_handle,
+ gsi->d_port.in_channel_handle,
+ gsi->prot_id);
+ if (ret)
+ log_event_dbg("%s: xdci_resume ret %d", __func__, ret);
+
+ log_event_dbg("%s: xdci_resume done", __func__);
+
+ block_db = false;
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+}
+
+static void ipa_work_handler(struct work_struct *w)
+{
+ struct gsi_data_port *d_port = container_of(w, struct gsi_data_port,
+ usb_ipa_w);
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ u8 event;
+ int ret = 0;
+ struct usb_gadget *gadget = gsi->gadget;
+ struct device *dev;
+ struct device *gad_dev;
+ bool block_db;
+
+ event = read_event(d_port);
+
+ log_event_dbg("%s: event = %x sm_state %x", __func__,
+ event, d_port->sm_state);
+
+ if (gadget) {
+ dev = &gadget->dev;
+ if (!dev || !dev->parent) {
+ log_event_err("%s(): dev or dev->parent is NULL.\n",
+ __func__);
+ return;
+ }
+ gad_dev = dev->parent;
+ } else {
+ log_event_err("%s(): gadget is NULL.\n", __func__);
+ return;
+ }
+
+ gsi = d_port_to_gsi(d_port);
+
+ switch (d_port->sm_state) {
+ case STATE_UNINITIALIZED:
+ break;
+ case STATE_INITIALIZED:
+ if (event == EVT_CONNECT_IN_PROGRESS) {
+ usb_gadget_autopm_get(gadget);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
+ /* allocate buffers used with each TRB */
+ ret = gsi_alloc_trb_buffer(gsi);
+ if (ret) {
+ log_event_err("%s: gsi_alloc_trb_failed\n",
+ __func__);
+ break;
+ }
+ ipa_connect_channels(d_port);
+ d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+ log_event_dbg("%s: ST_INIT_EVT_CONN_IN_PROG",
+ __func__);
+ } else if (event == EVT_HOST_READY) {
+ /*
+ * When in a composition such as RNDIS + ADB,
+ * RNDIS host sends a GEN_CURRENT_PACKET_FILTER msg
+ * to enable/disable flow control eg. during RNDIS
+ * adaptor disable/enable from device manager.
+ * In the case of the msg to disable flow control,
+ * connect IPA channels and enable data path.
+ * EVT_HOST_READY is posted to the state machine
+ * in the handler for this msg.
+ */
+ usb_gadget_autopm_get(gadget);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
+ /* allocate buffers used with each TRB */
+ ret = gsi_alloc_trb_buffer(gsi);
+ if (ret) {
+ log_event_err("%s: gsi_alloc_trb_failed\n",
+ __func__);
+ break;
+ }
+
+ ipa_connect_channels(d_port);
+ ipa_data_path_enable(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ log_event_dbg("%s: ST_INIT_EVT_HOST_READY", __func__);
+ }
+ break;
+ case STATE_CONNECT_IN_PROGRESS:
+ if (event == EVT_HOST_READY) {
+ ipa_data_path_enable(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY",
+ __func__);
+ } else if (event == EVT_CONNECTED) {
+ ipa_data_path_enable(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d",
+ __func__, __LINE__);
+ } else if (event == EVT_SUSPEND) {
+ if (peek_event(d_port) == EVT_DISCONNECTED) {
+ read_event(d_port);
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS_DIS",
+ __func__);
+ log_event_dbg("%s: put_async1 = %d", __func__,
+ atomic_read(
+ &gad_dev->power.usage_count));
+ break;
+ }
+ ret = ipa_suspend_work_handler(d_port);
+ if (!ret) {
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS",
+ __func__);
+ log_event_dbg("%s: put_async2 = %d", __func__,
+ atomic_read(
+ &gad_dev->power.usage_count));
+ }
+ } else if (event == EVT_DISCONNECTED) {
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_IN_PROG_EVT_DIS",
+ __func__);
+ log_event_dbg("%s: put_async3 = %d",
+ __func__, atomic_read(
+ &gad_dev->power.usage_count));
+ }
+ break;
+ case STATE_CONNECTED:
+ if (event == EVT_DISCONNECTED || event == EVT_HOST_NRDY) {
+ if (peek_event(d_port) == EVT_HOST_READY) {
+ read_event(d_port);
+ log_event_dbg("%s: NO_OP NRDY_RDY", __func__);
+ break;
+ }
+
+ if (event == EVT_HOST_NRDY) {
+ log_event_dbg("%s: ST_CON_HOST_NRDY\n",
+ __func__);
+ block_db = true;
+ /* stop USB ringing doorbell to GSI(OUT_EP) */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ gsi_rndis_ipa_reset_trigger(d_port);
+ usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ usb_gsi_ep_op(d_port->out_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ }
+
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_EVT_DIS", __func__);
+ log_event_dbg("%s: put_async4 = %d",
+ __func__, atomic_read(
+ &gad_dev->power.usage_count));
+ } else if (event == EVT_SUSPEND) {
+ if (peek_event(d_port) == EVT_DISCONNECTED) {
+ read_event(d_port);
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_EVT_SUS_DIS",
+ __func__);
+ log_event_dbg("%s: put_async5 = %d",
+ __func__, atomic_read(
+ &gad_dev->power.usage_count));
+ break;
+ }
+ ret = ipa_suspend_work_handler(d_port);
+ if (!ret) {
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_CON_EVT_SUS",
+ __func__);
+ log_event_dbg("%s: put_async6 = %d",
+ __func__, atomic_read(
+ &gad_dev->power.usage_count));
+ }
+ } else if (event == EVT_CONNECTED) {
+ d_port->sm_state = STATE_CONNECTED;
+ log_event_dbg("%s: ST_CON_EVT_CON", __func__);
+ }
+ break;
+ case STATE_DISCONNECTED:
+ if (event == EVT_CONNECT_IN_PROGRESS) {
+ ipa_connect_channels(d_port);
+ d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+ log_event_dbg("%s: ST_DIS_EVT_CON_IN_PROG", __func__);
+ } else if (event == EVT_UNINITIALIZED) {
+ d_port->sm_state = STATE_UNINITIALIZED;
+ log_event_dbg("%s: ST_DIS_EVT_UNINIT", __func__);
+ }
+ break;
+ case STATE_SUSPEND_IN_PROGRESS:
+ if (event == EVT_IPA_SUSPEND) {
+ d_port->sm_state = STATE_SUSPENDED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_SUS_IN_PROG_EVT_IPA_SUS",
+ __func__);
+ log_event_dbg("%s: put_async6 = %d",
+ __func__, atomic_read(
+ &gad_dev->power.usage_count));
+ } else if (event == EVT_RESUMED) {
+ ipa_resume_work_handler(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ /*
+ * Increment usage count here to disallow gadget
+ * parent suspend. This counter will decrement
+ * after IPA disconnect is done in disconnect work
+ * (due to cable disconnect) or in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(gadget);
+ log_event_dbg("%s: ST_SUS_IN_PROG_EVT_RES", __func__);
+ log_event_dbg("%s: get_nores1 = %d", __func__,
+ atomic_read(
+ &gad_dev->power.usage_count));
+ } else if (event == EVT_DISCONNECTED) {
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ usb_gadget_autopm_put_async(gadget);
+ log_event_dbg("%s: ST_SUS_IN_PROG_EVT_DIS", __func__);
+ log_event_dbg("%s: put_async7 = %d", __func__,
+ atomic_read(
+ &gad_dev->power.usage_count));
+ }
+ break;
+
+ case STATE_SUSPENDED:
+ if (event == EVT_RESUMED) {
+ usb_gadget_autopm_get(gadget);
+ log_event_dbg("%s: ST_SUS_EVT_RES", __func__);
+ log_event_dbg("%s: get = %d", __func__,
+ atomic_read(&gad_dev->power.usage_count));
+ ipa_resume_work_handler(d_port);
+ d_port->sm_state = STATE_CONNECTED;
+ } else if (event == EVT_DISCONNECTED) {
+ ipa_disconnect_work_handler(d_port);
+ d_port->sm_state = STATE_INITIALIZED;
+ log_event_dbg("%s: ST_SUS_EVT_DIS", __func__);
+ }
+ break;
+ default:
+ log_event_dbg("%s: Invalid state to SM", __func__);
+ }
+
+ if (peek_event(d_port) != EVT_NONE) {
+ log_event_dbg("%s: New events to process", __func__);
+ queue_work(d_port->ipa_usb_wq, &d_port->usb_ipa_w);
+ }
+}
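
/*
 * State machine summary (editorial), as implemented above:
 *
 *  INITIALIZED         --CONNECT_IN_PROGRESS---> CONNECT_IN_PROGRESS
 *  INITIALIZED         --HOST_READY------------> CONNECTED
 *  CONNECT_IN_PROGRESS --HOST_READY/CONNECTED--> CONNECTED
 *  CONNECT_IN_PROGRESS --SUSPEND---------------> SUSPENDED or
 *                                                SUSPEND_IN_PROGRESS
 *  CONNECT_IN_PROGRESS --DISCONNECTED----------> INITIALIZED
 *  CONNECTED           --DISCONNECTED/HOST_NRDY-> INITIALIZED
 *  CONNECTED           --SUSPEND---------------> SUSPENDED or
 *                                                SUSPEND_IN_PROGRESS
 *  DISCONNECTED        --CONNECT_IN_PROGRESS---> CONNECT_IN_PROGRESS
 *  DISCONNECTED        --UNINITIALIZED---------> UNINITIALIZED
 *  SUSPEND_IN_PROGRESS --IPA_SUSPEND-----------> SUSPENDED
 *  SUSPEND_IN_PROGRESS --RESUMED---------------> CONNECTED
 *  SUSPEND_IN_PROGRESS --DISCONNECTED----------> INITIALIZED
 *  SUSPENDED           --RESUMED---------------> CONNECTED
 *  SUSPENDED           --DISCONNECTED----------> INITIALIZED
 *
 * The gadget autopm usage count is held while connected and dropped on
 * suspend or disconnect, so the parent controller may runtime-suspend.
 */
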
+
+static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned len, gfp_t flags)
+{
+ struct gsi_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct gsi_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt)
+{
+ if (pkt) {
+ kfree(pkt->buf);
+ kfree(pkt);
+ }
+}
+
+static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q)
+{
+ struct gsi_ctrl_pkt *cpkt = NULL;
+ struct list_head *act, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
+ if (skip_req_q)
+ goto clean_resp_q;
+
+ list_for_each_safe(act, tmp, &gsi->c_port.cpkt_req_q) {
+ cpkt = list_entry(act, struct gsi_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gsi_ctrl_pkt_free(cpkt);
+ }
+clean_resp_q:
+ list_for_each_safe(act, tmp, &gsi->c_port.cpkt_resp_q) {
+ cpkt = list_entry(act, struct gsi_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gsi_ctrl_pkt_free(cpkt);
+ }
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+}
+
+static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len)
+{
+ unsigned long flags;
+ struct gsi_ctrl_port *c_port = &gsi->c_port;
+ struct gsi_ctrl_pkt *cpkt;
+
+ spin_lock_irqsave(&c_port->lock, flags);
+ /* drop cpkt if port is not open */
+ if (!gsi->c_port.is_open) {
+ log_event_dbg("%s: ctrl device %s is not open",
+ __func__, gsi->c_port.name);
+ c_port->cpkt_drop_cnt++;
+ spin_unlock_irqrestore(&c_port->lock, flags);
+ return -ENODEV;
+ }
+
+ cpkt = gsi_ctrl_pkt_alloc(len, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+		log_event_err("%s: ctrl pkt allocation failed", __func__);
+ spin_unlock_irqrestore(&c_port->lock, flags);
+ return -ENOMEM;
+ }
+
+ memcpy(cpkt->buf, buf, len);
+ cpkt->len = len;
+
+ list_add_tail(&cpkt->list, &c_port->cpkt_req_q);
+ c_port->host_to_modem++;
+ spin_unlock_irqrestore(&c_port->lock, flags);
+
+ log_event_dbg("%s: Wake up read queue", __func__);
+ wake_up(&c_port->read_wq);
+
+ return 0;
+}
+
+static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp)
+{
+ struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+ struct gsi_ctrl_port,
+ ctrl_device);
+ struct f_gsi *gsi;
+ struct gsi_inst_status *inst_cur;
+
+ if (!c_port) {
+ pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port);
+ return -ENODEV;
+ }
+
+ pr_devel_ratelimited("%s: open ctrl dev %s", __func__, c_port->name);
+
+ gsi = container_of(c_port, struct f_gsi, c_port);
+ inst_cur = &inst_status[gsi->prot_id];
+
+ mutex_lock(&inst_cur->gsi_lock);
+
+ fp->private_data = &gsi->prot_id;
+
+ if (!inst_cur->inst_exist) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: [prot_id = %d], GSI instance freed already\n",
+ __func__, gsi->prot_id);
+ return -ENODEV;
+ }
+
+ if (c_port->is_open) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ log_event_err("%s: Already opened\n", __func__);
+ return -EBUSY;
+ }
+
+ c_port->is_open = true;
+
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ return 0;
+}
+
+static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
+{
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+
+ mutex_lock(&inst_cur->gsi_lock);
+
+ if (unlikely(inst_cur->inst_exist == false)) {
+ if (inst_cur->opts) {
+ /* GSI instance clean up */
+ gsi_inst_clean(inst_cur->opts);
+ inst_cur->opts = NULL;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+			"%s: [prot_id = %d], completed delayed free of instance memory\n",
+ __func__, prot_id);
+ return -ENODEV;
+ }
+
+ inst_cur->opts->gsi->c_port.is_open = false;
+
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ log_event_dbg("close ctrl dev %s\n",
+ inst_cur->opts->gsi->c_port.name);
+
+ return 0;
+}
+
+static ssize_t
+gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+ struct gsi_ctrl_port *c_port;
+ struct gsi_ctrl_pkt *cpkt = NULL;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ unsigned long flags;
+ int ret = 0;
+
+ log_event_dbg("%s: Enter %zu", __func__, count);
+
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+			"%s: free_inst already called; memory will be freed when the dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
+ if (!c_port) {
+ log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ return -ENODEV;
+ }
+
+ if (count > GSI_MAX_CTRL_PKT_SIZE) {
+ log_event_err("Large buff size %zu, should be %d",
+ count, GSI_MAX_CTRL_PKT_SIZE);
+ return -EINVAL;
+ }
+
+ /* block until a new packet is available */
+ spin_lock_irqsave(&c_port->lock, flags);
+ while (list_empty(&c_port->cpkt_req_q)) {
+ log_event_dbg("Requests list is empty. Wait.");
+ spin_unlock_irqrestore(&c_port->lock, flags);
+ ret = wait_event_interruptible(c_port->read_wq,
+ !list_empty(&c_port->cpkt_req_q));
+ if (ret < 0) {
+ log_event_err("Waiting failed");
+ return -ERESTARTSYS;
+ }
+ log_event_dbg("Received request packet");
+ spin_lock_irqsave(&c_port->lock, flags);
+ }
+
+ cpkt = list_first_entry(&c_port->cpkt_req_q, struct gsi_ctrl_pkt,
+ list);
+ list_del(&cpkt->list);
+ spin_unlock_irqrestore(&c_port->lock, flags);
+
+ if (cpkt->len > count) {
+ log_event_err("cpkt size large:%d > buf size:%zu",
+ cpkt->len, count);
+ gsi_ctrl_pkt_free(cpkt);
+ return -ENOMEM;
+ }
+
+ log_event_dbg("%s: cpkt size:%d", __func__, cpkt->len);
+ if (qti_packet_debug)
+ print_hex_dump(KERN_DEBUG, "READ:", DUMP_PREFIX_OFFSET, 16, 1,
+ cpkt->buf, min_t(int, 30, cpkt->len), false);
+
+ ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+ if (ret) {
+ log_event_err("copy_to_user failed: err %d", ret);
+ ret = -EFAULT;
+ } else {
+ log_event_dbg("%s: copied %d bytes to user", __func__,
+ cpkt->len);
+ ret = cpkt->len;
+ c_port->copied_to_modem++;
+ }
+
+ gsi_ctrl_pkt_free(cpkt);
+
+ log_event_dbg("%s: Exit %zu", __func__, count);
+
+ return ret;
+}
+
+static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct gsi_ctrl_pkt *cpkt;
+ struct gsi_ctrl_port *c_port;
+ struct usb_request *req;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ struct f_gsi *gsi;
+
+ log_event_dbg("Enter %zu", count);
+
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+			"%s: free_inst already called; memory will be freed when the dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+ req = c_port->notify_req;
+
+ if (!c_port || !req || !req->buf) {
+ log_event_err("%s: c_port %pK req %pK req->buf %pK",
+ __func__, c_port, req, req ? req->buf : req);
+ return -ENODEV;
+ }
+
+ if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
+ log_event_err("error: ctrl pkt length %zu", count);
+ return -EINVAL;
+ }
+
+ if (!atomic_read(&gsi->connected)) {
+ log_event_err("USB cable not connected\n");
+ return -ECONNRESET;
+ }
+
+ if (gsi->function.func_is_suspended &&
+ !gsi->function.func_wakeup_allowed) {
+ c_port->cpkt_drop_cnt++;
+ log_event_err("drop ctrl pkt of len %zu", count);
+ return -ENOTSUPP;
+ }
+
+ cpkt = gsi_ctrl_pkt_alloc(count, GFP_KERNEL);
+ if (IS_ERR(cpkt)) {
+ log_event_err("failed to allocate ctrl pkt");
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(cpkt->buf, buf, count);
+ if (ret) {
+ log_event_err("copy_from_user failed err:%d", ret);
+ gsi_ctrl_pkt_free(cpkt);
+		return -EFAULT;
+ }
+ cpkt->type = GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE;
+ c_port->copied_from_modem++;
+ if (qti_packet_debug)
+ print_hex_dump(KERN_DEBUG, "WRITE:", DUMP_PREFIX_OFFSET, 16, 1,
+ cpkt->buf, min_t(int, 30, count), false);
+
+ spin_lock_irqsave(&c_port->lock, flags);
+ list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
+ spin_unlock_irqrestore(&c_port->lock, flags);
+
+ if (!gsi_ctrl_send_notification(gsi))
+ c_port->modem_to_host++;
+
+ log_event_dbg("Exit %zu", count);
+
+ return ret ? ret : count;
+}
+
+static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned cmd,
+ unsigned long arg)
+{
+ struct gsi_ctrl_port *c_port;
+ struct f_gsi *gsi;
+ struct gsi_ctrl_pkt *cpkt;
+ struct ep_info info;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ int val, ret = 0;
+ unsigned long flags;
+
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+			"%s: free_inst already called; memory will be freed when the dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+
+ if (!c_port) {
+ log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ return -ENODEV;
+ }
+
+ switch (cmd) {
+ case QTI_CTRL_MODEM_OFFLINE:
+ if (gsi->prot_id == IPA_USB_DIAG) {
+ log_event_dbg("%s:Modem Offline not handled", __func__);
+ goto exit_ioctl;
+ }
+ atomic_set(&c_port->ctrl_online, 0);
+ gsi_ctrl_clear_cpkt_queues(gsi, true);
+ cpkt = gsi_ctrl_pkt_alloc(0, GFP_KERNEL);
+ if (IS_ERR(cpkt)) {
+ log_event_err("%s: err allocating cpkt\n", __func__);
+ return -ENOMEM;
+ }
+ cpkt->type = GSI_CTRL_NOTIFY_OFFLINE;
+ spin_lock_irqsave(&c_port->lock, flags);
+ list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
+ spin_unlock_irqrestore(&c_port->lock, flags);
+ gsi_ctrl_send_notification(gsi);
+ break;
+ case QTI_CTRL_MODEM_ONLINE:
+ if (gsi->prot_id == IPA_USB_DIAG) {
+ log_event_dbg("%s:Modem Online not handled", __func__);
+ goto exit_ioctl;
+ }
+
+ atomic_set(&c_port->ctrl_online, 1);
+ break;
+ case QTI_CTRL_GET_LINE_STATE:
+ val = atomic_read(&gsi->connected);
+ ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+ if (ret) {
+ log_event_err("copy_to_user fail LINE_STATE");
+ ret = -EFAULT;
+ }
+ log_event_dbg("%s: Sent line_state: %d for prot id:%d",
+ __func__,
+ atomic_read(&gsi->connected), gsi->prot_id);
+ break;
+ case QTI_CTRL_EP_LOOKUP:
+ case GSI_MBIM_EP_LOOKUP:
+ log_event_dbg("%s: EP_LOOKUP for prot id:%d", __func__,
+ gsi->prot_id);
+ if (!atomic_read(&gsi->connected)) {
+ log_event_dbg("EP_LOOKUP failed: not connected");
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (gsi->prot_id == IPA_USB_DIAG &&
+ (gsi->d_port.in_channel_handle == -EINVAL)) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (gsi->d_port.in_channel_handle == -EINVAL &&
+ gsi->d_port.out_channel_handle == -EINVAL) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+ info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+ info.ipa_ep_pair.cons_pipe_num =
+ (gsi->prot_id == IPA_USB_DIAG) ? -1 :
+ gsi->d_port.out_channel_handle;
+ info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
+
+ log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
+ __func__, gsi->prot_id, info.ph_ep_info.ep_type,
+ info.ph_ep_info.peripheral_iface_id);
+
+ log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+ __func__, info.ipa_ep_pair.cons_pipe_num,
+ info.ipa_ep_pair.prod_pipe_num);
+
+ ret = copy_to_user((void __user *)arg, &info,
+ sizeof(info));
+ if (ret) {
+ log_event_err("copy_to_user fail MBIM");
+ ret = -EFAULT;
+ }
+ break;
+ case GSI_MBIM_GET_NTB_SIZE:
+ ret = copy_to_user((void __user *)arg,
+ &gsi->d_port.ntb_info.ntb_input_size,
+ sizeof(gsi->d_port.ntb_info.ntb_input_size));
+ if (ret) {
+ log_event_err("copy_to_user failNTB_SIZE");
+ ret = -EFAULT;
+ }
+ log_event_dbg("Sent NTB size %d",
+ gsi->d_port.ntb_info.ntb_input_size);
+ break;
+ case GSI_MBIM_GET_DATAGRAM_COUNT:
+ ret = copy_to_user((void __user *)arg,
+ &gsi->d_port.ntb_info.ntb_max_datagrams,
+ sizeof(gsi->d_port.ntb_info.ntb_max_datagrams));
+ if (ret) {
+ log_event_err("copy_to_user fail DATAGRAM");
+ ret = -EFAULT;
+ }
+ log_event_dbg("Sent NTB datagrams count %d",
+ gsi->d_port.ntb_info.ntb_max_datagrams);
+ break;
+ default:
+ log_event_err("wrong parameter");
+ ret = -EINVAL;
+ }
+
+exit_ioctl:
+ return ret;
+}
+
+static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait)
+{
+ struct gsi_ctrl_port *c_port;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ unsigned long flags;
+ unsigned int mask = 0;
+
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+			"%s: free_inst already called; memory will be freed when the dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
+ if (!c_port) {
+ log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ return -ENODEV;
+ }
+
+ poll_wait(fp, &c_port->read_wq, wait);
+
+ spin_lock_irqsave(&c_port->lock, flags);
+ if (!list_empty(&c_port->cpkt_req_q)) {
+ mask |= POLLIN | POLLRDNORM;
+ log_event_dbg("%s sets POLLIN for %s", __func__, c_port->name);
+ }
+ spin_unlock_irqrestore(&c_port->lock, flags);
+
+ return mask;
+}
+
+/* file operations for rmnet/mbim/dpl devices */
+static const struct file_operations gsi_ctrl_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = gsi_ctrl_dev_open,
+ .release = gsi_ctrl_dev_release,
+ .read = gsi_ctrl_dev_read,
+ .write = gsi_ctrl_dev_write,
+ .unlocked_ioctl = gsi_ctrl_dev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gsi_ctrl_dev_ioctl,
+#endif
+ .poll = gsi_ctrl_dev_poll,
+};
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int gsi_xfer_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
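
/*
 * Editorial note on the arithmetic above:
 * (max bulk packets per (micro)frame) * (bytes per packet) *
 * ((micro)frames per millisecond) * 1000 ms * 8 bits.
 * High speed:  13 * 512 * 8 * 1000 * 8  ~= 426 Mbit/s
 * Full speed:  19 *  64 * 1 * 1000 * 8  ~=  9.7 Mbit/s
 * The super-speed figure reuses the same formula with 1024-byte packets
 * rather than deriving from the 5 Gbit/s line rate.
 */
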
+
+int gsi_function_ctrl_port_init(struct f_gsi *gsi)
+{
+ int ret;
+ int sz = GSI_CTRL_NAME_LEN;
+ bool ctrl_dev_create = true;
+
+ if (!gsi) {
+ log_event_err("%s: gsi prot ctx is NULL", __func__);
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
+ INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
+
+ spin_lock_init(&gsi->c_port.lock);
+
+ init_waitqueue_head(&gsi->c_port.read_wq);
+
+ if (gsi->prot_id == IPA_USB_RMNET)
+ strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
+ else if (gsi->prot_id == IPA_USB_MBIM)
+ strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
+ else if (gsi->prot_id == IPA_USB_DIAG)
+ strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
+ else
+ ctrl_dev_create = false;
+
+ if (!ctrl_dev_create)
+ return 0;
+
+ gsi->c_port.ctrl_device.name = gsi->c_port.name;
+ gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
+ gsi->c_port.ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&gsi->c_port.ctrl_device);
+ if (ret) {
+ log_event_err("%s: misc register failed prot id %d",
+ __func__, gsi->prot_id);
+ return ret;
+ }
+
+ return 0;
+}
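
A hedged userspace sketch of how the misc control node registered above is typically driven by a control daemon; the /dev/rmnet_ctrl path follows from GSI_RMNET_CTRL_NAME and is an assumption, and the buffer size should really come from GSI_MAX_CTRL_PKT_SIZE:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* bounded by GSI_MAX_CTRL_PKT_SIZE */
	struct pollfd pfd;
	ssize_t n;
	/* node name is an assumption; it comes from GSI_RMNET_CTRL_NAME */
	int fd = open("/dev/rmnet_ctrl", O_RDWR);

	if (fd < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/*
	 * gsi_ctrl_dev_poll() signals POLLIN when a host-to-modem control
	 * packet is queued; read() drains it and write() queues the reply,
	 * which gsi_ctrl_send_notification() then advertises to the host.
	 */
	while (poll(&pfd, 1, -1) > 0) {
		n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (write(fd, buf, n) < 0)
			break;
	}

	close(fd);
	return 0;
}
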
+
+struct net_device *gsi_rndis_get_netdev(const char *netname)
+{
+ struct net_device *net_dev;
+
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Decrement net_dev refcount as it was incremented in
+ * dev_get_by_name().
+ */
+ dev_put(net_dev);
+ return net_dev;
+}
+
+static void gsi_rndis_open(struct f_gsi *rndis)
+{
+ struct usb_composite_dev *cdev = rndis->function.config->cdev;
+
+ log_event_dbg("%s", __func__);
+
+ rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+ gsi_xfer_bitrate(cdev->gadget) / 100);
+ rndis_signal_connect(rndis->params);
+}
+
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port)
+{
+ struct f_gsi *rndis = d_port_to_gsi(d_port);
+ unsigned long flags;
+
+ if (!rndis) {
+ log_event_err("%s: gsi prot ctx is %pK", __func__, rndis);
+ return;
+ }
+
+ spin_lock_irqsave(&rndis->d_port.lock, flags);
+
+ rndis->d_port.net_ready_trigger = false;
+ spin_unlock_irqrestore(&rndis->d_port.lock, flags);
+}
+
+void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
+{
+ struct f_gsi *rndis = param->v;
+ struct gsi_data_port *d_port;
+
+ if (!rndis) {
+ log_event_err("%s: gsi prot ctx is %pK", __func__, rndis);
+ return;
+ }
+
+ d_port = &rndis->d_port;
+
+ if (enable) {
+ log_event_dbg("%s: posting HOST_NRDY\n", __func__);
+ post_event(d_port, EVT_HOST_NRDY);
+ } else {
+ log_event_dbg("%s: posting HOST_READY\n", __func__);
+ post_event(d_port, EVT_HOST_READY);
+ }
+
+ queue_work(rndis->d_port.ipa_usb_wq, &rndis->d_port.usb_ipa_w);
+}
+
+static int queue_notification_request(struct f_gsi *gsi)
+{
+ int ret;
+ unsigned long flags;
+
+ ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify,
+ gsi->c_port.notify_req, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
+ gsi->c_port.notify_req_queued = false;
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ }
+
+ log_event_dbg("%s: ret:%d req_queued:%d",
+ __func__, ret, gsi->c_port.notify_req_queued);
+
+ return ret;
+}
+
+static int gsi_ctrl_send_notification(struct f_gsi *gsi)
+{
+ __le32 *data;
+ struct usb_cdc_notification *event;
+ struct usb_request *req = gsi->c_port.notify_req;
+ struct gsi_ctrl_pkt *cpkt;
+ unsigned long flags;
+ bool del_free_cpkt = false;
+
+ if (!atomic_read(&gsi->connected)) {
+ log_event_dbg("%s: cable disconnect", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
+ if (list_empty(&gsi->c_port.cpkt_resp_q)) {
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ log_event_dbg("%s: cpkt_resp_q is empty\n", __func__);
+ return 0;
+ }
+
+ log_event_dbg("%s: notify_req_queued:%d\n",
+ __func__, gsi->c_port.notify_req_queued);
+
+ if (gsi->c_port.notify_req_queued) {
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ log_event_dbg("%s: notify_req is already queued.\n", __func__);
+ return 0;
+ }
+
+ cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+ struct gsi_ctrl_pkt, list);
+ log_event_dbg("%s: cpkt->type:%d\n", __func__, cpkt->type);
+
+ event = req->buf;
+
+ switch (cpkt->type) {
+ case GSI_CTRL_NOTIFY_CONNECT:
+ del_free_cpkt = true;
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ event->wValue = cpu_to_le16(1);
+ event->wLength = cpu_to_le16(0);
+ break;
+ case GSI_CTRL_NOTIFY_SPEED:
+ del_free_cpkt = true;
+ event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+ event->wValue = cpu_to_le16(0);
+ event->wLength = cpu_to_le16(8);
+
+ /* SPEED_CHANGE data is up/down speeds in bits/sec */
+ data = req->buf + sizeof(*event);
+ data[0] = cpu_to_le32(gsi_xfer_bitrate(gsi->gadget));
+ data[1] = data[0];
+
+ log_event_dbg("notify speed %d",
+ gsi_xfer_bitrate(gsi->gadget));
+ break;
+ case GSI_CTRL_NOTIFY_OFFLINE:
+ del_free_cpkt = true;
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ event->wValue = cpu_to_le16(0);
+ event->wLength = cpu_to_le16(0);
+ break;
+ case GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE:
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wLength = cpu_to_le16(0);
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ data = req->buf;
+ data[0] = cpu_to_le32(1);
+ data[1] = cpu_to_le32(0);
+			/*
+			 * For RNDIS, free the dummy packet here: the RNDIS
+			 * host driver does not tolerate repeated
+			 * RESPONSE_AVAILABLE notifications. Every command and
+			 * response is one-to-one, so the host expects the
+			 * sequence:
+			 * ep0: USB_CDC_SEND_ENCAPSULATED_COMMAND
+			 * int_ep: device->host: RESPONSE_AVAILABLE
+			 * ep0: USB_CDC_GET_ENCAPSULATED_RESPONSE
+			 * For RMNET, the host ignores duplicate notifications.
+			 */
+ del_free_cpkt = true;
+ }
+ break;
+ default:
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ log_event_err("%s:unknown notify state", __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+	/*
+	 * Delete and free the cpkt now for notifications other than
+	 * NOTIFY_RESPONSE_AVAILABLE; a NOTIFY_RESPONSE_AVAILABLE cpkt is
+	 * freed when the host issues the USB_CDC_GET_ENCAPSULATED_RESPONSE
+	 * setup request.
+	 */
+ if (del_free_cpkt) {
+ list_del(&cpkt->list);
+ gsi_ctrl_pkt_free(cpkt);
+ }
+
+ gsi->c_port.notify_req_queued = true;
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ log_event_dbg("send Notify type %02x", event->bNotificationType);
+
+ return queue_notification_request(gsi);
+}
+
+static void gsi_ctrl_notify_resp_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gsi *gsi = req->context;
+ struct usb_cdc_notification *event = req->buf;
+ int status = req->status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
+ gsi->c_port.notify_req_queued = false;
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ log_event_dbg("ESHUTDOWN/ECONNRESET, connection gone");
+ gsi_ctrl_clear_cpkt_queues(gsi, false);
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+ break;
+ default:
+ log_event_err("Unknown event %02x --> %d",
+ event->bNotificationType, req->status);
+ /* FALLTHROUGH */
+ case 0:
+ break;
+ }
+}
+
+static void gsi_rndis_response_available(void *_rndis)
+{
+ struct f_gsi *gsi = _rndis;
+ struct gsi_ctrl_pkt *cpkt;
+ unsigned long flags;
+
+ cpkt = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ log_event_err("%s: err allocating cpkt\n", __func__);
+ return;
+ }
+
+ cpkt->type = GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE;
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
+ list_add_tail(&cpkt->list, &gsi->c_port.cpkt_resp_q);
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+ gsi_ctrl_send_notification(gsi);
+}
+
+static void gsi_rndis_command_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gsi *rndis = req->context;
+ rndis_init_msg_type *buf;
+ int status;
+
+ if (req->status != 0) {
+ log_event_err("RNDIS command completion error %d\n",
+ req->status);
+ return;
+ }
+
+ status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+ if (status < 0)
+ log_event_err("RNDIS command error %d, %d/%d",
+ status, req->actual, req->length);
+
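+	/*
+	 * On RNDIS_MSG_INIT the host advertises its maximum transfer size;
+	 * cap the IN/DL aggregation size so aggregated packets never exceed
+	 * what the host can accept.
+	 */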
+ buf = (rndis_init_msg_type *)req->buf;
+ if (buf->MessageType == RNDIS_MSG_INIT) {
+ rndis->d_port.in_aggr_size = min_t(u32,
+ rndis->d_port.in_aggr_size,
+ rndis->params->dl_max_xfer_size);
+ log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+ rndis->params->dl_max_xfer_size,
+ rndis->d_port.in_aggr_size);
+ }
+}
+
+static void
+gsi_ctrl_set_ntb_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* now for SET_NTB_INPUT_SIZE only */
+ unsigned in_size = 0;
+ struct f_gsi *gsi = req->context;
+ struct gsi_ntb_info *ntb = NULL;
+
+ log_event_dbg("dev:%pK", gsi);
+
+ req->context = NULL;
+ if (req->status || req->actual != req->length) {
+ log_event_err("Bad control-OUT transfer");
+ goto invalid;
+ }
+
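+	/*
+	 * SET_NTB_INPUT_SIZE carries either a 4 byte dwNtbInMaxSize or the
+	 * 8 byte form which also contains wNtbInMaxDatagrams.
+	 */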
+ if (req->length == 4) {
+ in_size = get_unaligned_le32(req->buf);
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+ goto invalid;
+ } else if (req->length == 8) {
+ ntb = (struct gsi_ntb_info *)req->buf;
+ in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+ goto invalid;
+
+ gsi->d_port.ntb_info.ntb_max_datagrams =
+ get_unaligned_le16(&(ntb->ntb_max_datagrams));
+ } else {
+ goto invalid;
+ }
+
+ log_event_dbg("Set NTB INPUT SIZE %d", in_size);
+
+ gsi->d_port.ntb_info.ntb_input_size = in_size;
+ return;
+
+invalid:
+ log_event_err("Illegal NTB INPUT SIZE %d from host", in_size);
+ usb_ep_set_halt(ep);
+}
+
+static void gsi_ctrl_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gsi *gsi = req->context;
+
+ gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, req->actual);
+}
+
+static void gsi_ctrl_reset_cmd_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gsi *gsi = req->context;
+
+ gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, 0);
+}
+
+static void gsi_ctrl_send_response_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gsi *gsi = req->context;
+
+ gsi_ctrl_send_notification(gsi);
+}
+
+static int
+gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int id, value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ struct gsi_ctrl_pkt *cpkt;
+ u8 *buf;
+ u32 n;
+
+ if (!atomic_read(&gsi->connected)) {
+ log_event_dbg("usb cable is not connected");
+ return -ENOTCONN;
+ }
+
+	/* rmnet and dpl do not have a ctrl_id */
+ if (gsi->ctrl_id == -ENODEV)
+ id = gsi->data_id;
+ else
+ id = gsi->ctrl_id;
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_RESET_FUNCTION:
+
+ log_event_dbg("USB_CDC_RESET_FUNCTION");
+ value = 0;
+ req->complete = gsi_ctrl_reset_cmd_complete;
+ req->context = gsi;
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ log_event_dbg("USB_CDC_SEND_ENCAPSULATED_COMMAND");
+
+ if (w_value || w_index != id)
+ goto invalid;
+ /* read the request; process it later */
+ value = w_length;
+ req->context = gsi;
+ if (gsi->prot_id == IPA_USB_RNDIS)
+ req->complete = gsi_rndis_command_complete;
+ else
+ req->complete = gsi_ctrl_cmd_complete;
+ /* later, rndis_response_available() sends a notification */
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ log_event_dbg("USB_CDC_GET_ENCAPSULATED_RESPONSE");
+ if (w_value || w_index != id)
+ goto invalid;
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ /* return the result */
+ buf = rndis_get_next_response(gsi->params, &n);
+ if (buf) {
+ memcpy(req->buf, buf, n);
+ rndis_free_response(gsi->params, buf);
+ value = n;
+ }
+ break;
+ }
+
+ spin_lock(&gsi->c_port.lock);
+ if (list_empty(&gsi->c_port.cpkt_resp_q)) {
+ log_event_dbg("ctrl resp queue empty");
+ spin_unlock(&gsi->c_port.lock);
+ break;
+ }
+
+ cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+ struct gsi_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gsi->c_port.get_encap_cnt++;
+ spin_unlock(&gsi->c_port.lock);
+
+ value = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, value);
+ gsi_ctrl_pkt_free(cpkt);
+
+ req->complete = gsi_ctrl_send_response_complete;
+ req->context = gsi;
+ log_event_dbg("copied encap_resp %d bytes",
+ value);
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
+ __func__, w_value & GSI_CTRL_DTR ? 1 : 0);
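+		/*
+		 * Forward a zero length packet so the modem side is notified
+		 * of the DTR change.
+		 */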
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+ value = 0;
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_ETHERNET_PACKET_FILTER:
+ /* see 6.2.30: no data, wIndex = interface,
+ * wValue = packet filter bitmap
+ */
+ if (w_length != 0 || w_index != id)
+ goto invalid;
+ log_event_dbg("packet filter %02x", w_value);
+ /* REVISIT locking of cdc_filter. This assumes the UDC
+ * driver won't have a concurrent packet TX irq running on
+ * another CPU; or that if it does, this write is atomic...
+ */
+ gsi->d_port.cdc_filter = w_value;
+ value = 0;
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_PARAMETERS:
+ log_event_dbg("USB_CDC_GET_NTB_PARAMETERS");
+
+ if (w_length == 0 || w_value != 0 || w_index != id)
+ break;
+
+ value = w_length > sizeof(mbim_gsi_ntb_parameters) ?
+ sizeof(mbim_gsi_ntb_parameters) : w_length;
+ memcpy(req->buf, &mbim_gsi_ntb_parameters, value);
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_INPUT_SIZE:
+
+ log_event_dbg("USB_CDC_GET_NTB_INPUT_SIZE");
+
+ if (w_length < 4 || w_value != 0 || w_index != id)
+ break;
+
+ put_unaligned_le32(gsi->d_port.ntb_info.ntb_input_size,
+ req->buf);
+ value = 4;
+ log_event_dbg("Reply to host INPUT SIZE %d",
+ gsi->d_port.ntb_info.ntb_input_size);
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_NTB_INPUT_SIZE:
+ log_event_dbg("USB_CDC_SET_NTB_INPUT_SIZE");
+
+ if (w_length != 4 && w_length != 8) {
+ log_event_err("wrong NTB length %d", w_length);
+ break;
+ }
+
+ if (w_value != 0 || w_index != id)
+ break;
+
+ req->complete = gsi_ctrl_set_ntb_cmd_complete;
+ req->length = w_length;
+ req->context = gsi;
+
+ value = req->length;
+ break;
+ default:
+invalid:
+ log_event_err("inval ctrl req%02x.%02x v%04x i%04x l%d",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ log_event_dbg("req%02x.%02x v%04x i%04x l%d",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ log_event_err("response on err %d", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * function *MUST* implement a get_alt() method.
+ */
+static int gsi_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+
+	/* RNDIS, RMNET and DPL only support alt 0 */
+ if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
+ gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG)
+ return 0;
+ else if (intf == gsi->data_id)
+ return gsi->data_interface_up;
+
+ return -EINVAL;
+}
+
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
+{
+ u32 len_in = 0, len_out = 0;
+ int ret = 0;
+
+ log_event_dbg("allocate trb's buffer\n");
+
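+	/*
+	 * Allocate one contiguous DMA-coherent region per direction, sized
+	 * num_bufs * buf_len, to be used as the TRB data buffer pool for
+	 * the GSI endpoints.
+	 */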
+ if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
+		log_event_dbg("IN: num_bufs=%zu, buf_len=%zu\n",
+ gsi->d_port.in_request.num_bufs,
+ gsi->d_port.in_request.buf_len);
+
+ len_in = gsi->d_port.in_request.buf_len *
+ gsi->d_port.in_request.num_bufs;
+ gsi->d_port.in_request.buf_base_addr =
+ dma_zalloc_coherent(gsi->gadget->dev.parent,
+ len_in, &gsi->d_port.in_request.dma, GFP_KERNEL);
+ if (!gsi->d_port.in_request.buf_base_addr) {
+ dev_err(&gsi->gadget->dev,
+ "IN buf_base_addr allocate failed %s\n",
+ gsi->function.name);
+ ret = -ENOMEM;
+ goto fail1;
+ }
+ }
+
+ if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
+		log_event_dbg("OUT: num_bufs=%zu, buf_len=%zu\n",
+ gsi->d_port.out_request.num_bufs,
+ gsi->d_port.out_request.buf_len);
+
+ len_out = gsi->d_port.out_request.buf_len *
+ gsi->d_port.out_request.num_bufs;
+ gsi->d_port.out_request.buf_base_addr =
+ dma_zalloc_coherent(gsi->gadget->dev.parent,
+ len_out, &gsi->d_port.out_request.dma, GFP_KERNEL);
+ if (!gsi->d_port.out_request.buf_base_addr) {
+ dev_err(&gsi->gadget->dev,
+ "OUT buf_base_addr allocate failed %s\n",
+ gsi->function.name);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ log_event_dbg("finished allocating trb's buffer\n");
+ return ret;
+
+fail:
+ if (len_in && gsi->d_port.in_request.buf_base_addr) {
+ dma_free_coherent(gsi->gadget->dev.parent, len_in,
+ gsi->d_port.in_request.buf_base_addr,
+ gsi->d_port.in_request.dma);
+ gsi->d_port.in_request.buf_base_addr = NULL;
+ }
+fail1:
+ return ret;
+}
+
+static void gsi_free_trb_buffer(struct f_gsi *gsi)
+{
+ u32 len;
+
+ log_event_dbg("freeing trb's buffer\n");
+
+ if (gsi->d_port.out_ep &&
+ gsi->d_port.out_request.buf_base_addr) {
+ len = gsi->d_port.out_request.buf_len *
+ gsi->d_port.out_request.num_bufs;
+ dma_free_coherent(gsi->gadget->dev.parent, len,
+ gsi->d_port.out_request.buf_base_addr,
+ gsi->d_port.out_request.dma);
+ gsi->d_port.out_request.buf_base_addr = NULL;
+ }
+
+ if (gsi->d_port.in_ep &&
+ gsi->d_port.in_request.buf_base_addr) {
+ len = gsi->d_port.in_request.buf_len *
+ gsi->d_port.in_request.num_bufs;
+ dma_free_coherent(gsi->gadget->dev.parent, len,
+ gsi->d_port.in_request.buf_base_addr,
+ gsi->d_port.in_request.dma);
+ gsi->d_port.in_request.buf_base_addr = NULL;
+ }
+}
+
+static int gsi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct net_device *net;
+ int ret;
+
+ log_event_dbg("intf=%u, alt=%u", intf, alt);
+
+ /* Control interface has only altsetting 0 */
+ if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+ if (alt != 0)
+ goto fail;
+
+ if (!gsi->c_port.notify)
+ goto fail;
+
+ if (gsi->c_port.notify->driver_data) {
+ log_event_dbg("reset gsi control %d", intf);
+ usb_ep_disable(gsi->c_port.notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ gsi->c_port.notify);
+ if (ret) {
+ gsi->c_port.notify->desc = NULL;
+ log_event_err("Config-fail notify ep %s: err %d",
+ gsi->c_port.notify->name, ret);
+ goto fail;
+ }
+
+ ret = usb_ep_enable(gsi->c_port.notify);
+ if (ret) {
+ log_event_err("usb ep#%s enable failed, err#%d",
+ gsi->c_port.notify->name, ret);
+ goto fail;
+ }
+ gsi->c_port.notify->driver_data = gsi;
+ }
+
+ /* Data interface has two altsettings, 0 and 1 */
+ if (intf == gsi->data_id) {
+ gsi->d_port.net_ready_trigger = false;
+		/* RNDIS, RMNET and DPL use alt 0 only; update alt accordingly */
+ if (gsi->prot_id == IPA_USB_RNDIS ||
+ gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->d_port.in_ep &&
+ !gsi->d_port.in_ep->driver_data)
+ alt = 1;
+ else
+ alt = 0;
+ }
+
+ if (alt > 1)
+ goto notify_ep_disable;
+
+ if (gsi->data_interface_up == alt)
+ return 0;
+
+ if (gsi->d_port.in_ep && gsi->d_port.in_ep->driver_data)
+ gsi->d_port.ntb_info.ntb_input_size =
+ MBIM_NTB_DEFAULT_IN_SIZE;
+ if (alt == 1) {
+ if (gsi->d_port.in_ep && !gsi->d_port.in_ep->desc
+ && config_ep_by_speed(cdev->gadget, f,
+ gsi->d_port.in_ep)) {
+ gsi->d_port.in_ep->desc = NULL;
+ goto notify_ep_disable;
+ }
+
+ if (gsi->d_port.out_ep && !gsi->d_port.out_ep->desc
+ && config_ep_by_speed(cdev->gadget, f,
+ gsi->d_port.out_ep)) {
+ gsi->d_port.out_ep->desc = NULL;
+ goto notify_ep_disable;
+ }
+
+ /* Configure EPs for GSI */
+ if (gsi->d_port.in_ep) {
+ if (gsi->prot_id == IPA_USB_DIAG)
+ gsi->d_port.in_ep->ep_intr_num = 3;
+ else
+ gsi->d_port.in_ep->ep_intr_num = 2;
+ usb_gsi_ep_op(gsi->d_port.in_ep,
+ &gsi->d_port.in_request,
+ GSI_EP_OP_CONFIG);
+ }
+
+ if (gsi->d_port.out_ep) {
+ gsi->d_port.out_ep->ep_intr_num = 1;
+ usb_gsi_ep_op(gsi->d_port.out_ep,
+ &gsi->d_port.out_request,
+ GSI_EP_OP_CONFIG);
+ }
+
+ gsi->gadget = cdev->gadget;
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ gsi_rndis_open(gsi);
+ net = gsi_rndis_get_netdev("rndis0");
+ if (IS_ERR(net))
+ goto notify_ep_disable;
+
+ log_event_dbg("RNDIS RX/TX early activation");
+ gsi->d_port.cdc_filter = 0;
+ rndis_set_param_dev(gsi->params, net,
+ &gsi->d_port.cdc_filter);
+ }
+
+ if (gsi->prot_id == IPA_USB_ECM)
+ gsi->d_port.cdc_filter = DEFAULT_FILTER;
+
+ /*
+ * For RNDIS the event is posted from the flow control
+ * handler which is invoked when the host sends the
+ * GEN_CURRENT_PACKET_FILTER message.
+ */
+ if (gsi->prot_id != IPA_USB_RNDIS)
+ post_event(&gsi->d_port,
+ EVT_CONNECT_IN_PROGRESS);
+ queue_work(gsi->d_port.ipa_usb_wq,
+ &gsi->d_port.usb_ipa_w);
+ }
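+		/*
+		 * Alt 0 with inactive endpoints means the host has closed
+		 * the data interface; tear down the IPA data path.
+		 */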
+ if (alt == 0 && ((gsi->d_port.in_ep &&
+ !gsi->d_port.in_ep->driver_data) ||
+ (gsi->d_port.out_ep &&
+ !gsi->d_port.out_ep->driver_data))) {
+ ipa_disconnect_handler(&gsi->d_port);
+ }
+
+ gsi->data_interface_up = alt;
+ log_event_dbg("DATA_INTERFACE id = %d, status = %d",
+ gsi->data_id, gsi->data_interface_up);
+ }
+
+ atomic_set(&gsi->connected, 1);
+
+ /* send 0 len pkt to qti to notify state change */
+ if (gsi->prot_id == IPA_USB_DIAG)
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+
+ return 0;
+
+notify_ep_disable:
+ if (gsi->c_port.notify && gsi->c_port.notify->driver_data)
+ usb_ep_disable(gsi->c_port.notify);
+fail:
+ return -EINVAL;
+}
+
+static void gsi_disable(struct usb_function *f)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+
+ atomic_set(&gsi->connected, 0);
+
+ if (gsi->prot_id == IPA_USB_RNDIS)
+ rndis_uninit(gsi->params);
+
+ /* Disable Control Path */
+ if (gsi->c_port.notify &&
+ gsi->c_port.notify->driver_data) {
+ usb_ep_disable(gsi->c_port.notify);
+ gsi->c_port.notify->driver_data = NULL;
+ }
+
+ gsi_ctrl_clear_cpkt_queues(gsi, false);
+ /* send 0 len pkt to qti/qbi to notify state change */
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+ gsi->c_port.notify_req_queued = false;
+ /* Disable Data Path - only if it was initialized already (alt=1) */
+ if (!gsi->data_interface_up) {
+ log_event_dbg("%s: data intf is closed", __func__);
+ return;
+ }
+
+ gsi->data_interface_up = false;
+
+ log_event_dbg("%s deactivated", gsi->function.name);
+ ipa_disconnect_handler(&gsi->d_port);
+ post_event(&gsi->d_port, EVT_DISCONNECTED);
+ queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+}
+
+static void gsi_suspend(struct usb_function *f)
+{
+ bool block_db;
+ struct f_gsi *gsi = func_to_gsi(f);
+
+ /* Check if function is already suspended in gsi_func_suspend() */
+ if (f->func_is_suspended) {
+ log_event_dbg("%s: func already suspended, return\n", __func__);
+ return;
+ }
+
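+	/* Block the IN endpoint doorbell while the link is suspended */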
+ block_db = true;
+ usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ post_event(&gsi->d_port, EVT_SUSPEND);
+ queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+ log_event_dbg("gsi suspended");
+}
+
+static void gsi_resume(struct usb_function *f)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ log_event_dbg("%s", __func__);
+
+ /*
+ * If the function is in USB3 Function Suspend state, resume is
+ * canceled. In this case resume is done by a Function Resume request.
+ */
+ if ((cdev->gadget->speed == USB_SPEED_SUPER) &&
+ f->func_is_suspended)
+ return;
+
+ if (gsi->c_port.notify && !gsi->c_port.notify->desc)
+ config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify);
+
+ /* Check any pending cpkt, and queue immediately on resume */
+ gsi_ctrl_send_notification(gsi);
+
+ /*
+ * Linux host does not send RNDIS_MSG_INIT or non-zero
+ * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+ * Trigger state machine explicitly on resume.
+ */
+ if (gsi->prot_id == IPA_USB_RNDIS &&
+ !usb_gsi_remote_wakeup_allowed(f))
+ rndis_flow_control(gsi->params, false);
+
+ post_event(&gsi->d_port, EVT_RESUMED);
+ queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+
+ log_event_dbg("%s: completed", __func__);
+}
+
+static int gsi_func_suspend(struct usb_function *f, u8 options)
+{
+ bool func_wakeup_allowed;
+
+ log_event_dbg("func susp %u cmd for %s",
+ options, f->name ? f->name : "");
+
+ func_wakeup_allowed =
+ ((options & FUNC_SUSPEND_OPT_RW_EN_MASK) != 0);
+
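+	/*
+	 * FUNC_SUSPEND_OPT_SUSP_MASK selects function suspend vs resume and
+	 * FUNC_SUSPEND_OPT_RW_EN_MASK indicates whether function remote
+	 * wakeup is enabled by the host.
+	 */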
+ if (options & FUNC_SUSPEND_OPT_SUSP_MASK) {
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ if (!f->func_is_suspended) {
+ gsi_suspend(f);
+ f->func_is_suspended = true;
+ }
+ } else {
+ if (f->func_is_suspended) {
+ f->func_is_suspended = false;
+ gsi_resume(f);
+ }
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ }
+
+ return 0;
+}
+
+static int gsi_update_function_bind_params(struct f_gsi *gsi,
+ struct usb_composite_dev *cdev,
+ struct gsi_function_bind_info *info)
+{
+ struct usb_ep *ep;
+ struct usb_cdc_notification *event;
+ struct usb_function *f = &gsi->function;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (info->string_defs[0].id != 0)
+ goto skip_string_id_alloc;
+
+ if (info->ctrl_str_idx >= 0 && info->ctrl_desc) {
+ /* ctrl interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->ctrl_str_idx].id = status;
+ info->ctrl_desc->iInterface = status;
+ }
+
+ if (info->data_str_idx >= 0 && info->data_desc) {
+ /* data interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->data_str_idx].id = status;
+ info->data_desc->iInterface = status;
+ }
+
+ if (info->iad_str_idx >= 0 && info->iad_desc) {
+ /* IAD iFunction label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->iad_str_idx].id = status;
+ info->iad_desc->iFunction = status;
+ }
+
+ if (info->mac_str_idx >= 0 && info->cdc_eth_desc) {
+		/* MAC address string label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->mac_str_idx].id = status;
+ info->cdc_eth_desc->iMACAddress = status;
+ }
+
+skip_string_id_alloc:
+ if (info->ctrl_desc)
+ info->ctrl_desc->bInterfaceNumber = gsi->ctrl_id;
+
+ if (info->iad_desc)
+ info->iad_desc->bFirstInterface = gsi->ctrl_id;
+
+ if (info->union_desc) {
+ info->union_desc->bMasterInterface0 = gsi->ctrl_id;
+ info->union_desc->bSlaveInterface0 = gsi->data_id;
+ }
+
+ if (info->data_desc)
+ info->data_desc->bInterfaceNumber = gsi->data_id;
+
+ if (info->data_nop_desc)
+ info->data_nop_desc->bInterfaceNumber = gsi->data_id;
+
+ /* allocate instance-specific endpoints */
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig_by_name
+ (cdev->gadget, info->fs_in_desc, info->in_epname);
+ if (!ep)
+ goto fail;
+ gsi->d_port.in_ep = ep;
+ msm_ep_config(gsi->d_port.in_ep, NULL);
+ ep->driver_data = cdev; /* claim */
+ }
+
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig_by_name
+ (cdev->gadget, info->fs_out_desc, info->out_epname);
+ if (!ep)
+ goto fail;
+ gsi->d_port.out_ep = ep;
+ msm_ep_config(gsi->d_port.out_ep, NULL);
+ ep->driver_data = cdev; /* claim */
+ }
+
+ if (info->fs_notify_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gsi->c_port.notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* allocate notification request and buffer */
+ gsi->c_port.notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!gsi->c_port.notify_req)
+ goto fail;
+
+ gsi->c_port.notify_req->buf =
+ kmalloc(info->notify_buf_len, GFP_KERNEL);
+ if (!gsi->c_port.notify_req->buf)
+ goto fail;
+
+ gsi->c_port.notify_req->length = info->notify_buf_len;
+ gsi->c_port.notify_req->context = gsi;
+ gsi->c_port.notify_req->complete =
+ gsi_ctrl_notify_resp_complete;
+ event = gsi->c_port.notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+
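+		/*
+		 * RMNET and DPL have no control interface, so notifications
+		 * carry the data interface number in wIndex.
+		 */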
+ if (gsi->ctrl_id == -ENODEV)
+ event->wIndex = cpu_to_le16(gsi->data_id);
+ else
+ event->wIndex = cpu_to_le16(gsi->ctrl_id);
+
+ event->wLength = cpu_to_le16(0);
+ }
+
+ gsi->d_port.in_request.buf_len = info->in_req_buf_len;
+ gsi->d_port.in_request.num_bufs = info->in_req_num_buf;
+ if (gsi->d_port.out_ep) {
+ gsi->d_port.out_request.buf_len = info->out_req_buf_len;
+ gsi->d_port.out_request.num_bufs = info->out_req_num_buf;
+ }
+
+ /* Initialize event queue */
+ spin_lock_init(&gsi->d_port.evt_q.q_lock);
+ gsi->d_port.evt_q.head = gsi->d_port.evt_q.tail = MAXQUEUELEN - 1;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+	if (!f->fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ if (info->fs_in_desc)
+ info->hs_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+ if (info->fs_out_desc)
+ info->hs_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc)
+ info->hs_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(cdev->gadget)) {
+ if (info->fs_in_desc)
+ info->ss_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+
+ if (info->fs_out_desc)
+ info->ss_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc)
+ info->ss_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+ if (gsi->c_port.notify_req) {
+ kfree(gsi->c_port.notify_req->buf);
+ usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+ }
+ /* we might as well release our claims on endpoints */
+ if (gsi->c_port.notify)
+ gsi->c_port.notify->driver_data = NULL;
+ if (gsi->d_port.out_ep && gsi->d_port.out_ep->desc)
+ gsi->d_port.out_ep->driver_data = NULL;
+ if (gsi->d_port.in_ep && gsi->d_port.in_ep->desc)
+ gsi->d_port.in_ep->driver_data = NULL;
+ log_event_err("%s: bind failed for %s", __func__, f->name);
+ return -ENOMEM;
+}
+
+static void ipa_ready_callback(void *user_data)
+{
+ struct f_gsi *gsi = user_data;
+
+ log_event_info("%s: ipa is ready\n", __func__);
+
+ gsi->d_port.ipa_ready = true;
+ wake_up_interruptible(&gsi->d_port.wait_for_ipa_ready);
+}
+
+static int gsi_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct gsi_function_bind_info info = {0};
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct rndis_params *params;
+ int status;
+
+ if (gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG)
+ gsi->ctrl_id = -ENODEV;
+ else {
+ status = gsi->ctrl_id = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ }
+
+ status = gsi->data_id = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+
+ switch (gsi->prot_id) {
+ case IPA_USB_RNDIS:
+ info.string_defs = rndis_gsi_string_defs;
+ info.ctrl_desc = &rndis_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &rndis_gsi_data_intf;
+ info.data_str_idx = 1;
+ info.iad_desc = &rndis_gsi_iad_descriptor;
+ info.iad_str_idx = 2;
+ info.union_desc = &rndis_gsi_union_desc;
+ info.fs_in_desc = &rndis_gsi_fs_in_desc;
+ info.fs_out_desc = &rndis_gsi_fs_out_desc;
+ info.fs_notify_desc = &rndis_gsi_fs_notify_desc;
+ info.hs_in_desc = &rndis_gsi_hs_in_desc;
+ info.hs_out_desc = &rndis_gsi_hs_out_desc;
+ info.hs_notify_desc = &rndis_gsi_hs_notify_desc;
+ info.ss_in_desc = &rndis_gsi_ss_in_desc;
+ info.ss_out_desc = &rndis_gsi_ss_out_desc;
+ info.ss_notify_desc = &rndis_gsi_ss_notify_desc;
+ info.fs_desc_hdr = gsi_eth_fs_function;
+ info.hs_desc_hdr = gsi_eth_hs_function;
+ info.ss_desc_hdr = gsi_eth_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ gsi->d_port.in_aggr_size = GSI_IN_RNDIS_AGGR_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_AGGR_SIZE;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+
+ params = rndis_register(gsi_rndis_response_available, gsi,
+ gsi_rndis_flow_ctrl_enable);
+ if (IS_ERR(params))
+ goto fail;
+
+ gsi->params = params;
+
+ rndis_set_param_medium(gsi->params, RNDIS_MEDIUM_802_3, 0);
+
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+ random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+ gsi->d_port.ipa_init_params.host_ethaddr,
+ gsi->d_port.ipa_init_params.device_ethaddr);
+ memcpy(gsi->ethaddr, &gsi->d_port.ipa_init_params.host_ethaddr,
+ ETH_ALEN);
+ rndis_set_host_mac(gsi->params, gsi->ethaddr);
+
+ if (gsi->manufacturer && gsi->vendorID &&
+ rndis_set_param_vendor(gsi->params, gsi->vendorID,
+ gsi->manufacturer))
+ goto dereg_rndis;
+
+ log_event_dbg("%s: max_pkt_per_xfer : %d", __func__,
+ DEFAULT_MAX_PKT_PER_XFER);
+ rndis_set_max_pkt_xfer(gsi->params, DEFAULT_MAX_PKT_PER_XFER);
+
+		/* In case of aggregated packets the QC device will request
+		 * alignment to 4 bytes (2^2).
+		 */
+ log_event_dbg("%s: pkt_alignment_factor : %d", __func__,
+ DEFAULT_PKT_ALIGNMENT_FACTOR);
+ rndis_set_pkt_alignment_factor(gsi->params,
+ DEFAULT_PKT_ALIGNMENT_FACTOR);
+ if (gsi->rndis_use_wceis) {
+ info.iad_desc->bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ info.iad_desc->bFunctionSubClass = 0x01;
+ info.iad_desc->bFunctionProtocol = 0x03;
+ info.ctrl_desc->bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ info.ctrl_desc->bInterfaceSubClass = 0x1;
+ info.ctrl_desc->bInterfaceProtocol = 0x03;
+ }
+ break;
+ case IPA_USB_MBIM:
+ info.string_defs = mbim_gsi_string_defs;
+ info.ctrl_desc = &mbim_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &mbim_gsi_data_intf;
+ info.data_str_idx = 1;
+ info.data_nop_desc = &mbim_gsi_data_nop_intf;
+ info.iad_desc = &mbim_gsi_iad_desc;
+ info.iad_str_idx = -1;
+ info.union_desc = &mbim_gsi_union_desc;
+ info.fs_in_desc = &mbim_gsi_fs_in_desc;
+ info.fs_out_desc = &mbim_gsi_fs_out_desc;
+ info.fs_notify_desc = &mbim_gsi_fs_notify_desc;
+ info.hs_in_desc = &mbim_gsi_hs_in_desc;
+ info.hs_out_desc = &mbim_gsi_hs_out_desc;
+ info.hs_notify_desc = &mbim_gsi_hs_notify_desc;
+ info.ss_in_desc = &mbim_gsi_ss_in_desc;
+ info.ss_out_desc = &mbim_gsi_ss_out_desc;
+ info.ss_notify_desc = &mbim_gsi_ss_notify_desc;
+ info.fs_desc_hdr = mbim_gsi_fs_function;
+ info.hs_desc_hdr = mbim_gsi_hs_function;
+ info.ss_desc_hdr = mbim_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_IN_MBIM_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_MBIM_AGGR_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_MBIM_BUF_LEN;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ mbim_gsi_desc.wMaxSegmentSize = cpu_to_le16(0x800);
+
+		/*
+		 * If MBIM is bound in a config other than the first, tell
+		 * Windows about it by returning the configuration number as
+		 * a string in the OS descriptor's subCompatibleID field.
+		 * Windows only supports up to config #4.
+		 */
+ if (c->bConfigurationValue >= 2 &&
+ c->bConfigurationValue <= 4) {
+ log_event_dbg("MBIM in configuration %d",
+ c->bConfigurationValue);
+ mbim_gsi_ext_config_desc.function.subCompatibleID[0] =
+ c->bConfigurationValue + '0';
+ }
+ break;
+ case IPA_USB_RMNET:
+ info.string_defs = rmnet_gsi_string_defs;
+ info.data_desc = &rmnet_gsi_interface_desc;
+ info.data_str_idx = 0;
+ info.fs_in_desc = &rmnet_gsi_fs_in_desc;
+ info.fs_out_desc = &rmnet_gsi_fs_out_desc;
+ info.fs_notify_desc = &rmnet_gsi_fs_notify_desc;
+ info.hs_in_desc = &rmnet_gsi_hs_in_desc;
+ info.hs_out_desc = &rmnet_gsi_hs_out_desc;
+ info.hs_notify_desc = &rmnet_gsi_hs_notify_desc;
+ info.ss_in_desc = &rmnet_gsi_ss_in_desc;
+ info.ss_out_desc = &rmnet_gsi_ss_out_desc;
+ info.ss_notify_desc = &rmnet_gsi_ss_notify_desc;
+ info.fs_desc_hdr = rmnet_gsi_fs_function;
+ info.hs_desc_hdr = rmnet_gsi_hs_function;
+ info.ss_desc_hdr = rmnet_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_IN_RMNET_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ break;
+ case IPA_USB_ECM:
+ info.string_defs = ecm_gsi_string_defs;
+ info.ctrl_desc = &ecm_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &ecm_gsi_data_intf;
+ info.data_str_idx = 2;
+ info.data_nop_desc = &ecm_gsi_data_nop_intf;
+ info.cdc_eth_desc = &ecm_gsi_desc;
+ info.mac_str_idx = 1;
+ info.union_desc = &ecm_gsi_union_desc;
+ info.fs_in_desc = &ecm_gsi_fs_in_desc;
+ info.fs_out_desc = &ecm_gsi_fs_out_desc;
+ info.fs_notify_desc = &ecm_gsi_fs_notify_desc;
+ info.hs_in_desc = &ecm_gsi_hs_in_desc;
+ info.hs_out_desc = &ecm_gsi_hs_out_desc;
+ info.hs_notify_desc = &ecm_gsi_hs_notify_desc;
+ info.ss_in_desc = &ecm_gsi_ss_in_desc;
+ info.ss_out_desc = &ecm_gsi_ss_out_desc;
+ info.ss_notify_desc = &ecm_gsi_ss_notify_desc;
+ info.fs_desc_hdr = ecm_gsi_fs_function;
+ info.hs_desc_hdr = ecm_gsi_hs_function;
+ info.ss_desc_hdr = ecm_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_ECM_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_ECM_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_ECM_BUF_LEN;
+ info.out_req_num_buf = GSI_ECM_NUM_OUT_BUFFERS;
+ info.notify_buf_len = GSI_CTRL_NOTIFY_BUFF_LEN;
+
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+ random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+ gsi->d_port.ipa_init_params.host_ethaddr,
+ gsi->d_port.ipa_init_params.device_ethaddr);
+
+ snprintf(gsi->ethaddr, sizeof(gsi->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ gsi->d_port.ipa_init_params.host_ethaddr[0],
+ gsi->d_port.ipa_init_params.host_ethaddr[1],
+ gsi->d_port.ipa_init_params.host_ethaddr[2],
+ gsi->d_port.ipa_init_params.host_ethaddr[3],
+ gsi->d_port.ipa_init_params.host_ethaddr[4],
+ gsi->d_port.ipa_init_params.host_ethaddr[5]);
+ info.string_defs[1].s = gsi->ethaddr;
+ break;
+ case IPA_USB_DIAG:
+ info.string_defs = qdss_gsi_string_defs;
+ info.data_desc = &qdss_gsi_data_intf_desc;
+ info.data_str_idx = 0;
+ info.fs_in_desc = &qdss_gsi_hs_data_desc;
+ info.hs_in_desc = &qdss_gsi_hs_data_desc;
+ info.ss_in_desc = &qdss_gsi_ss_data_desc;
+ info.fs_desc_hdr = qdss_gsi_hs_data_only_desc;
+ info.hs_desc_hdr = qdss_gsi_hs_data_only_desc;
+ info.ss_desc_hdr = qdss_gsi_ss_data_only_desc;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "";
+ info.in_req_buf_len = 16384;
+ info.in_req_num_buf = num_in_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ break;
+ default:
+ log_event_err("%s: Invalid prot id %d", __func__,
+ gsi->prot_id);
+ return -EINVAL;
+ }
+
+ status = gsi_update_function_bind_params(gsi, cdev, &info);
+ if (status)
+ goto dereg_rndis;
+
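+	/*
+	 * A zero return means the ready callback was registered because IPA
+	 * is not up yet; wait for ipa_ready_callback() with a timeout.
+	 */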
+ status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
+ if (!status) {
+ log_event_info("%s: ipa is not ready", __func__);
+ status = wait_event_interruptible_timeout(
+ gsi->d_port.wait_for_ipa_ready, gsi->d_port.ipa_ready,
+ msecs_to_jiffies(GSI_IPA_READY_TIMEOUT));
+ if (!status) {
+ log_event_err("%s: ipa ready timeout", __func__);
+ status = -ETIMEDOUT;
+ goto dereg_rndis;
+ }
+ }
+
+ gsi->d_port.ipa_usb_notify_cb = ipa_usb_notify_cb;
+ status = ipa_usb_init_teth_prot(gsi->prot_id,
+ &gsi->d_port.ipa_init_params, gsi->d_port.ipa_usb_notify_cb,
+ gsi);
+ if (status) {
+ log_event_err("%s: failed to init teth prot %d",
+ __func__, gsi->prot_id);
+ goto dereg_rndis;
+ }
+
+ gsi->d_port.sm_state = STATE_INITIALIZED;
+
+ DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ f->name,
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ gsi->d_port.in_ep->name, gsi->d_port.out_ep->name,
+ gsi->c_port.notify->name);
+ return 0;
+
+dereg_rndis:
+ rndis_deregister(gsi->params);
+fail:
+ return status;
+}
+
+static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+
+	/*
+	 * Use drain_workqueue() to ensure that:
+	 * 1. any work currently running has completed, and
+	 * 2. all pending work has been flushed, i.e. the workqueue is empty.
+	 * This guarantees that ipa_usb_deinit_teth_prot() in the IPA driver
+	 * does not fail due to an unexpected state.
+	 */
+ drain_workqueue(gsi->d_port.ipa_usb_wq);
+ ipa_usb_deinit_teth_prot(gsi->prot_id);
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ gsi->d_port.sm_state = STATE_UNINITIALIZED;
+ rndis_deregister(gsi->params);
+ }
+
+ if (gsi->prot_id == IPA_USB_MBIM)
+ mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (gsi->c_port.notify) {
+ kfree(gsi->c_port.notify_req->buf);
+ usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+ }
+}
+
+static void gsi_free_func(struct usb_function *f)
+{
+ log_event_dbg("%s\n", __func__);
+}
+
+int gsi_bind_config(struct f_gsi *gsi)
+{
+ int status = 0;
+ enum ipa_usb_teth_prot prot_id = gsi->prot_id;
+
+ log_event_dbg("%s: prot id %d", __func__, prot_id);
+
+ switch (prot_id) {
+ case IPA_USB_RNDIS:
+ gsi->function.name = "rndis";
+ gsi->function.strings = rndis_gsi_strings;
+ break;
+ case IPA_USB_ECM:
+ gsi->function.name = "cdc_ethernet";
+ gsi->function.strings = ecm_gsi_strings;
+ break;
+ case IPA_USB_RMNET:
+ gsi->function.name = "rmnet";
+ gsi->function.strings = rmnet_gsi_strings;
+ break;
+ case IPA_USB_MBIM:
+ gsi->function.name = "mbim";
+ gsi->function.strings = mbim_gsi_strings;
+ break;
+ case IPA_USB_DIAG:
+ gsi->function.name = "dpl";
+ gsi->function.strings = qdss_gsi_strings;
+ break;
+ default:
+ log_event_err("%s: invalid prot id %d", __func__, prot_id);
+ return -EINVAL;
+ }
+
+ /* descriptors are per-instance copies */
+ gsi->function.bind = gsi_bind;
+ gsi->function.unbind = gsi_unbind;
+ gsi->function.set_alt = gsi_set_alt;
+ gsi->function.get_alt = gsi_get_alt;
+ gsi->function.setup = gsi_setup;
+ gsi->function.disable = gsi_disable;
+ gsi->function.free_func = gsi_free_func;
+ gsi->function.suspend = gsi_suspend;
+ gsi->function.func_suspend = gsi_func_suspend;
+ gsi->function.resume = gsi_resume;
+
+ INIT_WORK(&gsi->d_port.usb_ipa_w, ipa_work_handler);
+
+ return status;
+}
+
+static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id)
+{
+ struct f_gsi *gsi;
+ int ret = 0;
+
+ if (prot_id >= IPA_USB_MAX_TETH_PROT_SIZE) {
+ log_event_err("%s: invalid prot id %d", __func__, prot_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+ if (!gsi) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ spin_lock_init(&gsi->d_port.lock);
+
+ init_waitqueue_head(&gsi->d_port.wait_for_ipa_ready);
+
+ gsi->d_port.in_channel_handle = -EINVAL;
+ gsi->d_port.out_channel_handle = -EINVAL;
+
+ gsi->prot_id = prot_id;
+
+ gsi->d_port.ipa_usb_wq = ipa_usb_wq;
+
+ ret = gsi_function_ctrl_port_init(gsi);
+ if (ret) {
+ kfree(gsi);
+ goto error;
+ }
+
+ return gsi;
+error:
+ return ERR_PTR(ret);
+}
+
+static void gsi_opts_release(struct config_item *item)
+{
+ struct gsi_opts *opts = to_gsi_opts(item);
+
+ log_event_dbg("Release GSI: %s\n", __func__);
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations gsi_item_ops = {
+ .release = gsi_opts_release,
+};
+
+static ssize_t gsi_info_show(struct config_item *item, char *page)
+{
+ struct ipa_usb_xdci_chan_params *ipa_chnl_params;
+ struct ipa_usb_xdci_connect_params *con_pms;
+ struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+ int ret, j = 0;
+ unsigned int len = 0;
+ char *buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (gsi && atomic_read(&gsi->connected)) {
+ len += scnprintf(buf + len, PAGE_SIZE - len, "Info: Prot_id:%d\n",
+ gsi->prot_id);
+ ipa_chnl_params = &gsi->d_port.ipa_in_channel_params;
+ con_pms = &gsi->d_port.ipa_conn_pms;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%55s\n",
+ "==================================================");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10s\n", "Ctrl Name: ", gsi->c_port.name);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Online: ",
+				atomic_read(&gsi->c_port.ctrl_online));
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Open: ",
+ gsi->c_port.is_open);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Host to Modem: ",
+ gsi->c_port.host_to_modem);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Modem to Host: ",
+ gsi->c_port.modem_to_host);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Cpd to Modem: ",
+ gsi->c_port.copied_to_modem);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Cpd From Modem: ",
+ gsi->c_port.copied_from_modem);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Ctrl Pkt Drops: ",
+ gsi->c_port.cpkt_drop_cnt);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+ "==============");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Protocol ID: ", gsi->prot_id);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "SM State: ", gsi->d_port.sm_state);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "IN XferRscIndex: ",
+ gsi->d_port.in_xfer_rsc_index);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10d\n", "IN Chnl Hdl: ",
+ gsi->d_port.in_channel_handle);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "IN Chnl Dbl Addr: ",
+ gsi->d_port.in_db_reg_phs_addr_lsb);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "IN TRB Ring Len: ",
+ ipa_chnl_params->xfer_ring_len);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "IN TRB Base Addr: ", (unsigned int)
+ ipa_chnl_params->xfer_ring_base_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "GEVENTCNTLO IN Addr: ",
+ ipa_chnl_params->gevntcount_low_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "DEPCMDLO IN Addr: ",
+ ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "IN LastTRB Addr Off: ",
+ ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "IN Buffer Size: ",
+ ipa_chnl_params->xfer_scratch.const_buffer_size);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "IN/DL Aggr Size: ",
+ con_pms->teth_prot_params.max_xfer_size_bytes_to_host);
+
+ ipa_chnl_params = &gsi->d_port.ipa_out_channel_params;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+ "==============");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "OUT XferRscIndex: ",
+ gsi->d_port.out_xfer_rsc_index);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10d\n", "OUT Channel Hdl: ",
+ gsi->d_port.out_channel_handle);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "OUT Channel Dbl Addr: ",
+ gsi->d_port.out_db_reg_phs_addr_lsb);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "OUT TRB Ring Len: ",
+ ipa_chnl_params->xfer_ring_len);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "OUT TRB Base Addr: ", (unsigned int)
+ ipa_chnl_params->xfer_ring_base_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "GEVENTCNTLO OUT Addr: ",
+ ipa_chnl_params->gevntcount_low_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "DEPCMDLO OUT Addr: ",
+ ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10x\n", "OUT LastTRB Addr Off: ",
+ ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "OUT Buffer Size: ",
+ ipa_chnl_params->xfer_scratch.const_buffer_size);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "OUT/UL Aggr Size: ",
+ con_pms->teth_prot_params.max_xfer_size_bytes_to_dev);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "OUT/UL Packets to dev: ",
+ con_pms->teth_prot_params.max_packet_number_to_dev);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Net_ready_trigger:",
+ gsi->d_port.net_ready_trigger);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+ "USB Bus Events");
+ for (j = 0; j < MAXQUEUELEN; j++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d\t", gsi->d_port.evt_q.event[j]);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Eventq head: ",
+ gsi->d_port.evt_q.head);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%25s %10u\n", "Eventq tail: ",
+ gsi->d_port.evt_q.tail);
+ }
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+	ret = scnprintf(page, len, "%s", buf);
+
+ kfree(buf);
+
+ return ret;
+}
+
+CONFIGFS_ATTR_RO(gsi_, info);
+
+static ssize_t gsi_rndis_wceis_show(struct config_item *item, char *page)
+{
+ struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", gsi->rndis_use_wceis);
+}
+
+static ssize_t gsi_rndis_wceis_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ gsi->rndis_use_wceis = val;
+
+ return len;
+}
+
+CONFIGFS_ATTR(gsi_, rndis_wceis);
+
+static struct configfs_attribute *gsi_rndis_attrs[] = {
+ &gsi_attr_info,
+ &gsi_attr_rndis_wceis,
+ NULL,
+};
+
+static struct config_item_type gsi_func_rndis_type = {
+ .ct_item_ops = &gsi_item_ops,
+ .ct_attrs = gsi_rndis_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+
+static struct configfs_attribute *gsi_attrs[] = {
+ &gsi_attr_info,
+ NULL,
+};
+
+static struct config_item_type gsi_func_type = {
+ .ct_item_ops = &gsi_item_ops,
+ .ct_attrs = gsi_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void gsi_inst_clean(struct gsi_opts *opts)
+{
+ if (opts->gsi->c_port.ctrl_device.fops)
+ misc_deregister(&opts->gsi->c_port.ctrl_device);
+
+ kfree(opts->gsi);
+ kfree(opts);
+}
+
+static int gsi_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ int prot_id, name_len;
+ struct f_gsi *gsi;
+ struct gsi_opts *opts, *opts_prev;
+
+ opts = container_of(fi, struct gsi_opts, func_inst);
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ prot_id = name_to_prot_id(name);
+ if (prot_id < 0) {
+ log_event_err("%s: failed to find prot id for %s instance\n",
+ __func__, name);
+ return -EINVAL;
+ }
+
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ opts_prev = inst_status[prot_id].opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+		log_event_err("%s: prot_id = %d, previous instance not freed yet\n",
+ __func__, prot_id);
+ return -EBUSY;
+ }
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+ if (prot_id == IPA_USB_RNDIS)
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &gsi_func_rndis_type);
+ gsi = gsi_function_init(prot_id);
+ if (IS_ERR(gsi))
+ return PTR_ERR(gsi);
+
+ opts->gsi = gsi;
+
+ /* Set instance status */
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ inst_status[prot_id].inst_exist = true;
+ inst_status[prot_id].opts = opts;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+ return 0;
+}
+
+static void gsi_free_inst(struct usb_function_instance *f)
+{
+ struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+ enum ipa_usb_teth_prot prot_id;
+
+ if (!opts->gsi)
+ return;
+
+ prot_id = opts->gsi->prot_id;
+
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ if (opts->gsi->c_port.is_open) {
+ /* Mark instance exist as false */
+ inst_status[prot_id].inst_exist = false;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+ log_event_err(
+		"%s: [prot_id = %d] device is open, memory freed on device close\n",
+ __func__, prot_id);
+ return;
+ }
+
+ /* Clear instance status */
+ gsi_inst_clean(opts);
+ inst_status[prot_id].inst_exist = false;
+ inst_status[prot_id].opts = NULL;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+}
+
+static struct usb_function_instance *gsi_alloc_inst(void)
+{
+ struct gsi_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = gsi_set_inst_name;
+ opts->func_inst.free_func_inst = gsi_free_inst;
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &gsi_func_type);
+
+ return &opts->func_inst;
+}
+
+static struct usb_function *gsi_alloc(struct usb_function_instance *fi)
+{
+ struct gsi_opts *opts;
+ int ret;
+
+ opts = container_of(fi, struct gsi_opts, func_inst);
+
+ ret = gsi_bind_config(opts->gsi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &opts->gsi->function;
+}
+
+DECLARE_USB_FUNCTION(gsi, gsi_alloc_inst, gsi_alloc);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("GSI function driver");
+
+static int __init fgsi_init(void)
+{
+ int i;
+
+ ipa_usb_wq = alloc_workqueue("k_ipa_usb",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
+ if (!ipa_usb_wq) {
+ log_event_err("Failed to create workqueue for IPA");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ mutex_init(&inst_status[i].gsi_lock);
+
+ return usb_function_register(&gsiusb_func);
+}
+module_init(fgsi_init);
+
+static void __exit fgsi_exit(void)
+{
+ if (ipa_usb_wq)
+ destroy_workqueue(ipa_usb_wq);
+ usb_function_unregister(&gsiusb_func);
+}
+module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
new file mode 100644
index 000000000000..96f1b5011960
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef _F_GSI_H
+#define _F_GSI_H
+
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa.h>
+#include <uapi/linux/usb/cdc.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb/msm_hsusb.h>
+
+#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
+#define GSI_MBIM_CTRL_NAME "android_mbim"
+#define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
+#define GSI_MAX_CTRL_PKT_SIZE 8192
+#define GSI_CTRL_DTR (1 << 0)
+
+
+#define GSI_NUM_IN_BUFFERS 15
+#define GSI_IN_BUFF_SIZE 2048
+#define GSI_NUM_OUT_BUFFERS 15
+#define GSI_ECM_NUM_OUT_BUFFERS 31
+#define GSI_OUT_AGGR_SIZE 24576
+
+#define GSI_IN_RNDIS_AGGR_SIZE 9216
+#define GSI_IN_MBIM_AGGR_SIZE 16384
+#define GSI_IN_RMNET_AGGR_SIZE 16384
+#define GSI_ECM_AGGR_SIZE 2048
+
+#define GSI_OUT_MBIM_BUF_LEN 16384
+#define GSI_OUT_RMNET_BUF_LEN 16384
+#define GSI_OUT_ECM_BUF_LEN 2048
+
+#define GSI_IPA_READY_TIMEOUT 5000
+
+#define ETH_ADDR_STR_LEN 14
+
+/* mbim and ecm */
+#define GSI_CTRL_NOTIFY_BUFF_LEN 16
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define GSI_MBIM_IOCTL_MAGIC 'o'
+#define GSI_MBIM_GET_NTB_SIZE _IOR(GSI_MBIM_IOCTL_MAGIC, 2, u32)
+#define GSI_MBIM_GET_DATAGRAM_COUNT _IOR(GSI_MBIM_IOCTL_MAGIC, 3, u16)
+#define GSI_MBIM_EP_LOOKUP _IOR(GSI_MBIM_IOCTL_MAGIC, 4, struct ep_info)
+#define GSI_MBIM_DATA_EP_TYPE_HSUSB 0x2
+/* ID for Microsoft OS String */
+#define GSI_MBIM_OS_STRING_ID 0xEE
+
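+/*
+ * Events posted to the per-port event queue (struct event_queue) and
+ * consumed by the IPA work handler.
+ */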
+#define EVT_NONE 0
+#define EVT_UNINITIALIZED 1
+#define EVT_INITIALIZED 2
+#define EVT_CONNECT_IN_PROGRESS 3
+#define EVT_CONNECTED 4
+#define EVT_HOST_NRDY 5
+#define EVT_HOST_READY 6
+#define EVT_DISCONNECTED 7
+#define EVT_SUSPEND 8
+#define EVT_IPA_SUSPEND 9
+#define EVT_RESUMED 10
+
+enum connection_state {
+ STATE_UNINITIALIZED,
+ STATE_INITIALIZED,
+ STATE_CONNECT_IN_PROGRESS,
+ STATE_CONNECTED,
+ STATE_DISCONNECTED,
+ STATE_SUSPEND_IN_PROGRESS,
+ STATE_SUSPENDED
+};
+
+enum gsi_ctrl_notify_state {
+ GSI_CTRL_NOTIFY_NONE,
+ GSI_CTRL_NOTIFY_CONNECT,
+ GSI_CTRL_NOTIFY_SPEED,
+ GSI_CTRL_NOTIFY_OFFLINE,
+ GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+#define MAXQUEUELEN 128
+struct event_queue {
+ u8 event[MAXQUEUELEN];
+ u8 head, tail;
+ spinlock_t q_lock;
+};
+
+struct gsi_ntb_info {
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+ u16 reserved;
+};
+
+struct gsi_ctrl_pkt {
+ void *buf;
+ int len;
+ enum gsi_ctrl_notify_state type;
+ struct list_head list;
+};
+
+struct gsi_function_bind_info {
+ struct usb_string *string_defs;
+ int ctrl_str_idx;
+ int data_str_idx;
+ int iad_str_idx;
+ int mac_str_idx;
+ struct usb_interface_descriptor *ctrl_desc;
+ struct usb_interface_descriptor *data_desc;
+ struct usb_interface_assoc_descriptor *iad_desc;
+ struct usb_cdc_ether_desc *cdc_eth_desc;
+ struct usb_cdc_union_desc *union_desc;
+ struct usb_interface_descriptor *data_nop_desc;
+ struct usb_endpoint_descriptor *fs_in_desc;
+ struct usb_endpoint_descriptor *fs_out_desc;
+ struct usb_endpoint_descriptor *fs_notify_desc;
+ struct usb_endpoint_descriptor *hs_in_desc;
+ struct usb_endpoint_descriptor *hs_out_desc;
+ struct usb_endpoint_descriptor *hs_notify_desc;
+ struct usb_endpoint_descriptor *ss_in_desc;
+ struct usb_endpoint_descriptor *ss_out_desc;
+ struct usb_endpoint_descriptor *ss_notify_desc;
+
+ struct usb_descriptor_header **fs_desc_hdr;
+ struct usb_descriptor_header **hs_desc_hdr;
+ struct usb_descriptor_header **ss_desc_hdr;
+ const char *in_epname;
+ const char *out_epname;
+
+ u32 in_req_buf_len;
+ u32 in_req_num_buf;
+ u32 out_req_buf_len;
+ u32 out_req_num_buf;
+ u32 notify_buf_len;
+};
+
+struct gsi_ctrl_port {
+ char name[GSI_CTRL_NAME_LEN];
+ struct miscdevice ctrl_device;
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ bool notify_req_queued;
+
+ atomic_t ctrl_online;
+
+ bool is_open;
+
+ wait_queue_head_t read_wq;
+
+ struct list_head cpkt_req_q;
+ struct list_head cpkt_resp_q;
+ unsigned long cpkts_len;
+
+ spinlock_t lock;
+
+ int ipa_cons_clnt_hdl;
+ int ipa_prod_clnt_hdl;
+
+ unsigned host_to_modem;
+ unsigned copied_to_modem;
+ unsigned copied_from_modem;
+ unsigned modem_to_host;
+ unsigned cpkt_drop_cnt;
+ unsigned get_encap_cnt;
+};
+
+struct gsi_data_port {
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+ struct usb_gsi_request in_request;
+ struct usb_gsi_request out_request;
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *driver_data);
+ struct ipa_usb_teth_params ipa_init_params;
+ int in_channel_handle;
+ int out_channel_handle;
+ u32 in_db_reg_phs_addr_lsb;
+ u32 in_db_reg_phs_addr_msb;
+ u32 out_db_reg_phs_addr_lsb;
+ u32 out_db_reg_phs_addr_msb;
+ u32 in_xfer_rsc_index;
+ u32 out_xfer_rsc_index;
+ u16 in_last_trb_addr;
+ u16 cdc_filter;
+ u32 in_aggr_size;
+ u32 out_aggr_size;
+
+ bool ipa_ready;
+ bool net_ready_trigger;
+ struct gsi_ntb_info ntb_info;
+
+ spinlock_t lock;
+
+ struct work_struct usb_ipa_w;
+ struct workqueue_struct *ipa_usb_wq;
+ enum connection_state sm_state;
+ struct event_queue evt_q;
+ wait_queue_head_t wait_for_ipa_ready;
+
+ /* Track these for debugfs */
+ struct ipa_usb_xdci_chan_params ipa_in_channel_params;
+ struct ipa_usb_xdci_chan_params ipa_out_channel_params;
+ struct ipa_usb_xdci_connect_params ipa_conn_pms;
+};
+
+struct f_gsi {
+ struct usb_function function;
+ struct usb_gadget *gadget;
+ enum ipa_usb_teth_prot prot_id;
+ int ctrl_id;
+ int data_id;
+ u32 vendorID;
+ u8 ethaddr[ETH_ADDR_STR_LEN];
+ const char *manufacturer;
+ struct rndis_params *params;
+ atomic_t connected;
+ bool data_interface_up;
+ bool rndis_use_wceis;
+
+ const struct usb_endpoint_descriptor *in_ep_desc_backup;
+ const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+ struct gsi_data_port d_port;
+ struct gsi_ctrl_port c_port;
+};
+
+static inline struct f_gsi *func_to_gsi(struct usb_function *f)
+{
+ return container_of(f, struct f_gsi, function);
+}
+
+static inline struct f_gsi *d_port_to_gsi(struct gsi_data_port *d)
+{
+ return container_of(d, struct f_gsi, d_port);
+}
+
+static inline struct f_gsi *c_port_to_gsi(struct gsi_ctrl_port *d)
+{
+ return container_of(d, struct f_gsi, c_port);
+}
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN 40
+
+struct gsi_opts {
+ struct usb_function_instance func_inst;
+ struct f_gsi *gsi;
+};
+
+static inline struct gsi_opts *to_gsi_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct gsi_opts,
+ func_inst.group);
+}
+
+static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+{
+ if (!name)
+ goto error;
+
+ if (!strncmp("rndis", name, MAX_INST_NAME_LEN))
+ return IPA_USB_RNDIS;
+ if (!strncmp("ecm", name, MAX_INST_NAME_LEN))
+ return IPA_USB_ECM;
+ if (!strncmp("rmnet", name, MAX_INST_NAME_LEN))
+ return IPA_USB_RMNET;
+ if (!strncasecmp("mbim", name, MAX_INST_NAME_LEN))
+ return IPA_USB_MBIM;
+ if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN))
+ return IPA_USB_DIAG;
+
+error:
+ return -EINVAL;
+}
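Hedged sketch of how the lookup above might be used when a configfs instance is named (for example from a set_inst_name callback); the real callback lives in f_gsi.c and is not shown here, so treat this purely as an illustration of the error handling.

static int gsi_set_inst_name_example(struct usb_function_instance *fi,
				     const char *name)
{
	int prot_id = name_to_prot_id(name);

	if (prot_id < 0) {
		pr_err("%s: unsupported protocol name '%s'\n", __func__, name);
		return -EINVAL;
	}

	/* the real driver would allocate/associate the f_gsi instance here */
	return 0;
}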
+
+/* device descriptors */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5
+#define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+/* rmnet device descriptors */
+
+static struct usb_interface_descriptor rmnet_gsi_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_hs_out_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_notify_comp_desc = {
+ .bLength = sizeof(rmnet_gsi_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_in_comp_desc = {
+ .bLength = sizeof(rmnet_gsi_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_out_comp_desc = {
+ .bLength = sizeof(rmnet_gsi_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *rmnet_gsi_ss_function[] = {
+ (struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &rmnet_gsi_ss_out_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+static struct usb_string rmnet_gsi_string_defs[] = {
+ [0].s = "RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_gsi_strings[] = {
+ &rmnet_gsi_string_table,
+ NULL,
+};
+
+/* rndis device descriptors */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_descriptor rndis_gsi_control_intf = {
+ .bLength = sizeof(rndis_gsi_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_MISC,
+ .bInterfaceSubClass = 0x04,
+ .bInterfaceProtocol = 0x01, /* RNDIS over Ethernet */
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_gsi_header_desc = {
+ .bLength = sizeof(rndis_gsi_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_gsi_call_mgmt_descriptor = {
+ .bLength = sizeof(rndis_gsi_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+
+ .bmCapabilities = 0x00,
+ .bDataInterface = 0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_gsi_acm_descriptor = {
+ .bLength = sizeof(rndis_gsi_acm_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+
+ .bmCapabilities = 0x00,
+};
+
+static struct usb_cdc_union_desc rndis_gsi_union_desc = {
+ .bLength = sizeof(rndis_gsi_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_gsi_data_intf = {
+ .bLength = sizeof(rndis_gsi_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_gsi_iad_descriptor = {
+ .bLength = sizeof(rndis_gsi_iad_descriptor),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_MISC,
+ .bFunctionSubClass = 0x04,
+ .bFunctionProtocol = 0x01, /* RNDIS over Ethernet */
+ /* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *gsi_eth_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *gsi_eth_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_intr_comp_desc = {
+ .bLength = sizeof(rndis_gsi_ss_intr_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_bulk_comp_desc = {
+ .bLength = sizeof(rndis_gsi_ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *gsi_eth_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string rndis_gsi_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_gsi_strings[] = {
+ &rndis_gsi_string_table,
+ NULL,
+};
+
+/* mbim device descriptors */
+#define MBIM_NTB_DEFAULT_IN_SIZE (0x4000)
+
+static struct usb_cdc_ncm_ntb_parameters mbim_gsi_ntb_parameters = {
+ .wLength = sizeof(mbim_gsi_ntb_parameters),
+ .bmNtbFormatsSupported = cpu_to_le16(USB_CDC_NCM_NTB16_SUPPORTED),
+ .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+ .wNdpInDivisor = cpu_to_le16(4),
+ .wNdpInPayloadRemainder = cpu_to_le16(0),
+ .wNdpInAlignment = cpu_to_le16(4),
+
+ .dwNtbOutMaxSize = cpu_to_le32(0x4000),
+ .wNdpOutDivisor = cpu_to_le16(4),
+ .wNdpOutPayloadRemainder = cpu_to_le16(0),
+ .wNdpOutAlignment = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = 16,
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation.
+ */
+#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
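The 16-byte count follows from the notification layout: a CONNECTION_SPEED_CHANGE event is the 8-byte usb_cdc_notification header followed by two 32-bit link-speed values. A hedged sketch of filling such a buffer is below; the buffer and bitrate value are assumptions, and the interface number would be patched in at runtime by the function driver.

static void mbim_fill_speed_change_example(void *buf, u32 bitrate)
{
	struct usb_cdc_notification *event = buf;
	__le32 *data = (__le32 *)((u8 *)buf + sizeof(*event));

	event->bmRequestType = 0xA1;	/* IN | CLASS | INTERFACE */
	event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(0);	/* interface number, set at runtime */
	event->wLength = cpu_to_le16(8);

	data[0] = cpu_to_le32(bitrate);	/* downlink, bits per second */
	data[1] = cpu_to_le32(bitrate);	/* uplink, bits per second */
}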
+
+static struct usb_interface_assoc_descriptor mbim_gsi_iad_desc = {
+ .bLength = sizeof(mbim_gsi_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = 2,
+ .bFunctionSubClass = 0x0e,
+ .bFunctionProtocol = 0,
+ /* .iFunction = DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_gsi_control_intf = {
+ .bLength = sizeof(mbim_gsi_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0x02,
+ .bInterfaceSubClass = 0x0e,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_gsi_header_desc = {
+ .bLength = sizeof(mbim_gsi_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_gsi_union_desc = {
+ .bLength = sizeof(mbim_gsi_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_mbim_desc mbim_gsi_desc = {
+ .bLength = sizeof(mbim_gsi_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_TYPE,
+
+ .bcdMBIMVersion = cpu_to_le16(0x0100),
+
+ .wMaxControlMessage = cpu_to_le16(0x1000),
+ .bNumberFilters = 0x20,
+ .bMaxFilterSize = 0x80,
+ .wMaxSegmentSize = cpu_to_le16(0xfe0),
+ .bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc mbim_gsi_ext_mbb_desc = {
+ .bLength = sizeof(mbim_gsi_ext_mbb_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_EXTENDED_TYPE,
+
+ .bcdMBIMExtendedVersion = cpu_to_le16(0x0100),
+ .bMaxOutstandingCommandMessages = 64,
+ .wMTU = cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_gsi_data_nop_intf = {
+ .bLength = sizeof(mbim_gsi_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_gsi_data_intf = {
+ .bLength = sizeof(mbim_gsi_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *mbim_gsi_fs_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor mbim_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor mbim_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *mbim_gsi_hs_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_out_desc,
+ NULL,
+};
+
+/* Super Speed Support */
+static struct usb_endpoint_descriptor mbim_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_notify_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_in_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_out_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *mbim_gsi_ss_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string mbim_gsi_string_defs[] = {
+ [0].s = "MBIM Control",
+ [1].s = "MBIM Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = mbim_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_gsi_strings[] = {
+ &mbim_gsi_string_table,
+ NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE 0xA5
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_gsi_ext_config_desc_header {
+ __le32 dwLength;
+ __u16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_gsi_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* Microsoft Extended Configuration Descriptor */
+static struct {
+ struct mbim_gsi_ext_config_desc_header header;
+ struct mbim_gsi_ext_config_desc_function function;
+} mbim_gsi_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mbim_gsi_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
+ /* .subCompatibleID = DYNAMIC */
+ },
+};
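Once Windows has read the Microsoft OS String descriptor (string index 0xEE) and learned MBIM_VENDOR_CODE, it issues a vendor-specific control request with that bRequest and wIndex == 4 to fetch the extended configuration descriptor above. The handler below is only a sketch of that exchange under those assumptions, not the driver's actual setup() code.

static int mbim_gsi_ext_desc_request_example(const struct usb_ctrlrequest *ctrl,
					     void *buf, unsigned int len)
{
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_VENDOR ||
	    ctrl->bRequest != MBIM_VENDOR_CODE || w_index != 4)
		return -EOPNOTSUPP;

	/* copy no more than the host asked for, and no more than we have */
	len = min_t(unsigned int, len,
		    min_t(u16, w_length, sizeof(mbim_gsi_ext_config_desc)));
	memcpy(buf, &mbim_gsi_ext_config_desc, len);

	return len;
}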
+/* ecm device descriptors */
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* interface descriptor: */
+static struct usb_interface_descriptor ecm_gsi_control_intf = {
+ .bLength = sizeof(ecm_gsi_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_gsi_header_desc = {
+ .bLength = sizeof(ecm_gsi_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_gsi_union_desc = {
+ .bLength = sizeof(ecm_gsi_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_gsi_desc = {
+ .bLength = sizeof(ecm_gsi_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+ /* this descriptor actually adds value, surprise! */
+ /* .iMACAddress = DYNAMIC */
+ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+ .wNumberMCFilters = cpu_to_le16(0),
+ .bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_gsi_data_nop_intf = {
+ .bLength = sizeof(ecm_gsi_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_gsi_data_intf = {
+ .bLength = sizeof(ecm_gsi_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_gsi_fs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_gsi_hs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_hs_out_desc,
+ NULL,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_notify_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_in_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_out_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ecm_gsi_ss_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string ecm_gsi_string_defs[] = {
+ [0].s = "CDC Ethernet Control Model (ECM)",
+ [1].s = NULL /* DYNAMIC */,
+ [2].s = "CDC Ethernet Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = ecm_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_gsi_strings[] = {
+ &ecm_gsi_string_table,
+ NULL,
+};
+
+/* qdss device descriptor */
+
+static struct usb_interface_descriptor qdss_gsi_data_intf_desc = {
+ .bLength = sizeof(qdss_gsi_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_gsi_data_ep_comp_desc = {
+ .bLength = sizeof(qdss_gsi_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *qdss_gsi_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *qdss_gsi_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_ss_data_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string qdss_gsi_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings qdss_gsi_string_table = {
+ .language = 0x0409,
+ .strings = qdss_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *qdss_gsi_strings[] = {
+ &qdss_gsi_string_table,
+ NULL,
+};
+#endif
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 590e056d3618..e01d20939449 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -44,18 +44,19 @@ struct f_hidg {
/* configuration */
unsigned char bInterfaceSubClass;
unsigned char bInterfaceProtocol;
+ unsigned char protocol;
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
/* recv report */
struct list_head completed_out_req;
- spinlock_t spinlock;
+ spinlock_t read_spinlock;
wait_queue_head_t read_queue;
unsigned int qlen;
/* send report */
- struct mutex lock;
+ spinlock_t write_spinlock;
bool write_pending;
wait_queue_head_t write_queue;
struct usb_request *req;
@@ -98,6 +99,60 @@ static struct hid_descriptor hidg_desc = {
/*.desc[0].wDescriptorLenght = DYNAMIC */
};
+/* Super-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
+ .bLength = sizeof(hidg_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+ .bLength = sizeof(hidg_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
+ NULL,
+};
+
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -204,20 +259,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND (!list_empty(&hidg->completed_out_req))
/* wait for at least one buffer to complete */
while (!READ_COND) {
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND))
return -ERESTARTSYS;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
}
/* pick the first one */
@@ -232,7 +287,7 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
req = list->req;
count = min_t(unsigned int, count, req->actual - list->pos);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
/* copy to user outside spinlock */
count -= copy_to_user(buffer, req->buf + list->pos, count);
@@ -254,9 +309,9 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
return ret;
}
} else {
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add(&list->list, &hidg->completed_out_req);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
}
@@ -267,13 +322,16 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+ unsigned long flags;
if (req->status != 0) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
@@ -281,18 +339,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
+ struct usb_request *req;
+ unsigned long flags;
ssize_t status = -ENOMEM;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
-
+try_again:
/* write queue */
while (!WRITE_COND) {
- mutex_unlock(&hidg->lock);
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
@@ -300,37 +360,59 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
}
+ hidg->write_pending = 1;
+ req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
- status = copy_from_user(hidg->req->buf, buffer, count);
+
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+ status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
- mutex_unlock(&hidg->lock);
- return -EINVAL;
+ status = -EINVAL;
+ goto release_write_pending;
}
- hidg->req->status = 0;
- hidg->req->zero = 0;
- hidg->req->length = count;
- hidg->req->complete = f_hidg_req_complete;
- hidg->req->context = hidg;
- hidg->write_pending = 1;
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
- status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+ /* when our function has been disabled by host */
+ if (!hidg->req) {
+ free_ep_req(hidg->in_ep, req);
+ /*
+ * TODO
+ * Should we fail with error here?
+ */
+ goto try_again;
+ }
+
+ req->status = 0;
+ req->zero = 0;
+ req->length = count;
+ req->complete = f_hidg_req_complete;
+ req->context = hidg;
+
+ status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- hidg->write_pending = 0;
- wake_up(&hidg->write_queue);
+ goto release_write_pending_unlocked;
} else {
status = count;
}
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
- mutex_unlock(&hidg->lock);
+ return status;
+release_write_pending:
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+release_write_pending_unlocked:
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
return status;
}
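A userspace sketch of exercising the rewritten write path above: each write() to the character device queues one IN report on the interrupt endpoint. It assumes the gadget exposes /dev/hidg0 and was configured as an 8-byte boot-keyboard report; both are assumptions, not something this patch guarantees.

#include <fcntl.h>
#include <unistd.h>

static int send_key_example(void)
{
	/* modifier, reserved, then 6 key codes: 0x04 is HID usage 'a' */
	unsigned char press[8]   = { 0, 0, 0x04, 0, 0, 0, 0, 0 };
	unsigned char release[8] = { 0 };
	int fd = open("/dev/hidg0", O_RDWR);

	if (fd < 0)
		return -1;

	write(fd, press, sizeof(press));	/* key down */
	write(fd, release, sizeof(release));	/* key up */
	close(fd);
	return 0;
}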
@@ -377,26 +459,42 @@ static int f_hidg_open(struct inode *inode, struct file *fd)
static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
struct f_hidg_req_list *req_list;
unsigned long flags;
- req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
- if (!req_list)
- return;
+ switch (req->status) {
+ case 0:
+ req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
+ if (!req_list) {
+ ERROR(cdev, "Unable to allocate mem for req_list\n");
+ goto free_req;
+ }
- req_list->req = req;
+ req_list->req = req;
- spin_lock_irqsave(&hidg->spinlock, flags);
- list_add_tail(&req_list->list, &hidg->completed_out_req);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ list_add_tail(&req_list->list, &hidg->completed_out_req);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
- wake_up(&hidg->read_queue);
+ wake_up(&hidg->read_queue);
+ break;
+ default:
+ ERROR(cdev, "Set report failed %d\n", req->status);
+ /* FALLTHROUGH */
+ case -ECONNABORTED: /* hardware forced ep reset */
+ case -ECONNRESET: /* request dequeued */
+ case -ESHUTDOWN: /* disconnect from host */
+free_req:
+ free_ep_req(ep, req);
+ return;
+ }
}
static int hidg_setup(struct usb_function *f,
@@ -430,7 +528,9 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
VDBG(cdev, "get_protocol\n");
- goto stall;
+ length = min_t(unsigned int, length, 1);
+ ((u8 *) req->buf)[0] = hidg->protocol;
+ goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
@@ -442,6 +542,17 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_PROTOCOL):
VDBG(cdev, "set_protocol\n");
+ if (value > HID_REPORT_PROTOCOL)
+ goto stall;
+ length = 0;
+ /*
+ * We assume that programs implementing the Boot protocol
+ * are also compatible with the Report Protocol
+ */
+ if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
+ hidg->protocol = value;
+ goto respond;
+ }
goto stall;
break;
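For reference, the wValue decoded above selects between the two HID protocols defined in include/linux/hid.h; the small sketch below only restates that mapping and is not part of the patch.

#include <linux/hid.h>

static const char *hid_protocol_name_example(u16 value)
{
	switch (value) {
	case HID_BOOT_PROTOCOL:		/* 0 */
		return "boot";
	case HID_REPORT_PROTOCOL:	/* 1 */
		return "report";
	default:
		return "invalid";	/* hidg_setup() stalls such requests */
	}
}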
@@ -507,19 +618,30 @@ static void hidg_disable(struct usb_function *f)
usb_ep_disable(hidg->in_ep);
usb_ep_disable(hidg->out_ep);
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
free_ep_req(hidg->out_ep, list->req);
list_del(&list->list);
kfree(list);
}
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+ free_ep_req(hidg->in_ep, hidg->req);
+ hidg->write_pending = 1;
+ }
+
+ hidg->req = NULL;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_request *req_in = NULL;
+ unsigned long flags;
int i, status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
@@ -540,6 +662,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
goto fail;
}
hidg->in_ep->driver_data = hidg;
+
+ req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
+ if (!req_in) {
+ status = -ENOMEM;
+ goto disable_ep_in;
+ }
}
@@ -551,12 +679,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
hidg->out_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
- goto fail;
+ goto free_req_in;
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
ERROR(cdev, "Enable OUT endpoint FAILED!\n");
- goto fail;
+ goto free_req_in;
}
hidg->out_ep->driver_data = hidg;
@@ -572,17 +700,37 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
- if (status)
+ if (status) {
ERROR(cdev, "%s queue req --> %d\n",
hidg->out_ep->name, status);
+ free_ep_req(hidg->out_ep, req);
+ }
} else {
- usb_ep_disable(hidg->out_ep);
status = -ENOMEM;
- goto fail;
+ goto disable_out_ep;
}
}
}
+ if (hidg->in_ep != NULL) {
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ hidg->req = req_in;
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
+ }
+ return 0;
+disable_out_ep:
+ usb_ep_disable(hidg->out_ep);
+free_req_in:
+ if (req_in)
+ free_ep_req(hidg->in_ep, req_in);
+
+disable_ep_in:
+ if (hidg->in_ep)
+ usb_ep_disable(hidg->in_ep);
+
fail:
return status;
}
@@ -631,21 +779,18 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
hidg->out_ep = ep;
- /* preallocate request and buffer */
- status = -ENOMEM;
- hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
- if (!hidg->req)
- goto fail;
-
- hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
- if (!hidg->req->buf)
- goto fail;
-
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg->protocol = HID_REPORT_PROTOCOL;
+ hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_in_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
/*
@@ -661,13 +806,20 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
+ hidg_ss_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ hidg_ss_out_ep_desc.bEndpointAddress =
+ hidg_fs_out_ep_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors);
if (status)
goto fail;
- mutex_init(&hidg->lock);
- spin_lock_init(&hidg->spinlock);
+ spin_lock_init(&hidg->write_spinlock);
+ hidg->write_pending = 1;
+ hidg->req = NULL;
+ spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
@@ -693,11 +845,8 @@ fail_free_descs:
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
- if (hidg->req != NULL) {
- kfree(hidg->req->buf);
- if (hidg->in_ep != NULL)
- usb_ep_free_request(hidg->in_ep, hidg->req);
- }
+ if (hidg->req != NULL)
+ free_ep_req(hidg->in_ep, hidg->req);
return status;
}
@@ -825,11 +974,21 @@ end:
CONFIGFS_ATTR(f_hid_opts_, report_desc);
+static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
+
+ return sprintf(page, "%d:%d\n", major, opts->minor);
+}
+
+CONFIGFS_ATTR_RO(f_hid_opts_, dev);
+
static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_subclass,
&f_hid_opts_attr_protocol,
&f_hid_opts_attr_report_length,
&f_hid_opts_attr_report_desc,
+ &f_hid_opts_attr_dev,
NULL,
};
@@ -853,7 +1012,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
mutex_lock(&hidg_ida_lock);
hidg_put_minor(opts->minor);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
mutex_unlock(&hidg_ida_lock);
@@ -879,7 +1038,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
mutex_lock(&hidg_ida_lock);
- if (idr_is_empty(&hidg_ida.idr)) {
+ if (ida_is_empty(&hidg_ida)) {
status = ghid_setup(NULL, HIDG_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -892,7 +1051,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
goto unlock;
}
@@ -924,11 +1083,6 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
device_destroy(hidg_class, MKDEV(major, hidg->minor));
cdev_del(&hidg->cdev);
- /* disable/free request and end point */
- usb_ep_disable(hidg->in_ep);
- kfree(hidg->req->buf);
- usb_ep_free_request(hidg->in_ep, hidg->req);
-
usb_free_all_descriptors(f);
}
@@ -980,6 +1134,20 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
}
DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
+
+static int __init afunc_init(void)
+{
+ return usb_function_register(&hidusb_func);
+}
+
+static void __exit afunc_exit(void)
+{
+ usb_function_unregister(&hidusb_func);
+}
+
+module_init(afunc_init);
+module_exit(afunc_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabien Chouteau");
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index ddc3aad886b7..e775f89053ea 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -308,9 +308,7 @@ static void disable_loopback(struct f_loopback *loop)
static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_loopback *loop = ep->driver_data;
-
- return alloc_ep_req(ep, len, loop->buflen);
+ return alloc_ep_req(ep, len);
}
static int alloc_requests(struct usb_composite_dev *cdev,
@@ -333,7 +331,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
if (!in_req)
goto fail;
- out_req = lb_alloc_ep_req(loop->out_ep, 0);
+ out_req = lb_alloc_ep_req(loop->out_ep, loop->buflen);
if (!out_req)
goto fail_in;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 25488c89308a..01e25ae0fb25 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -458,13 +458,23 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_buffhd *bh = req->context;
if (req->status || req->actual != req->length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, req->length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+ * is set to NULL during ep disable. So, add a check for that case.
+ */
+ if (!common) {
+ bh->inreq_busy = 0;
+ bh->state = BUF_STATE_EMPTY;
+ return;
+ }
+
spin_lock(&common->lock);
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
@@ -477,15 +487,24 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
- dump_msg(common, "bulk-out", req->buf, req->actual);
if (req->status || req->actual != bh->bulk_out_intended_length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, bh->bulk_out_intended_length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+	 * Disconnect and completion can race, and driver data is set to NULL
+	 * during ep disable, so check for that case here.
+ */
+ if (!common) {
+ bh->outreq_busy = 0;
+ return;
+ }
+
+ dump_msg(common, "bulk-out", req->buf, req->actual);
spin_lock(&common->lock);
bh->outreq_busy = 0;
bh->state = BUF_STATE_FULL;
@@ -2274,6 +2293,8 @@ reset:
fsg->bulk_out_enabled = 0;
}
+ /* allow usb LPM after eps are disabled */
+ usb_gadget_autopm_put_async(common->gadget);
common->fsg = NULL;
wake_up(&common->fsg_wait);
}
@@ -2338,6 +2359,10 @@ static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
fsg->common->new_fsg = fsg;
+
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_async(fsg->common->gadget);
+
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
}
@@ -2460,9 +2485,13 @@ static void handle_exception(struct fsg_common *common)
&common->fsg->atomic_bitflags))
usb_ep_clear_halt(common->fsg->bulk_in);
- if (common->ep0_req_tag == exception_req_tag)
- ep0_queue(common); /* Complete the status stage */
-
+ if (common->ep0_req_tag == exception_req_tag) {
+ /* Complete the status stage */
+ if (common->cdev)
+ usb_composite_setup_continue(common->cdev);
+ else
+ ep0_queue(common);
+ }
/*
* Technically this should go here, but it would only be
* a waste of time. Ditto for the INTERFACE_CHANGE and
@@ -2476,8 +2505,14 @@ static void handle_exception(struct fsg_common *common)
case FSG_STATE_CONFIG_CHANGE:
do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ if (common->new_fsg) {
+ /*
+			 * Make sure the delayed_status flag has been updated
+			 * after set_alt() returns before continuing.
+ */
+ msleep(200);
usb_composite_setup_continue(common->cdev);
+ }
break;
case FSG_STATE_EXIT:
diff --git a/drivers/usb/gadget/function/f_mbim.c b/drivers/usb/gadget/function/f_mbim.c
new file mode 100644
index 000000000000..e7c3278f66d4
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mbim.c
@@ -0,0 +1,2147 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/cdc.h>
+
+#include <linux/usb/composite.h>
+#include <linux/platform_device.h>
+
+#include <linux/spinlock.h>
+
+/*
+ * This function is a "Mobile Broadband Interface Model" (MBIM) link.
+ * MBIM is intended to be used with high-speed network attachments.
+ *
+ * Note that MBIM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+#define MBIM_BULK_BUFFER_SIZE 4096
+#define MAX_CTRL_PKT_SIZE 4096
+
+enum mbim_peripheral_ep_type {
+ MBIM_DATA_EP_TYPE_RESERVED = 0x0,
+ MBIM_DATA_EP_TYPE_HSIC = 0x1,
+ MBIM_DATA_EP_TYPE_HSUSB = 0x2,
+ MBIM_DATA_EP_TYPE_PCIE = 0x3,
+ MBIM_DATA_EP_TYPE_EMBEDDED = 0x4,
+ MBIM_DATA_EP_TYPE_BAM_DMUX = 0x5,
+};
+
+struct mbim_peripheral_ep_info {
+	enum mbim_peripheral_ep_type ep_type;
+ u32 peripheral_iface_id;
+};
+
+struct mbim_ipa_ep_pair {
+ u32 cons_pipe_num;
+ u32 prod_pipe_num;
+};
+
+struct mbim_ipa_ep_info {
+ struct mbim_peripheral_ep_info ph_ep_info;
+ struct mbim_ipa_ep_pair ipa_ep_pair;
+};
+
+#define MBIM_IOCTL_MAGIC 'o'
+#define MBIM_GET_NTB_SIZE _IOR(MBIM_IOCTL_MAGIC, 2, u32)
+#define MBIM_GET_DATAGRAM_COUNT _IOR(MBIM_IOCTL_MAGIC, 3, u16)
+
+#define MBIM_EP_LOOKUP _IOR(MBIM_IOCTL_MAGIC, 4, struct mbim_ipa_ep_info)
+
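+/*
+ * For illustration only: a userspace control daemon might exercise these
+ * ioctls roughly as below (error handling omitted), assuming the misc
+ * device registered at the end of this file appears as /dev/android_mbim
+ * and that the ioctl codes and struct mbim_ipa_ep_info layout are mirrored
+ * in userspace, since they are not exported through a uapi header:
+ *
+ *	int fd = open("/dev/android_mbim", O_RDWR);
+ *	uint32_t ntb_size;
+ *	uint16_t dgram_count;
+ *	struct mbim_ipa_ep_info info;
+ *
+ *	ioctl(fd, MBIM_GET_NTB_SIZE, &ntb_size);
+ *	ioctl(fd, MBIM_GET_DATAGRAM_COUNT, &dgram_count);
+ *	ioctl(fd, MBIM_EP_LOOKUP, &info);
+ */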
+
+#define NR_MBIM_PORTS 1
+#define MBIM_DEFAULT_PORT 0
+
+/* ID for Microsoft OS String */
+#define MBIM_OS_STRING_ID 0xEE
+
+struct ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+struct mbim_ep_descs {
+ struct usb_endpoint_descriptor *in;
+ struct usb_endpoint_descriptor *out;
+ struct usb_endpoint_descriptor *notify;
+};
+
+struct mbim_notify_port {
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ atomic_t notify_count;
+};
+
+enum mbim_notify_state {
+ MBIM_NOTIFY_NONE,
+ MBIM_NOTIFY_CONNECT,
+ MBIM_NOTIFY_SPEED,
+ MBIM_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+struct f_mbim {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ atomic_t online;
+
+ atomic_t open_excl;
+ atomic_t ioctl_excl;
+ atomic_t read_excl;
+ atomic_t write_excl;
+
+ wait_queue_head_t read_wq;
+
+ enum transport_type xport;
+ u8 port_num;
+ struct data_port bam_port;
+ struct mbim_notify_port not_port;
+
+ struct mbim_ep_descs fs;
+ struct mbim_ep_descs hs;
+
+ u8 ctrl_id, data_id;
+ bool data_interface_up;
+
+ spinlock_t lock;
+
+ struct list_head cpkt_req_q;
+ struct list_head cpkt_resp_q;
+
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+
+ atomic_t error;
+ unsigned int cpkt_drop_cnt;
+ bool remote_wakeup_enabled;
+};
+
+struct mbim_ntb_input_size {
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+ u16 reserved;
+};
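+
+/*
+ * For reference, SET_NTB_INPUT_SIZE arrives on ep0 in one of two
+ * little-endian layouts, both decoded in mbim_ep0out_complete() below:
+ *
+ *	4-byte form: __le32 ntb_input_size
+ *	8-byte form: __le32 ntb_input_size, __le16 ntb_max_datagrams,
+ *		     __le16 reserved (i.e. this structure)
+ */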
+
+/* temporary variable used between mbim_open() and mbim_gadget_bind() */
+static struct f_mbim *_mbim_dev;
+
+static unsigned int nr_mbim_ports;
+
+static struct mbim_ports {
+ struct f_mbim *port;
+ unsigned port_num;
+} mbim_ports[NR_MBIM_PORTS];
+
+static inline struct f_mbim *func_to_mbim(struct usb_function *f)
+{
+ return container_of(f, struct f_mbim, function);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define MBIM_NTB_DEFAULT_IN_SIZE (0x4000)
+#define MBIM_NTB_OUT_SIZE (0x1000)
+#define MBIM_NDP_IN_DIVISOR (0x4)
+
+#define NTB_DEFAULT_IN_SIZE_IPA (0x4000)
+#define MBIM_NTB_OUT_SIZE_IPA (0x4000)
+
+#define MBIM_FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED
+
+static struct usb_cdc_ncm_ntb_parameters mbim_ntb_parameters = {
+ .wLength = sizeof(mbim_ntb_parameters),
+ .bmNtbFormatsSupported = cpu_to_le16(MBIM_FORMATS_SUPPORTED),
+ .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+ .wNdpInDivisor = cpu_to_le16(MBIM_NDP_IN_DIVISOR),
+ .wNdpInPayloadRemainder = cpu_to_le16(0),
+ .wNdpInAlignment = cpu_to_le16(4),
+
+ .dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE),
+ .wNdpOutDivisor = cpu_to_le16(4),
+ .wNdpOutPayloadRemainder = cpu_to_le16(0),
+ .wNdpOutAlignment = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = 0,
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor mbim_iad_desc = {
+ .bLength = sizeof(mbim_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = 2,
+ .bFunctionSubClass = 0x0e,
+ .bFunctionProtocol = 0,
+ /* .iFunction = DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_control_intf = {
+ .bLength = sizeof(mbim_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0x02,
+ .bInterfaceSubClass = 0x0e,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_header_desc = {
+ .bLength = sizeof(mbim_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_union_desc = {
+ .bLength = sizeof(mbim_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_mbim_desc mbim_desc = {
+ .bLength = sizeof(mbim_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_TYPE,
+
+ .bcdMBIMVersion = cpu_to_le16(0x0100),
+
+ .wMaxControlMessage = cpu_to_le16(0x1000),
+ .bNumberFilters = 0x20,
+ .bMaxFilterSize = 0x80,
+ .wMaxSegmentSize = cpu_to_le16(0x800),
+ .bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc ext_mbb_desc = {
+ .bLength = sizeof(ext_mbb_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_EXTENDED_TYPE,
+
+ .bcdMBIMExtendedVersion = cpu_to_le16(0x0100),
+ .bMaxOutstandingCommandMessages = 64,
+ .wMTU = cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_data_nop_intf = {
+ .bLength = sizeof(mbim_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_data_intf = {
+ .bLength = sizeof(mbim_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *mbim_fs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &fs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &fs_mbim_in_desc,
+ (struct usb_descriptor_header *) &fs_mbim_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor hs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *mbim_hs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &hs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &hs_mbim_in_desc,
+ (struct usb_descriptor_header *) &hs_mbim_out_desc,
+ NULL,
+};
+
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ss_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_notify_comp_desc = {
+ .bLength = sizeof(ss_mbim_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+	.wBytesPerInterval =	cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_in_comp_desc = {
+ .bLength = sizeof(ss_mbim_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ss_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_out_comp_desc = {
+ .bLength = sizeof(ss_mbim_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *mbim_ss_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &ss_mbim_in_desc,
+ (struct usb_descriptor_header *) &ss_mbim_in_comp_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+#define STRING_CTRL_IDX 0
+#define STRING_DATA_IDX 1
+
+static struct usb_string mbim_string_defs[] = {
+ [STRING_CTRL_IDX].s = "MBIM Control",
+ [STRING_DATA_IDX].s = "MBIM Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = mbim_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_strings[] = {
+ &mbim_string_table,
+ NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE 0xA5
+
+/* Microsoft OS String */
+static u8 mbim_os_string[] = {
+	18, /* sizeof(mbim_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ MBIM_VENDOR_CODE,
+ /* padding */
+ 0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_ext_config_desc_header {
+ __le32 dwLength;
+ __u16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* Microsoft Extended Configuration Descriptor */
+static struct {
+ struct mbim_ext_config_desc_header header;
+ struct mbim_ext_config_desc_function function;
+} mbim_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mbim_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
+ /* .subCompatibleID = DYNAMIC */
+ },
+};
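+
+/*
+ * For reference, a Windows host typically retrieves the two descriptors
+ * above with the control transfers below, both answered by
+ * mbim_ctrlrequest() while no configuration is selected (values as
+ * advertised in this file):
+ *
+ *	GET_DESCRIPTOR, bmRequestType 0x80, wValue 0x03EE
+ *		-> mbim_os_string
+ *	vendor request, bmRequestType 0xC0, bRequest MBIM_VENDOR_CODE,
+ *		wIndex 4 -> mbim_ext_config_desc
+ */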
+
+static inline int mbim_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+ atomic_dec(excl);
+ return -EBUSY;
+}
+
+static inline void mbim_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+static struct ctrl_pkt *mbim_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void mbim_free_ctrl_pkt(struct ctrl_pkt *pkt)
+{
+ if (pkt) {
+ kfree(pkt->buf);
+ kfree(pkt);
+ }
+}
+
+static struct usb_request *mbim_alloc_req(struct usb_ep *ep, int buffer_size,
+ size_t extra_buf)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size + extra_buf, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+void fmbim_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* ---------------------------- BAM INTERFACE ----------------------------- */
+
+static int mbim_bam_setup(int no_ports)
+{
+ int ret;
+
+ pr_info("no_ports:%d\n", no_ports);
+
+ ret = bam_data_setup(USB_FUNC_MBIM, no_ports);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ pr_info("Initialized %d ports\n", no_ports);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------*/
+
+static inline void mbim_reset_values(struct f_mbim *mbim)
+{
+ mbim->ntb_input_size = MBIM_NTB_DEFAULT_IN_SIZE;
+
+ atomic_set(&mbim->online, 0);
+}
+
+static void mbim_reset_function_queue(struct f_mbim *dev)
+{
+ struct ctrl_pkt *cpkt = NULL;
+
+ pr_debug("Queue empty packet for QBI\n");
+
+ spin_lock(&dev->lock);
+
+ cpkt = mbim_alloc_ctrl_pkt(0, GFP_ATOMIC);
+ if (!cpkt) {
+ pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+ spin_unlock(&dev->lock);
+ return;
+ }
+
+ list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+ spin_unlock(&dev->lock);
+
+ pr_debug("%s: Wake up read queue\n", __func__);
+ wake_up(&dev->read_wq);
+}
+
+static void fmbim_reset_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *dev = req->context;
+
+ mbim_reset_function_queue(dev);
+}
+
+static void mbim_clear_queues(struct f_mbim *mbim)
+{
+ struct ctrl_pkt *cpkt = NULL;
+ struct list_head *act, *tmp;
+
+ spin_lock(&mbim->lock);
+ list_for_each_safe(act, tmp, &mbim->cpkt_req_q) {
+ cpkt = list_entry(act, struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ mbim_free_ctrl_pkt(cpkt);
+ }
+ list_for_each_safe(act, tmp, &mbim->cpkt_resp_q) {
+ cpkt = list_entry(act, struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ mbim_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock(&mbim->lock);
+}
+
+/*
+ * Context: mbim->lock held
+ */
+static void mbim_do_notify(struct f_mbim *mbim)
+{
+ struct usb_request *req = mbim->not_port.notify_req;
+ struct usb_cdc_notification *event;
+ int status;
+
+ pr_debug("notify_state: %d\n", mbim->not_port.notify_state);
+
+ if (!req)
+ return;
+
+ event = req->buf;
+
+ switch (mbim->not_port.notify_state) {
+
+ case MBIM_NOTIFY_NONE:
+ if (atomic_read(&mbim->not_port.notify_count) > 0)
+ pr_err("Pending notifications in MBIM_NOTIFY_NONE\n");
+ else
+ pr_debug("No pending notifications\n");
+
+ return;
+
+ case MBIM_NOTIFY_RESPONSE_AVAILABLE:
+ pr_debug("Notification %02x sent\n", event->bNotificationType);
+
+ if (atomic_read(&mbim->not_port.notify_count) <= 0) {
+ pr_debug("notify_response_available: done\n");
+ return;
+ }
+
+ spin_unlock(&mbim->lock);
+ status = usb_func_ep_queue(&mbim->function,
+ mbim->not_port.notify,
+ req, GFP_ATOMIC);
+ spin_lock(&mbim->lock);
+ if (status) {
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_err("Queue notify request failed, err: %d\n",
+ status);
+ }
+
+ return;
+ }
+
+ event->bmRequestType = 0xA1;
+ event->wIndex = cpu_to_le16(mbim->ctrl_id);
+
+ /*
+	 * With double buffering, the completion callback can run as soon as
+	 * the request is queued if there is space in the FIFO, so drop the
+	 * lock around the queue call.
+ */
+ atomic_inc(&mbim->not_port.notify_count);
+ pr_debug("queue request: notify_count = %d\n",
+ atomic_read(&mbim->not_port.notify_count));
+ spin_unlock(&mbim->lock);
+ status = usb_func_ep_queue(&mbim->function, mbim->not_port.notify, req,
+ GFP_ATOMIC);
+ spin_lock(&mbim->lock);
+ if (status) {
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_err("usb_func_ep_queue failed, err: %d\n", status);
+ }
+}
+
+static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *mbim = req->context;
+ struct usb_cdc_notification *event = req->buf;
+
+ pr_debug("dev:%pK\n", mbim);
+
+ spin_lock(&mbim->lock);
+ switch (req->status) {
+ case 0:
+ atomic_dec(&mbim->not_port.notify_count);
+ pr_debug("notify_count = %d\n",
+ atomic_read(&mbim->not_port.notify_count));
+ break;
+
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ mbim->not_port.notify_state = MBIM_NOTIFY_NONE;
+ atomic_set(&mbim->not_port.notify_count, 0);
+ pr_info("ESHUTDOWN/ECONNRESET, connection gone\n");
+ spin_unlock(&mbim->lock);
+ mbim_clear_queues(mbim);
+ mbim_reset_function_queue(mbim);
+ spin_lock(&mbim->lock);
+ break;
+ default:
+ pr_err("Unknown event %02x --> %d\n",
+ event->bNotificationType, req->status);
+ break;
+ }
+
+ mbim_do_notify(mbim);
+ spin_unlock(&mbim->lock);
+
+ pr_debug("dev:%pK Exit\n", mbim);
+}
+
+static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* now for SET_NTB_INPUT_SIZE only */
+ unsigned in_size = 0;
+ struct usb_function *f = req->context;
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct mbim_ntb_input_size *ntb = NULL;
+
+ pr_debug("dev:%pK\n", mbim);
+
+ req->context = NULL;
+ if (req->status || req->actual != req->length) {
+ pr_err("Bad control-OUT transfer\n");
+ goto invalid;
+ }
+
+ if (req->length == 4) {
+ in_size = get_unaligned_le32(req->buf);
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+ pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+ goto invalid;
+ }
+ } else if (req->length == 8) {
+ ntb = (struct mbim_ntb_input_size *)req->buf;
+ in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+ pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+ goto invalid;
+ }
+ mbim->ntb_max_datagrams =
+ get_unaligned_le16(&(ntb->ntb_max_datagrams));
+ } else {
+		pr_err("Illegal NTB length %d\n", req->length);
+ goto invalid;
+ }
+
+ pr_debug("Set NTB INPUT SIZE %d\n", in_size);
+
+ mbim->ntb_input_size = in_size;
+ return;
+
+invalid:
+ usb_ep_set_halt(ep);
+
+ pr_err("dev:%pK Failed\n", mbim);
+}
+
+static void
+fmbim_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *dev = req->context;
+ struct ctrl_pkt *cpkt = NULL;
+ int len = req->actual;
+ static bool first_command_sent;
+
+ if (!dev) {
+ pr_err("mbim dev is null\n");
+ return;
+ }
+
+ if (req->status < 0) {
+ pr_err("mbim command error %d\n", req->status);
+ return;
+ }
+
+ /*
+	 * Wait for userspace to process the previous MBIM command (e.g.
+	 * MBIM_OPEN) before handling a new one. However, don't drop the first
+	 * command sent during boot-up, since the device file may not be open
+	 * yet; queue that command instead.
+ */
+ if (!atomic_read(&dev->open_excl) && first_command_sent) {
+ pr_err("mbim not opened yet, dropping cmd pkt = %d\n", len);
+ return;
+ }
+ if (!first_command_sent)
+ first_command_sent = true;
+
+ pr_debug("dev:%pK port#%d\n", dev, dev->port_num);
+
+ cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (!cpkt) {
+ pr_err("Unable to allocate ctrl pkt\n");
+ return;
+ }
+
+ pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
+ memcpy(cpkt->buf, req->buf, len);
+
+ spin_lock(&dev->lock);
+
+ list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+ spin_unlock(&dev->lock);
+
+ /* wakeup read thread */
+ pr_debug("Wake up read queue\n");
+ wake_up(&dev->read_wq);
+}
+
+static int
+mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+ struct usb_request *req = cdev->req;
+ struct ctrl_pkt *cpkt = NULL;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /*
+ * composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+
+ if (!atomic_read(&mbim->online)) {
+ pr_warn("usb cable is not connected\n");
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_RESET_FUNCTION:
+
+ pr_debug("USB_CDC_RESET_FUNCTION\n");
+ value = 0;
+ req->complete = fmbim_reset_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+
+ pr_debug("USB_CDC_SEND_ENCAPSULATED_COMMAND\n");
+
+ if (w_length > req->length) {
+ pr_debug("w_length > req->length: %d > %d\n",
+ w_length, req->length);
+ }
+ value = w_length;
+ req->complete = fmbim_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+
+ pr_debug("USB_CDC_GET_ENCAPSULATED_RESPONSE\n");
+
+ if (w_value) {
+			pr_err("w_value != 0: %d\n", w_value);
+ break;
+ }
+
+ pr_debug("req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ spin_lock(&mbim->lock);
+ if (list_empty(&mbim->cpkt_resp_q)) {
+ pr_err("ctrl resp queue empty\n");
+ spin_unlock(&mbim->lock);
+ break;
+ }
+
+ cpkt = list_first_entry(&mbim->cpkt_resp_q,
+ struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&mbim->lock);
+
+ value = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, value);
+ mbim_free_ctrl_pkt(cpkt);
+
+ pr_debug("copied encapsulated_response %d bytes\n",
+ value);
+
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_PARAMETERS:
+
+ pr_debug("USB_CDC_GET_NTB_PARAMETERS\n");
+
+ if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ value = w_length > sizeof(mbim_ntb_parameters) ?
+ sizeof(mbim_ntb_parameters) : w_length;
+ memcpy(req->buf, &mbim_ntb_parameters, value);
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_GET_NTB_INPUT_SIZE\n");
+
+ if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ put_unaligned_le32(mbim->ntb_input_size, req->buf);
+ value = 4;
+ pr_debug("Reply to host INPUT SIZE %d\n",
+ mbim->ntb_input_size);
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_SET_NTB_INPUT_SIZE\n");
+
+ if (w_length != 4 && w_length != 8) {
+ pr_err("wrong NTB length %d\n", w_length);
+ break;
+ }
+
+ if (w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ req->complete = mbim_ep0out_complete;
+ req->length = w_length;
+ req->context = f;
+
+ value = req->length;
+ break;
+
+ /* optional in mbim descriptor: */
+ /* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
+ /* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */
+
+ default:
+ pr_err("invalid control req: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("control request: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+
+ if (value < 0) {
+ pr_err("queueing req failed: %02x.%02x, err %d\n",
+ ctrl->bRequestType,
+ ctrl->bRequest, value);
+ }
+ } else {
+ pr_err("ctrl req err %d: %02x.%02x v%04x i%04x l%d\n",
+ value, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * This function handles the Microsoft-specific OS descriptor control
+ * requests that are issued by Windows host drivers to determine the
+ * configuration containing the MBIM function.
+ *
+ * Unlike mbim_setup() this function handles two specific device requests,
+ * and only when a configuration has not yet been selected.
+ */
+static int mbim_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* only respond to OS descriptors when no configuration selected */
+ if (cdev->config || !mbim_ext_config_desc.function.subCompatibleID[0])
+ return value;
+
+ pr_debug("%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MSFT OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MBIM_OS_STRING_ID) {
+
+ value = (w_length < sizeof(mbim_os_string) ?
+ w_length : sizeof(mbim_os_string));
+ memcpy(cdev->req->buf, mbim_os_string, value);
+
+ } else if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+ && ctrl->bRequest == MBIM_VENDOR_CODE && w_index == 4) {
+
+ /* Handle Extended OS descriptor */
+ value = (w_length < sizeof(mbim_ext_config_desc) ?
+ w_length : sizeof(mbim_ext_config_desc));
+ memcpy(cdev->req->buf, &mbim_ext_config_desc, value);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ pr_err("response queue error: %d\n", rc);
+ }
+ return value;
+}
+
+static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+ int ret = 0;
+
+ pr_debug("intf=%u, alt=%u\n", intf, alt);
+
+ /* Control interface has only altsetting 0 */
+ if (intf == mbim->ctrl_id) {
+
+ pr_info("CONTROL_INTERFACE\n");
+
+ if (alt != 0)
+ goto fail;
+
+ if (mbim->not_port.notify->driver_data) {
+ pr_info("reset mbim control %d\n", intf);
+ usb_ep_disable(mbim->not_port.notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->not_port.notify);
+ if (ret) {
+ mbim->not_port.notify->desc = NULL;
+ pr_err("Failed configuring notify ep %s: err %d\n",
+ mbim->not_port.notify->name, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(mbim->not_port.notify);
+ if (ret) {
+ pr_err("usb ep#%s enable failed, err#%d\n",
+ mbim->not_port.notify->name, ret);
+ return ret;
+ }
+ mbim->not_port.notify->driver_data = mbim;
+
+ /* Data interface has two altsettings, 0 and 1 */
+ } else if (intf == mbim->data_id) {
+
+ pr_info("DATA_INTERFACE id %d, data interface status %d\n",
+ mbim->data_id, mbim->data_interface_up);
+
+ if (alt > 1)
+ goto fail;
+
+ if (mbim->data_interface_up == alt)
+ return 0;
+
+ if (mbim->bam_port.in->driver_data) {
+ pr_info("reset mbim, alt-%d\n", alt);
+ mbim_reset_values(mbim);
+ }
+
+ if (alt == 0) {
+ /*
+ * perform bam data disconnect handshake upon usb
+ * disconnect
+ */
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ gbam_mbim_disconnect();
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ bam_data_disconnect(&mbim->bam_port,
+ USB_FUNC_MBIM, mbim->port_num);
+ if (!gadget_is_dwc3(cdev->gadget))
+ break;
+
+ if (msm_ep_unconfig(mbim->bam_port.in) ||
+ msm_ep_unconfig(mbim->bam_port.out)) {
+ pr_err("ep_unconfig failed\n");
+ goto fail;
+ }
+				break;
+			default:
+ pr_err("unknown transport\n");
+ }
+ goto notify_ready;
+ }
+
+ pr_info("Alt set 1, initialize ports\n");
+
+ /*
+ * CDC Network only sends data in non-default altsettings.
+ * Changing altsettings resets filters, statistics, etc.
+ */
+ pr_info("Choose endpoints\n");
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->bam_port.in);
+ if (ret) {
+ mbim->bam_port.in->desc = NULL;
+ pr_err("IN ep %s failed: %d\n",
+ mbim->bam_port.in->name, ret);
+ return ret;
+ }
+
+ pr_info("Set mbim port in_desc = 0x%pK\n",
+ mbim->bam_port.in->desc);
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->bam_port.out);
+ if (ret) {
+ mbim->bam_port.out->desc = NULL;
+ pr_err("OUT ep %s failed: %d\n",
+ mbim->bam_port.out->name, ret);
+ return ret;
+ }
+
+ pr_info("Set mbim port out_desc = 0x%pK\n",
+ mbim->bam_port.out->desc);
+
+ pr_debug("Activate mbim\n");
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ gbam_mbim_connect(cdev->gadget, mbim->bam_port.in,
+ mbim->bam_port.out);
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ ret = bam_data_connect(&mbim->bam_port,
+ mbim->xport, mbim->port_num,
+ USB_FUNC_MBIM);
+ if (ret) {
+ pr_err("bam_data_setup failed:err:%d\n",
+ ret);
+ goto fail;
+ }
+ break;
+ default:
+ pr_err("unknown transport\n");
+ }
+notify_ready:
+ mbim->data_interface_up = alt;
+ spin_lock(&mbim->lock);
+ mbim->not_port.notify_state = MBIM_NOTIFY_RESPONSE_AVAILABLE;
+ spin_unlock(&mbim->lock);
+ } else {
+ goto fail;
+ }
+
+ atomic_set(&mbim->online, 1);
+
+ pr_info("SET DEVICE ONLINE\n");
+
+ return 0;
+
+fail:
+ pr_err("ERROR: Illegal Interface\n");
+ return -EINVAL;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * this MBIM function *MUST* implement a get_alt() method.
+ */
+static int mbim_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ if (intf == mbim->ctrl_id)
+ return 0;
+ else if (intf == mbim->data_id)
+ return mbim->data_interface_up;
+
+ return -EINVAL;
+}
+
+static void mbim_disable(struct usb_function *f)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+
+ pr_info("SET DEVICE OFFLINE\n");
+ atomic_set(&mbim->online, 0);
+ mbim->remote_wakeup_enabled = 0;
+
+ /* Disable Control Path */
+ if (mbim->not_port.notify->driver_data) {
+ usb_ep_disable(mbim->not_port.notify);
+ mbim->not_port.notify->driver_data = NULL;
+ }
+ atomic_set(&mbim->not_port.notify_count, 0);
+ mbim->not_port.notify_state = MBIM_NOTIFY_NONE;
+
+ mbim_clear_queues(mbim);
+ mbim_reset_function_queue(mbim);
+
+ /* Disable Data Path - only if it was initialized already (alt=1) */
+ if (!mbim->data_interface_up) {
+ pr_debug("MBIM data interface is not opened. Returning\n");
+ return;
+ }
+
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ gbam_mbim_disconnect();
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ if (gadget_is_dwc3(cdev->gadget)) {
+ msm_ep_unconfig(mbim->bam_port.out);
+ msm_ep_unconfig(mbim->bam_port.in);
+ }
+ bam_data_disconnect(&mbim->bam_port, USB_FUNC_MBIM,
+ mbim->port_num);
+ break;
+ default:
+ pr_err("unknown transport\n");
+ }
+
+ mbim->data_interface_up = false;
+ pr_info("mbim deactivated\n");
+}
+
+#define MBIM_ACTIVE_PORT 0
+
+static void mbim_suspend(struct usb_function *f)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ pr_info("mbim suspended\n");
+
+	pr_debug("%s(): remote_wakeup:%d\n", __func__,
+ mbim->cdev->gadget->remote_wakeup);
+
+ if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
+ return;
+
+ /* If the function is in Function Suspend state, avoid suspending the
+ * MBIM function again.
+ */
+ if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
+ f->func_is_suspended)
+ return;
+
+ if (mbim->cdev->gadget->speed == USB_SPEED_SUPER)
+ mbim->remote_wakeup_enabled = f->func_wakeup_allowed;
+ else
+ mbim->remote_wakeup_enabled = mbim->cdev->gadget->remote_wakeup;
+
+ /* MBIM data interface is up only when alt setting is set to 1. */
+ if (!mbim->data_interface_up) {
+ pr_debug("MBIM data interface is not opened. Returning\n");
+ return;
+ }
+
+ if (!mbim->remote_wakeup_enabled)
+ atomic_set(&mbim->online, 0);
+
+ bam_data_suspend(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
+ mbim->remote_wakeup_enabled);
+}
+
+static void mbim_resume(struct usb_function *f)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ pr_info("mbim resumed\n");
+
+ if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
+ return;
+
+ /*
+ * If the function is in USB3 Function Suspend state, resume is
+ * canceled. In this case resume is done by a Function Resume request.
+ */
+ if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
+ f->func_is_suspended)
+ return;
+
+ /* resume control path by queuing notify req */
+ spin_lock(&mbim->lock);
+ mbim_do_notify(mbim);
+ spin_unlock(&mbim->lock);
+
+ /* MBIM data interface is up only when alt setting is set to 1. */
+ if (!mbim->data_interface_up) {
+ pr_debug("MBIM data interface is not opened. Returning\n");
+ return;
+ }
+
+ if (!mbim->remote_wakeup_enabled)
+ atomic_set(&mbim->online, 1);
+
+ bam_data_resume(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
+ mbim->remote_wakeup_enabled);
+}
+
+static int mbim_func_suspend(struct usb_function *f, unsigned char options)
+{
+ enum {
+ MBIM_FUNC_SUSPEND_MASK = 0x1,
+ MBIM_FUNC_WAKEUP_EN_MASK = 0x2
+ };
+
+ bool func_wakeup_allowed;
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ if (f == NULL)
+ return -EINVAL;
+
+ pr_debug("Got Function Suspend(%u) command for %s function\n",
+ options, f->name ? f->name : "");
+
+ /* Function Suspend is supported by Super Speed devices only */
+ if (mbim->cdev->gadget->speed != USB_SPEED_SUPER)
+ return -ENOTSUPP;
+
+ func_wakeup_allowed =
+ ((options & MBIM_FUNC_WAKEUP_EN_MASK) != 0);
+
+ if (options & MBIM_FUNC_SUSPEND_MASK) {
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ if (!f->func_is_suspended) {
+ mbim_suspend(f);
+ f->func_is_suspended = true;
+ }
+ } else {
+ if (f->func_is_suspended) {
+ f->func_is_suspended = false;
+ mbim_resume(f);
+ }
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ }
+
+ return 0;
+}
+
+static int mbim_get_status(struct usb_function *f)
+{
+ enum {
+ MBIM_STS_FUNC_WAKEUP_CAP_SHIFT = 0,
+ MBIM_STS_FUNC_WAKEUP_EN_SHIFT = 1
+ };
+
+ unsigned remote_wakeup_enabled_bit;
+ const unsigned remote_wakeup_capable_bit = 1;
+
+ remote_wakeup_enabled_bit = f->func_wakeup_allowed ? 1 : 0;
+ return (remote_wakeup_enabled_bit << MBIM_STS_FUNC_WAKEUP_EN_SHIFT) |
+ (remote_wakeup_capable_bit << MBIM_STS_FUNC_WAKEUP_CAP_SHIFT);
+}
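+
+/*
+ * Worked example: with func_wakeup_allowed set this returns 0x3 (function
+ * remote wakeup capable | enabled); otherwise it returns 0x1 (capable only).
+ */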
+
+/*---------------------- function driver setup/binding ---------------------*/
+
+static int
+mbim_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_mbim *mbim = func_to_mbim(f);
+ int status;
+ struct usb_ep *ep;
+ struct usb_cdc_notification *event;
+
+ pr_info("Enter\n");
+
+ mbim->cdev = cdev;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ mbim->ctrl_id = status;
+ mbim_iad_desc.bFirstInterface = status;
+
+ mbim_control_intf.bInterfaceNumber = status;
+ mbim_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ mbim->data_id = status;
+ mbim->data_interface_up = false;
+
+ mbim_data_nop_intf.bInterfaceNumber = status;
+ mbim_data_intf.bInterfaceNumber = status;
+ mbim_union_desc.bSlaveInterface0 = status;
+
+ mbim->bam_port.cdev = cdev;
+ mbim->bam_port.func = &mbim->function;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_in_desc);
+ if (!ep) {
+ pr_err("usb epin autoconfig failed\n");
+ goto fail;
+ }
+ pr_info("usb epin autoconfig succeeded\n");
+ ep->driver_data = cdev; /* claim */
+ mbim->bam_port.in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_out_desc);
+ if (!ep) {
+ pr_err("usb epout autoconfig failed\n");
+ goto fail;
+ }
+ pr_info("usb epout autoconfig succeeded\n");
+ ep->driver_data = cdev; /* claim */
+ mbim->bam_port.out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_notify_desc);
+ if (!ep) {
+ pr_err("usb notify ep autoconfig failed\n");
+ goto fail;
+ }
+ pr_info("usb notify ep autoconfig succeeded\n");
+ mbim->not_port.notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ mbim->not_port.notify_req = mbim_alloc_req(ep, NCM_STATUS_BYTECOUNT,
+ cdev->gadget->extra_buf_alloc);
+ if (!mbim->not_port.notify_req) {
+ pr_info("failed to allocate notify request\n");
+ goto fail;
+ }
+ pr_info("allocated notify ep request & request buffer\n");
+
+ mbim->not_port.notify_req->context = mbim;
+ mbim->not_port.notify_req->complete = mbim_notify_complete;
+ mbim->not_port.notify_req->length = sizeof(*event);
+ event = mbim->not_port.notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(mbim->ctrl_id);
+ event->wLength = cpu_to_le16(0);
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(mbim_fs_function);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ /*
+ * support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ hs_mbim_in_desc.bEndpointAddress =
+ fs_mbim_in_desc.bEndpointAddress;
+ hs_mbim_out_desc.bEndpointAddress =
+ fs_mbim_out_desc.bEndpointAddress;
+ hs_mbim_notify_desc.bEndpointAddress =
+ fs_mbim_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(mbim_hs_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_mbim_in_desc.bEndpointAddress =
+ fs_mbim_in_desc.bEndpointAddress;
+ ss_mbim_out_desc.bEndpointAddress =
+ fs_mbim_out_desc.bEndpointAddress;
+ ss_mbim_notify_desc.bEndpointAddress =
+ fs_mbim_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(mbim_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ /*
+ * If MBIM is bound in a config other than the first, tell Windows
+ * about it by returning the num as a string in the OS descriptor's
+ * subCompatibleID field. Windows only supports up to config #4.
+ */
+ if (c->bConfigurationValue >= 2 && c->bConfigurationValue <= 4) {
+ pr_debug("MBIM in configuration %d\n", c->bConfigurationValue);
+ mbim_ext_config_desc.function.subCompatibleID[0] =
+ c->bConfigurationValue + '0';
+ }
+
+ pr_info("mbim(%d): %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ mbim->port_num,
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ mbim->bam_port.in->name, mbim->bam_port.out->name,
+ mbim->not_port.notify->name);
+
+ return 0;
+
+fail:
+ pr_err("%s failed to bind, err %d\n", f->name, status);
+
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (mbim->not_port.notify_req) {
+ kfree(mbim->not_port.notify_req->buf);
+ usb_ep_free_request(mbim->not_port.notify,
+ mbim->not_port.notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (mbim->not_port.notify)
+ mbim->not_port.notify->driver_data = NULL;
+ if (mbim->bam_port.out)
+ mbim->bam_port.out->driver_data = NULL;
+ if (mbim->bam_port.in)
+ mbim->bam_port.in->driver_data = NULL;
+
+ return status;
+}
+
+static void mbim_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ pr_debug("unbinding mbim\n");
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->fs_descriptors);
+
+ kfree(mbim->not_port.notify_req->buf);
+ usb_ep_free_request(mbim->not_port.notify, mbim->not_port.notify_req);
+
+ mbim_ext_config_desc.function.subCompatibleID[0] = 0;
+}
+
+/**
+ * mbim_bind_config - add MBIM link to a configuration
+ * @c: the configuration to support the network link
+ * Context: single threaded during gadget setup
+ * Returns zero on success, else negative errno.
+ */
+int mbim_bind_config(struct usb_configuration *c, unsigned portno,
+ char *xport_name)
+{
+ struct f_mbim *mbim = NULL;
+ int status = 0;
+
+ pr_info("port number %u\n", portno);
+
+ if (portno >= nr_mbim_ports) {
+ pr_err("Can not add port %u. Max ports = %d\n",
+ portno, nr_mbim_ports);
+ return -ENODEV;
+ }
+
+ /* allocate and initialize one new instance */
+ mbim = mbim_ports[portno].port;
+ if (!mbim) {
+ pr_err("mbim struct not allocated\n");
+ return -ENOMEM;
+ }
+
+ mbim->xport = str_to_xport(xport_name);
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM2BAM:
+ /* Override BAM2BAM to BAM_DMUX for old ABI compatibility */
+ mbim->xport = USB_GADGET_XPORT_BAM_DMUX;
+ /* fall-through */
+ case USB_GADGET_XPORT_BAM_DMUX:
+		status = gbam_mbim_setup();
+		break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ status = mbim_bam_setup(nr_mbim_ports);
+ if (status)
+ break;
+ mbim_ntb_parameters.wNtbOutMaxDatagrams = 16;
+ /* For IPA this is proven to give maximum throughput */
+ mbim_ntb_parameters.dwNtbInMaxSize =
+ cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
+ mbim_ntb_parameters.dwNtbOutMaxSize =
+ cpu_to_le32(MBIM_NTB_OUT_SIZE_IPA);
+ /* update rx buffer size to be used by usb rx request buffer */
+ mbim->bam_port.rx_buffer_size = MBIM_NTB_OUT_SIZE_IPA;
+ mbim_ntb_parameters.wNdpInDivisor = 1;
+ pr_debug("MBIM: dwNtbOutMaxSize:%d\n", MBIM_NTB_OUT_SIZE_IPA);
+ break;
+ default:
+ status = -EINVAL;
+ }
+
+ if (status) {
+ pr_err("%s transport setup failed\n", xport_name);
+ return status;
+ }
+
+
+ /* maybe allocate device-global string IDs */
+ if (mbim_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ mbim_string_defs[STRING_CTRL_IDX].id = status;
+ mbim_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ mbim_string_defs[STRING_DATA_IDX].id = status;
+ mbim_data_nop_intf.iInterface = status;
+ mbim_data_intf.iInterface = status;
+ }
+
+ mbim->cdev = c->cdev;
+
+ mbim_reset_values(mbim);
+
+ mbim->function.name = "usb_mbim";
+ mbim->function.strings = mbim_strings;
+ mbim->function.bind = mbim_bind;
+ mbim->function.unbind = mbim_unbind;
+ mbim->function.set_alt = mbim_set_alt;
+ mbim->function.get_alt = mbim_get_alt;
+ mbim->function.setup = mbim_setup;
+ mbim->function.disable = mbim_disable;
+ mbim->function.suspend = mbim_suspend;
+ mbim->function.func_suspend = mbim_func_suspend;
+ mbim->function.get_status = mbim_get_status;
+ mbim->function.resume = mbim_resume;
+
+ INIT_LIST_HEAD(&mbim->cpkt_req_q);
+ INIT_LIST_HEAD(&mbim->cpkt_resp_q);
+
+ status = usb_add_function(c, &mbim->function);
+
+ pr_info("Exit status %d\n", status);
+
+ return status;
+}
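+
+/*
+ * Illustrative usage sketch: the ports must have been allocated first via
+ * mbim_init() below, after which a composition driver adds the function to
+ * a configuration roughly as:
+ *
+ *	err = mbim_bind_config(c, 0, xport_name);
+ *
+ * where xport_name is a transport string accepted by str_to_xport()
+ * (a BAM_DMUX or BAM2BAM_IPA based transport for this driver).
+ */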
+
+/* ------------ MBIM DRIVER File Operations API for USER SPACE ------------ */
+
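+/*
+ * Rough sketch of the expected userspace loop (illustrative only): the
+ * host's encapsulated commands are fetched with read(), and the MBIM
+ * response is pushed back with write(), which raises a RESPONSE_AVAILABLE
+ * notification so the host can fetch it via GET_ENCAPSULATED_RESPONSE:
+ *
+ *	char cmd[4096], rsp[4096];
+ *	ssize_t n = read(fd, cmd, sizeof(cmd));
+ *	if (n > 0) {
+ *		ssize_t len = build_mbim_response(cmd, n, rsp);
+ *		write(fd, rsp, len);
+ *	}
+ *
+ * build_mbim_response() is a placeholder for the application's MBIM message
+ * handling; 4096 matches MAX_CTRL_PKT_SIZE defined above.
+ */
+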
+static ssize_t
+mbim_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+ struct f_mbim *dev = fp->private_data;
+ struct ctrl_pkt *cpkt = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ pr_debug("Enter(%zu)\n", count);
+
+ if (!dev) {
+ pr_err("Received NULL mbim pointer\n");
+ return -ENODEV;
+ }
+
+ if (count > MBIM_BULK_BUFFER_SIZE) {
+ pr_err("Buffer size is too big %zu, should be at most %d\n",
+ count, MBIM_BULK_BUFFER_SIZE);
+ return -EINVAL;
+ }
+
+ if (mbim_lock(&dev->read_excl)) {
+ pr_err("Previous reading is not finished yet\n");
+ return -EBUSY;
+ }
+
+ if (atomic_read(&dev->error)) {
+ mbim_unlock(&dev->read_excl);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (list_empty(&dev->cpkt_req_q)) {
+ pr_debug("Requests list is empty. Wait.\n");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = wait_event_interruptible(dev->read_wq,
+ !list_empty(&dev->cpkt_req_q));
+ if (ret < 0) {
+ pr_err("Waiting failed\n");
+ mbim_unlock(&dev->read_excl);
+ return -ERESTARTSYS;
+ }
+ pr_debug("Received request packet\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ }
+
+ cpkt = list_first_entry(&dev->cpkt_req_q, struct ctrl_pkt,
+ list);
+ if (cpkt->len > count) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mbim_unlock(&dev->read_excl);
+ pr_err("cpkt size too big:%d > buf size:%zu\n",
+ cpkt->len, count);
+ return -ENOMEM;
+ }
+
+ pr_debug("cpkt size:%d\n", cpkt->len);
+
+ list_del(&cpkt->list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mbim_unlock(&dev->read_excl);
+
+ ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+ if (ret) {
+ pr_err("copy_to_user failed: err %d\n", ret);
+ ret = -ENOMEM;
+ } else {
+ pr_debug("copied %d bytes to user\n", cpkt->len);
+ ret = cpkt->len;
+ }
+
+ mbim_free_ctrl_pkt(cpkt);
+
+ return ret;
+}
+
+static ssize_t
+mbim_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+ struct f_mbim *dev = fp->private_data;
+ struct ctrl_pkt *cpkt = NULL;
+ struct usb_request *req = dev->not_port.notify_req;
+ int ret = 0;
+ unsigned long flags;
+
+ pr_debug("Enter(%zu)\n", count);
+
+ if (!dev || !req || !req->buf) {
+ pr_err("%s: dev %pK req %pK req->buf %pK\n",
+ __func__, dev, req, req ? req->buf : req);
+ return -ENODEV;
+ }
+
+ if (!count || count > MAX_CTRL_PKT_SIZE) {
+ pr_err("error: ctrl pkt length %zu\n", count);
+ return -EINVAL;
+ }
+
+ if (mbim_lock(&dev->write_excl)) {
+ pr_err("Previous writing not finished yet\n");
+ return -EBUSY;
+ }
+
+ if (!atomic_read(&dev->online)) {
+ pr_err("USB cable not connected\n");
+ mbim_unlock(&dev->write_excl);
+ return -EPIPE;
+ }
+
+ if (dev->not_port.notify_state != MBIM_NOTIFY_RESPONSE_AVAILABLE) {
+ pr_err("dev:%pK state=%d error\n", dev,
+ dev->not_port.notify_state);
+ mbim_unlock(&dev->write_excl);
+ return -EINVAL;
+ }
+
+ if (dev->function.func_is_suspended &&
+ !dev->function.func_wakeup_allowed) {
+		dev->cpkt_drop_cnt++;
+		pr_err("drop ctrl pkt of len %zu\n", count);
+		mbim_unlock(&dev->write_excl);
+		return -ENOTSUPP;
+ }
+
+ cpkt = mbim_alloc_ctrl_pkt(count, GFP_KERNEL);
+ if (!cpkt) {
+ pr_err("failed to allocate ctrl pkt\n");
+ mbim_unlock(&dev->write_excl);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(cpkt->buf, buf, count);
+ if (ret) {
+ pr_err("copy_from_user failed err:%d\n", ret);
+ mbim_free_ctrl_pkt(cpkt);
+ mbim_unlock(&dev->write_excl);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+
+ if (atomic_inc_return(&dev->not_port.notify_count) != 1) {
+ pr_debug("delay ep_queue: notifications queue is busy[%d]\n",
+ atomic_read(&dev->not_port.notify_count));
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mbim_unlock(&dev->write_excl);
+ return count;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_func_ep_queue(&dev->function, dev->not_port.notify,
+ req, GFP_ATOMIC);
+ if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+ spin_lock_irqsave(&dev->lock, flags);
+ /* check if device disconnected while we dropped lock */
+ if (atomic_read(&dev->online)) {
+ list_del(&cpkt->list);
+ atomic_dec(&dev->not_port.notify_count);
+ mbim_free_ctrl_pkt(cpkt);
+ }
+ dev->cpkt_drop_cnt++;
+ spin_unlock_irqrestore(&dev->lock, flags);
+		pr_err("drop ctrl pkt of len %zu error %d\n", count, ret);
+ } else {
+ ret = 0;
+ }
+ mbim_unlock(&dev->write_excl);
+
+ pr_debug("Exit(%zu)\n", count);
+
+ return ret ? ret : count;
+}
+
+static int mbim_open(struct inode *ip, struct file *fp)
+{
+ pr_info("Open mbim driver\n");
+
+	if (!_mbim_dev) {
+ pr_err("mbim_dev not created yet\n");
+ return -ENODEV;
+ }
+
+ if (mbim_lock(&_mbim_dev->open_excl)) {
+ pr_err("Already opened\n");
+ return -EBUSY;
+ }
+
+ pr_info("Lock mbim_dev->open_excl for open\n");
+
+ if (!atomic_read(&_mbim_dev->online))
+ pr_err("USB cable not connected\n");
+
+ fp->private_data = _mbim_dev;
+
+ atomic_set(&_mbim_dev->error, 0);
+
+ pr_info("Exit, mbim file opened\n");
+
+ return 0;
+}
+
+static int mbim_release(struct inode *ip, struct file *fp)
+{
+ pr_info("Close mbim file\n");
+
+ mbim_unlock(&_mbim_dev->open_excl);
+
+ return 0;
+}
+
+#define BAM_DMUX_CHANNEL_ID 8
+static long mbim_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+ struct f_mbim *mbim = fp->private_data;
+ struct data_port *port;
+ struct mbim_ipa_ep_info info;
+ int ret = 0;
+
+ pr_debug("Received command %d\n", cmd);
+
+ if (!mbim) {
+ pr_err("Bad parameter\n");
+ return -EINVAL;
+ }
+
+ if (mbim_lock(&mbim->ioctl_excl))
+ return -EBUSY;
+
+ switch (cmd) {
+ case MBIM_GET_NTB_SIZE:
+ ret = copy_to_user((void __user *)arg,
+ &mbim->ntb_input_size, sizeof(mbim->ntb_input_size));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_info("Sent NTB size %d\n", mbim->ntb_input_size);
+ break;
+ case MBIM_GET_DATAGRAM_COUNT:
+ ret = copy_to_user((void __user *)arg,
+ &mbim->ntb_max_datagrams,
+ sizeof(mbim->ntb_max_datagrams));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_info("Sent NTB datagrams count %d\n",
+ mbim->ntb_max_datagrams);
+ break;
+
+ case MBIM_EP_LOOKUP:
+ if (!atomic_read(&mbim->online)) {
+ pr_warn("usb cable is not connected\n");
+ return -ENOTCONN;
+ }
+
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ /*
+ * Rmnet and MBIM share the same BAM-DMUX channel.
+ * This channel number 8 should be in sync with
+ * the one defined in u_bam.c.
+ */
+ info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_BAM_DMUX;
+ info.ph_ep_info.peripheral_iface_id =
+ BAM_DMUX_CHANNEL_ID;
+ info.ipa_ep_pair.cons_pipe_num = 0;
+ info.ipa_ep_pair.prod_pipe_num = 0;
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ port = &mbim->bam_port;
+ if ((port->ipa_producer_ep == -1) ||
+ (port->ipa_consumer_ep == -1)) {
+ pr_err("EP_LOOKUP failed - IPA pipes not updated\n");
+ ret = -EAGAIN;
+ break;
+ }
+
+ info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_HSUSB;
+ info.ph_ep_info.peripheral_iface_id = mbim->data_id;
+ info.ipa_ep_pair.cons_pipe_num = port->ipa_consumer_ep;
+ info.ipa_ep_pair.prod_pipe_num = port->ipa_producer_ep;
+ break;
+ default:
+ ret = -ENODEV;
+ pr_err("unknown transport\n");
+ goto fail;
+ }
+
+ ret = copy_to_user((void __user *)arg, &info,
+ sizeof(info));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ break;
+
+ default:
+ pr_err("wrong parameter\n");
+ ret = -EINVAL;
+ }
+
+fail:
+ mbim_unlock(&mbim->ioctl_excl);
+
+ return ret;
+}
+
+/* file operations for MBIM device /dev/android_mbim */
+static const struct file_operations mbim_fops = {
+ .owner = THIS_MODULE,
+ .open = mbim_open,
+ .release = mbim_release,
+ .read = mbim_read,
+ .write = mbim_write,
+ .unlocked_ioctl = mbim_ioctl,
+};
+
+static struct miscdevice mbim_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "android_mbim",
+ .fops = &mbim_fops,
+};
+
+static int mbim_init(int instances)
+{
+ int i;
+ struct f_mbim *dev = NULL;
+ int ret;
+
+ pr_info("initialize %d instances\n", instances);
+
+ if (instances > NR_MBIM_PORTS) {
+ pr_err("Max-%d instances supported\n", NR_MBIM_PORTS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < instances; i++) {
+ dev = kzalloc(sizeof(struct f_mbim), GFP_KERNEL);
+ if (!dev) {
+ pr_err("Failed to allocate mbim dev\n");
+ ret = -ENOMEM;
+ goto fail_probe;
+ }
+
+ dev->port_num = i;
+ dev->bam_port.ipa_consumer_ep = -1;
+ dev->bam_port.ipa_producer_ep = -1;
+
+ spin_lock_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->cpkt_req_q);
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+ mbim_ports[i].port = dev;
+ mbim_ports[i].port_num = i;
+
+ init_waitqueue_head(&dev->read_wq);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->ioctl_excl, 0);
+ atomic_set(&dev->read_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ nr_mbim_ports++;
+
+ }
+
+ _mbim_dev = dev;
+ ret = misc_register(&mbim_device);
+ if (ret) {
+ pr_err("mbim driver failed to register\n");
+ goto fail_probe;
+ }
+
+ pr_info("Initialized %d ports\n", nr_mbim_ports);
+
+ return ret;
+
+fail_probe:
+ pr_err("Failed\n");
+ for (i = 0; i < nr_mbim_ports; i++) {
+ kfree(mbim_ports[i].port);
+ mbim_ports[i].port = NULL;
+ }
+
+ return ret;
+}
+
+static void fmbim_cleanup(void)
+{
+ int i = 0;
+
+ pr_info("Enter\n");
+
+ for (i = 0; i < nr_mbim_ports; i++) {
+ kfree(mbim_ports[i].port);
+ mbim_ports[i].port = NULL;
+ }
+ nr_mbim_ports = 0;
+
+ misc_deregister(&mbim_device);
+
+ _mbim_dev = NULL;
+}
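A minimal userspace sketch of the MBIM_EP_LOOKUP flow handled above; the ioctl code and the struct mbim_ipa_ep_info layout come from the driver's UAPI header, which is not part of this diff and is assumed here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	/* MBIM_EP_LOOKUP and struct mbim_ipa_ep_info: driver UAPI header, not shown */

	static int query_mbim_pipes(void)
	{
		struct mbim_ipa_ep_info info;
		int fd = open("/dev/android_mbim", O_RDWR);

		if (fd < 0)
			return -1;
		/* fails with ENOTCONN until the cable is up, EAGAIN until IPA pipes exist */
		if (ioctl(fd, MBIM_EP_LOOKUP, &info) < 0) {
			close(fd);
			return -1;
		}
		printf("iface %d cons %d prod %d\n",
		       info.ph_ep_info.peripheral_iface_id,
		       info.ipa_ep_pair.cons_pipe_num,
		       info.ipa_ep_pair.prod_pipe_num);
		close(fd);
		return 0;
	}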
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 0380f260b092..b942f38ab10b 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -167,6 +167,15 @@ static struct usb_endpoint_descriptor bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
/* .bLength = DYNAMIC */
@@ -198,7 +207,7 @@ static struct usb_gadget_strings *midi_strings[] = {
static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static const uint8_t f_midi_cin_length[] = {
@@ -718,6 +727,7 @@ fail:
static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
+ struct usb_descriptor_header **midi_ss_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
@@ -725,7 +735,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
- int status, n, jack = 1, i = 0;
+ int status, n, jack = 1, i = 0, j = 0;
midi->gadget = cdev->gadget;
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
@@ -765,11 +775,20 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
if (!midi->out_ep)
goto fail;
+ /* allocate temporary function list for ss */
+ midi_ss_function = kcalloc((MAX_PORTS * 4) + 11,
+ sizeof(*midi_ss_function), GFP_KERNEL);
+ if (!midi_ss_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* allocate temporary function list */
midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
+ kfree(midi_ss_function);
goto fail;
}
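Spelling out the two allocation sizes against the entries appended below:

	fs/hs list: 4 headers (ac_interface, ac_header, ms_interface, ms_header)
	            + (MAX_PORTS * 4) jack descriptors + 4 endpoint/class descriptors
	            + 1 NULL terminator                  = (MAX_PORTS * 4) + 9
	ss list:    the same + 2 usb_ss_ep_comp_descriptor entries (one per bulk
	            endpoint)                             = (MAX_PORTS * 4) + 11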
@@ -783,6 +802,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_interface_desc;
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
@@ -791,6 +816,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_header_desc;
/* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
@@ -804,6 +831,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_ext->bJackID = jack++;
in_ext->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_ext;
out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -815,6 +843,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_emb->pins[0].baSourceID = in_ext->bJackID;
out_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_emb;
/* link it to the endpoint */
ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
@@ -832,6 +862,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_emb->bJackID = jack++;
in_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_emb;
out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -843,6 +874,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_ext->pins[0].baSourceID = in_emb->bJackID;
out_ext->pins[0].baSourcePin = 1;
midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_ext;
/* link it to the endpoint */
ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
@@ -862,6 +895,16 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
midi_function[i++] = NULL;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_out_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_out_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_in_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_in_desc;
+ midi_ss_function[j++] = NULL;
+
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -880,13 +923,23 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
goto fail_f_midi;
}
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ f->ss_descriptors = usb_copy_descriptors(midi_ss_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
+ }
+
kfree(midi_function);
+ kfree(midi_ss_function);
return 0;
fail_f_midi:
kfree(midi_function);
usb_free_descriptors(f->hs_descriptors);
+ kfree(midi_ss_function);
fail:
f_midi_unregister_card(midi);
fail_register:
@@ -1109,7 +1162,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
@@ -1137,6 +1190,7 @@ static void f_midi_free(struct usb_function *f)
mutex_lock(&opts->lock);
for (i = opts->in_ports - 1; i >= 0; --i)
kfree(midi->in_port[i]);
+ opts->func_inst.f = NULL;
kfree(midi);
opts->func_inst.f = NULL;
--opts->refcnt;
@@ -1157,7 +1211,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
card = midi->card;
midi->card = NULL;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
usb_free_all_descriptors(f);
}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index b25cb3594d01..2d8d5e28ec39 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -26,6 +26,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
@@ -40,6 +42,8 @@
#include "configfs.h"
+#define MTP_RX_BUFFER_INIT_SIZE 1048576
+#define MTP_TX_BUFFER_INIT_SIZE 1048576
#define MTP_BULK_BUFFER_SIZE 16384
#define INTR_BUFFER_SIZE 28
#define MAX_INST_NAME_LEN 40
@@ -56,7 +60,7 @@
#define STATE_ERROR 4 /* error from completion routine */
/* number of tx and rx requests to allocate */
-#define TX_REQ_MAX 4
+#define MTP_TX_REQ_MAX 8
#define RX_REQ_MAX 2
#define INTR_REQ_MAX 5
@@ -74,6 +78,17 @@
#define MTP_RESPONSE_DEVICE_BUSY 0x2019
#define DRIVER_NAME "mtp"
+#define MAX_ITERATION 100
+
+unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
+module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
+
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
+module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
+
+unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
+module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
+
static const char mtp_shortname[] = DRIVER_NAME "_usb";
struct mtp_dev {
@@ -114,6 +129,16 @@ struct mtp_dev {
uint16_t xfer_command;
uint32_t xfer_transaction_id;
int xfer_result;
+ struct {
+ unsigned long vfs_rbytes;
+ unsigned long vfs_wbytes;
+ unsigned vfs_rtime;
+ unsigned vfs_wtime;
+ } perf[MAX_ITERATION];
+ unsigned dbg_read_index;
+ unsigned dbg_write_index;
+ bool is_ptp;
+ struct mutex read_mutex;
};
static struct usb_interface_descriptor mtp_interface_desc = {
@@ -141,27 +166,34 @@ static struct usb_endpoint_descriptor mtp_ss_in_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(1024),
+ .wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
- .bLength = sizeof(mtp_ss_in_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- /* .bMaxBurst = DYNAMIC, */
+ .bLength = sizeof(mtp_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
};
+
static struct usb_endpoint_descriptor mtp_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(1024),
+ .wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
- .bLength = sizeof(mtp_ss_out_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- /* .bMaxBurst = DYNAMIC, */
+ .bLength = sizeof(mtp_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
};
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
@@ -204,9 +236,13 @@ static struct usb_endpoint_descriptor mtp_intr_desc = {
};
static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
- .bLength = sizeof(mtp_intr_ss_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
+ .bLength = sizeof(mtp_intr_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
};
static struct usb_descriptor_header *fs_mtp_descs[] = {
@@ -310,10 +346,12 @@ struct mtp_ext_config_desc_function {
};
/* MTP Extended Configuration Descriptor */
-struct {
+struct ext_mtp_desc {
struct mtp_ext_config_desc_header header;
struct mtp_ext_config_desc_function function;
-} mtp_ext_config_desc = {
+};
+
+struct ext_mtp_desc mtp_ext_config_desc = {
.header = {
.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
.bcdVersion = __constant_cpu_to_le16(0x0100),
@@ -327,6 +365,20 @@ struct {
},
};
+struct ext_mtp_desc ptp_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = cpu_to_le16(1),
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'P', 'T', 'P' },
+ },
+};
+
struct mtp_device_status {
__le16 wLength;
__le16 wCode;
@@ -432,7 +484,7 @@ static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->tx_idle, req);
@@ -445,7 +497,7 @@ static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
struct mtp_dev *dev = _mtp_dev;
dev->rx_done = 1;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
wake_up(&dev->read_wq);
@@ -455,7 +507,7 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->intr_idle, req);
@@ -473,7 +525,7 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
struct usb_ep *ep;
int i;
- DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+ DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
ep = usb_ep_autoconfig(cdev->gadget, in_desc);
if (!ep) {
@@ -502,18 +554,43 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_intr = ep;
+retry_tx_alloc:
/* now allocate requests for our endpoints */
- for (i = 0; i < TX_REQ_MAX; i++) {
- req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ for (i = 0; i < mtp_tx_reqs; i++) {
+ req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
+ if (!req) {
+ if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+ mtp_tx_reqs = MTP_TX_REQ_MAX;
+ goto retry_tx_alloc;
+ }
req->complete = mtp_complete_in;
mtp_req_put(dev, &dev->tx_idle, req);
}
+
+ /*
+ * The RX buffer should be aligned to EP max packet for
+ * some controllers. At bind time, we don't know the
+ * operational speed. Hence assuming super speed max
+ * packet size.
+ */
+ if (mtp_rx_req_len % 1024)
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+
+retry_rx_alloc:
for (i = 0; i < RX_REQ_MAX; i++) {
- req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+ if (!req) {
+ if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ for (--i; i >= 0; i--)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+ goto retry_rx_alloc;
+ }
req->complete = mtp_complete_out;
dev->rx_req[i] = req;
}
@@ -538,12 +615,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
struct mtp_dev *dev = fp->private_data;
struct usb_composite_dev *cdev = dev->cdev;
struct usb_request *req;
- ssize_t r = count;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
- size_t len = 0;
- DBG(cdev, "mtp_read(%zu)\n", count);
+ DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
/* we will block until we're online */
DBG(cdev, "mtp_read: waiting for online state\n");
@@ -553,15 +628,12 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
r = ret;
goto done;
}
- spin_lock_irq(&dev->lock);
- if (dev->ep_out->desc) {
- len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
- if (len > MTP_BULK_BUFFER_SIZE) {
- spin_unlock_irq(&dev->lock);
- return -EINVAL;
- }
- }
+ len = ALIGN(count, dev->ep_out->maxpacket);
+ if (len > mtp_rx_req_len)
+ return -EINVAL;
+
+ spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
/* report cancelation to userspace */
dev->state = STATE_READY;
@@ -571,32 +643,50 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
dev->state = STATE_BUSY;
spin_unlock_irq(&dev->lock);
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ goto done;
+ }
requeue_req:
/* queue a request */
req = dev->rx_req[0];
req->length = len;
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
goto done;
} else {
- DBG(cdev, "rx %p queue\n", req);
+ DBG(cdev, "rx %pK queue\n", req);
}
/* wait for a request to complete */
- ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, req);
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_CANCELED;
+ spin_unlock_irq(&dev->lock);
+ goto done;
+ }
if (ret < 0) {
r = ret;
usb_ep_dequeue(dev->ep_out, req);
goto done;
}
+ mutex_lock(&dev->read_mutex);
if (dev->state == STATE_BUSY) {
/* If we got a 0-len packet, throw it back and try again. */
if (req->actual == 0)
goto requeue_req;
- DBG(cdev, "rx %p %d\n", req, req->actual);
+ DBG(cdev, "rx %pK %d\n", req, req->actual);
xfer = (req->actual < count) ? req->actual : count;
r = xfer;
if (copy_to_user(buf, req->buf, xfer))
@@ -604,6 +694,7 @@ requeue_req:
} else
r = -EIO;
+ mutex_unlock(&dev->read_mutex);
done:
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED)
@@ -612,7 +703,7 @@ done:
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_read returning %zd\n", r);
+ DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -627,7 +718,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
int sendZLP = 0;
int ret;
- DBG(cdev, "mtp_write(%zu)\n", count);
+ DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
@@ -666,12 +757,14 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
((req = mtp_req_get(dev, &dev->tx_idle))
|| dev->state != STATE_BUSY));
if (!req) {
+ DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -704,7 +797,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_write returning %zd\n", r);
+ DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -722,6 +815,7 @@ static void send_file_work(struct work_struct *data)
int xfer, ret, hdr_size;
int r = 0;
int sendZLP = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -729,6 +823,11 @@ static void send_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_send_header) {
@@ -759,12 +858,15 @@ static void send_file_work(struct work_struct *data)
break;
}
if (!req) {
+ DBG(cdev,
+ "send_file_work request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
@@ -782,21 +884,27 @@ static void send_file_work(struct work_struct *data)
header->transaction_id =
__cpu_to_le32(dev->xfer_transaction_id);
}
-
+ start_time = ktime_get();
ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
&offset);
if (ret < 0) {
r = ret;
break;
}
+
xfer = ret + hdr_size;
+ dev->perf[dev->dbg_read_index].vfs_rtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+ dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
hdr_size = 0;
req->length = xfer;
ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
if (ret < 0) {
DBG(cdev, "send_file_work: xfer error %d\n", ret);
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
r = -EIO;
break;
}
@@ -810,7 +918,7 @@ static void send_file_work(struct work_struct *data)
if (req)
mtp_req_put(dev, &dev->tx_idle, req);
- DBG(cdev, "send_file_work returning %d\n", r);
+ DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
/* write the result */
dev->xfer_result = r;
smp_wmb();
@@ -828,6 +936,7 @@ static void receive_file_work(struct work_struct *data)
int64_t count;
int ret, cur_buf = 0;
int r = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -835,35 +944,67 @@ static void receive_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "receive_file_work(%lld)\n", count);
+ if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+ DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+ count, dev->ep_out->maxpacket);
while (count > 0 || write_req) {
if (count > 0) {
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
/* queue a request */
read_req = dev->rx_req[cur_buf];
cur_buf = (cur_buf + 1) % RX_REQ_MAX;
- read_req->length = (count > MTP_BULK_BUFFER_SIZE
- ? MTP_BULK_BUFFER_SIZE : count);
+ /* some h/w expects size to be aligned to ep's MTU */
+ read_req->length = mtp_rx_req_len;
+
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
}
if (write_req) {
- DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
+ start_time = ktime_get();
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
ret = vfs_write(filp, write_req->buf, write_req->actual,
&offset);
DBG(cdev, "vfs_write %d\n", ret);
if (ret != write_req->actual) {
r = -EIO;
- dev->state = STATE_ERROR;
+ mutex_unlock(&dev->read_mutex);
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
+ mutex_unlock(&dev->read_mutex);
+ dev->perf[dev->dbg_write_index].vfs_wtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+ dev->dbg_write_index =
+ (dev->dbg_write_index + 1) % MAX_ITERATION;
write_req = NULL;
}
@@ -871,8 +1012,12 @@ static void receive_file_work(struct work_struct *data)
/* wait for our last read to complete */
ret = wait_event_interruptible(dev->read_wq,
dev->rx_done || dev->state != STATE_BUSY);
- if (dev->state == STATE_CANCELED) {
- r = -ECANCELED;
+ if (dev->state == STATE_CANCELED
+ || dev->state == STATE_OFFLINE) {
+ if (dev->state == STATE_OFFLINE)
+ r = -EIO;
+ else
+ r = -ECANCELED;
if (!dev->rx_done)
usb_ep_dequeue(dev->ep_out, read_req);
break;
@@ -881,6 +1026,17 @@ static void receive_file_work(struct work_struct *data)
r = read_req->status;
break;
}
+
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
+ /* Check if we aligned the size due to MTU constraint */
+ if (count < read_req->length)
+ read_req->actual = (read_req->actual > count ?
+ count : read_req->actual);
/* if xfer_file_length is 0xFFFFFFFF, then we read until
* we get a zero length packet
*/
@@ -897,6 +1053,7 @@ static void receive_file_work(struct work_struct *data)
write_req = read_req;
read_req = NULL;
+ mutex_unlock(&dev->read_mutex);
}
}
@@ -937,85 +1094,107 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
return ret;
}
-static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
+ struct mtp_file_range *mfr)
{
struct mtp_dev *dev = fp->private_data;
struct file *filp = NULL;
+ struct work_struct *work;
int ret = -EINVAL;
- if (mtp_lock(&dev->ioctl_excl))
+ if (mtp_lock(&dev->ioctl_excl)) {
+ DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
return -EBUSY;
+ }
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancellation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr->fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr->offset;
+ dev->xfer_file_length = mfr->length;
+ /* make sure write is done before parameters are read */
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr->command;
+ dev->xfer_transaction_id = mfr->transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct mtp_event event;
+ int ret = -EINVAL;
switch (code) {
case MTP_SEND_FILE:
case MTP_RECEIVE_FILE:
case MTP_SEND_FILE_WITH_HEADER:
- {
- struct mtp_file_range mfr;
- struct work_struct *work;
-
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED) {
- /* report cancelation to userspace */
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
- ret = -ECANCELED;
- goto out;
- }
- if (dev->state == STATE_OFFLINE) {
- spin_unlock_irq(&dev->lock);
- ret = -ENODEV;
- goto out;
- }
- dev->state = STATE_BUSY;
- spin_unlock_irq(&dev->lock);
-
if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
ret = -EFAULT;
goto fail;
}
- /* hold a reference to the file while we are working with it */
- filp = fget(mfr.fd);
- if (!filp) {
- ret = -EBADF;
- goto fail;
- }
-
- /* write the parameters */
- dev->xfer_file = filp;
- dev->xfer_file_offset = mfr.offset;
- dev->xfer_file_length = mfr.length;
- smp_wmb();
-
- if (code == MTP_SEND_FILE_WITH_HEADER) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 1;
- dev->xfer_command = mfr.command;
- dev->xfer_transaction_id = mfr.transaction_id;
- } else if (code == MTP_SEND_FILE) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 0;
- } else {
- work = &dev->receive_file_work;
- }
-
- /* We do the file transfer on a work queue so it will run
- * in kernel context, which is necessary for vfs_read and
- * vfs_write to use our buffers in the kernel address space.
- */
- queue_work(dev->wq, work);
- /* wait for operation to complete */
- flush_workqueue(dev->wq);
- fput(filp);
-
- /* read the result */
- smp_rmb();
- ret = dev->xfer_result;
- break;
- }
+ ret = mtp_send_receive_ioctl(fp, code, &mfr);
+ break;
case MTP_SEND_EVENT:
- {
- struct mtp_event event;
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
/* return here so we don't change dev->state below,
* which would interfere with bulk transfer state.
*/
@@ -1023,28 +1202,93 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
ret = -EFAULT;
else
ret = mtp_send_event(dev, &event);
- goto out;
+ mtp_unlock(&dev->ioctl_excl);
+ break;
+ default:
+ DBG(dev->cdev, "unknown ioctl code: %d\n", code);
}
+fail:
+ return ret;
+}
+
+/*
+ * 32-bit userspace calling into a 64-bit kernel: translate the ioctl
+ * code and the userspace pointer before dispatching to the common
+ * handlers.
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned code,
+ unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct __compat_mtp_file_range cmfr;
+ struct mtp_event event;
+ struct __compat_mtp_event cevent;
+ unsigned cmd;
+ bool send_file = false;
+ int ret = -EINVAL;
+
+ switch (code) {
+ case COMPAT_MTP_SEND_FILE:
+ cmd = MTP_SEND_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_RECEIVE_FILE:
+ cmd = MTP_RECEIVE_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+ cmd = MTP_SEND_FILE_WITH_HEADER;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_EVENT:
+ cmd = MTP_SEND_EVENT;
+ break;
+ default:
+ DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+ ret = -ENOIOCTLCMD;
+ goto fail;
}
+ if (send_file) {
+ if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ mfr.fd = cmfr.fd;
+ mfr.offset = cmfr.offset;
+ mfr.length = cmfr.length;
+ mfr.command = cmfr.command;
+ mfr.transaction_id = cmfr.transaction_id;
+ ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+ } else {
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&cevent, (void __user *)value,
+ sizeof(cevent))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ event.length = cevent.length;
+ event.data = compat_ptr(cevent.data);
+ ret = mtp_send_event(dev, &event);
+ mtp_unlock(&dev->ioctl_excl);
+ }
fail:
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED)
- ret = -ECANCELED;
- else if (dev->state != STATE_OFFLINE)
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
-out:
- mtp_unlock(&dev->ioctl_excl);
- DBG(dev->cdev, "ioctl returning %d\n", ret);
return ret;
}
+#endif
static int mtp_open(struct inode *ip, struct file *fp)
{
printk(KERN_INFO "mtp_open\n");
- if (mtp_lock(&_mtp_dev->open_excl))
+ if (mtp_lock(&_mtp_dev->open_excl)) {
+ pr_err("%s mtp_release not called returning EBUSY\n", __func__);
return -EBUSY;
+ }
/* clear any error condition */
if (_mtp_dev->state != STATE_OFFLINE)
@@ -1068,6 +1312,9 @@ static const struct file_operations mtp_fops = {
.read = mtp_read,
.write = mtp_write,
.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_mtp_ioctl,
+#endif
.open = mtp_open,
.release = mtp_release,
};
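For reference, the refactor keeps the userspace contract of the transfer ioctls unchanged. A minimal caller-side sketch; MTP_SEND_FILE and struct mtp_file_range come from the driver's UAPI header, assumed here and not shown in this diff:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	/* MTP_SEND_FILE and struct mtp_file_range: driver UAPI header, not shown */

	static int mtp_send_whole_file(int mtp_fd, int data_fd, long long length)
	{
		struct mtp_file_range mfr = {
			.fd = data_fd,		/* file streamed out over the IN endpoint */
			.offset = 0,
			.length = length,	/* negative lengths are now rejected in send_file_work() */
		};

		/* blocks in mtp_send_receive_ioctl() until the worker finishes */
		return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
	}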
@@ -1110,9 +1357,21 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
if (ctrl->bRequest == 1
&& (ctrl->bRequestType & USB_DIR_IN)
&& (w_index == 4 || w_index == 5)) {
- value = (w_length < sizeof(mtp_ext_config_desc) ?
- w_length : sizeof(mtp_ext_config_desc));
- memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ if (!dev->is_ptp) {
+ value = (w_length <
+ sizeof(mtp_ext_config_desc) ?
+ w_length :
+ sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc,
+ value);
+ } else {
+ value = (w_length <
+ sizeof(ptp_ext_config_desc) ?
+ w_length :
+ sizeof(ptp_ext_config_desc));
+ memcpy(cdev->req->buf, &ptp_ext_config_desc,
+ value);
+ }
}
} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
@@ -1181,7 +1440,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
struct mtp_instance *fi_mtp;
dev->cdev = cdev;
- DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+ DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
/* allocate interface ID(s) */
id = usb_interface_id(c, f);
@@ -1235,6 +1494,15 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
mtp_ss_out_comp_desc.bMaxBurst = max_burst;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ mtp_ss_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_ss_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ fi_mtp->func_inst.f = &dev->function;
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_superspeed(c->cdev->gadget) ? "super" :
(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
@@ -1246,19 +1514,24 @@ static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct mtp_dev *dev = func_to_mtp(f);
+ struct mtp_instance *fi_mtp;
struct usb_request *req;
int i;
-
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+ mutex_lock(&dev->read_mutex);
while ((req = mtp_req_get(dev, &dev->tx_idle)))
mtp_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
mtp_request_free(dev->rx_req[i], dev->ep_out);
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
+ mutex_unlock(&dev->read_mutex);
dev->state = STATE_OFFLINE;
+ dev->is_ptp = false;
kfree(f->os_desc_table);
f->os_desc_n = 0;
+ fi_mtp->func_inst.f = NULL;
}
static int mtp_function_set_alt(struct usb_function *f,
@@ -1322,6 +1595,120 @@ static void mtp_function_disable(struct usb_function *f)
VDBG(cdev, "%s disabled\n", dev->function.name);
}
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int i;
+ unsigned long flags;
+ unsigned min, max = 0, sum = 0, iteration = 0;
+
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Write Stats:\n");
+ seq_puts(s, "\n=======================\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ min = dev->perf[0].vfs_wtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs write: bytes:%lu\t\t time:%u\n",
+ dev->perf[i].vfs_wbytes,
+ dev->perf[i].vfs_wtime);
+ if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+ sum += dev->perf[i].vfs_wtime;
+ if (min > dev->perf[i].vfs_wtime)
+ min = dev->perf[i].vfs_wtime;
+ if (max < dev->perf[i].vfs_wtime)
+ max = dev->perf[i].vfs_wtime;
+ iteration++;
+ }
+ }
+
+	seq_printf(s, "vfs_write(time in usec) min:%u\t max:%u\t avg:%u\n",
+						min, max, iteration ? sum / iteration : 0);
+ min = max = sum = iteration = 0;
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Read Stats:\n");
+ seq_puts(s, "\n=======================\n");
+
+ min = dev->perf[0].vfs_rtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+		seq_printf(s, "vfs read: bytes:%lu\t\t time:%u\n",
+ dev->perf[i].vfs_rbytes,
+ dev->perf[i].vfs_rtime);
+ if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+ sum += dev->perf[i].vfs_rtime;
+ if (min > dev->perf[i].vfs_rtime)
+ min = dev->perf[i].vfs_rtime;
+ if (max < dev->perf[i].vfs_rtime)
+ max = dev->perf[i].vfs_rtime;
+ iteration++;
+ }
+ }
+
+	seq_printf(s, "vfs_read(time in usec) min:%u\t max:%u\t avg:%u\n",
+						min, max, iteration ? sum / iteration : 0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int clear_stats;
+ unsigned long flags;
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (buf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+	if (kstrtoint_from_user(buf, count, 0, &clear_stats) ||
+	    clear_stats != 0) {
+ pr_err("Wrong value. To clear stats, enter value as 0.\n");
+ goto done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+ dev->dbg_read_index = 0;
+ dev->dbg_write_index = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+done:
+ return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+ .open = debug_mtp_open,
+ .read = seq_read,
+ .write = debug_mtp_reset_stats,
+};
+
+struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+ struct dentry *dent_mtp_status;
+
+ dent_mtp = debugfs_create_dir("usb_mtp", 0);
+ if (!dent_mtp || IS_ERR(dent_mtp))
+ return;
+
+ dent_mtp_status = debugfs_create_file("status", S_IRUGO | S_IWUSR,
+ dent_mtp, 0, &debug_mtp_ops);
+ if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+ debugfs_remove(dent_mtp);
+ dent_mtp = NULL;
+ return;
+ }
+}
+
+static void mtp_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dent_mtp);
+}
+
static int __mtp_setup(struct mtp_instance *fi_mtp)
{
struct mtp_dev *dev;
@@ -1358,6 +1745,7 @@ static int __mtp_setup(struct mtp_instance *fi_mtp)
if (ret)
goto err2;
+ mtp_debugfs_init();
return 0;
err2:
@@ -1382,6 +1770,7 @@ static void mtp_cleanup(void)
if (!dev)
return;
+ mtp_debugfs_remove();
misc_deregister(&mtp_device);
destroy_workqueue(dev->wq);
_mtp_dev = NULL;
@@ -1480,6 +1869,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
config_group_init_type_name(&fi_mtp->func_inst.group,
"", &mtp_func_type);
+ mutex_init(&fi_mtp->dev->read_mutex);
+
return &fi_mtp->func_inst;
}
EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
@@ -1539,6 +1930,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
dev->function.disable = mtp_function_disable;
dev->function.setup = mtp_ctrlreq_configfs;
dev->function.free_func = mtp_free;
+ dev->is_ptp = !mtp_config;
return &dev->function;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 93086efef5a8..a1332f77f173 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -334,6 +334,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1359,17 +1430,39 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
*/
if (!ncm_opts->bound) {
mutex_lock(&ncm_opts->lock);
+ ncm_opts->net = gether_setup_default();
+ if (IS_ERR(ncm_opts->net)) {
+ status = PTR_ERR(ncm_opts->net);
+ mutex_unlock(&ncm_opts->lock);
+ goto error;
+ }
gether_set_gadget(ncm_opts->net, cdev->gadget);
status = gether_register_netdev(ncm_opts->net);
mutex_unlock(&ncm_opts->lock);
- if (status)
- return status;
+ if (status) {
+ free_netdev(ncm_opts->net);
+ goto error;
+ }
ncm_opts->bound = true;
}
+
+ /* export host's Ethernet address in CDC format */
+ status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+ sizeof(ncm->ethaddr));
+ if (status < 12) { /* strlen("01234567890a") */
+ ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+ __func__, status);
+ status = -EINVAL;
+ goto netdev_cleanup;
+ }
+ ncm->port.ioport = netdev_priv(ncm_opts->net);
+
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
- if (IS_ERR(us))
- return PTR_ERR(us);
+ if (IS_ERR(us)) {
+ status = PTR_ERR(us);
+ goto netdev_cleanup;
+ }
ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
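gether_get_host_addr_cdc() returns the host MAC as the bare 12-hex-digit string that the CDC iMACAddress string descriptor expects, which is why anything shorter than 12 characters is treated as a failure. Illustratively:

	/* illustrative only: the format the 12-character check above is guarding */
	static int cdc_format_host_mac(char *buf, size_t len, const u8 mac[6])
	{
		/* e.g. 6a:1f:0d:2b:9c:4e is exported as "6a1f0d2b9c4e" */
		return snprintf(buf, len, "%02x%02x%02x%02x%02x%02x",
				mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}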
@@ -1435,8 +1528,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
@@ -1464,7 +1566,10 @@ fail:
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
+netdev_cleanup:
+ gether_cleanup(netdev_priv(ncm_opts->net));
+error:
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
@@ -1512,8 +1617,6 @@ static void ncm_free_inst(struct usb_function_instance *f)
opts = container_of(f, struct f_ncm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
- else
- free_netdev(opts->net);
kfree(opts);
}
@@ -1526,12 +1629,6 @@ static struct usb_function_instance *ncm_alloc_inst(void)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
- opts->net = gether_setup_default();
- if (IS_ERR(opts->net)) {
- struct net_device *net = opts->net;
- kfree(opts);
- return ERR_CAST(net);
- }
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
@@ -1554,9 +1651,13 @@ static void ncm_free(struct usb_function *f)
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_ncm *ncm = func_to_ncm(f);
+ struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+ func_inst);
DBG(c->cdev, "ncm unbind\n");
+ opts->bound = false;
+
hrtimer_cancel(&ncm->task_timer);
tasklet_kill(&ncm->tx_tasklet);
@@ -1570,13 +1671,14 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+ gether_cleanup(netdev_priv(opts->net));
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
{
struct f_ncm *ncm;
struct f_ncm_opts *opts;
- int status;
/* allocate and initialize one new instance */
ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1586,20 +1688,9 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
opts = container_of(fi, struct f_ncm_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt++;
-
- /* export host's Ethernet address in CDC format */
- status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
- sizeof(ncm->ethaddr));
- if (status < 12) { /* strlen("01234567890a") */
- kfree(ncm);
- mutex_unlock(&opts->lock);
- return ERR_PTR(-EINVAL);
- }
ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
spin_lock_init(&ncm->lock);
ncm_reset_values(ncm);
- ncm->port.ioport = netdev_priv(opts->net);
mutex_unlock(&opts->lock);
ncm->port.is_fixed = true;
ncm->port.supports_multi_frame = true;
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d6396e0909ee..98a72b7d6b6a 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -376,7 +376,7 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+ ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
return status;
}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 8e32b41fc129..2f509f8bcd4b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1272,7 +1272,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
mutex_lock(&printer_ida_lock);
gprinter_put_minor(opts->minor);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
mutex_unlock(&printer_ida_lock);
@@ -1296,7 +1296,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
mutex_lock(&printer_ida_lock);
- if (idr_is_empty(&printer_ida.idr)) {
+ if (ida_is_empty(&printer_ida)) {
status = gprinter_setup(PRINTER_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -1309,7 +1309,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
goto unlock;
}
diff --git a/drivers/usb/gadget/function/f_qc_ecm.c b/drivers/usb/gadget/function/f_qc_ecm.c
new file mode 100644
index 000000000000..d96f727b2da4
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_ecm.c
@@ -0,0 +1,1166 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+#include "u_bam_data.h"
+#include <linux/ecm_ipa.h>
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link. The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support. It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.) A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on USB CDC Ethernet link function driver and
+ * contains MSM specific implementation.
+ */
+
+
+enum ecm_qc_notify_state {
+ ECM_QC_NOTIFY_NONE, /* don't notify */
+ ECM_QC_NOTIFY_CONNECT, /* issue CONNECT next */
+ ECM_QC_NOTIFY_SPEED, /* issue SPEED_CHANGE next */
+};
+
+struct f_ecm_qc {
+ struct qc_gether port;
+ u8 ctrl_id, data_id;
+ enum transport_type xport;
+ u8 port_num;
+ char ethaddr[14];
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ bool is_open;
+ struct data_port bam_port;
+ bool ecm_mdm_ready_trigger;
+
+ bool data_interface_up;
+};
+
+static struct f_ecm_qc *__ecm;
+
+static struct ecm_ipa_params ipa_params;
+
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+ return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
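The constants are the usual theoretical USB throughput ceilings written out per field:

	high speed: 13 bulk packets/uframe * 512 bytes * 8 uframes/ms * 1000 ms/s * 8 bits
	            = 425,984,000 bps (~426 Mbit/s)
	full speed: 19 packets/frame * 64 bytes * 1 * 1000 frames/s * 8 bits
	            = 9,728,000 bps (~9.7 Mbit/s)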
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide. More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* Currently only one std ecm instance is supported - port index 0. */
+#define ECM_QC_NO_PORTS 1
+#define ECM_QC_DEFAULT_PORT 0
+#define ECM_QC_ACTIVE_PORT 0
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+ .bLength = sizeof(ecm_qc_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+ .bLength = sizeof(ecm_qc_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+ .bLength = sizeof(ecm_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+ .bLength = sizeof(ecm_qc_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+ /* this descriptor actually adds value, surprise! */
+ /* .iMACAddress = DYNAMIC */
+ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+ .wNumberMCFilters = cpu_to_le16(0),
+ .bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+ .bLength = sizeof(ecm_qc_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+ .bLength = sizeof(ecm_qc_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+ NULL,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_notify_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_in_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ecm_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_out_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ecm_qc_ss_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_ss_notify_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_ss_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_out_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_qc_string_defs[] = {
+ [0].s = "CDC Ethernet Control Model (ECM)",
+ [1].s = NULL /* DYNAMIC */,
+ [2].s = "CDC Ethernet Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = ecm_qc_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+ &ecm_qc_string_table,
+ NULL,
+};
+
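+/* Small notification state machine: ecm_qc_notify() arms
+ * ECM_QC_NOTIFY_CONNECT and queues a NETWORK_CONNECTION event; when that
+ * request completes, ecm_qc_notify_complete() re-enters ecm_qc_do_notify(),
+ * which (while the link is open) follows up with one SPEED_CHANGE event
+ * before dropping back to ECM_QC_NOTIFY_NONE.
+ */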
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+ struct usb_request *req = ecm->notify_req;
+ struct usb_cdc_notification *event;
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+ __le32 *data;
+ int status;
+
+ /* notification already in flight? */
+ if (!req)
+ return;
+
+ event = req->buf;
+ switch (ecm->notify_state) {
+ case ECM_QC_NOTIFY_NONE:
+ return;
+
+ case ECM_QC_NOTIFY_CONNECT:
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ if (ecm->is_open) {
+ event->wValue = cpu_to_le16(1);
+ ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+ } else {
+ event->wValue = cpu_to_le16(0);
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ }
+ event->wLength = 0;
+ req->length = sizeof(*event);
+
+ DBG(cdev, "notify connect %s\n",
+ ecm->is_open ? "true" : "false");
+ break;
+
+ case ECM_QC_NOTIFY_SPEED:
+ event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+ event->wValue = cpu_to_le16(0);
+ event->wLength = cpu_to_le16(8);
+ req->length = ECM_QC_STATUS_BYTECOUNT;
+
+ /* SPEED_CHANGE data is up/down speeds in bits/sec */
+ data = req->buf + sizeof(*event);
+ data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+ data[1] = data[0];
+
+ DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ }
+ event->bmRequestType = 0xA1;
+ event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+ ecm->notify_req = NULL;
+ status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+ if (status < 0) {
+ ecm->notify_req = req;
+ DBG(cdev, "notify --> %d\n", status);
+ }
+}
+
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+ /* NOTE on most versions of Linux, host side cdc-ethernet
+ * won't listen for notifications until its netdevice opens.
+ * The first notification then sits in the FIFO for a long
+ * time, and the second one is queued.
+ */
+ ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+ ecm_qc_do_notify(ecm);
+}
+
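+/* The accessors below hand out the callbacks and private handle kept in
+ * ipa_params (set up around ecm_ipa_init() in ecm_qc_bind_config());
+ * they are presumably consumed by the BAM/IPA data path glue.
+ */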
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+ return ipa_params.ecm_ipa_rx_dp_notify;
+}
+
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+ return ipa_params.ecm_ipa_tx_dp_notify;
+}
+
+void *ecm_qc_get_ipa_priv(void)
+{
+ return ipa_params.private;
+}
+
+bool ecm_qc_get_skip_ep_config(void)
+{
+ return ipa_params.skip_ep_cfg;
+}
+/*-------------------------------------------------------------------------*/
+
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ecm_qc *ecm = req->context;
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+ struct usb_cdc_notification *event = req->buf;
+
+ switch (req->status) {
+ case 0:
+ /* no fault */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ default:
+ DBG(cdev, "event %02x --> %d\n",
+ event->bNotificationType, req->status);
+ break;
+ }
+ ecm->notify_req = req;
+ ecm_qc_do_notify(ecm);
+}
+
+static int ecm_qc_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+	pr_debug("%s: Enter\n", __func__);
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_ETHERNET_PACKET_FILTER:
+ /* see 6.2.30: no data, wIndex = interface,
+ * wValue = packet filter bitmap
+ */
+ if (w_length != 0 || w_index != ecm->ctrl_id)
+ goto invalid;
+ DBG(cdev, "packet filter %02x\n", w_value);
+ /* REVISIT locking of cdc_filter. This assumes the UDC
+ * driver won't have a concurrent packet TX irq running on
+ * another CPU; or that if it does, this write is atomic...
+ */
+ ecm->port.cdc_filter = w_value;
+ value = 0;
+ break;
+
+ /* and optionally:
+ * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+ * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_STATISTIC:
+ */
+
+ default:
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("ecm req %02x.%02x response err %d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ /* Control interface has only altsetting 0 */
+ if (intf == ecm->ctrl_id) {
+ if (alt != 0) {
+ pr_warn("fail, alt setting is not 0\n");
+ goto fail;
+ }
+
+ if (ecm->notify->driver_data) {
+ VDBG(cdev, "reset ecm control %d\n", intf);
+ usb_ep_disable(ecm->notify);
+ }
+ if (!(ecm->notify->desc)) {
+ VDBG(cdev, "init ecm ctrl %d\n", intf);
+ if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+ goto fail;
+ }
+ usb_ep_enable(ecm->notify);
+ ecm->notify->driver_data = ecm;
+
+ /* Data interface has two altsettings, 0 and 1 */
+ } else if (intf == ecm->data_id) {
+ if (alt > 1)
+ goto fail;
+
+ if (ecm->data_interface_up == alt)
+ return 0;
+
+ if (!ecm->port.in_ep->desc ||
+ !ecm->port.out_ep->desc) {
+ DBG(cdev, "init ecm\n");
+ __ecm->ecm_mdm_ready_trigger = false;
+ if (config_ep_by_speed(cdev->gadget, f,
+ ecm->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ ecm->port.out_ep)) {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ goto fail;
+ }
+ }
+
+ if (alt == 0 && ecm->port.in_ep->driver_data) {
+ DBG(cdev, "reset ecm\n");
+ __ecm->ecm_mdm_ready_trigger = false;
+ /* ecm->port is needed for disconnecting the BAM data
+ * path. Only after the BAM data path is disconnected,
+ * we can disconnect the port from the network layer.
+ */
+ bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+ ecm->port_num);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
+ } else if (ecm->data_interface_up &&
+ gadget_is_dwc3(cdev->gadget)) {
+ if (msm_ep_unconfig(ecm->port.in_ep) ||
+ msm_ep_unconfig(ecm->port.out_ep)) {
+ pr_err("%s: ep_unconfig failed\n",
+ __func__);
+ goto fail;
+ }
+ }
+ }
+ /* CDC Ethernet only sends data in non-default altsettings.
+ * Changing altsettings resets filters, statistics, etc.
+ */
+ if (alt == 1) {
+ struct net_device *net;
+
+ /* Enable zlps by default for ECM conformance;
+ * override for musb_hdrc (avoids txdma ovhead).
+ */
+			ecm->port.is_zlp_ok =
+				!gadget_is_musbhdrc(cdev->gadget);
+ ecm->port.cdc_filter = DEFAULT_FILTER;
+ DBG(cdev, "activate ecm\n");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ net = gether_qc_connect_name(&ecm->port,
+ "ecm0", true);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ }
+
+ ecm->bam_port.cdev = cdev;
+ ecm->bam_port.func = &ecm->port.func;
+ ecm->bam_port.in = ecm->port.in_ep;
+ ecm->bam_port.out = ecm->port.out_ep;
+ if (bam_data_connect(&ecm->bam_port, ecm->xport,
+ ecm->port_num, USB_FUNC_ECM))
+ goto fail;
+ }
+
+ ecm->data_interface_up = alt;
+ /* NOTE this can be a minor disagreement with the ECM spec,
+ * which says speed notifications will "always" follow
+ * connection notifications. But we allow one connect to
+ * follow another (if the first is in flight), and instead
+ * just guarantee that a speed notification is always sent.
+ */
+ ecm_qc_notify(ecm);
+ } else
+ goto fail;
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+ if (intf == ecm->ctrl_id)
+ return 0;
+ return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static void ecm_qc_disable(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+
+ DBG(cdev, "ecm deactivated\n");
+
+ if (ecm->port.in_ep->driver_data) {
+ bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+ ecm->port_num);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
+ } else {
+		/* release EPs in case set_alt(1) was never called */
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ }
+
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA &&
+ gadget_is_dwc3(cdev->gadget)) {
+ msm_ep_unconfig(ecm->port.out_ep);
+ msm_ep_unconfig(ecm->port.in_ep);
+ }
+
+ if (ecm->notify->driver_data) {
+ usb_ep_disable(ecm->notify);
+ ecm->notify->driver_data = NULL;
+ ecm->notify->desc = NULL;
+ }
+
+ ecm->data_interface_up = false;
+}
+
+static void ecm_qc_suspend(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ bool remote_wakeup_allowed;
+
+ /* Is DATA interface initialized? */
+ if (!ecm->data_interface_up) {
+ pr_err("%s(): data interface not up\n", __func__);
+ return;
+ }
+
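+	/* For SuperSpeed the host arms remote wakeup per function (via
+	 * FUNCTION_SUSPEND), reflected in func_wakeup_allowed; at high/full
+	 * speed only the device-wide remote_wakeup flag applies.
+	 */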
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed =
+ f->config->cdev->gadget->remote_wakeup;
+
+	pr_debug("%s(): remote_wakeup:%d\n", __func__, remote_wakeup_allowed);
+ if (!remote_wakeup_allowed)
+ __ecm->ecm_mdm_ready_trigger = false;
+
+ bam_data_suspend(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+ remote_wakeup_allowed);
+
+ pr_debug("ecm suspended\n");
+}
+
+static void ecm_qc_resume(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ bool remote_wakeup_allowed;
+
+ if (!ecm->data_interface_up) {
+ pr_err("%s(): data interface was not up\n", __func__);
+ return;
+ }
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed =
+ f->config->cdev->gadget->remote_wakeup;
+
+ bam_data_resume(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+ remote_wakeup_allowed);
+
+ if (!remote_wakeup_allowed) {
+ ecm->is_open = ecm->ecm_mdm_ready_trigger ? true : false;
+ ecm_qc_notify(ecm);
+ }
+
+ pr_debug("ecm resumed\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ * - disconnected/unconfigured
+ * - configured but inactive (data alt 0)
+ * - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ecm_qc_open(struct qc_gether *geth)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+
+ DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+ ecm->is_open = true;
+ ecm_qc_notify(ecm);
+}
+
+static void ecm_qc_close(struct qc_gether *geth)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+
+ DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+ ecm->is_open = false;
+ ecm_qc_notify(ecm);
+}
+
+/* Callback to let ECM_IPA trigger us when network interface is up */
+void ecm_mdm_ready(void)
+{
+ struct f_ecm_qc *ecm = __ecm;
+ int port_num;
+
+ if (!ecm) {
+ pr_err("can't set ecm_ready_trigger, no ecm instance\n");
+ return;
+ }
+
+ if (ecm->ecm_mdm_ready_trigger) {
+ pr_err("already triggered - can't set ecm_ready_trigger\n");
+ return;
+ }
+
+ pr_debug("set ecm_ready_trigger\n");
+ ecm->ecm_mdm_ready_trigger = true;
+ ecm->is_open = true;
+ ecm_qc_notify(ecm);
+ port_num = (u_bam_data_func_to_port(USB_FUNC_ECM,
+ ECM_QC_ACTIVE_PORT));
+ if (port_num < 0)
+ return;
+ bam_data_start_rx_tx(port_num);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+
+ ecm->ctrl_id = status;
+
+ ecm_qc_control_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0) {
+ pr_debug("no more interface IDs can be allocated\n");
+ goto fail;
+ }
+
+ ecm->data_id = status;
+
+ ecm_qc_data_nop_intf.bInterfaceNumber = status;
+ ecm_qc_data_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bSlaveInterface0 = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_in)\n");
+ goto fail;
+ }
+
+ ecm->port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_out)\n");
+ goto fail;
+ }
+
+ ecm->port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* NOTE: a status/notification endpoint is *OPTIONAL* but we
+ * don't treat it that way. It's simpler, and some newer CDC
+ * profiles (wireless handsets) no longer treat it as optional.
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_notify)\n");
+ goto fail;
+ }
+ ecm->notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!ecm->notify_req) {
+ pr_debug("can not allocate notification request\n");
+ goto fail;
+ }
+ ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!ecm->notify_req->buf)
+ goto fail;
+ ecm->notify_req->context = ecm;
+ ecm->notify_req->complete = ecm_qc_notify_complete;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ ecm_qc_hs_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_hs_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_hs_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ecm_qc_ss_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_ss_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_ss_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ f->ss_descriptors = usb_copy_descriptors(ecm_qc_ss_function);
+		if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+ */
+
+ ecm->port.open = ecm_qc_open;
+ ecm->port.close = ecm_qc_close;
+
+ DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+		gadget_is_superspeed(c->cdev->gadget) ? "super" :
+		gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ ecm->port.in_ep->name, ecm->port.out_ep->name,
+ ecm->notify->name);
+ return 0;
+
+fail:
+
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (ecm->notify_req) {
+ kfree(ecm->notify_req->buf);
+ usb_ep_free_request(ecm->notify, ecm->notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+ if (ecm->port.out_ep->desc)
+ ecm->port.out_ep->driver_data = NULL;
+ if (ecm->port.in_ep->desc)
+ ecm->port.in_ep->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+ DBG(c->cdev, "ecm unbind\n");
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->fs_descriptors);
+
+ kfree(ecm->notify_req->buf);
+ usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+ ecm_qc_string_defs[1].s = NULL;
+
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ /*
+		 * Call flush_workqueue to make sure that any pending
+		 * disconnect_work() from u_bam_data.c is flushed before
+		 * calling ecm_ipa_cleanup(), as the ECM IPA disconnect
+		 * API must be called before the cleanup.
+ */
+ bam_data_flush_workqueue();
+ ecm_ipa_cleanup(ipa_params.private);
+ }
+
+ kfree(ecm);
+ __ecm = NULL;
+}
+
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name)
+{
+ struct f_ecm_qc *ecm;
+ int status;
+
+ if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+ return -EINVAL;
+
+ pr_debug("data transport type is %s\n", xport_name);
+
+ /* maybe allocate device-global string IDs */
+ if (ecm_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[0].id = status;
+ ecm_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[2].id = status;
+ ecm_qc_data_intf.iInterface = status;
+
+ /* MAC address */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[1].id = status;
+ ecm_qc_desc.iMACAddress = status;
+ }
+
+ /* allocate and initialize one new instance */
+ ecm = kzalloc(sizeof(*ecm), GFP_KERNEL);
+ if (!ecm)
+ return -ENOMEM;
+ __ecm = ecm;
+
+ ecm->xport = str_to_xport(xport_name);
+ pr_debug("set xport = %d\n", ecm->xport);
+
+ /* export host's Ethernet address in CDC format */
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_get_macs(ipa_params.device_ethaddr,
+ ipa_params.host_ethaddr);
+ snprintf(ecm->ethaddr, sizeof(ecm->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ ipa_params.host_ethaddr[0], ipa_params.host_ethaddr[1],
+ ipa_params.host_ethaddr[2], ipa_params.host_ethaddr[3],
+ ipa_params.host_ethaddr[4], ipa_params.host_ethaddr[5]);
+ ipa_params.device_ready_notify = ecm_mdm_ready;
+ } else
+ snprintf(ecm->ethaddr, sizeof(ecm->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ ethaddr[0], ethaddr[1], ethaddr[2],
+ ethaddr[3], ethaddr[4], ethaddr[5]);
+
+ ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+ ecm->port.cdc_filter = DEFAULT_FILTER;
+
+ ecm->port.func.name = "cdc_ethernet";
+ ecm->port.func.strings = ecm_qc_strings;
+ /* descriptors are per-instance copies */
+ ecm->port.func.bind = ecm_qc_bind;
+ ecm->port.func.unbind = ecm_qc_unbind;
+ ecm->port.func.set_alt = ecm_qc_set_alt;
+ ecm->port.func.get_alt = ecm_qc_get_alt;
+ ecm->port.func.setup = ecm_qc_setup;
+ ecm->port.func.disable = ecm_qc_disable;
+ ecm->port.func.suspend = ecm_qc_suspend;
+ ecm->port.func.resume = ecm_qc_resume;
+ ecm->ecm_mdm_ready_trigger = false;
+
+ status = usb_add_function(c, &ecm->port.func);
+ if (status) {
+ pr_err("failed to add function\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ __ecm = NULL;
+ return status;
+ }
+
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return status;
+
+	pr_debug("setting ecm_ipa, host_ethaddr=%pM, device_ethaddr=%pM\n",
+ ipa_params.host_ethaddr, ipa_params.device_ethaddr);
+ status = ecm_ipa_init(&ipa_params);
+ if (status) {
+ pr_err("failed to initialize ecm_ipa\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ __ecm = NULL;
+
+ } else {
+		pr_debug("ecm_ipa successfully created\n");
+ }
+
+ return status;
+}
+
+static int ecm_qc_init(void)
+{
+ int ret;
+
+ pr_debug("initialize ecm qc port instance\n");
+
+ ret = bam_data_setup(USB_FUNC_ECM, ECM_QC_NO_PORTS);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 000000000000..2d62b07cb3f6
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1552 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "u_data_ipa.h"
+#include <linux/rndis_ipa.h>
+#include "configfs.h"
+
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+ "Max size of bus transfer to host");
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet. The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex. Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short: it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets. Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data. The control model is built around
+ * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored). RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface. That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely. Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ * - Power management ... references data that's scattered around lots
+ * of other documentation, which is incorrect/incomplete there too.
+ *
+ * - There are various undocumented protocol requirements, like the need
+ * to send garbage in some control-OUT messages.
+ *
+ * - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function driver is based on the RNDIS link function driver and
+ * contains the MSM-specific implementation.
+ */
+
+struct f_rndis_qc {
+ struct usb_function func;
+ u8 ctrl_id, data_id;
+ u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ u8 ul_max_pkt_per_xfer;
+ u8 pkt_alignment_factor;
+ u32 max_pkt_size;
+ const char *manufacturer;
+ struct rndis_params *params;
+ atomic_t ioctl_excl;
+ atomic_t open_excl;
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ atomic_t notify_count;
+ struct gadget_ipa_port bam_port;
+ u8 port_num;
+ u16 cdc_filter;
+ bool net_ready_trigger;
+ bool use_wceis;
+};
+
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct f_rndis_qc *rndis);
+
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+ return container_of(f, struct f_rndis_qc, func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
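+/* e.g. high speed: 13 bulk packets of 512 bytes per 125 us microframe,
+ * 8 microframes per millisecond, 8 bits per byte; full speed: 19 packets
+ * of 64 bytes per 1 ms frame. The super-speed figure mirrors the high
+ * speed formula with 1024-byte packets.
+ */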
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT 8 /* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS 1
+#define RNDIS_QC_ACTIVE_PORT 0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define RNDIS_QC_IOCTL_MAGIC 'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE _IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+ .bLength = sizeof(rndis_qc_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_MISC,
+ .bInterfaceSubClass = 0x04,
+ .bInterfaceProtocol = 0x01, /* RNDIS over ethernet */
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+ .bLength = sizeof(rndis_qc_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+ .bLength = sizeof(rndis_qc_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+
+ .bmCapabilities = 0x00,
+ .bDataInterface = 0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+ .bLength = sizeof(rndis_qc_acm_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+
+ .bmCapabilities = 0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+ .bLength = sizeof(rndis_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+ .bLength = sizeof(rndis_qc_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+ .bLength = sizeof(rndis_qc_iad_descriptor),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_MISC,
+ .bFunctionSubClass = 0x04,
+ .bFunctionProtocol = 0x01, /* RNDIS over ethernet */
+ /* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+ .bLength = sizeof(rndis_qc_ss_intr_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+ .bLength = sizeof(rndis_qc_ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+ &rndis_qc_string_table,
+ NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
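+/* Non-blocking single-owner exclusion built on an atomic counter; used with
+ * the open_excl and ioctl_excl counters in struct f_rndis_qc.
+ */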
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1)
+ return 0;
+
+ atomic_dec(excl);
+ return -EBUSY;
+}
+
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void rndis_qc_response_available(void *_rndis)
+{
+ struct f_rndis_qc *rndis = _rndis;
+ struct usb_request *req = rndis->notify_req;
+ __le32 *data = req->buf;
+ int status;
+
+ if (atomic_inc_return(&rndis->notify_count) != 1)
+ return;
+
+ if (!rndis->notify->driver_data)
+ return;
+
+ /* Send RNDIS RESPONSE_AVAILABLE notification; a
+ * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+ *
+ * This is the only notification defined by RNDIS.
+ */
+ data[0] = cpu_to_le32(1);
+ data[1] = cpu_to_le32(0);
+
+ status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&rndis->notify_count);
+ pr_info("notify/0 --> %d\n", status);
+ }
+}
+
+static void rndis_qc_response_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_rndis_qc *rndis;
+ int status = req->status;
+ struct usb_composite_dev *cdev;
+ struct usb_ep *notify_ep;
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ if (!rndis->func.config || !rndis->func.config->cdev) {
+ pr_err("%s(): cdev or config is NULL.\n", __func__);
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ cdev = rndis->func.config->cdev;
+
+ /* after TX:
+ * - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+ * - RNDIS_RESPONSE_AVAILABLE (status/irq)
+ */
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&rndis->notify_count, 0);
+ goto out;
+ default:
+ pr_info("RNDIS %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ /* FALLTHROUGH */
+ case 0:
+ if (ep != rndis->notify)
+ goto out;
+
+ /* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+ * notifications by resending until we're done
+ */
+ if (atomic_dec_and_test(&rndis->notify_count))
+ goto out;
+ notify_ep = rndis->notify;
+ spin_unlock(&rndis_lock);
+ status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock(&rndis_lock);
+ if (!_rndis_qc)
+ goto out;
+ atomic_dec(&_rndis_qc->notify_count);
+ DBG(cdev, "notify/1 --> %d\n", status);
+ spin_unlock(&rndis_lock);
+ }
+ }
+
+ return;
+
+out:
+ spin_unlock(&rndis_lock);
+}
+
+static void rndis_qc_command_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_rndis_qc *rndis;
+ int status;
+ rndis_init_msg_type *buf;
+ u32 ul_max_xfer_size, dl_max_xfer_size;
+
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error %d\n",
+ __func__, req->status);
+ return;
+ }
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+ status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+ if (status < 0)
+ pr_err("RNDIS command error %d, %d/%d\n",
+ status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+
+ if (buf->MessageType == RNDIS_MSG_INIT) {
+ ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+ ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
+ /*
+		 * For consistent data throughput from IPA, the aggregation
+		 * byte limit must be tuned to 7KB. The RNDIS IPA driver uses
+		 * this value to calculate the aggregation byte limit and to
+		 * program the IPA hardware for aggregation.
+		 * The host provides 8KB or 16KB as the max transfer size, so
+		 * take the minimum of the host-provided value and the optimum
+		 * transfer size to arrive at a 7KB aggregation byte limit.
+ */
+ if (rndis_dl_max_xfer_size)
+ dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+ rndis_get_dl_max_xfer_size(rndis->params));
+ else
+ dl_max_xfer_size =
+ rndis_get_dl_max_xfer_size(rndis->params);
+ ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
+ }
+ spin_unlock(&rndis_lock);
+}
+
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ pr_debug("%s: Enter\n", __func__);
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* RNDIS uses the CDC command encapsulation mechanism to implement
+ * an RPC scheme, with much getting/setting of attributes by OID.
+ */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_value || w_index != rndis->ctrl_id)
+ goto invalid;
+ /* read the request; process it later */
+ value = w_length;
+ req->complete = rndis_qc_command_complete;
+ /* later, rndis_response_available() sends a notification */
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value || w_index != rndis->ctrl_id)
+ goto invalid;
+ else {
+ u8 *buf;
+ u32 n;
+
+ /* return the result */
+ buf = rndis_get_next_response(rndis->params, &n);
+ if (buf) {
+ memcpy(req->buf, buf, n);
+ req->complete = rndis_qc_response_complete;
+ rndis_free_response(rndis->params, buf);
+ value = n;
+ }
+ /* else stalls ... spec says to avoid that */
+ }
+ break;
+
+ default:
+invalid:
+ VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->context = rndis;
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("rndis response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+ struct net_device *net_dev;
+
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Decrement net_dev refcount as it was incremented in
+ * dev_get_by_name().
+ */
+ dev_put(net_dev);
+ return net_dev;
+}
+
+static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct f_rndis_qc_opts *opts;
+ struct usb_composite_dev *cdev = f->config->cdev;
+	int src_connection_idx;
+	int dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ /* we know alt == 0 */
+
+ opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+ if (intf == rndis->ctrl_id) {
+ if (rndis->notify->driver_data) {
+ VDBG(cdev, "reset rndis control %d\n", intf);
+ usb_ep_disable(rndis->notify);
+ }
+ if (!rndis->notify->desc) {
+ VDBG(cdev, "init rndis ctrl %d\n", intf);
+ if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+ goto fail;
+ }
+ usb_ep_enable(rndis->notify);
+ rndis->notify->driver_data = rndis;
+
+ } else if (intf == rndis->data_id) {
+ struct net_device *net;
+
+ rndis->net_ready_trigger = false;
+ if (rndis->bam_port.in->driver_data) {
+ DBG(cdev, "reset rndis\n");
+ /* bam_port is needed for disconnecting the BAM data
+ * path. Only after the BAM data path is disconnected,
+ * we can disconnect the port from the network layer.
+ */
+ ipa_data_disconnect(&rndis->bam_port,
+ USB_IPA_FUNC_RNDIS);
+ }
+
+ if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
+ DBG(cdev, "init rndis\n");
+ if (config_ep_by_speed(cdev->gadget, f,
+ rndis->bam_port.in) ||
+ config_ep_by_speed(cdev->gadget, f,
+ rndis->bam_port.out)) {
+ rndis->bam_port.in->desc = NULL;
+ rndis->bam_port.out->desc = NULL;
+ goto fail;
+ }
+ }
+
+ /* RNDIS should be in the "RNDIS uninitialized" state,
+ * either never activated or after rndis_uninit().
+ *
+ * We don't want data to flow here until a nonzero packet
+ * filter is set, at which point it enters "RNDIS data
+ * initialized" state ... but we do want the endpoints
+ * to be activated. It's a strange little state.
+ *
+ * REVISIT the RNDIS gadget code has done this wrong for a
+ * very long time. We need another call to the link layer
+ * code -- gether_updown(...bool) maybe -- to do it right.
+ */
+ rndis->cdc_filter = 0;
+
+ rndis->bam_port.cdev = cdev;
+ rndis->bam_port.func = &rndis->func;
+ ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+ usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ rndis->port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ rndis->port_num);
+ if (src_connection_idx < 0 || dst_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n",
+ __func__);
+			return -EINVAL;
+ }
+ if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+ src_connection_idx, dst_connection_idx))
+ goto fail;
+
+ DBG(cdev, "RNDIS RX/TX early activation ...\n");
+ rndis_qc_open(rndis);
+ net = rndis_qc_get_net("rndis0");
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ opts->net = net;
+
+ rndis_set_param_dev(rndis->params, net,
+ &rndis->cdc_filter);
+ } else
+ goto fail;
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static void rndis_qc_disable(struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ unsigned long flags;
+
+ if (!rndis->notify->driver_data)
+ return;
+
+ DBG(cdev, "rndis deactivated\n");
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ rndis_uninit(rndis->params);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
+
+ msm_ep_unconfig(rndis->bam_port.out);
+ msm_ep_unconfig(rndis->bam_port.in);
+ usb_ep_disable(rndis->notify);
+ rndis->notify->driver_data = NULL;
+}
+
+static void rndis_qc_suspend(struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+ __func__, remote_wakeup_allowed);
+
+ if (!remote_wakeup_allowed) {
+		/* This is required because the Linux host-side RNDIS driver
+		 * doesn't send RNDIS_MESSAGE_PACKET_FILTER before suspending
+		 * the USB bus, so perform the same operations explicitly here
+		 * for the Linux host. For a Windows host the RNDIS state
+		 * machine has already been updated on receipt of PACKET_FILTER.
+ */
+ rndis_flow_control(rndis->params, true);
+ pr_debug("%s(): Disconnecting\n", __func__);
+ }
+
+ ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+ remote_wakeup_allowed);
+ pr_debug("rndis suspended\n");
+}
+
+static void rndis_qc_resume(struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ bool remote_wakeup_allowed;
+
+ pr_debug("%s: rndis resumed\n", __func__);
+
+ /* Nothing to do if DATA interface wasn't initialized */
+ if (!rndis->bam_port.cdev) {
+ pr_debug("data interface was not up\n");
+ return;
+ }
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+ remote_wakeup_allowed);
+
+ if (!remote_wakeup_allowed) {
+ rndis_qc_open(rndis);
+ /*
+		 * A Linux host doesn't send RNDIS_MSG_INIT or a non-zero
+		 * RNDIS_MESSAGE_PACKET_FILTER value after a bus resume, so
+		 * trigger the USB IPA transfer functionality explicitly here.
+		 * For a Windows host this case is handled by the RNDIS state
+		 * machine.
+ */
+ rndis_flow_control(rndis->params, false);
+ }
+
+ pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested. A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+static void rndis_qc_open(struct f_rndis_qc *rndis)
+{
+ struct usb_composite_dev *cdev = rndis->func.config->cdev;
+
+ DBG(cdev, "%s\n", __func__);
+
+ rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+ rndis_qc_bitrate(cdev->gadget) / 100);
+ rndis_signal_connect(rndis->params);
+}
+
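+/* Flow control hook handed to rndis_register() in rndis_qc_bind() so the
+ * RNDIS state machine can pause or resume the IPA data path.
+ */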
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
+{
+ if (enable)
+ ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+ else
+ ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct rndis_params *params;
+ int status;
+ struct usb_ep *ep;
+
+ status = rndis_ipa_init(&rndis_ipa_params);
+ if (status) {
+ pr_err("%s: failed to init rndis_ipa\n", __func__);
+ return status;
+ }
+
+ rndis_ipa_supported = true;
+ /* maybe allocate device-global string IDs */
+ if (rndis_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[0].id = status;
+ rndis_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[1].id = status;
+ rndis_qc_data_intf.iInterface = status;
+
+ /* IAD iFunction label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[2].id = status;
+ rndis_qc_iad_descriptor.iFunction = status;
+ }
+
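+	/* When use_wceis is set, advertise the IAD and control interface as
+	 * Wireless Controller class (0xE0, subclass 1, protocol 3) instead
+	 * of Miscellaneous; this is the "WCEIS" identity some hosts expect
+	 * for RNDIS auto-detection.
+	 */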
+ if (rndis->use_wceis) {
+ rndis_qc_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_qc_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_qc_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_control_intf.bInterfaceSubClass = 0x1;
+ rndis_qc_control_intf.bInterfaceProtocol = 0x03;
+ }
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ rndis->ctrl_id = status;
+ rndis_qc_iad_descriptor.bFirstInterface = status;
+
+ rndis_qc_control_intf.bInterfaceNumber = status;
+ rndis_qc_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ rndis->data_id = status;
+
+ rndis_qc_data_intf.bInterfaceNumber = status;
+ rndis_qc_union_desc.bSlaveInterface0 = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+ if (!ep)
+ goto fail;
+ rndis->bam_port.in = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+ if (!ep)
+ goto fail;
+ rndis->bam_port.out = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* NOTE: a status/notification endpoint is, strictly speaking,
+ * optional. We don't treat it that way though! It's simpler,
+ * and some newer profiles don't treat it as optional.
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ rndis->notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!rndis->notify_req)
+ goto fail;
+ rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!rndis->notify_req->buf)
+ goto fail;
+ rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+ rndis->notify_req->context = rndis;
+ rndis->notify_req->complete = rndis_qc_response_complete;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ rndis_qc_hs_in_desc.bEndpointAddress =
+ rndis_qc_fs_in_desc.bEndpointAddress;
+ rndis_qc_hs_out_desc.bEndpointAddress =
+ rndis_qc_fs_out_desc.bEndpointAddress;
+ rndis_qc_hs_notify_desc.bEndpointAddress =
+ rndis_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ rndis_qc_ss_in_desc.bEndpointAddress =
+ rndis_qc_fs_in_desc.bEndpointAddress;
+ rndis_qc_ss_out_desc.bEndpointAddress =
+ rndis_qc_fs_out_desc.bEndpointAddress;
+ rndis_qc_ss_notify_desc.bEndpointAddress =
+ rndis_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ params = rndis_register(rndis_qc_response_available, rndis,
+ ipa_data_flow_control_enable);
+ if (params < 0)
+ goto fail;
+ rndis->params = params;
+
+ rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+ rndis_set_host_mac(rndis->params, rndis->ethaddr);
+
+ if (rndis->manufacturer && rndis->vendorID &&
+ rndis_set_param_vendor(rndis->params, rndis->vendorID,
+ rndis->manufacturer))
+ goto fail;
+
+ pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+ rndis->ul_max_pkt_per_xfer);
+ rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets the QC device will request
+	 * alignment to 4 bytes (2^2).
+	 */
+ pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+ rndis->pkt_alignment_factor);
+ rndis_set_pkt_alignment_factor(rndis->params,
+ rndis->pkt_alignment_factor);
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+ */
+
+ DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ rndis->bam_port.in->name, rndis->bam_port.out->name,
+ rndis->notify->name);
+ return 0;
+
+fail:
+ if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (rndis->notify_req) {
+ kfree(rndis->notify_req->buf);
+ usb_ep_free_request(rndis->notify, rndis->notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (rndis->notify)
+ rndis->notify->driver_data = NULL;
+	if (rndis->bam_port.out)
+		rndis->bam_port.out->driver_data = NULL;
+	if (rndis->bam_port.in)
+		rndis->bam_port.in->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+static void rndis_qc_free(struct usb_function *f)
+{
+ struct f_rndis_qc_opts *opts;
+
+ opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+ opts->refcnt--;
+}
+
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+
+ pr_debug("rndis_qc_unbind: free\n");
+ rndis_deregister(rndis->params);
+
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+ kfree(rndis->notify_req->buf);
+ usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+ /*
+ * call flush_workqueue to make sure that any pending
+ * disconnect_work() from u_bam_data.c file is being
+ * flushed before calling this rndis_ipa_cleanup API
+ * as rndis ipa disconnect API is required to be
+ * called before this.
+ */
+ ipa_data_flush_workqueue();
+ rndis_ipa_cleanup(rndis_ipa_params.private);
+ rndis_ipa_supported = false;
+
+}
+
+void rndis_ipa_reset_trigger(void)
+{
+ struct f_rndis_qc *rndis;
+
+ rndis = _rndis_qc;
+ if (!rndis) {
+ pr_err("%s: No RNDIS instance", __func__);
+ return;
+ }
+
+ rndis->net_ready_trigger = false;
+}
+
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests
+ */
+void rndis_net_ready_notify(void)
+{
+ struct f_rndis_qc *rndis;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ rndis = _rndis_qc;
+ if (!rndis) {
+ pr_err("%s: No RNDIS instance", __func__);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return;
+ }
+ if (rndis->net_ready_trigger) {
+ pr_err("%s: Already triggered", __func__);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return;
+ }
+
+ pr_debug("%s: Set net_ready_trigger", __func__);
+ rndis->net_ready_trigger = true;
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
+}
+
+/**
+ * rndis_qc_bind_config_vendor - set up the RNDIS function for a configuration
+ * @fi: function instance created through configfs
+ * @vendorID: RNDIS vendor ID reported to the host (0 if unused)
+ * @manufacturer: RNDIS manufacturer string (NULL if unused)
+ * @max_pkt_per_xfer: max aggregated packets per UL transfer (0 for default)
+ * @pkt_alignment_factor: packet alignment factor (0 for default)
+ * Context: single threaded during gadget setup
+ *
+ * Generates the host and device ethernet addresses, programs the RNDIS
+ * IPA parameters and fills in the usb_function operations.
+ *
+ * Returns the usb_function to be added to a configuration.
+ */
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+ u32 vendorID, const char *manufacturer,
+ u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
+{
+ struct f_rndis_qc_opts *opts = container_of(fi,
+ struct f_rndis_qc_opts, func_inst);
+ struct f_rndis_qc *rndis;
+
+ opts->refcnt++;
+ rndis = opts->rndis;
+
+ rndis->vendorID = opts->vendor_id;
+ rndis->manufacturer = opts->manufacturer;
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(rndis_ipa_params.host_ethaddr);
+ random_ether_addr(rndis_ipa_params.device_ethaddr);
+ pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+ rndis_ipa_params.host_ethaddr,
+ rndis_ipa_params.device_ethaddr);
+ ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+ rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+
+ /* if max_pkt_per_xfer was not configured set to default value */
+ rndis->ul_max_pkt_per_xfer =
+ max_pkt_per_xfer ? max_pkt_per_xfer :
+ DEFAULT_MAX_PKT_PER_XFER;
+ ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+	/*
+	 * If RNDIS aggregation is disabled (one packet per transfer) and no
+	 * alignment factor was requested, keep the alignment factor at zero.
+	 * With aggregation enabled, fall back to the default alignment
+	 * factor of 2 (4-byte alignment) when none is given, just as
+	 * max_pkt_per_xfer falls back to its default above. This avoids any
+	 * sysfs writes for the default aggregation setup; values written to
+	 * the sysfs entries always override these defaults.
+	 */
+ if ((rndis->pkt_alignment_factor == 0) &&
+ (rndis->ul_max_pkt_per_xfer == 1))
+ rndis->pkt_alignment_factor = 0;
+ else
+ rndis->pkt_alignment_factor = pkt_alignment_factor ?
+ pkt_alignment_factor :
+ DEFAULT_PKT_ALIGNMENT_FACTOR;
+
+ /* RNDIS activates when the host changes this filter */
+ rndis->cdc_filter = 0;
+
+ rndis->func.name = "rndis";
+ rndis->func.strings = rndis_qc_strings;
+ /* descriptors are per-instance copies */
+ rndis->func.bind = rndis_qc_bind;
+ rndis->func.unbind = rndis_qc_unbind;
+ rndis->func.set_alt = rndis_qc_set_alt;
+ rndis->func.setup = rndis_qc_setup;
+ rndis->func.disable = rndis_qc_disable;
+ rndis->func.suspend = rndis_qc_suspend;
+ rndis->func.resume = rndis_qc_resume;
+ rndis->func.free_func = rndis_qc_free;
+
+ _rndis_qc = rndis;
+
+ return &rndis->func;
+}
+
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+ return rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
+}
+
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+ int ret = 0;
+	unsigned long flags;
+
+	pr_info("Open rndis QC driver\n");
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not created yet\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+ pr_err("Already opened\n");
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ fp->private_data = _rndis_qc;
+fail:
+ spin_unlock_irqrestore(&rndis_lock, flags);
+
+ if (!ret)
+ pr_info("rndis QC file opened\n");
+
+ return ret;
+}
+
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+
+	pr_info("Close rndis QC file\n");
+
+ spin_lock_irqsave(&rndis_lock, flags);
+
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return -ENODEV;
+ }
+ rndis_qc_unlock(&_rndis_qc->open_excl);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return 0;
+}
+
+static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+ u8 qc_max_pkt_per_xfer = 0;
+ u32 qc_max_pkt_size = 0;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rndis_lock, flags);
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+ qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+ if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ spin_unlock_irqrestore(&rndis_lock, flags);
+
+ pr_info("Received command %d\n", cmd);
+
+ switch (cmd) {
+ case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+ ret = copy_to_user((void __user *)arg,
+ &qc_max_pkt_per_xfer,
+ sizeof(qc_max_pkt_per_xfer));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_info("Sent UL max packets per xfer %d\n",
+ qc_max_pkt_per_xfer);
+ break;
+ case RNDIS_QC_GET_MAX_PKT_SIZE:
+ ret = copy_to_user((void __user *)arg,
+ &qc_max_pkt_size,
+ sizeof(qc_max_pkt_size));
+ if (ret) {
+ pr_err("copying to user space failed\n");
+ ret = -EFAULT;
+ }
+ pr_debug("Sent max packet size %d\n",
+ qc_max_pkt_size);
+ break;
+ default:
+ pr_err("Unsupported IOCTL\n");
+ ret = -EINVAL;
+ }
+
+ spin_lock_irqsave(&rndis_lock, flags);
+
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not present\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+ spin_unlock_irqrestore(&rndis_lock, flags);
+ return ret;
+}
+
+static const struct file_operations rndis_qc_fops = {
+ .owner = THIS_MODULE,
+ .open = rndis_qc_open_dev,
+ .release = rndis_qc_release_dev,
+ .unlocked_ioctl = rndis_qc_ioctl,
+};
+
+static struct miscdevice rndis_qc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "android_rndis_qc",
+ .fops = &rndis_qc_fops,
+};
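+
+/*
+ * Illustrative userspace sketch (not part of this driver; assumes the
+ * RNDIS_QC_* ioctl definitions are available from a uapi header): once
+ * rndis_qc_device is registered (see qcrndis_set_inst_name()), a client
+ * may query the UL aggregation limits handled in rndis_qc_ioctl(), e.g.:
+ *
+ *	int fd = open("/dev/android_rndis_qc", O_RDWR);
+ *	uint8_t max_pkts;
+ *	uint32_t max_size;
+ *
+ *	ioctl(fd, RNDIS_QC_GET_MAX_PKT_PER_XFER, &max_pkts);
+ *	ioctl(fd, RNDIS_QC_GET_MAX_PKT_SIZE, &max_size);
+ */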
+
+static void qcrndis_free_inst(struct usb_function_instance *f)
+{
+ struct f_rndis_qc_opts *opts = container_of(f,
+ struct f_rndis_qc_opts, func_inst);
+ unsigned long flags;
+
+ misc_deregister(&rndis_qc_device);
+
+ ipa_data_free(USB_IPA_FUNC_RNDIS);
+ spin_lock_irqsave(&rndis_lock, flags);
+ kfree(opts->rndis);
+ _rndis_qc = NULL;
+ kfree(opts);
+ spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ struct f_rndis_qc_opts *opts = container_of(fi,
+ struct f_rndis_qc_opts, func_inst);
+ struct f_rndis_qc *rndis;
+ int name_len;
+ int ret;
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ pr_debug("initialize rndis QC instance\n");
+ rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+ if (!rndis) {
+ pr_err("%s: fail allocate and initialize new instance\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&rndis_lock);
+	opts->rndis = rndis;
+	ret = misc_register(&rndis_qc_device);
+	if (ret) {
+		pr_err("rndis QC driver failed to register\n");
+		goto free_rndis;
+	}
+
+	ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
+	if (ret) {
+		pr_err("ipa_data_setup failed err: %d\n", ret);
+		misc_deregister(&rndis_qc_device);
+		goto free_rndis;
+	}
+
+	return 0;
+
+free_rndis:
+	opts->rndis = NULL;
+	kfree(rndis);
+	return ret;
+}
+
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_rndis_qc_opts,
+ func_inst.group);
+}
+
+static void qcrndis_attr_release(struct config_item *item)
+{
+ struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qcrndis_item_ops = {
+ .release = qcrndis_attr_release,
+};
+
+
+static ssize_t qcrndis_wceis_show(struct config_item *item, char *page)
+{
+ struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", rndis->use_wceis);
+}
+
+static ssize_t qcrndis_wceis_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ rndis->use_wceis = val;
+
+ return len;
+}
+
+CONFIGFS_ATTR(qcrndis_, wceis);
+
+static struct configfs_attribute *qcrndis_attrs[] = {
+ &qcrndis_attr_wceis,
+ NULL,
+};
+
+static struct config_item_type qcrndis_func_type = {
+ .ct_item_ops = &qcrndis_item_ops,
+ .ct_attrs = qcrndis_attrs,
+ .ct_owner = THIS_MODULE,
+};
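+
+/*
+ * Illustrative configfs usage (gadget path and instance name are
+ * examples only, based on the "rndis_bam" function name and the
+ * "wceis" attribute defined above):
+ *
+ *	mkdir /config/usb_gadget/g1/functions/rndis_bam.rndis
+ *	echo 1 > /config/usb_gadget/g1/functions/rndis_bam.rndis/wceis
+ *	ln -s functions/rndis_bam.rndis configs/c.1
+ */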
+
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+ struct f_rndis_qc_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+ opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &qcrndis_func_type);
+
+ return &opts->func_inst;
+}
+
+void *rndis_qc_get_ipa_rx_cb(void)
+{
+ return rndis_ipa_params.ipa_rx_notify;
+}
+
+void *rndis_qc_get_ipa_tx_cb(void)
+{
+ return rndis_ipa_params.ipa_tx_notify;
+}
+
+void *rndis_qc_get_ipa_priv(void)
+{
+ return rndis_ipa_params.private;
+}
+
+bool rndis_qc_get_skip_ep_config(void)
+{
+ return rndis_ipa_params.skip_ep_cfg;
+}
+
+DECLARE_USB_FUNCTION(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
+
+static int __init usb_qcrndis_init(void)
+{
+ int ret;
+
+ ret = usb_function_register(&rndis_bamusb_func);
+ if (ret) {
+ pr_err("%s: failed to register diag %d\n", __func__, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void __exit usb_qcrndis_exit(void)
+{
+ usb_function_unregister(&rndis_bamusb_func);
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+MODULE_DESCRIPTION("USB RMNET Function Driver");
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
new file mode 100644
index 000000000000..2c416472e279
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -0,0 +1,1187 @@
+/*
+ * f_qdss.c -- QDSS function Driver
+ *
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/usb_qdss.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/cdc.h>
+
+#include "f_qdss.h"
+
+static DEFINE_SPINLOCK(qdss_lock);
+static LIST_HEAD(usb_qdss_ch_list);
+
+static struct usb_interface_descriptor qdss_data_intf_desc = {
+ .bLength = sizeof qdss_data_intf_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_data_ep_comp_desc = {
+ .bLength = sizeof qdss_data_ep_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_interface_descriptor qdss_ctrl_intf_desc = {
+ .bLength = sizeof qdss_ctrl_intf_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_hs_ctrl_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_ss_ctrl_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor qdss_hs_ctrl_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_ss_ctrl_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(0x400),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_ctrl_in_ep_comp_desc = {
+ .bLength = sizeof qdss_ctrl_in_ep_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_ctrl_out_ep_comp_desc = {
+ .bLength = sizeof qdss_ctrl_out_ep_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *qdss_hs_desc[] = {
+ (struct usb_descriptor_header *) &qdss_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_hs_data_desc,
+ (struct usb_descriptor_header *) &qdss_ctrl_intf_desc,
+ (struct usb_descriptor_header *) &qdss_hs_ctrl_in_desc,
+ (struct usb_descriptor_header *) &qdss_hs_ctrl_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *qdss_ss_desc[] = {
+ (struct usb_descriptor_header *) &qdss_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_ss_data_desc,
+ (struct usb_descriptor_header *) &qdss_data_ep_comp_desc,
+ (struct usb_descriptor_header *) &qdss_ctrl_intf_desc,
+ (struct usb_descriptor_header *) &qdss_ss_ctrl_in_desc,
+ (struct usb_descriptor_header *) &qdss_ctrl_in_ep_comp_desc,
+ (struct usb_descriptor_header *) &qdss_ss_ctrl_out_desc,
+ (struct usb_descriptor_header *) &qdss_ctrl_out_ep_comp_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *qdss_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *qdss_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_ss_data_desc,
+ (struct usb_descriptor_header *) &qdss_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+#define MSM_QDSS_DATA_IDX 0
+#define MSM_QDSS_CTRL_IDX 1
+#define MDM_QDSS_DATA_IDX 2
+#define MDM_QDSS_CTRL_IDX 3
+
+static struct usb_string qdss_string_defs[] = {
+ [MSM_QDSS_DATA_IDX].s = "MSM QDSS Data",
+ [MSM_QDSS_CTRL_IDX].s = "MSM QDSS Control",
+ [MDM_QDSS_DATA_IDX].s = "MDM QDSS Data",
+ [MDM_QDSS_CTRL_IDX].s = "MDM QDSS Control",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings qdss_string_table = {
+ .language = 0x0409,
+ .strings = qdss_string_defs,
+};
+
+static struct usb_gadget_strings *qdss_strings[] = {
+ &qdss_string_table,
+ NULL,
+};
+
+static inline struct f_qdss *func_to_qdss(struct usb_function *f)
+{
+ return container_of(f, struct f_qdss, port.function);
+}
+
+static struct usb_qdss_opts *
+to_fi_usb_qdss_opts(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct usb_qdss_opts, func_inst);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void qdss_write_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_qdss *qdss = ep->driver_data;
+ struct qdss_request *d_req = req->context;
+ struct usb_ep *in;
+ struct list_head *list_pool;
+ enum qdss_state state;
+ unsigned long flags;
+
+ pr_debug("qdss_ctrl_write_complete\n");
+
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ state = USB_QDSS_CTRL_WRITE_DONE;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ state = USB_QDSS_DATA_WRITE_DONE;
+ }
+
+ if (!req->status) {
+ /* send zlp */
+ if ((req->length >= ep->maxpacket) &&
+ ((req->length % ep->maxpacket) == 0)) {
+ req->length = 0;
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ if (!usb_ep_queue(in, req, GFP_ATOMIC))
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, list_pool);
+ if (req->length != 0) {
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+ }
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv, state, d_req, NULL);
+}
+
+static void qdss_ctrl_read_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_qdss *qdss = ep->driver_data;
+ struct qdss_request *d_req = req->context;
+ unsigned long flags;
+
+ pr_debug("qdss_ctrl_read_complete\n");
+
+ d_req->actual = req->actual;
+ d_req->status = req->status;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->ctrl_read_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_READ_DONE, d_req,
+ NULL);
+}
+
+void usb_qdss_free_req(struct usb_qdss_ch *ch)
+{
+ struct f_qdss *qdss;
+ struct usb_request *req;
+ struct list_head *act, *tmp;
+
+ pr_debug("usb_qdss_free_req\n");
+
+ qdss = ch->priv_usb;
+ if (!qdss) {
+ pr_err("usb_qdss_free_req: qdss ctx is NULL\n");
+ return;
+ }
+
+ list_for_each_safe(act, tmp, &qdss->data_write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(qdss->port.data, req);
+ }
+
+ list_for_each_safe(act, tmp, &qdss->ctrl_write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(qdss->port.ctrl_in, req);
+ }
+
+ list_for_each_safe(act, tmp, &qdss->ctrl_read_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(qdss->port.ctrl_out, req);
+ }
+}
+EXPORT_SYMBOL(usb_qdss_free_req);
+
+int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf,
+ int no_read_buf)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ struct usb_request *req;
+ struct usb_ep *in;
+ struct list_head *list_pool;
+ int i;
+
+ pr_debug("usb_qdss_alloc_req\n");
+
+ if (!qdss) {
+ pr_err("usb_qdss_alloc_req: channel %s closed\n", ch->name);
+ return -ENODEV;
+ }
+
+ if ((qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf <= 0)) ||
+ (!qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf))) {
+ pr_err("usb_qdss_alloc_req: missing params\n");
+ return -ENODEV;
+ }
+
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ }
+
+ for (i = 0; i < no_write_buf; i++) {
+ req = usb_ep_alloc_request(in, GFP_ATOMIC);
+ if (!req) {
+ pr_err("usb_qdss_alloc_req: ctrl_in allocation err\n");
+ goto fail;
+ }
+ req->complete = qdss_write_complete;
+ list_add_tail(&req->list, list_pool);
+ }
+
+ for (i = 0; i < no_read_buf; i++) {
+ req = usb_ep_alloc_request(qdss->port.ctrl_out, GFP_ATOMIC);
+ if (!req) {
+ pr_err("usb_qdss_alloc_req:ctrl_out allocation err\n");
+ goto fail;
+ }
+ req->complete = qdss_ctrl_read_complete;
+ list_add_tail(&req->list, &qdss->ctrl_read_pool);
+ }
+
+ return 0;
+
+fail:
+ usb_qdss_free_req(ch);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(usb_qdss_alloc_req);
+
+static void clear_eps(struct usb_function *f)
+{
+ struct f_qdss *qdss = func_to_qdss(f);
+
+ pr_debug("clear_eps\n");
+
+ if (qdss->port.ctrl_in)
+ qdss->port.ctrl_in->driver_data = NULL;
+ if (qdss->port.ctrl_out)
+ qdss->port.ctrl_out->driver_data = NULL;
+ if (qdss->port.data)
+ qdss->port.data->driver_data = NULL;
+}
+
+static void clear_desc(struct usb_gadget *gadget, struct usb_function *f)
+{
+ pr_debug("clear_desc\n");
+
+ if (gadget_is_superspeed(gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+
+ if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+}
+
+static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_gadget *gadget = c->cdev->gadget;
+ struct f_qdss *qdss = func_to_qdss(f);
+ struct usb_ep *ep;
+ int iface, id, str_data_id, str_ctrl_id;
+
+ pr_debug("qdss_bind\n");
+
+ if (!gadget_is_dualspeed(gadget) && !gadget_is_superspeed(gadget)) {
+ pr_err("qdss_bind: full-speed is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ /* Allocate data I/F */
+ iface = usb_interface_id(c, f);
+ if (iface < 0) {
+ pr_err("interface allocation error\n");
+ return iface;
+ }
+ qdss_data_intf_desc.bInterfaceNumber = iface;
+ qdss->data_iface_id = iface;
+
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+
+ str_data_id = MSM_QDSS_DATA_IDX;
+ str_ctrl_id = MSM_QDSS_CTRL_IDX;
+ if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
+ str_data_id = MDM_QDSS_DATA_IDX;
+ str_ctrl_id = MDM_QDSS_CTRL_IDX;
+ }
+
+ qdss_string_defs[str_data_id].id = id;
+ qdss_data_intf_desc.iInterface = id;
+
+ if (qdss->debug_inface_enabled) {
+ /* Allocate ctrl I/F */
+ iface = usb_interface_id(c, f);
+ if (iface < 0) {
+ pr_err("interface allocation error\n");
+ return iface;
+ }
+ qdss_ctrl_intf_desc.bInterfaceNumber = iface;
+ qdss->ctrl_iface_id = iface;
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[str_ctrl_id].id = id;
+ qdss_ctrl_intf_desc.iInterface = id;
+ }
+
+ ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
+ &qdss_data_ep_comp_desc);
+ if (!ep) {
+ pr_err("ep_autoconfig error\n");
+ goto fail;
+ }
+ qdss->port.data = ep;
+ ep->driver_data = qdss;
+
+ if (qdss->debug_inface_enabled) {
+ ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_ctrl_in_desc,
+ &qdss_ctrl_in_ep_comp_desc);
+ if (!ep) {
+ pr_err("ep_autoconfig error\n");
+ goto fail;
+ }
+ qdss->port.ctrl_in = ep;
+ ep->driver_data = qdss;
+
+ ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_ctrl_out_desc,
+ &qdss_ctrl_out_ep_comp_desc);
+ if (!ep) {
+ pr_err("ep_autoconfig error\n");
+ goto fail;
+ }
+ qdss->port.ctrl_out = ep;
+ ep->driver_data = qdss;
+ }
+
+	/* update hs descriptors */
+ qdss_hs_data_desc.bEndpointAddress =
+ qdss_ss_data_desc.bEndpointAddress;
+ if (qdss->debug_inface_enabled) {
+ qdss_hs_ctrl_in_desc.bEndpointAddress =
+ qdss_ss_ctrl_in_desc.bEndpointAddress;
+ qdss_hs_ctrl_out_desc.bEndpointAddress =
+ qdss_ss_ctrl_out_desc.bEndpointAddress;
+ f->hs_descriptors = usb_copy_descriptors(qdss_hs_desc);
+	} else {
+		f->hs_descriptors = usb_copy_descriptors(
+				qdss_hs_data_only_desc);
+	}
+ if (!f->hs_descriptors) {
+ pr_err("usb_copy_descriptors error\n");
+ goto fail;
+ }
+
+ /* update ss descriptors */
+ if (gadget_is_superspeed(gadget)) {
+ if (qdss->debug_inface_enabled)
+ f->ss_descriptors =
+ usb_copy_descriptors(qdss_ss_desc);
+ else
+ f->ss_descriptors =
+ usb_copy_descriptors(qdss_ss_data_only_desc);
+ if (!f->ss_descriptors) {
+ pr_err("usb_copy_descriptors error\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ clear_eps(f);
+ clear_desc(gadget, f);
+ return -ENOTSUPP;
+}
+
+
+static void qdss_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_qdss *qdss = func_to_qdss(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
+
+ pr_debug("qdss_unbind\n");
+
+ flush_workqueue(qdss->wq);
+
+ clear_eps(f);
+ clear_desc(gadget, f);
+}
+
+static void qdss_eps_disable(struct usb_function *f)
+{
+ struct f_qdss *qdss = func_to_qdss(f);
+
+ pr_debug("qdss_eps_disable\n");
+
+ if (qdss->ctrl_in_enabled) {
+ usb_ep_disable(qdss->port.ctrl_in);
+ qdss->ctrl_in_enabled = 0;
+ }
+
+ if (qdss->ctrl_out_enabled) {
+ usb_ep_disable(qdss->port.ctrl_out);
+ qdss->ctrl_out_enabled = 0;
+ }
+
+ if (qdss->data_enabled) {
+ usb_ep_disable(qdss->port.data);
+ qdss->data_enabled = 0;
+ }
+}
+
+static void usb_qdss_disconnect_work(struct work_struct *work)
+{
+ struct f_qdss *qdss;
+ int status;
+ unsigned long flags;
+
+ qdss = container_of(work, struct f_qdss, disconnect_w);
+ pr_debug("usb_qdss_disconnect_work\n");
+
+ /* Notify qdss to cancel all active transfers */
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv,
+ USB_QDSS_DISCONNECT,
+ NULL,
+ NULL);
+
+	/* Uninitialize the data path, i.e. ep-specific teardown */
+ if (qdss->ch.app_conn && !strcmp(qdss->ch.name, USB_QDSS_CH_MSM)) {
+ status = uninit_data(qdss->port.data);
+ if (status)
+ pr_err("%s: uninit_data error\n", __func__);
+
+ status = set_qdss_data_connection(qdss, 0);
+ if (status)
+ pr_err("qdss_disconnect error");
+
+ spin_lock_irqsave(&qdss->lock, flags);
+ if (qdss->endless_req) {
+ usb_ep_free_request(qdss->port.data,
+ qdss->endless_req);
+ qdss->endless_req = NULL;
+ }
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ }
+
+ /*
+ * Decrement usage count which was incremented
+ * before calling connect work
+ */
+ usb_gadget_autopm_put_async(qdss->gadget);
+}
+
+static void qdss_disable(struct usb_function *f)
+{
+ struct f_qdss *qdss = func_to_qdss(f);
+ unsigned long flags;
+
+ pr_debug("qdss_disable\n");
+ spin_lock_irqsave(&qdss->lock, flags);
+ if (!qdss->usb_connected) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return;
+ }
+
+ qdss->usb_connected = 0;
+ spin_unlock_irqrestore(&qdss->lock, flags);
+	/* cancel all active xfers */
+ qdss_eps_disable(f);
+ queue_work(qdss->wq, &qdss->disconnect_w);
+}
+
+static void usb_qdss_connect_work(struct work_struct *work)
+{
+ struct f_qdss *qdss;
+ int status;
+ struct usb_request *req = NULL;
+ unsigned long flags;
+
+ qdss = container_of(work, struct f_qdss, connect_w);
+
+ /* If cable is already removed, discard connect_work */
+ if (qdss->usb_connected == 0) {
+ pr_debug("%s: discard connect_work\n", __func__);
+ cancel_work_sync(&qdss->disconnect_w);
+ return;
+ }
+
+ pr_debug("usb_qdss_connect_work\n");
+
+ if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+ goto notify;
+
+ status = set_qdss_data_connection(qdss, 1);
+ if (status) {
+ pr_err("set_qdss_data_connection error(%d)", status);
+ return;
+ }
+
+ spin_lock_irqsave(&qdss->lock, flags);
+ req = qdss->endless_req;
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ if (!req)
+ return;
+
+ status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
+ if (status) {
+ pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
+ return;
+ }
+
+notify:
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
+ NULL, &qdss->ch);
+}
+
+static int qdss_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_qdss *qdss = func_to_qdss(f);
+ struct usb_gadget *gadget = f->config->cdev->gadget;
+ struct usb_qdss_ch *ch = &qdss->ch;
+ int ret = 0;
+
+ pr_debug("qdss_set_alt qdss pointer = %pK\n", qdss);
+ qdss->gadget = gadget;
+
+ if (alt != 0)
+ goto fail1;
+
+ if (gadget->speed != USB_SPEED_SUPER &&
+ gadget->speed != USB_SPEED_HIGH) {
+ pr_err("qdss_st_alt: qdss supportes HS or SS only\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ if (intf == qdss->data_iface_id) {
+ /* Increment usage count on connect */
+ usb_gadget_autopm_get_async(qdss->gadget);
+
+ if (config_ep_by_speed(gadget, f, qdss->port.data)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = usb_ep_enable(qdss->port.data);
+ if (ret)
+ goto fail;
+
+ qdss->port.data->driver_data = qdss;
+ qdss->data_enabled = 1;
+
+ } else if ((intf == qdss->ctrl_iface_id) &&
+ (qdss->debug_inface_enabled)) {
+
+ if (config_ep_by_speed(gadget, f, qdss->port.ctrl_in)) {
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ ret = usb_ep_enable(qdss->port.ctrl_in);
+ if (ret)
+ goto fail1;
+
+ qdss->port.ctrl_in->driver_data = qdss;
+ qdss->ctrl_in_enabled = 1;
+
+ if (config_ep_by_speed(gadget, f, qdss->port.ctrl_out)) {
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ ret = usb_ep_enable(qdss->port.ctrl_out);
+ if (ret)
+ goto fail1;
+
+ qdss->port.ctrl_out->driver_data = qdss;
+ qdss->ctrl_out_enabled = 1;
+ }
+
+ if (qdss->debug_inface_enabled) {
+ if (qdss->ctrl_out_enabled && qdss->ctrl_in_enabled &&
+ qdss->data_enabled) {
+ qdss->usb_connected = 1;
+ pr_debug("qdss_set_alt usb_connected INTF enabled\n");
+ }
+ } else {
+ if (qdss->data_enabled) {
+ qdss->usb_connected = 1;
+ pr_debug("qdss_set_alt usb_connected INTF disabled\n");
+ }
+ }
+
+ if (qdss->usb_connected && ch->app_conn)
+ queue_work(qdss->wq, &qdss->connect_w);
+
+ return 0;
+fail:
+ /* Decrement usage count in case of failure */
+ usb_gadget_autopm_put_async(qdss->gadget);
+fail1:
+ pr_err("qdss_set_alt failed\n");
+ qdss_eps_disable(f);
+ return ret;
+}
+
+static struct f_qdss *alloc_usb_qdss(char *channel_name)
+{
+ struct f_qdss *qdss;
+ int found = 0;
+ struct usb_qdss_ch *ch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qdss_lock, flags);
+ list_for_each_entry(ch, &usb_qdss_ch_list, list) {
+ if (!strcmp(channel_name, ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ pr_err("%s: (%s) is already available.\n",
+ __func__, channel_name);
+ return ERR_PTR(-EEXIST);
+ }
+
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ qdss = kzalloc(sizeof(struct f_qdss), GFP_KERNEL);
+ if (!qdss) {
+ pr_err("%s: Unable to allocate qdss device\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ qdss->wq = create_singlethread_workqueue(channel_name);
+ if (!qdss->wq) {
+ kfree(qdss);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&qdss_lock, flags);
+ ch = &qdss->ch;
+ ch->name = channel_name;
+ list_add_tail(&ch->list, &usb_qdss_ch_list);
+ spin_unlock_irqrestore(&qdss_lock, flags);
+
+ spin_lock_init(&qdss->lock);
+ INIT_LIST_HEAD(&qdss->ctrl_read_pool);
+ INIT_LIST_HEAD(&qdss->ctrl_write_pool);
+ INIT_LIST_HEAD(&qdss->data_write_pool);
+ INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
+ INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
+
+ return qdss;
+}
+
+int usb_qdss_ctrl_read(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ pr_debug("usb_qdss_ctrl_read\n");
+
+ if (!qdss)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+
+ if (qdss->usb_connected == 0) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&qdss->ctrl_read_pool)) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("error: usb_qdss_ctrl_read list is empty\n");
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&qdss->ctrl_read_pool, struct usb_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+
+ if (usb_ep_queue(qdss->port.ctrl_out, req, GFP_ATOMIC)) {
+ /* If error add the link to linked list again*/
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->ctrl_read_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("qdss usb_ep_queue failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_qdss_ctrl_read);
+
+int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ pr_debug("usb_qdss_ctrl_write\n");
+
+ if (!qdss)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+
+ if (qdss->usb_connected == 0) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&qdss->ctrl_write_pool)) {
+ pr_err("error: usb_qdss_ctrl_write list is empty\n");
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&qdss->ctrl_write_pool, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(qdss->port.ctrl_in, req, GFP_ATOMIC)) {
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->ctrl_write_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("qdss usb_ep_queue failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_qdss_ctrl_write);
+
+int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ pr_debug("usb_qdss_ctrl_write\n");
+
+ if (!qdss)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+
+ if (qdss->usb_connected == 0) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&qdss->data_write_pool)) {
+ pr_err("error: usb_qdss_data_write list is empty\n");
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&qdss->data_write_pool, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->data_write_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("qdss usb_ep_queue failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_qdss_write);
+
+struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
+ void (*notify)(void *, unsigned, struct qdss_request *,
+ struct usb_qdss_ch *))
+{
+ struct usb_qdss_ch *ch;
+ struct f_qdss *qdss;
+ unsigned long flags;
+ int found = 0;
+
+ pr_debug("usb_qdss_open\n");
+
+ if (!notify) {
+ pr_err("usb_qdss_open: notification func is missing\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&qdss_lock, flags);
+ /* Check if we already have a channel with this name */
+ list_for_each_entry(ch, &usb_qdss_ch_list, list) {
+ if (!strcmp(name, ch->name)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ pr_debug("usb_qdss_open failed as %s not found\n", name);
+ return NULL;
+ } else {
+ pr_debug("usb_qdss_open: qdss ctx found\n");
+ qdss = container_of(ch, struct f_qdss, ch);
+ ch->priv_usb = qdss;
+ }
+
+ ch->priv = priv;
+ ch->notify = notify;
+ ch->app_conn = 1;
+ spin_unlock_irqrestore(&qdss_lock, flags);
+
+	/*
+	 * Handle the case where the USB cable was connected before
+	 * usb_qdss_open() was called.
+	 */
+ if (qdss->usb_connected == 1)
+ queue_work(qdss->wq, &qdss->connect_w);
+
+ return ch;
+}
+EXPORT_SYMBOL(usb_qdss_open);
+
+void usb_qdss_close(struct usb_qdss_ch *ch)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ struct usb_gadget *gadget;
+ unsigned long flags;
+ int status;
+
+ pr_debug("usb_qdss_close\n");
+
+ spin_lock_irqsave(&qdss_lock, flags);
+ ch->priv_usb = NULL;
+ if (!qdss || !qdss->usb_connected ||
+ !strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
+ ch->app_conn = 0;
+ spin_unlock_irqrestore(&qdss_lock, flags);
+ return;
+ }
+
+ if (qdss->endless_req) {
+ usb_ep_dequeue(qdss->port.data, qdss->endless_req);
+ usb_ep_free_request(qdss->port.data, qdss->endless_req);
+ qdss->endless_req = NULL;
+ }
+ gadget = qdss->gadget;
+ ch->app_conn = 0;
+ spin_unlock_irqrestore(&qdss_lock, flags);
+
+ status = uninit_data(qdss->port.data);
+ if (status)
+ pr_err("%s: uninit_data error\n", __func__);
+
+ status = set_qdss_data_connection(qdss, 0);
+ if (status)
+ pr_err("%s:qdss_disconnect error\n", __func__);
+}
+EXPORT_SYMBOL(usb_qdss_close);
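+
+/*
+ * Illustrative client-side call sequence (a sketch only; the callback
+ * name and buffer count are examples, not part of this driver):
+ *
+ *	ch = usb_qdss_open(USB_QDSS_CH_MSM, priv, client_notify_cb);
+ *	...on USB_QDSS_CONNECT in the callback:
+ *	usb_qdss_alloc_req(ch, num_write_bufs, 0);
+ *	usb_qdss_write(ch, &d_req);
+ *	...on teardown:
+ *	usb_qdss_close(ch);
+ */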
+
+static void qdss_cleanup(void)
+{
+ struct f_qdss *qdss;
+ struct list_head *act, *tmp;
+ struct usb_qdss_ch *_ch;
+ unsigned long flags;
+
+ pr_debug("qdss_cleanup\n");
+
+ list_for_each_safe(act, tmp, &usb_qdss_ch_list) {
+ _ch = list_entry(act, struct usb_qdss_ch, list);
+ qdss = container_of(_ch, struct f_qdss, ch);
+		/* destroy_workqueue() can sleep, so call it outside the lock */
+		destroy_workqueue(qdss->wq);
+		spin_lock_irqsave(&qdss_lock, flags);
+		if (!_ch->priv) {
+			list_del(&_ch->list);
+			kfree(qdss);
+		}
+		spin_unlock_irqrestore(&qdss_lock, flags);
+ }
+}
+
+static void qdss_free_func(struct usb_function *f)
+{
+ /* Do nothing as usb_qdss_alloc() doesn't alloc anything. */
+}
+
+static inline struct usb_qdss_opts *to_f_qdss_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct usb_qdss_opts,
+ func_inst.group);
+}
+
+static void qdss_attr_release(struct config_item *item)
+{
+ struct usb_qdss_opts *opts = to_f_qdss_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qdss_item_ops = {
+ .release = qdss_attr_release,
+};
+
+static ssize_t qdss_enable_debug_inface_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ (to_f_qdss_opts(item)->usb_qdss->debug_inface_enabled == 1) ?
+ "Enabled" : "Disabled");
+}
+
+static ssize_t qdss_enable_debug_inface_store(struct config_item *item,
+			const char *page, size_t len)
+{
+	struct f_qdss *qdss = to_f_qdss_opts(item)->usb_qdss;
+	unsigned long flags;
+	u8 stats;
+
+	if (page == NULL) {
+		pr_err("Invalid buffer\n");
+		return -EINVAL;
+	}
+
+	if (kstrtou8(page, 0, &stats) || stats > 1) {
+		pr_err("Wrong value. enter 0 to disable or 1 to enable.\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qdss->lock, flags);
+	qdss->debug_inface_enabled = stats;
+	spin_unlock_irqrestore(&qdss->lock, flags);
+	return len;
+}
+
+CONFIGFS_ATTR(qdss_, enable_debug_inface);
+static struct configfs_attribute *qdss_attrs[] = {
+ &qdss_attr_enable_debug_inface,
+ NULL,
+};
+
+static struct config_item_type qdss_func_type = {
+ .ct_item_ops = &qdss_item_ops,
+ .ct_attrs = qdss_attrs,
+ .ct_owner = THIS_MODULE,
+};
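+
+/*
+ * Illustrative configfs usage (paths and instance name are examples
+ * only): the part after "qdss." selects the channel name and the
+ * attribute above toggles the extra debug/control interface:
+ *
+ *	mkdir /config/usb_gadget/g1/functions/qdss.qdss
+ *	echo 1 > /config/usb_gadget/g1/functions/qdss.qdss/enable_debug_inface
+ */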
+
+static void usb_qdss_free_inst(struct usb_function_instance *fi)
+{
+ struct usb_qdss_opts *opts;
+
+ opts = container_of(fi, struct usb_qdss_opts, func_inst);
+ kfree(opts->usb_qdss);
+ kfree(opts);
+}
+
+static int usb_qdss_set_inst_name(struct usb_function_instance *f,
+				const char *name)
+{
+ struct usb_qdss_opts *opts =
+ container_of(f, struct usb_qdss_opts, func_inst);
+ char *ptr;
+ size_t name_len;
+ struct f_qdss *usb_qdss;
+
+ /* get channel_name as expected input qdss.<channel_name> */
+ name_len = strlen(name) + 1;
+ if (name_len > 15)
+ return -ENAMETOOLONG;
+
+ /* get channel name */
+ ptr = kstrndup(name, name_len, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("error:%ld\n", PTR_ERR(ptr));
+ return -ENOMEM;
+ }
+
+ opts->channel_name = ptr;
+ pr_debug("qdss: channel_name:%s\n", opts->channel_name);
+
+ usb_qdss = alloc_usb_qdss(opts->channel_name);
+ if (IS_ERR(usb_qdss)) {
+ pr_err("Failed to create usb_qdss port(%s)\n", opts->channel_name);
+ return -ENOMEM;
+ }
+
+ opts->usb_qdss = usb_qdss;
+ return 0;
+}
+
+static struct usb_function_instance *qdss_alloc_inst(void)
+{
+ struct usb_qdss_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.free_func_inst = usb_qdss_free_inst;
+ opts->func_inst.set_inst_name = usb_qdss_set_inst_name;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &qdss_func_type);
+ return &opts->func_inst;
+}
+
+static struct usb_function *qdss_alloc(struct usb_function_instance *fi)
+{
+ struct usb_qdss_opts *opts = to_fi_usb_qdss_opts(fi);
+ struct f_qdss *usb_qdss = opts->usb_qdss;
+
+ usb_qdss->port.function.name = "usb_qdss";
+ usb_qdss->port.function.fs_descriptors = qdss_hs_desc;
+ usb_qdss->port.function.hs_descriptors = qdss_hs_desc;
+ usb_qdss->port.function.strings = qdss_strings;
+ usb_qdss->port.function.bind = qdss_bind;
+ usb_qdss->port.function.unbind = qdss_unbind;
+ usb_qdss->port.function.set_alt = qdss_set_alt;
+ usb_qdss->port.function.disable = qdss_disable;
+ usb_qdss->port.function.setup = NULL;
+ usb_qdss->port.function.free_func = qdss_free_func;
+
+ return &usb_qdss->port.function;
+}
+
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
+static int __init usb_qdss_init(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&usb_qdss_ch_list);
+ ret = usb_function_register(&qdssusb_func);
+ if (ret) {
+ pr_err("%s: failed to register diag %d\n", __func__, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void __exit usb_qdss_exit(void)
+{
+ usb_function_unregister(&qdssusb_func);
+ qdss_cleanup();
+}
+
+module_init(usb_qdss_init);
+module_exit(usb_qdss_exit);
+MODULE_DESCRIPTION("USB QDSS Function Driver");
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
new file mode 100644
index 000000000000..fb7c01c0f939
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef _F_QDSS_H
+#define _F_QDSS_H
+
+#include <linux/kernel.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/usb_qdss.h>
+
+#include "u_rmnet.h"
+
+struct usb_qdss_bam_connect_info {
+ u32 usb_bam_pipe_idx;
+ u32 peer_pipe_idx;
+ unsigned long usb_bam_handle;
+ struct sps_mem_buffer *data_fifo;
+};
+
+struct gqdss {
+ struct usb_function function;
+ struct usb_ep *ctrl_out;
+ struct usb_ep *ctrl_in;
+ struct usb_ep *data;
+ int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+ void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+};
+
+/* struct f_qdss - USB qdss function driver private structure */
+struct f_qdss {
+ struct gqdss port;
+ struct usb_qdss_bam_connect_info bam_info;
+ struct usb_gadget *gadget;
+ short int port_num;
+ u8 ctrl_iface_id;
+ u8 data_iface_id;
+ int usb_connected;
+ bool debug_inface_enabled;
+ struct usb_request *endless_req;
+ struct usb_qdss_ch ch;
+ struct list_head ctrl_read_pool;
+ struct list_head ctrl_write_pool;
+
+ /* for mdm channel SW path */
+ struct list_head data_write_pool;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ spinlock_t lock;
+ unsigned int data_enabled:1;
+ unsigned int ctrl_in_enabled:1;
+ unsigned int ctrl_out_enabled:1;
+ struct workqueue_struct *wq;
+};
+
+struct usb_qdss_opts {
+ struct usb_function_instance func_inst;
+ struct f_qdss *usb_qdss;
+ char *channel_name;
+};
+
+int uninit_data(struct usb_ep *ep);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable);
+#endif
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
new file mode 100644
index 000000000000..6b54e8d4fe8b
--- /dev/null
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -0,0 +1,1271 @@
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/usb_bam.h>
+#include <linux/module.h>
+
+#include "u_rmnet.h"
+#include "u_data_ipa.h"
+#include "configfs.h"
+
+#define RMNET_NOTIFY_INTERVAL 5
+#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+#define ACM_CTRL_DTR (1 << 0)
+
+struct f_rmnet {
+ struct usb_function func;
+ enum qti_port_type qti_port_type;
+ enum ipa_func_type func_type;
+ struct grmnet port;
+ int ifc_id;
+ atomic_t online;
+ atomic_t ctrl_online;
+ struct usb_composite_dev *cdev;
+ struct gadget_ipa_port ipa_port;
+ spinlock_t lock;
+
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+
+ /* control info */
+ struct list_head cpkt_resp_q;
+ unsigned long notify_count;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
+ .bLength = sizeof(rmnet_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
+ .bLength = sizeof(rmnet_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
+ .bLength = sizeof(rmnet_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *rmnet_ss_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_in_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_out_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+static struct usb_interface_descriptor dpl_data_intf_desc = {
+ .bLength = sizeof(dpl_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor dpl_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dpl_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dpl_data_ep_comp_desc = {
+ .bLength = sizeof(dpl_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *dpl_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *dpl_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_ss_data_desc,
+ (struct usb_descriptor_header *) &dpl_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string dpl_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings dpl_string_table = {
+ .language = 0x0409,
+ .strings = dpl_string_defs,
+};
+
+static struct usb_gadget_strings *dpl_strings[] = {
+ &dpl_string_table,
+ NULL,
+};
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev);
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+ return container_of(f, struct f_rmnet, func);
+}
+
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+ return container_of(r, struct f_rmnet, port);
+}
+
+int name_to_prot(struct f_rmnet *dev, const char *name)
+{
+ if (!name)
+ goto error;
+
+ if (!strncasecmp("rmnet", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_RMNET;
+ dev->func_type = USB_IPA_FUNC_RMNET;
+ } else if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN)) {
+ dev->qti_port_type = QTI_PORT_DPL;
+ dev->func_type = USB_IPA_FUNC_DPL;
+ }
+ return 0;
+
+error:
+ return -EINVAL;
+}
+
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->buf = kmalloc(len, flags);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->length = len;
+
+ return req;
+}
+
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
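+/*
+ * Bring up both halves of the port: connect the QTI control channel,
+ * then look up the BAM pipe indices and connect the IPA data path.
+ * DPL uses BAM pipe 1 and exposes only an IN (device-to-host) data
+ * endpoint.
+ */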
+static int gport_rmnet_connect(struct f_rmnet *dev)
+{
+ int ret;
+ int src_connection_idx = 0, dst_connection_idx = 0;
+ struct usb_gadget *gadget = dev->cdev->gadget;
+ enum usb_ctrl usb_bam_type;
+ int bam_pipe_num = (dev->qti_port_type == QTI_PORT_DPL) ? 1 : 0;
+
+ ret = gqti_ctrl_connect(&dev->port, dev->qti_port_type, dev->ifc_id);
+ if (ret) {
+ pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ if (dev->qti_port_type == QTI_PORT_DPL)
+ dev->port.send_encap_cmd(QTI_PORT_DPL, NULL, 0);
+ dev->ipa_port.cdev = dev->cdev;
+ ipa_data_port_select(dev->func_type);
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+
+ if (dev->ipa_port.in) {
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (dev->ipa_port.out) {
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (dst_connection_idx < 0 || src_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n",
+ __func__);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ return -EINVAL;
+ }
+ ret = ipa_data_connect(&dev->ipa_port, dev->func_type,
+ src_connection_idx, dst_connection_idx);
+ if (ret) {
+ pr_err("%s: ipa_data_connect failed: err:%d\n",
+ __func__, ret);
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ return ret;
+ }
+ return 0;
+}
+
+static int gport_rmnet_disconnect(struct f_rmnet *dev)
+{
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ ipa_data_disconnect(&dev->ipa_port, dev->func_type);
+ return 0;
+}
+
+static void frmnet_free(struct usb_function *f)
+{
+ struct f_rmnet_opts *opts;
+
+ opts = container_of(f->fi, struct f_rmnet_opts, func_inst);
+ opts->refcnt--;
+}
+
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
+
+	pr_debug("%s: start unbinding, clearing descriptors\n", __func__);
+ if (gadget_is_superspeed(gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+
+ if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (dev->notify_req)
+ frmnet_free_req(dev->notify, dev->notify_req);
+}
+
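+/* Drop all queued control responses and reset the notification count. */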
+static void frmnet_purge_responses(struct f_rmnet *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: Purging responses\n", __func__);
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ dev->notify_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void frmnet_suspend(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+ remote_wakeup_allowed);
+
+ if (dev->notify) {
+ usb_ep_fifo_flush(dev->notify);
+ frmnet_purge_responses(dev);
+ }
+ ipa_data_suspend(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_resume(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+ remote_wakeup_allowed);
+
+ ipa_data_resume(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_disable(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+
+ pr_debug("%s: Disabling\n", __func__);
+ atomic_set(&dev->online, 0);
+ if (dev->notify) {
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+ frmnet_purge_responses(dev);
+ }
+
+ gport_rmnet_disconnect(dev);
+}
+
+static int
+frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret = 0;
+
+ pr_debug("%s:dev:%pK\n", __func__, dev);
+ dev->cdev = cdev;
+ if (dev->notify) {
+ if (dev->notify->driver_data) {
+ pr_debug("%s: reset port\n", __func__);
+ usb_ep_disable(dev->notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev,
+ "config_ep_by_speed failed for ep %s, result %d\n",
+ dev->notify->name, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(dev->notify);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, dev->notify->name, ret);
+ dev->notify->desc = NULL;
+ return ret;
+ }
+
+ dev->notify->driver_data = dev;
+ }
+
+ if (dev->ipa_port.in && !dev->ipa_port.in->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.in->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ if (dev->ipa_port.out && !dev->ipa_port.out->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.out->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ ret = gport_rmnet_connect(dev);
+ if (ret) {
+ pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
+ __func__, ret);
+ goto err_disable_ep;
+ }
+
+ atomic_set(&dev->online, 1);
+ /*
+ * In case notifications were aborted, but there are
+ * pending control packets in the response queue,
+ * re-add the notifications.
+ */
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ struct list_head *cpkt;
+
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ frmnet_ctrl_response_available(dev);
+ }
+
+ return ret;
+err_disable_ep:
+ if (dev->notify && dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
+
+ return ret;
+}
+
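+/*
+ * Signal RESPONSE_AVAILABLE on the interrupt endpoint. notify_count
+ * tracks outstanding responses: only the 0 -> 1 transition actually
+ * queues the request; the completion handler re-queues it for the
+ * remaining responses. On a queueing error the oldest queued response
+ * is dropped.
+ */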
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+ struct usb_request *req = dev->notify_req;
+ struct usb_cdc_notification *event;
+ unsigned long flags;
+ int ret;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s:dev:%pK\n", __func__, dev);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!atomic_read(&dev->online) || !req || !req->buf) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ if (++dev->notify_count != 1) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+
+ event = req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (ret) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ if (dev->notify_count > 0)
+ dev->notify_count--;
+ else {
+ pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", ret);
+ }
+}
+
+static void frmnet_connect(struct grmnet *gr)
+{
+ struct f_rmnet *dev;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_rmnet(gr);
+
+ atomic_set(&dev->ctrl_online, 1);
+}
+
+static void frmnet_disconnect(struct grmnet *gr)
+{
+ struct f_rmnet *dev;
+ struct usb_cdc_notification *event;
+ int status;
+
+ if (!gr) {
+ pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+ return;
+ }
+
+ dev = port_to_rmnet(gr);
+
+ atomic_set(&dev->ctrl_online, 0);
+
+ if (!atomic_read(&dev->online)) {
+ pr_debug("%s: nothing to do\n", __func__);
+ return;
+ }
+
+ usb_ep_fifo_flush(dev->notify);
+
+ event = dev->notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ event->wValue = cpu_to_le16(0);
+ event->wIndex = cpu_to_le16(dev->ifc_id);
+ event->wLength = cpu_to_le16(0);
+
+ status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+ if (status < 0) {
+ if (!atomic_read(&dev->online))
+ return;
+ pr_err("%s: rmnet notify ep enqueue error %d\n",
+ __func__, status);
+ }
+
+ frmnet_purge_responses(dev);
+}
+
+static int
+frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+ struct f_rmnet *dev;
+ struct rmnet_ctrl_pkt *cpkt;
+ unsigned long flags;
+
+ if (!gr || !buf) {
+ pr_err("%s: Invalid grmnet/buf, grmnet:%pK buf:%pK\n",
+ __func__, gr, buf);
+ return -ENODEV;
+ }
+ cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(cpkt->buf, buf, len);
+ cpkt->len = len;
+
+ dev = port_to_rmnet(gr);
+
+ pr_debug("%s: dev: %pK\n", __func__, dev);
+ if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+ rmnet_free_ctrl_pkt(cpkt);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ frmnet_ctrl_response_available(dev);
+
+ return 0;
+}
+
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+
+	if (dev->port.send_encap_cmd)
+		dev->port.send_encap_cmd(QTI_PORT_RMNET, req->buf, req->actual);
+}
+
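+/*
+ * Notify endpoint completion: on connection reset/shutdown clear the
+ * pending count; otherwise re-queue the notification while responses
+ * are still outstanding (notify_count > 0).
+ */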
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_rmnet *dev = req->context;
+ int status = req->status;
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: dev: %pK\n", __func__, dev);
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->notify_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ default:
+ pr_err("rmnet notify ep error %d\n", status);
+ /* FALLTHROUGH */
+ case 0:
+ if (!atomic_read(&dev->ctrl_online))
+ break;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->notify_count > 0) {
+ dev->notify_count--;
+ if (dev->notify_count == 0) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ } else {
+ pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock_irqsave(&dev->lock, flags);
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ if (dev->notify_count > 0)
+ dev->notify_count--;
+ else {
+ pr_err("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock,
+ flags);
+ break;
+ }
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ pr_debug("ep enqueue error %d\n", status);
+ }
+ break;
+ }
+}
+
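+/*
+ * Class-specific EP0 handling: encapsulated commands are forwarded to
+ * the QTI control port, encapsulated responses are served from
+ * cpkt_resp_q, and SET_CONTROL_LINE_STATE (DTR) is passed to the
+ * notify_modem callback.
+ */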
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = cdev->req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ int ret = -EOPNOTSUPP;
+
+ pr_debug("%s: dev: %pK\n", __func__, dev);
+ if (!atomic_read(&dev->online)) {
+ pr_warn("%s: usb cable is not connected\n", __func__);
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n"
+ , __func__);
+ ret = w_length;
+ req->complete = frmnet_cmd_complete;
+ req->context = dev;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__);
+ if (w_value) {
+ pr_err("%s: invalid w_value = %04x\n",
+ __func__, w_value);
+ goto invalid;
+ } else {
+ unsigned len;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ spin_lock(&dev->lock);
+ if (list_empty(&dev->cpkt_resp_q)) {
+ pr_err("ctrl resp queue empty: ");
+ pr_err("req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ ret = 0;
+ spin_unlock(&dev->lock);
+ goto invalid;
+ }
+
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&dev->lock);
+
+ len = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, len);
+ ret = len;
+
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d\n",
+ __func__, w_value & ACM_CTRL_DTR ? 1 : 0);
+ if (dev->port.notify_modem) {
+ dev->port.notify_modem(&dev->port,
+ QTI_PORT_RMNET, w_value);
+ }
+ ret = 0;
+
+ break;
+ default:
+
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (ret < w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
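+/*
+ * Build the USB side of the function from the bind info: allocate a
+ * string ID for the data interface, autoconfigure the IN/OUT/notify
+ * endpoints and allocate the notify request, then copy the FS/HS/SS
+ * descriptor lists.
+ */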
+static int ipa_update_function_bind_params(struct f_rmnet *dev,
+ struct usb_composite_dev *cdev, struct ipa_function_bind_info *info)
+{
+ struct usb_ep *ep;
+ struct usb_function *f = &dev->func;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (info->string_defs[0].id != 0)
+ goto skip_string_id_alloc;
+
+ if (info->data_str_idx >= 0 && info->data_desc) {
+ /* data interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->data_str_idx].id = status;
+ info->data_desc->iInterface = status;
+ }
+
+skip_string_id_alloc:
+ if (info->data_desc)
+ info->data_desc->bInterfaceNumber = dev->ifc_id;
+
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+ if (!ep) {
+ pr_err("%s: usb epin autoconfig failed\n",
+ __func__);
+ return -ENODEV;
+ }
+ dev->ipa_port.in = ep;
+ ep->driver_data = cdev;
+ }
+
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+ if (!ep) {
+ pr_err("%s: usb epout autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_out_fail;
+ }
+ dev->ipa_port.out = ep;
+ ep->driver_data = cdev;
+ }
+
+ if (info->fs_notify_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+ if (!ep) {
+ pr_err("%s: usb epnotify autoconfig failed\n",
+ __func__);
+ status = -ENODEV;
+ goto ep_auto_notify_fail;
+ }
+ dev->notify = ep;
+ ep->driver_data = cdev;
+ dev->notify_req = frmnet_alloc_req(ep,
+ sizeof(struct usb_cdc_notification),
+ GFP_KERNEL);
+ if (IS_ERR(dev->notify_req)) {
+ pr_err("%s: unable to allocate memory for notify req\n",
+ __func__);
+ status = -ENOMEM;
+ goto ep_notify_alloc_fail;
+ }
+
+ dev->notify_req->complete = frmnet_notify_complete;
+ dev->notify_req->context = dev;
+ }
+
+ status = -ENOMEM;
+ f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+ if (!f->fs_descriptors) {
+		pr_err("%s: no descriptors, usb_copy_descriptors (fs) failed\n",
+ __func__);
+ goto fail;
+ }
+
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ if (info->fs_in_desc && info->hs_in_desc)
+ info->hs_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+ if (info->fs_out_desc && info->hs_out_desc)
+ info->hs_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->hs_notify_desc)
+ info->hs_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+ if (!f->hs_descriptors) {
+			pr_err("%s: no hs_descriptors, usb_copy_descriptors (hs) failed\n",
+ __func__);
+ goto fail;
+ }
+ }
+
+ if (gadget_is_superspeed(cdev->gadget)) {
+ if (info->fs_in_desc && info->ss_in_desc)
+ info->ss_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+
+ if (info->fs_out_desc && info->ss_out_desc)
+ info->ss_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc && info->ss_notify_desc)
+ info->ss_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+ if (!f->ss_descriptors) {
+			pr_err("%s: no ss_descriptors, usb_copy_descriptors (ss) failed\n",
+ __func__);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+	if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (dev->notify_req) {
+		frmnet_free_req(dev->notify, dev->notify_req);
+		dev->notify_req = NULL;
+	}
+ep_notify_alloc_fail:
+	/* DPL has no notify or OUT endpoint; guard against NULL here */
+	if (dev->notify) {
+		dev->notify->driver_data = NULL;
+		dev->notify = NULL;
+	}
+ep_auto_notify_fail:
+	if (dev->ipa_port.out) {
+		dev->ipa_port.out->driver_data = NULL;
+		dev->ipa_port.out = NULL;
+	}
+ep_auto_out_fail:
+	if (dev->ipa_port.in) {
+		dev->ipa_port.in->driver_data = NULL;
+		dev->ipa_port.in = NULL;
+	}
+
+ return status;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret = -ENODEV;
+ struct ipa_function_bind_info info = {0};
+
+ pr_debug("%s: start binding\n", __func__);
+ dev->ifc_id = usb_interface_id(c, f);
+ if (dev->ifc_id < 0) {
+ pr_err("%s: unable to allocate ifc id, err:%d\n",
+ __func__, dev->ifc_id);
+ return dev->ifc_id;
+ }
+
+ info.data_str_idx = 0;
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ info.string_defs = rmnet_string_defs;
+ info.data_desc = &rmnet_interface_desc;
+ info.fs_in_desc = &rmnet_fs_in_desc;
+ info.fs_out_desc = &rmnet_fs_out_desc;
+ info.fs_notify_desc = &rmnet_fs_notify_desc;
+ info.hs_in_desc = &rmnet_hs_in_desc;
+ info.hs_out_desc = &rmnet_hs_out_desc;
+ info.hs_notify_desc = &rmnet_hs_notify_desc;
+ info.ss_in_desc = &rmnet_ss_in_desc;
+ info.ss_out_desc = &rmnet_ss_out_desc;
+ info.ss_notify_desc = &rmnet_ss_notify_desc;
+ info.fs_desc_hdr = rmnet_fs_function;
+ info.hs_desc_hdr = rmnet_hs_function;
+ info.ss_desc_hdr = rmnet_ss_function;
+ } else {
+ info.string_defs = dpl_string_defs;
+ info.data_desc = &dpl_data_intf_desc;
+ info.fs_in_desc = &dpl_hs_data_desc;
+ info.hs_in_desc = &dpl_hs_data_desc;
+ info.ss_in_desc = &dpl_ss_data_desc;
+ info.fs_desc_hdr = dpl_hs_data_only_desc;
+ info.hs_desc_hdr = dpl_hs_data_only_desc;
+ info.ss_desc_hdr = dpl_ss_data_only_desc;
+ }
+
+ ret = ipa_update_function_bind_params(dev, cdev, &info);
+
+ return ret;
+}
+
+static struct usb_function *frmnet_bind_config(struct usb_function_instance *fi)
+{
+ struct f_rmnet_opts *opts;
+ struct f_rmnet *dev;
+ struct usb_function *f;
+
+ opts = container_of(fi, struct f_rmnet_opts, func_inst);
+ opts->refcnt++;
+ dev = opts->dev;
+ f = &dev->func;
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ f->name = "rmnet";
+ f->strings = rmnet_strings;
+ } else {
+ f->name = "dpl";
+ f->strings = dpl_strings;
+ }
+
+ f->bind = frmnet_bind;
+ f->unbind = frmnet_unbind;
+ f->disable = frmnet_disable;
+ f->set_alt = frmnet_set_alt;
+ f->setup = frmnet_setup;
+ f->suspend = frmnet_suspend;
+ f->resume = frmnet_resume;
+ f->free_func = frmnet_free;
+ dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+ dev->port.disconnect = frmnet_disconnect;
+ dev->port.connect = frmnet_connect;
+
+ pr_debug("%s: complete\n", __func__);
+
+ return f;
+}
+
+static int rmnet_init(void)
+{
+ return gqti_ctrl_init();
+}
+
+static void frmnet_cleanup(void)
+{
+ gqti_ctrl_cleanup();
+}
+
+static void rmnet_free_inst(struct usb_function_instance *f)
+{
+ struct f_rmnet_opts *opts = container_of(f, struct f_rmnet_opts,
+ func_inst);
+ ipa_data_free(opts->dev->func_type);
+ kfree(opts->dev);
+ kfree(opts);
+}
+
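+/*
+ * The configfs instance name ("rmnet" or "dpl") selects the QTI port
+ * and IPA function type and allocates the per-instance state.
+ */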
+static int rmnet_set_inst_name(struct usb_function_instance *fi,
+ const char *name)
+{
+ int name_len, ret = 0;
+ struct f_rmnet *dev;
+ struct f_rmnet_opts *opts = container_of(fi,
+ struct f_rmnet_opts, func_inst);
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ /* Update qti->qti_port_type */
+ ret = name_to_prot(dev, name);
+ if (ret < 0) {
+ pr_err("%s: failed to find prot for %s instance\n",
+ __func__, name);
+ goto fail;
+ }
+
+ if (dev->qti_port_type >= QTI_NUM_PORTS ||
+ dev->func_type >= USB_IPA_NUM_FUNCS) {
+ pr_err("%s: invalid prot\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&dev->cpkt_resp_q);
+ ret = ipa_data_setup(dev->func_type);
+ if (ret)
+ goto fail;
+
+ opts->dev = dev;
+ return 0;
+
+fail:
+ kfree(dev);
+ return ret;
+}
+
+static inline struct f_rmnet_opts *to_f_rmnet_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_rmnet_opts,
+ func_inst.group);
+}
+
+static void rmnet_opts_release(struct config_item *item)
+{
+ struct f_rmnet_opts *opts = to_f_rmnet_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations rmnet_item_ops = {
+ .release = rmnet_opts_release,
+};
+
+static struct config_item_type rmnet_func_type = {
+ .ct_item_ops = &rmnet_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct usb_function_instance *rmnet_alloc_inst(void)
+{
+ struct f_rmnet_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = rmnet_set_inst_name;
+ opts->func_inst.free_func_inst = rmnet_free_inst;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &rmnet_func_type);
+ return &opts->func_inst;
+}
+
+static struct usb_function *rmnet_alloc(struct usb_function_instance *fi)
+{
+ return frmnet_bind_config(fi);
+}
+
+DECLARE_USB_FUNCTION(rmnet_bam, rmnet_alloc_inst, rmnet_alloc);
+
+static int __init usb_rmnet_init(void)
+{
+ int ret;
+
+ ret = rmnet_init();
+ if (!ret) {
+ ret = usb_function_register(&rmnet_bamusb_func);
+ if (ret) {
+ pr_err("%s: failed to register rmnet %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void __exit usb_rmnet_exit(void)
+{
+ usb_function_unregister(&rmnet_bamusb_func);
+ frmnet_cleanup();
+}
+
+module_init(usb_rmnet_init);
+module_exit(usb_rmnet_exit);
+MODULE_DESCRIPTION("USB RMNET Function Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 829204f7971d..9e1b838ce86f 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -465,6 +465,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
int status;
rndis_init_msg_type *buf;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error:%d\n",
+ __func__, req->status);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
@@ -1021,7 +1027,7 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
rndis->port.func.disable = rndis_disable;
rndis->port.func.free_func = rndis_free;
- params = rndis_register(rndis_response_available, rndis);
+ params = rndis_register(rndis_response_available, rndis, NULL);
if (IS_ERR(params)) {
kfree(rndis);
return ERR_CAST(params);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 6bb44d613bab..8f98c1089e12 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
@@ -14,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/usb/cdc.h>
#include "u_serial.h"
@@ -31,8 +33,35 @@ struct f_gser {
struct gserial port;
u8 data_id;
u8 port_num;
+ u8 pending;
+ spinlock_t lock;
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ struct usb_cdc_line_coding port_line_coding;
+ u8 online;
+ /* SetControlLineState request */
+ u16 port_handshake_bits;
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+ /* SerialState notification */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+ return container_of(p, struct f_gser, port);
+}
+
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
+
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
return container_of(f, struct f_gser, port.func);
@@ -46,15 +75,55 @@ static struct usb_interface_descriptor gser_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
- .bNumEndpoints = 2,
+ .bNumEndpoints = 3,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
+static struct usb_cdc_header_desc gser_header_desc = {
+ .bLength = sizeof(gser_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+ gser_call_mgmt_descriptor = {
+ .bLength = sizeof(gser_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+ .bmCapabilities = 0,
+ /* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor = {
+ .bLength = sizeof(gser_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+ .bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc = {
+ .bLength = sizeof(gser_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
/* full speed support: */
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
static struct usb_endpoint_descriptor gser_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -71,12 +140,25 @@ static struct usb_endpoint_descriptor gser_fs_out_desc = {
static struct usb_descriptor_header *gser_fs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_fs_notify_desc,
(struct usb_descriptor_header *) &gser_fs_in_desc,
(struct usb_descriptor_header *) &gser_fs_out_desc,
NULL,
};
/* high speed support: */
+static struct usb_endpoint_descriptor gser_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval = GS_LOG2_NOTIFY_INTERVAL + 4,
+};
static struct usb_endpoint_descriptor gser_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -94,6 +176,11 @@ static struct usb_endpoint_descriptor gser_hs_out_desc = {
static struct usb_descriptor_header *gser_hs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_hs_notify_desc,
(struct usb_descriptor_header *) &gser_hs_in_desc,
(struct usb_descriptor_header *) &gser_hs_out_desc,
NULL,
@@ -114,12 +201,36 @@ static struct usb_endpoint_descriptor gser_ss_out_desc = {
};
static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = {
- .bLength = sizeof gser_ss_bulk_comp_desc,
+ .bLength = sizeof(gser_ss_bulk_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
+static struct usb_endpoint_descriptor gser_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval = GS_LOG2_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gser_ss_notify_comp_desc = {
+ .bLength = sizeof(gser_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+
static struct usb_descriptor_header *gser_ss_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_comp_desc,
(struct usb_descriptor_header *) &gser_ss_in_desc,
(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &gser_ss_out_desc,
@@ -145,13 +256,131 @@ static struct usb_gadget_strings *gser_strings[] = {
};
/*-------------------------------------------------------------------------*/
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gser *gser = ep->driver_data;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ if (req->status != 0) {
+ dev_dbg(&cdev->gadget->dev, "gser ttyGS%d completion, err %d\n",
+ gser->port_num, req->status);
+ return;
+ }
+
+ /* normal completion */
+ if (req->actual != sizeof(gser->port_line_coding)) {
+ dev_dbg(&cdev->gadget->dev, "gser ttyGS%d short resp, len %d\n",
+ gser->port_num, req->actual);
+ usb_ep_set_halt(ep);
+ } else {
+ struct usb_cdc_line_coding *value = req->buf;
+
+ gser->port_line_coding = *value;
+ }
+}
+
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gser *gser = func_to_gser(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* SET_LINE_CODING ... just read and save what the host sends */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_LINE_CODING:
+ if (w_length != sizeof(struct usb_cdc_line_coding))
+ goto invalid;
+
+ value = w_length;
+ cdev->gadget->ep0->driver_data = gser;
+ req->complete = gser_complete_set_line_coding;
+ break;
+
+ /* GET_LINE_CODING ... return what host sent, or initial value */
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_GET_LINE_CODING:
+ value = min_t(unsigned, w_length,
+ sizeof(struct usb_cdc_line_coding));
+ memcpy(req->buf, &gser->port_line_coding, value);
+ break;
+
+ /* SET_CONTROL_LINE_STATE ... save what the host sent */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+ value = 0;
+ gser->port_handshake_bits = w_value;
+ pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d RST:%d\n",
+ __func__, w_value & ACM_CTRL_DTR ? 1 : 0,
+ w_value & ACM_CTRL_RTS ? 1 : 0);
+
+ if (gser->port.notify_modem)
+ gser->port.notify_modem(&gser->port, 0, w_value);
+
+ break;
+
+ default:
+invalid:
+ dev_dbg(&cdev->gadget->dev,
+ "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ dev_dbg(&cdev->gadget->dev,
+ "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+ gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+ gser->port_num, value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
/* we know alt == 0, so this is an activation or a reset */
+ if (gser->notify->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset generic ctl ttyGS%d\n", gser->port_num);
+ usb_ep_disable(gser->notify);
+ }
+
+ if (!gser->notify->desc) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->notify)) {
+ gser->notify->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ rc = usb_ep_enable(gser->notify);
+ if (rc) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ gser->notify->name, rc);
+ return rc;
+ }
+ gser->notify->driver_data = gser;
if (gser->port.in->enabled) {
dev_dbg(&cdev->gadget->dev,
@@ -169,7 +398,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
}
gserial_connect(&gser->port, gser->port_num);
- return 0;
+ gser->online = 1;
+ return rc;
}
static void gser_disable(struct usb_function *f)
@@ -180,6 +410,176 @@ static void gser_disable(struct usb_function *f)
dev_dbg(&cdev->gadget->dev,
"generic ttyGS%d deactivated\n", gser->port_num);
gserial_disconnect(&gser->port);
+ usb_ep_fifo_flush(gser->notify);
+ usb_ep_disable(gser->notify);
+ gser->online = 0;
+}
+
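+/*
+ * Queue a CDC notification (e.g. SERIAL_STATE) on the interrupt IN
+ * endpoint. notify_req doubles as a "request in flight" marker; it is
+ * handed back by the completion handler.
+ */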
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = gser->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ req = gser->notify_req;
+ gser->notify_req = NULL;
+ gser->pending = false;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(gser->data_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+ gser->port_num, status);
+ gser->notify_req = req;
+ }
+
+ return status;
+}
+
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+ int status;
+ unsigned long flags;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ spin_lock_irqsave(&gser->lock, flags);
+ if (gser->notify_req) {
+ DBG(cdev, "gser ttyGS%d serial state %04x\n",
+ gser->port_num, gser->serial_state);
+ status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &gser->serial_state,
+ sizeof(gser->serial_state));
+ } else {
+ gser->pending = true;
+ status = 0;
+ }
+
+ spin_unlock_irqrestore(&gser->lock, flags);
+ return status;
+}
+
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gser *gser = req->context;
+ u8 doit = false;
+ unsigned long flags;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+
+ spin_lock_irqsave(&gser->lock, flags);
+ if (req->status != -ESHUTDOWN)
+ doit = gser->pending;
+
+ gser->notify_req = req;
+ spin_unlock_irqrestore(&gser->lock, flags);
+
+ if (doit && gser->online)
+ gser_notify_serial_state(gser);
+}
+
+static void gser_connect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ gser_notify_serial_state(gser);
+}
+
+unsigned int gser_get_dtr(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_DTR)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gser_get_rts(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_RTS)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gser_send_carrier_detect(struct gserial *port, unsigned int yes)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_DCD;
+ if (yes)
+ state |= ACM_CTRL_DCD;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+unsigned int gser_send_ring_indicator(struct gserial *port, unsigned int yes)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_RI;
+ if (yes)
+ state |= ACM_CTRL_RI;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static void gser_disconnect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ gser_notify_serial_state(gser);
+}
+
+static int gser_send_break(struct gserial *port, int duration)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state = ctrl_bits;
+
+ return gser_notify_serial_state(gser);
}
/*-------------------------------------------------------------------------*/
@@ -225,6 +625,21 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
gser->port.out = ep;
+ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gser->notify = ep;
+
+ /* allocate notification */
+ gser->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!gser->notify_req)
+ goto fail;
+
+ gser->notify_req->complete = gser_notify_complete;
+ gser->notify_req->context = gser;
+
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -235,6 +650,11 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress;
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
+ gser_hs_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+ gser_ss_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
gser_ss_function);
if (status)
@@ -247,6 +667,9 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ if (gser->notify_req)
+ gs_free_req(gser->notify, gser->notify_req);
+
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
@@ -327,7 +750,10 @@ static void gser_free(struct usb_function *f)
static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_gser *gser = func_to_gser(f);
+
usb_free_all_descriptors(f);
+ gs_free_req(gser->notify, gser->notify_req);
}
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
@@ -342,6 +768,7 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
opts = container_of(fi, struct f_serial_opts, func_inst);
+ spin_lock_init(&gser->lock);
gser->port_num = opts->port_num;
gser->port.func.name = "gser";
@@ -352,6 +779,24 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
+ /* We support only three ports for now */
+ if (opts->port_num == 0)
+ gser->port.func.name = "modem";
+ else if (opts->port_num == 1)
+ gser->port.func.name = "nmea";
+ else
+ gser->port.func.name = "modem2";
+
+ gser->port.func.setup = gser_setup;
+ gser->port.connect = gser_connect;
+ gser->port.get_dtr = gser_get_dtr;
+ gser->port.get_rts = gser_get_rts;
+ gser->port.send_carrier_detect = gser_send_carrier_detect;
+ gser->port.send_ring_indicator = gser_send_ring_indicator;
+ gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+ gser->port.disconnect = gser_disconnect;
+ gser->port.send_break = gser_send_break;
+
return &gser->port.func;
}
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index d7d095781be1..77681c43318d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -298,9 +298,7 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_sourcesink *ss = ep->driver_data;
-
- return alloc_ep_req(ep, len, ss->buflen);
+ return alloc_ep_req(ep, len);
}
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
@@ -611,7 +609,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
req = ss_alloc_ep_req(ep, size);
} else {
ep = is_in ? ss->in_ep : ss->out_ep;
- req = ss_alloc_ep_req(ep, 0);
+ req = ss_alloc_ep_req(ep, ss->buflen);
}
if (!req)
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index a7782ce673d6..0445b2e1d8b5 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1,24 +1,38 @@
/*
- * f_audio.c -- USB Audio class function driver
- *
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
*
- * Enter bugs at http://blackfin.uclinux.org/
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
*
- * Licensed under the GPL-2 or later.
+ * This driver doesn't expect any real Audio codec to be present
+ * on the device - the audio streams are simply sinked to and
+ * sourced from a virtual ALSA sound card created.
+ *
+ * This file is based on f_uac1.c which is
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*/
-#include <linux/slab.h>
-#include <linux/kernel.h>
+#include <linux/usb/audio.h>
#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/atomic.h>
+#include "u_audio.h"
#include "u_uac1.h"
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+struct f_uac1 {
+ struct g_audio g_audio;
+ u8 ac_intf, as_in_intf, as_out_intf;
+ u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
+};
+
+static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
+{
+ return container_of(f, struct f_uac1, g_audio.func);
+}
/*
* DESCRIPTORS ... most are static, but strings and full
@@ -26,12 +40,17 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
*/
/*
- * We have two interfaces- AudioControl and AudioStreaming
- * TODO: only supcard playback currently
+ * We have three interfaces - one AudioControl and two AudioStreaming
+ *
+ * The driver implements a simple UAC_1 topology.
+ * USB-OUT -> IT_1 -> OT_2 -> ALSA_Capture
+ * ALSA_Playback -> IT_3 -> OT_4 -> USB-IN
*/
-#define F_AUDIO_AC_INTERFACE 0
-#define F_AUDIO_AS_INTERFACE 1
-#define F_AUDIO_NUM_INTERFACES 1
+#define F_AUDIO_AC_INTERFACE 0
+#define F_AUDIO_AS_OUT_INTERFACE 1
+#define F_AUDIO_AS_IN_INTERFACE 2
+/* Number of streaming interfaces */
+#define F_AUDIO_NUM_INTERFACES 2
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
@@ -46,89 +65,86 @@ static struct usb_interface_descriptor ac_interface_desc = {
* The number of AudioStreaming and MIDIStreaming interfaces
* in the Audio Interface Collection
*/
-DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
-/* 1 input terminal, 1 output terminal and 1 feature unit */
-#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
- + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* 2 input terminals and 2 output terminals */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+ + 2*UAC_DT_INPUT_TERMINAL_SIZE + 2*UAC_DT_OUTPUT_TERMINAL_SIZE)
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
- .bcdADC = __constant_cpu_to_le16(0x0100),
- .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bcdADC = cpu_to_le16(0x0100),
+ .wTotalLength = cpu_to_le16(UAC_DT_TOTAL_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
- .baInterfaceNr = {
- /* Interface number of the first AudioStream interface */
- [0] = 1,
- }
};
-#define INPUT_TERMINAL_ID 1
-static struct uac_input_terminal_descriptor input_terminal_desc = {
+#define USB_OUT_IT_ID 1
+static struct uac_input_terminal_descriptor usb_out_it_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = INPUT_TERMINAL_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bTerminalID = USB_OUT_IT_ID,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
-DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
-
-#define FEATURE_UNIT_ID 2
-static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
- .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+#define IO_OUT_OT_ID 2
+static struct uac1_output_terminal_descriptor io_out_ot_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = UAC_FEATURE_UNIT,
- .bUnitID = FEATURE_UNIT_ID,
- .bSourceID = INPUT_TERMINAL_ID,
- .bControlSize = 2,
- .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = IO_OUT_OT_ID,
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
+ .bAssocTerminal = 0,
+ .bSourceID = USB_OUT_IT_ID,
};
-static struct usb_audio_control mute_control = {
- .list = LIST_HEAD_INIT(mute_control.list),
- .name = "Mute Control",
- .type = UAC_FU_MUTE,
- /* Todo: add real Mute control code */
- .set = generic_set_cmd,
- .get = generic_get_cmd,
+#define IO_IN_IT_ID 3
+static struct uac_input_terminal_descriptor io_in_it_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = IO_IN_IT_ID,
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
+ .bAssocTerminal = 0,
+ .wChannelConfig = cpu_to_le16(0x3),
};
-static struct usb_audio_control volume_control = {
- .list = LIST_HEAD_INIT(volume_control.list),
- .name = "Volume Control",
- .type = UAC_FU_VOLUME,
- /* Todo: add real Volume control code */
- .set = generic_set_cmd,
- .get = generic_get_cmd,
+#define USB_IN_OT_ID 4
+static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = USB_IN_OT_ID,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
+ .bAssocTerminal = 0,
+ .bSourceID = IO_IN_IT_ID,
};
-static struct usb_audio_control_selector feature_unit = {
- .list = LIST_HEAD_INIT(feature_unit.list),
- .id = FEATURE_UNIT_ID,
- .name = "Mute & Volume Control",
- .type = UAC_FEATURE_UNIT,
- .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_out_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-#define OUTPUT_TERMINAL_ID 3
-static struct uac1_output_terminal_descriptor output_terminal_desc = {
- .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = OUTPUT_TERMINAL_ID,
- .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
- .bAssocTerminal = FEATURE_UNIT_ID,
- .bSourceID = FEATURE_UNIT_ID,
+static struct usb_interface_descriptor as_out_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-/* B.4.1 Standard AS Interface Descriptor */
-static struct usb_interface_descriptor as_interface_alt_0_desc = {
+static struct usb_interface_descriptor as_in_interface_alt_0_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
@@ -137,7 +153,7 @@ static struct usb_interface_descriptor as_interface_alt_0_desc = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-static struct usb_interface_descriptor as_interface_alt_1_desc = {
+static struct usb_interface_descriptor as_in_interface_alt_1_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
@@ -147,18 +163,27 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
-static struct uac1_as_header_descriptor as_header_desc = {
+static struct uac1_as_header_descriptor as_out_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = USB_OUT_IT_ID,
+ .bDelay = 1,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
+};
+
+static struct uac1_as_header_descriptor as_in_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = INPUT_TERMINAL_ID,
+ .bTerminalLink = USB_IN_OT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
-static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+static struct uac_format_type_i_discrete_descriptor_1 as_out_type_i_desc = {
.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -179,53 +204,147 @@ static struct usb_endpoint_descriptor as_out_ep_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor as_out_ep_comp_desc = {
+ .bLength = sizeof(as_out_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(1024),
+};
+
/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
- .bmAttributes = 1,
+ .bmAttributes = 1,
.bLockDelayUnits = 1,
- .wLockDelay = __constant_cpu_to_le16(1),
+ .wLockDelay = cpu_to_le16(1),
+};
+
+static struct uac_format_type_i_discrete_descriptor_1 as_in_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_ASYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+static struct usb_ss_ep_comp_descriptor as_in_ep_comp_desc = {
+ .bLength = sizeof(as_in_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(1024),
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 0,
+ .wLockDelay = 0,
};
static struct usb_descriptor_header *f_audio_desc[] = {
(struct usb_descriptor_header *)&ac_interface_desc,
(struct usb_descriptor_header *)&ac_header_desc,
- (struct usb_descriptor_header *)&input_terminal_desc,
- (struct usb_descriptor_header *)&output_terminal_desc,
- (struct usb_descriptor_header *)&feature_unit_desc,
+ (struct usb_descriptor_header *)&usb_out_it_desc,
+ (struct usb_descriptor_header *)&io_out_ot_desc,
+ (struct usb_descriptor_header *)&io_in_it_desc,
+ (struct usb_descriptor_header *)&usb_in_ot_desc,
+
+ (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_out_header_desc,
+
+ (struct usb_descriptor_header *)&as_out_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+
+ (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_in_header_desc,
+
+ (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *f_audio_ss_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&usb_out_it_desc,
+ (struct usb_descriptor_header *)&io_out_ot_desc,
+ (struct usb_descriptor_header *)&io_in_it_desc,
+ (struct usb_descriptor_header *)&usb_in_ot_desc,
- (struct usb_descriptor_header *)&as_interface_alt_0_desc,
- (struct usb_descriptor_header *)&as_interface_alt_1_desc,
- (struct usb_descriptor_header *)&as_header_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_out_header_desc,
- (struct usb_descriptor_header *)&as_type_i_desc,
+ (struct usb_descriptor_header *)&as_out_type_i_desc,
(struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_out_ep_comp_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
+
+ (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_in_header_desc,
+
+ (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_in_ep_comp_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
enum {
STR_AC_IF,
- STR_INPUT_TERMINAL,
- STR_INPUT_TERMINAL_CH_NAMES,
- STR_FEAT_DESC_0,
- STR_OUTPUT_TERMINAL,
- STR_AS_IF_ALT0,
- STR_AS_IF_ALT1,
+ STR_USB_OUT_IT,
+ STR_USB_OUT_IT_CH_NAMES,
+ STR_IO_OUT_OT,
+ STR_IO_IN_IT,
+ STR_IO_IN_IT_CH_NAMES,
+ STR_USB_IN_OT,
+ STR_AS_OUT_IF_ALT0,
+ STR_AS_OUT_IF_ALT1,
+ STR_AS_IN_IF_ALT0,
+ STR_AS_IN_IF_ALT1,
};
static struct usb_string strings_uac1[] = {
[STR_AC_IF].s = "AC Interface",
- [STR_INPUT_TERMINAL].s = "Input terminal",
- [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
- [STR_FEAT_DESC_0].s = "Volume control & mute",
- [STR_OUTPUT_TERMINAL].s = "Output terminal",
- [STR_AS_IF_ALT0].s = "AS Interface",
- [STR_AS_IF_ALT1].s = "AS Interface",
+ [STR_USB_OUT_IT].s = "Playback Input terminal",
+ [STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
+ [STR_IO_OUT_OT].s = "Playback Output terminal",
+ [STR_IO_IN_IT].s = "Capture Input terminal",
+ [STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
+ [STR_USB_IN_OT].s = "Capture Output terminal",
+ [STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
+ [STR_AS_OUT_IF_ALT1].s = "Playback Active",
+ [STR_AS_IN_IF_ALT0].s = "Capture Inactive",
+ [STR_AS_IN_IF_ALT1].s = "Capture Active",
{ },
};
@@ -243,218 +362,6 @@ static struct usb_gadget_strings *uac1_strings[] = {
* This function is an ALSA sound card following USB Audio Class Spec 1.0.
*/
-/*-------------------------------------------------------------------------*/
-struct f_audio_buf {
- u8 *buf;
- int actual;
- struct list_head list;
-};
-
-static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
-{
- struct f_audio_buf *copy_buf;
-
- copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
- if (!copy_buf)
- return ERR_PTR(-ENOMEM);
-
- copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
- if (!copy_buf->buf) {
- kfree(copy_buf);
- return ERR_PTR(-ENOMEM);
- }
-
- return copy_buf;
-}
-
-static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
-{
- kfree(audio_buf->buf);
- kfree(audio_buf);
-}
-/*-------------------------------------------------------------------------*/
-
-struct f_audio {
- struct gaudio card;
-
- /* endpoints handle full and/or high speeds */
- struct usb_ep *out_ep;
-
- spinlock_t lock;
- struct f_audio_buf *copy_buf;
- struct work_struct playback_work;
- struct list_head play_queue;
-
- /* Control Set command */
- struct list_head cs;
- u8 set_cmd;
- struct usb_audio_control *set_con;
-};
-
-static inline struct f_audio *func_to_audio(struct usb_function *f)
-{
- return container_of(f, struct f_audio, card.func);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void f_audio_playback_work(struct work_struct *data)
-{
- struct f_audio *audio = container_of(data, struct f_audio,
- playback_work);
- struct f_audio_buf *play_buf;
-
- spin_lock_irq(&audio->lock);
- if (list_empty(&audio->play_queue)) {
- spin_unlock_irq(&audio->lock);
- return;
- }
- play_buf = list_first_entry(&audio->play_queue,
- struct f_audio_buf, list);
- list_del(&play_buf->list);
- spin_unlock_irq(&audio->lock);
-
- u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
- f_audio_buffer_free(play_buf);
-}
-
-static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_audio *audio = req->context;
- struct usb_composite_dev *cdev = audio->card.func.config->cdev;
- struct f_audio_buf *copy_buf = audio->copy_buf;
- struct f_uac1_opts *opts;
- int audio_buf_size;
- int err;
-
- opts = container_of(audio->card.func.fi, struct f_uac1_opts,
- func_inst);
- audio_buf_size = opts->audio_buf_size;
-
- if (!copy_buf)
- return -EINVAL;
-
- /* Copy buffer is full, add it to the play_queue */
- if (audio_buf_size - copy_buf->actual < req->actual) {
- spin_lock_irq(&audio->lock);
- list_add_tail(&copy_buf->list, &audio->play_queue);
- spin_unlock_irq(&audio->lock);
- schedule_work(&audio->playback_work);
- copy_buf = f_audio_buffer_alloc(audio_buf_size);
- if (IS_ERR(copy_buf))
- return -ENOMEM;
- }
-
- memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
- copy_buf->actual += req->actual;
- audio->copy_buf = copy_buf;
-
- err = usb_ep_queue(ep, req, GFP_ATOMIC);
- if (err)
- ERROR(cdev, "%s queue req: %d\n", ep->name, err);
-
- return 0;
-
-}
-
-static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_audio *audio = req->context;
- int status = req->status;
- u32 data = 0;
- struct usb_ep *out_ep = audio->out_ep;
-
- switch (status) {
-
- case 0: /* normal completion? */
- if (ep == out_ep)
- f_audio_out_ep_complete(ep, req);
- else if (audio->set_con) {
- memcpy(&data, req->buf, req->length);
- audio->set_con->set(audio->set_con, audio->set_cmd,
- le16_to_cpu(data));
- audio->set_con = NULL;
- }
- break;
- default:
- break;
- }
-}
-
-static int audio_set_intf_req(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_audio *audio = func_to_audio(f);
- struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_request *req = cdev->req;
- u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u8 con_sel = (w_value >> 8) & 0xFF;
- u8 cmd = (ctrl->bRequest & 0x0F);
- struct usb_audio_control_selector *cs;
- struct usb_audio_control *con;
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
- ctrl->bRequest, w_value, len, id);
-
- list_for_each_entry(cs, &audio->cs, list) {
- if (cs->id == id) {
- list_for_each_entry(con, &cs->control, list) {
- if (con->type == con_sel) {
- audio->set_con = con;
- break;
- }
- }
- break;
- }
- }
-
- audio->set_cmd = cmd;
- req->context = audio;
- req->complete = f_audio_complete;
-
- return len;
-}
-
-static int audio_get_intf_req(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_audio *audio = func_to_audio(f);
- struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_request *req = cdev->req;
- int value = -EOPNOTSUPP;
- u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u8 con_sel = (w_value >> 8) & 0xFF;
- u8 cmd = (ctrl->bRequest & 0x0F);
- struct usb_audio_control_selector *cs;
- struct usb_audio_control *con;
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
- ctrl->bRequest, w_value, len, id);
-
- list_for_each_entry(cs, &audio->cs, list) {
- if (cs->id == id) {
- list_for_each_entry(con, &cs->control, list) {
- if (con->type == con_sel && con->get) {
- value = con->get(con, cmd);
- break;
- }
- }
- break;
- }
- }
-
- req->context = audio;
- req->complete = f_audio_complete;
- len = min_t(size_t, sizeof(value), len);
- memcpy(req->buf, &value, len);
-
- return len;
-}
-
static int audio_set_endpoint_req(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -533,14 +440,6 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
* activation uses set_alt().
*/
switch (ctrl->bRequestType) {
- case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
- value = audio_set_intf_req(f, ctrl);
- break;
-
- case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
- value = audio_get_intf_req(f, ctrl);
- break;
-
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_set_endpoint_req(f, ctrl);
break;
@@ -573,143 +472,161 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
- struct f_audio *audio = func_to_audio(f);
struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_ep *out_ep = audio->out_ep;
- struct usb_request *req;
- struct f_uac1_opts *opts;
- int req_buf_size, req_count, audio_buf_size;
- int i = 0, err = 0;
-
- DBG(cdev, "intf %d, alt %d\n", intf, alt);
+ struct usb_gadget *gadget = cdev->gadget;
+ struct device *dev = &gadget->dev;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+ int ret = 0;
+
+ /* No i/f has more than 2 alt settings */
+ if (alt > 1) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
- opts = container_of(f->fi, struct f_uac1_opts, func_inst);
- req_buf_size = opts->req_buf_size;
- req_count = opts->req_count;
- audio_buf_size = opts->audio_buf_size;
-
- if (intf == 1) {
- if (alt == 1) {
- err = config_ep_by_speed(cdev->gadget, f, out_ep);
- if (err)
- return err;
-
- usb_ep_enable(out_ep);
- audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
- if (IS_ERR(audio->copy_buf))
- return -ENOMEM;
-
- /*
- * allocate a bunch of read buffers
- * and queue them all at once.
- */
- for (i = 0; i < req_count && err == 0; i++) {
- req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
- if (req) {
- req->buf = kzalloc(req_buf_size,
- GFP_ATOMIC);
- if (req->buf) {
- req->length = req_buf_size;
- req->context = audio;
- req->complete =
- f_audio_complete;
- err = usb_ep_queue(out_ep,
- req, GFP_ATOMIC);
- if (err)
- ERROR(cdev,
- "%s queue req: %d\n",
- out_ep->name, err);
- } else
- err = -ENOMEM;
- } else
- err = -ENOMEM;
- }
-
- } else {
- struct f_audio_buf *copy_buf = audio->copy_buf;
- if (copy_buf) {
- list_add_tail(&copy_buf->list,
- &audio->play_queue);
- schedule_work(&audio->playback_work);
- }
+ if (intf == uac1->ac_intf) {
+ /* Control I/f has only 1 AltSetting - 0 */
+ if (alt) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
}
+ return 0;
+ }
+
+ if (intf == uac1->as_out_intf) {
+ uac1->as_out_alt = alt;
+
+ if (alt)
+ ret = u_audio_start_capture(&uac1->g_audio);
+ else
+ u_audio_stop_capture(&uac1->g_audio);
+ } else if (intf == uac1->as_in_intf) {
+ uac1->as_in_alt = alt;
+
+ if (alt)
+ ret = u_audio_start_playback(&uac1->g_audio);
+ else
+ u_audio_stop_playback(&uac1->g_audio);
+ } else {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
}
- return err;
+ return ret;
}
-static void f_audio_disable(struct usb_function *f)
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
{
- return;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_gadget *gadget = cdev->gadget;
+ struct device *dev = &gadget->dev;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+
+ if (intf == uac1->ac_intf)
+ return uac1->ac_alt;
+ else if (intf == uac1->as_out_intf)
+ return uac1->as_out_alt;
+ else if (intf == uac1->as_in_intf)
+ return uac1->as_in_alt;
+ else
+ dev_err(dev, "%s:%d Invalid Interface %d!\n",
+ __func__, __LINE__, intf);
+
+ return -EINVAL;
}
-/*-------------------------------------------------------------------------*/
-static void f_audio_build_desc(struct f_audio *audio)
+static void f_audio_disable(struct usb_function *f)
{
- struct gaudio *card = &audio->card;
- u8 *sam_freq;
- int rate;
-
- /* Set channel numbers */
- input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
- as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
-
- /* Set sample rates */
- rate = u_audio_get_playback_rate(card);
- sam_freq = as_type_i_desc.tSamFreq[0];
- memcpy(sam_freq, &rate, 3);
+ struct f_uac1 *uac1 = func_to_uac1(f);
- /* Todo: Set Sample bits and other parameters */
+ uac1->as_out_alt = 0;
+ uac1->as_in_alt = 0;
- return;
+ u_audio_stop_capture(&uac1->g_audio);
+ u_audio_stop_playback(&uac1->g_audio);
}
+/*-------------------------------------------------------------------------*/
+
/* audio function driver setup/binding */
-static int
-f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
- struct usb_composite_dev *cdev = c->cdev;
- struct f_audio *audio = func_to_audio(f);
- struct usb_string *us;
- int status;
- struct usb_ep *ep = NULL;
- struct f_uac1_opts *audio_opts;
+ struct usb_composite_dev *cdev = c->cdev;
+ struct usb_gadget *gadget = cdev->gadget;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+ struct g_audio *audio = func_to_g_audio(f);
+ struct f_uac1_opts *audio_opts;
+ struct usb_ep *ep = NULL;
+ struct usb_string *us;
+ u8 *sam_freq;
+ int rate;
+ int status;
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
- audio->card.gadget = c->cdev->gadget;
- /* set up ASLA audio devices */
- if (!audio_opts->bound) {
- status = gaudio_setup(&audio->card);
- if (status < 0)
- return status;
- audio_opts->bound = true;
- }
+
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
if (IS_ERR(us))
return PTR_ERR(us);
ac_interface_desc.iInterface = us[STR_AC_IF].id;
- input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
- input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
- feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
- output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
- as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
- as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+ usb_out_it_desc.iTerminal = us[STR_USB_OUT_IT].id;
+ usb_out_it_desc.iChannelNames = us[STR_USB_OUT_IT_CH_NAMES].id;
+ io_out_ot_desc.iTerminal = us[STR_IO_OUT_OT].id;
+ as_out_interface_alt_0_desc.iInterface = us[STR_AS_OUT_IF_ALT0].id;
+ as_out_interface_alt_1_desc.iInterface = us[STR_AS_OUT_IF_ALT1].id;
+ io_in_it_desc.iTerminal = us[STR_IO_IN_IT].id;
+ io_in_it_desc.iChannelNames = us[STR_IO_IN_IT_CH_NAMES].id;
+ usb_in_ot_desc.iTerminal = us[STR_USB_IN_OT].id;
+ as_in_interface_alt_0_desc.iInterface = us[STR_AS_IN_IF_ALT0].id;
+ as_in_interface_alt_1_desc.iInterface = us[STR_AS_IN_IF_ALT1].id;
+ /* Set channel numbers */
+ usb_out_it_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+ usb_out_it_desc.wChannelConfig = cpu_to_le16(audio_opts->c_chmask);
+ as_out_type_i_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+ as_out_type_i_desc.bSubframeSize = audio_opts->c_ssize;
+ as_out_type_i_desc.bBitResolution = audio_opts->c_ssize * 8;
+ io_in_it_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+ io_in_it_desc.wChannelConfig = cpu_to_le16(audio_opts->p_chmask);
+ as_in_type_i_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+ as_in_type_i_desc.bSubframeSize = audio_opts->p_ssize;
+ as_in_type_i_desc.bBitResolution = audio_opts->p_ssize * 8;
- f_audio_build_desc(audio);
+ /* Set sample rates */
+ rate = audio_opts->c_srate;
+ sam_freq = as_out_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+ rate = audio_opts->p_srate;
+ sam_freq = as_in_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ac_interface_desc.bInterfaceNumber = status;
+ uac1->ac_intf = status;
+ uac1->ac_alt = 0;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_header_desc.baInterfaceNr[0] = status;
+ as_out_interface_alt_0_desc.bInterfaceNumber = status;
+ as_out_interface_alt_1_desc.bInterfaceNumber = status;
+ uac1->as_out_intf = status;
+ uac1->as_out_alt = 0;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
- as_interface_alt_0_desc.bInterfaceNumber = status;
- as_interface_alt_1_desc.bInterfaceNumber = status;
+ ac_header_desc.baInterfaceNr[1] = status;
+ as_in_interface_alt_0_desc.bInterfaceNumber = status;
+ as_in_interface_alt_1_desc.bInterfaceNumber = status;
+ uac1->as_in_intf = status;
+ uac1->as_in_alt = 0;
+
+ audio->gadget = gadget;
status = -ENODEV;
@@ -720,51 +637,42 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
audio->out_ep = ep;
audio->out_ep->desc = &as_out_ep_desc;
- status = -ENOMEM;
+ ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->in_ep = ep;
+ audio->in_ep->desc = &as_in_ep_desc;
/* copy descriptors, and track endpoint copies */
- status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL);
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc,
+ f_audio_ss_desc);
if (status)
goto fail;
+
+ audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
+ audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
+ audio->params.c_chmask = audio_opts->c_chmask;
+ audio->params.c_srate = audio_opts->c_srate;
+ audio->params.c_ssize = audio_opts->c_ssize;
+ audio->params.p_chmask = audio_opts->p_chmask;
+ audio->params.p_srate = audio_opts->p_srate;
+ audio->params.p_ssize = audio_opts->p_ssize;
+ audio->params.req_number = audio_opts->req_number;
+
+ status = g_audio_setup(audio, "UAC1_PCM", "UAC1_Gadget");
+ if (status)
+ goto err_card_register;
+
return 0;
+err_card_register:
+ usb_free_all_descriptors(f);
fail:
- gaudio_cleanup(&audio->card);
return status;
}
/*-------------------------------------------------------------------------*/
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
-{
- con->data[cmd] = value;
-
- return 0;
-}
-
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
-{
- return con->data[cmd];
-}
-
-/* Todo: add more control selecotor dynamically */
-static int control_selector_init(struct f_audio *audio)
-{
- INIT_LIST_HEAD(&audio->cs);
- list_add(&feature_unit.list, &audio->cs);
-
- INIT_LIST_HEAD(&feature_unit.control);
- list_add(&mute_control.list, &feature_unit.control);
- list_add(&volume_control.list, &feature_unit.control);
-
- volume_control.data[UAC__CUR] = 0xffc0;
- volume_control.data[UAC__MIN] = 0xe3a0;
- volume_control.data[UAC__MAX] = 0xfff0;
- volume_control.data[UAC__RES] = 0x0030;
-
- return 0;
-}
-
static inline struct f_uac1_opts *to_f_uac1_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uac1_opts,
@@ -782,9 +690,10 @@ static struct configfs_item_operations f_uac1_item_ops = {
.release = f_uac1_attr_release,
};
-#define UAC1_INT_ATTRIBUTE(name) \
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
- char *page) \
+#define UAC1_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show( \
+ struct config_item *item, \
+ char *page) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int result; \
@@ -796,7 +705,8 @@ static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
return result; \
} \
\
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+static ssize_t f_uac1_opts_##name##_store( \
+ struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
@@ -823,64 +733,22 @@ end: \
\
CONFIGFS_ATTR(f_uac1_opts_, name)
-UAC1_INT_ATTRIBUTE(req_buf_size);
-UAC1_INT_ATTRIBUTE(req_count);
-UAC1_INT_ATTRIBUTE(audio_buf_size);
-
-#define UAC1_STR_ATTRIBUTE(name) \
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
- char *page) \
-{ \
- struct f_uac1_opts *opts = to_f_uac1_opts(item); \
- int result; \
- \
- mutex_lock(&opts->lock); \
- result = sprintf(page, "%s\n", opts->name); \
- mutex_unlock(&opts->lock); \
- \
- return result; \
-} \
- \
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
- const char *page, size_t len) \
-{ \
- struct f_uac1_opts *opts = to_f_uac1_opts(item); \
- int ret = -EBUSY; \
- char *tmp; \
- \
- mutex_lock(&opts->lock); \
- if (opts->refcnt) \
- goto end; \
- \
- tmp = kstrndup(page, len, GFP_KERNEL); \
- if (tmp) { \
- ret = -ENOMEM; \
- goto end; \
- } \
- if (opts->name##_alloc) \
- kfree(opts->name); \
- opts->name##_alloc = true; \
- opts->name = tmp; \
- ret = len; \
- \
-end: \
- mutex_unlock(&opts->lock); \
- return ret; \
-} \
- \
-CONFIGFS_ATTR(f_uac1_opts_, name)
-
-UAC1_STR_ATTRIBUTE(fn_play);
-UAC1_STR_ATTRIBUTE(fn_cap);
-UAC1_STR_ATTRIBUTE(fn_cntl);
+UAC1_ATTRIBUTE(c_chmask);
+UAC1_ATTRIBUTE(c_srate);
+UAC1_ATTRIBUTE(c_ssize);
+UAC1_ATTRIBUTE(p_chmask);
+UAC1_ATTRIBUTE(p_srate);
+UAC1_ATTRIBUTE(p_ssize);
+UAC1_ATTRIBUTE(req_number);
static struct configfs_attribute *f_uac1_attrs[] = {
- &f_uac1_opts_attr_req_buf_size,
- &f_uac1_opts_attr_req_count,
- &f_uac1_opts_attr_audio_buf_size,
- &f_uac1_opts_attr_fn_play,
- &f_uac1_opts_attr_fn_cap,
- &f_uac1_opts_attr_fn_cntl,
+ &f_uac1_opts_attr_c_chmask,
+ &f_uac1_opts_attr_c_srate,
+ &f_uac1_opts_attr_c_ssize,
+ &f_uac1_opts_attr_p_chmask,
+ &f_uac1_opts_attr_p_srate,
+ &f_uac1_opts_attr_p_ssize,
+ &f_uac1_opts_attr_req_number,
NULL,
};
@@ -895,12 +763,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
- if (opts->fn_play_alloc)
- kfree(opts->fn_play);
- if (opts->fn_cap_alloc)
- kfree(opts->fn_cap);
- if (opts->fn_cntl_alloc)
- kfree(opts->fn_cntl);
kfree(opts);
}
@@ -918,21 +780,22 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
config_group_init_type_name(&opts->func_inst.group, "",
&f_uac1_func_type);
- opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
- opts->req_count = UAC1_REQ_COUNT;
- opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
- opts->fn_play = FILE_PCM_PLAYBACK;
- opts->fn_cap = FILE_PCM_CAPTURE;
- opts->fn_cntl = FILE_CONTROL;
+ opts->c_chmask = UAC1_DEF_CCHMASK;
+ opts->c_srate = UAC1_DEF_CSRATE;
+ opts->c_ssize = UAC1_DEF_CSSIZE;
+ opts->p_chmask = UAC1_DEF_PCHMASK;
+ opts->p_srate = UAC1_DEF_PSRATE;
+ opts->p_ssize = UAC1_DEF_PSSIZE;
+ opts->req_number = UAC1_DEF_REQ_NUM;
return &opts->func_inst;
}
static void f_audio_free(struct usb_function *f)
{
- struct f_audio *audio = func_to_audio(f);
+ struct g_audio *audio;
struct f_uac1_opts *opts;
- gaudio_cleanup(&audio->card);
+ audio = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
@@ -942,42 +805,54 @@ static void f_audio_free(struct usb_function *f)
static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct g_audio *audio = func_to_g_audio(f);
+
+ g_audio_cleanup(audio);
usb_free_all_descriptors(f);
+
+ audio->gadget = NULL;
}
static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
{
- struct f_audio *audio;
+ struct f_uac1 *uac1;
struct f_uac1_opts *opts;
/* allocate and initialize one new instance */
- audio = kzalloc(sizeof(*audio), GFP_KERNEL);
- if (!audio)
+ uac1 = kzalloc(sizeof(*uac1), GFP_KERNEL);
+ if (!uac1)
return ERR_PTR(-ENOMEM);
- audio->card.func.name = "g_audio";
-
opts = container_of(fi, struct f_uac1_opts, func_inst);
mutex_lock(&opts->lock);
++opts->refcnt;
mutex_unlock(&opts->lock);
- INIT_LIST_HEAD(&audio->play_queue);
- spin_lock_init(&audio->lock);
- audio->card.func.bind = f_audio_bind;
- audio->card.func.unbind = f_audio_unbind;
- audio->card.func.set_alt = f_audio_set_alt;
- audio->card.func.setup = f_audio_setup;
- audio->card.func.disable = f_audio_disable;
- audio->card.func.free_func = f_audio_free;
+ uac1->g_audio.func.name = "uac1_func";
+ uac1->g_audio.func.bind = f_audio_bind;
+ uac1->g_audio.func.unbind = f_audio_unbind;
+ uac1->g_audio.func.set_alt = f_audio_set_alt;
+ uac1->g_audio.func.get_alt = f_audio_get_alt;
+ uac1->g_audio.func.setup = f_audio_setup;
+ uac1->g_audio.func.disable = f_audio_disable;
+ uac1->g_audio.func.free_func = f_audio_free;
+
+ return &uac1->g_audio.func;
+}
- control_selector_init(audio);
+DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
- INIT_WORK(&audio->playback_work, f_audio_playback_work);
+static int __init afunc_init(void)
+{
+ return usb_function_register(&uac1usb_func);
+}
- return &audio->card.func;
+static void __exit afunc_exit(void)
+{
+ usb_function_unregister(&uac1usb_func);
}
-DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
+module_init(afunc_init);
+module_exit(afunc_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bryan Wu");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
new file mode 100644
index 000000000000..d6c60c08d511
--- /dev/null
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -0,0 +1,1022 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+
+#include "u_uac1_legacy.h"
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces - AudioControl and AudioStreaming
+ * TODO: only supports playback currently
+ */
+#define F_AUDIO_AC_INTERFACE 0
+#define F_AUDIO_AS_INTERFACE 1
+#define F_AUDIO_NUM_INTERFACES 1
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+/*
+ * The number of AudioStreaming and MIDIStreaming interfaces
+ * in the Audio Interface Collection
+ */
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+ + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bInCollection = F_AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ /* Interface number of the first AudioStream interface */
+ [0] = 1,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+ .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+ .list = LIST_HEAD_INIT(mute_control.list),
+ .name = "Mute Control",
+ .type = UAC_FU_MUTE,
+ /* Todo: add real Mute control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+ .list = LIST_HEAD_INIT(volume_control.list),
+ .name = "Volume Control",
+ .type = UAC_FU_VOLUME,
+ /* Todo: add real Volume control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+ .list = LIST_HEAD_INIT(feature_unit.list),
+ .id = FEATURE_UNIT_ID,
+ .name = "Mute & Volume Control",
+ .type = UAC_FEATURE_UNIT,
+ .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+ NULL,
+};
+
+enum {
+ STR_AC_IF,
+ STR_INPUT_TERMINAL,
+ STR_INPUT_TERMINAL_CH_NAMES,
+ STR_FEAT_DESC_0,
+ STR_OUTPUT_TERMINAL,
+ STR_AS_IF_ALT0,
+ STR_AS_IF_ALT1,
+};
+
+static struct usb_string strings_uac1[] = {
+ [STR_AC_IF].s = "AC Interface",
+ [STR_INPUT_TERMINAL].s = "Input terminal",
+ [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
+ [STR_FEAT_DESC_0].s = "Volume control & mute",
+ [STR_OUTPUT_TERMINAL].s = "Output terminal",
+ [STR_AS_IF_ALT0].s = "AS Interface",
+ [STR_AS_IF_ALT1].s = "AS Interface",
+ { },
+};
+
+static struct usb_gadget_strings str_uac1 = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_uac1,
+};
+
+static struct usb_gadget_strings *uac1_strings[] = {
+ &str_uac1,
+ NULL,
+};
+
+/*
+ * This function is an ALSA sound card following USB Audio Class Spec 1.0.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+ u8 *buf;
+ int actual;
+ struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+ struct f_audio_buf *copy_buf;
+
+ copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+ if (!copy_buf)
+ return ERR_PTR(-ENOMEM);
+
+ copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+ if (!copy_buf->buf) {
+ kfree(copy_buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+ kfree(audio_buf->buf);
+ kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+ struct gaudio card;
+
+ u8 ac_intf, ac_alt;
+ u8 as_intf, as_alt;
+
+ /* endpoints handle full and/or high speeds */
+ struct usb_ep *out_ep;
+
+ spinlock_t lock;
+ struct f_audio_buf *copy_buf;
+ struct work_struct playback_work;
+ struct list_head play_queue;
+
+ /* Control Set command */
+ struct list_head cs;
+ u8 set_cmd;
+ struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data)
+{
+ struct f_audio *audio = container_of(data, struct f_audio,
+ playback_work);
+ struct f_audio_buf *play_buf;
+
+ spin_lock_irq(&audio->lock);
+ if (list_empty(&audio->play_queue)) {
+ spin_unlock_irq(&audio->lock);
+ return;
+ }
+ play_buf = list_first_entry(&audio->play_queue,
+ struct f_audio_buf, list);
+ list_del(&play_buf->list);
+ spin_unlock_irq(&audio->lock);
+
+ u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+ f_audio_buffer_free(play_buf);
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ struct f_uac1_legacy_opts *opts;
+ int audio_buf_size;
+ int err;
+
+ opts = container_of(audio->card.func.fi, struct f_uac1_legacy_opts,
+ func_inst);
+ audio_buf_size = opts->audio_buf_size;
+
+ if (!copy_buf)
+ return -EINVAL;
+
+ /* Copy buffer is full, add it to the play_queue */
+ if (audio_buf_size - copy_buf->actual < req->actual) {
+ spin_lock_irq(&audio->lock);
+ list_add_tail(&copy_buf->list, &audio->play_queue);
+ spin_unlock_irq(&audio->lock);
+ schedule_work(&audio->playback_work);
+ copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (IS_ERR(copy_buf))
+ return -ENOMEM;
+ }
+
+ memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+ copy_buf->actual += req->actual;
+ audio->copy_buf = copy_buf;
+
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+ return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ int status = req->status;
+ u32 data = 0;
+ struct usb_ep *out_ep = audio->out_ep;
+
+ switch (status) {
+
+ case 0: /* normal completion? */
+ if (ep == out_ep)
+ f_audio_out_ep_complete(ep, req);
+ else if (audio->set_con) {
+ memcpy(&data, req->buf, req->length);
+ audio->set_con->set(audio->set_con, audio->set_cmd,
+ le16_to_cpu(data));
+ audio->set_con = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel) {
+ audio->set_con = con;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ audio->set_cmd = cmd;
+ req->context = audio;
+ req->complete = f_audio_complete;
+
+ return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel && con->get) {
+ value = con->get(con, cmd);
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ req->context = audio;
+ req->complete = f_audio_complete;
+ len = min_t(size_t, sizeof(value), len);
+ memcpy(req->buf, &value, len);
+
+ return len;
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ value = len;
+ break;
+
+ case UAC_SET_MIN:
+ break;
+
+ case UAC_SET_MAX:
+ break;
+
+ case UAC_SET_RES:
+ break;
+
+ case UAC_SET_MEM:
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ value = len;
+ break;
+ case UAC_GET_MEM:
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+ value = audio_set_intf_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+ value = audio_get_intf_req(f, ctrl);
+ break;
+
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+
+ default:
+ ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_ep *out_ep = audio->out_ep;
+ struct usb_request *req;
+ struct f_uac1_legacy_opts *opts;
+ int req_buf_size, req_count, audio_buf_size;
+ int i = 0, err = 0;
+
+ DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+ opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ req_buf_size = opts->req_buf_size;
+ req_count = opts->req_count;
+ audio_buf_size = opts->audio_buf_size;
+
+ /* No i/f has more than 2 alt settings */
+ if (alt > 1) {
+ ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (intf == audio->ac_intf) {
+ /* Control I/f has only 1 AltSetting - 0 */
+ if (alt) {
+ ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+ } else if (intf == audio->as_intf) {
+ if (alt == 1) {
+ err = config_ep_by_speed(cdev->gadget, f, out_ep);
+ if (err)
+ return err;
+
+ usb_ep_enable(out_ep);
+ audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (IS_ERR(audio->copy_buf))
+ return -ENOMEM;
+
+ /*
+ * allocate a bunch of read buffers
+ * and queue them all at once.
+ */
+ for (i = 0; i < req_count && err == 0; i++) {
+ req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+ if (req) {
+ req->buf = kzalloc(req_buf_size,
+ GFP_ATOMIC);
+ if (req->buf) {
+ req->length = req_buf_size;
+ req->context = audio;
+ req->complete =
+ f_audio_complete;
+ err = usb_ep_queue(out_ep,
+ req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev,
+ "%s queue req: %d\n",
+ out_ep->name, err);
+ } else
+ err = -ENOMEM;
+ } else
+ err = -ENOMEM;
+ }
+
+ } else {
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ if (copy_buf) {
+ list_add_tail(&copy_buf->list,
+ &audio->play_queue);
+ schedule_work(&audio->playback_work);
+ }
+ }
+ audio->as_alt = alt;
+ }
+
+ return err;
+}
+
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ if (intf == audio->ac_intf)
+ return audio->ac_alt;
+ else if (intf == audio->as_intf)
+ return audio->as_alt;
+ else
+ ERROR(cdev, "%s:%d Invalid Interface %d!\n",
+ __func__, __LINE__, intf);
+
+ return -EINVAL;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+ return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+ struct gaudio *card = &audio->card;
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+ as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+ /* Set sample rates */
+ rate = u_audio_get_playback_rate(card);
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+
+ /* Todo: Set Sample bits and other parameters */
+
+ return;
+}
+
+/* audio function driver setup/binding */
+static int
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_string *us;
+ int status;
+ struct usb_ep *ep = NULL;
+ struct f_uac1_legacy_opts *audio_opts;
+
+ audio_opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ audio->card.gadget = c->cdev->gadget;
+	/* set up ALSA audio devices */
+ if (!audio_opts->bound) {
+ status = gaudio_setup(&audio->card);
+ if (status < 0)
+ return status;
+ audio_opts->bound = true;
+ }
+ us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+ ac_interface_desc.iInterface = us[STR_AC_IF].id;
+ input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
+ input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
+ feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
+ output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
+ as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
+ as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+
+
+ f_audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+ audio->ac_intf = status;
+ audio->ac_alt = 0;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+ audio->as_intf = status;
+ audio->as_alt = 0;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->out_ep = ep;
+ audio->out_ep->desc = &as_out_ep_desc;
+
+ status = -ENOMEM;
+
+ /* copy descriptors, and track endpoint copies */
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL);
+ if (status)
+ goto fail;
+ return 0;
+
+fail:
+ gaudio_cleanup(&audio->card);
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+ con->data[cmd] = value;
+
+ return 0;
+}
+
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+ return con->data[cmd];
+}
+
+/* Todo: add more control selectors dynamically */
+static int control_selector_init(struct f_audio *audio)
+{
+ INIT_LIST_HEAD(&audio->cs);
+ list_add(&feature_unit.list, &audio->cs);
+
+ INIT_LIST_HEAD(&feature_unit.control);
+ list_add(&mute_control.list, &feature_unit.control);
+ list_add(&volume_control.list, &feature_unit.control);
+
+ volume_control.data[UAC__CUR] = 0xffc0;
+ volume_control.data[UAC__MIN] = 0xe3a0;
+ volume_control.data[UAC__MAX] = 0xfff0;
+ volume_control.data[UAC__RES] = 0x0030;
+
+ return 0;
+}
+
+static inline
+struct f_uac1_legacy_opts *to_f_uac1_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uac1_legacy_opts,
+ func_inst.group);
+}
+
+static void f_uac1_attr_release(struct config_item *item)
+{
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations f_uac1_item_ops = {
+ .release = f_uac1_attr_release,
+};
+
+#define UAC1_INT_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%u\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int ret; \
+ u32 num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou32(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_INT_ATTRIBUTE(req_buf_size);
+UAC1_INT_ATTRIBUTE(req_count);
+UAC1_INT_ATTRIBUTE(audio_buf_size);
+
+#define UAC1_STR_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%s\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int ret = -EBUSY; \
+ char *tmp; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) \
+ goto end; \
+ \
+ tmp = kstrndup(page, len, GFP_KERNEL); \
+	if (!tmp) {							\
+ ret = -ENOMEM; \
+ goto end; \
+ } \
+ if (opts->name##_alloc) \
+ kfree(opts->name); \
+ opts->name##_alloc = true; \
+ opts->name = tmp; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_STR_ATTRIBUTE(fn_play);
+UAC1_STR_ATTRIBUTE(fn_cap);
+UAC1_STR_ATTRIBUTE(fn_cntl);
+
+static struct configfs_attribute *f_uac1_attrs[] = {
+ &f_uac1_opts_attr_req_buf_size,
+ &f_uac1_opts_attr_req_count,
+ &f_uac1_opts_attr_audio_buf_size,
+ &f_uac1_opts_attr_fn_play,
+ &f_uac1_opts_attr_fn_cap,
+ &f_uac1_opts_attr_fn_cntl,
+ NULL,
+};
+
+static struct config_item_type f_uac1_func_type = {
+ .ct_item_ops = &f_uac1_item_ops,
+ .ct_attrs = f_uac1_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void f_audio_free_inst(struct usb_function_instance *f)
+{
+ struct f_uac1_legacy_opts *opts;
+
+ opts = container_of(f, struct f_uac1_legacy_opts, func_inst);
+ if (opts->fn_play_alloc)
+ kfree(opts->fn_play);
+ if (opts->fn_cap_alloc)
+ kfree(opts->fn_cap);
+ if (opts->fn_cntl_alloc)
+ kfree(opts->fn_cntl);
+ kfree(opts);
+}
+
+static struct usb_function_instance *f_audio_alloc_inst(void)
+{
+ struct f_uac1_legacy_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = f_audio_free_inst;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &f_uac1_func_type);
+
+ opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
+ opts->req_count = UAC1_REQ_COUNT;
+ opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
+ opts->fn_play = FILE_PCM_PLAYBACK;
+ opts->fn_cap = FILE_PCM_CAPTURE;
+ opts->fn_cntl = FILE_CONTROL;
+ return &opts->func_inst;
+}
+
+static void f_audio_free(struct usb_function *f)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct f_uac1_legacy_opts *opts;
+
+ gaudio_cleanup(&audio->card);
+ opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ kfree(audio);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+}
+
+static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ usb_free_all_descriptors(f);
+}
+
+static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
+{
+ struct f_audio *audio;
+ struct f_uac1_legacy_opts *opts;
+
+ /* allocate and initialize one new instance */
+ audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ return ERR_PTR(-ENOMEM);
+
+ audio->card.func.name = "g_audio";
+
+ opts = container_of(fi, struct f_uac1_legacy_opts, func_inst);
+ mutex_lock(&opts->lock);
+ ++opts->refcnt;
+ mutex_unlock(&opts->lock);
+ INIT_LIST_HEAD(&audio->play_queue);
+ spin_lock_init(&audio->lock);
+
+ audio->card.func.bind = f_audio_bind;
+ audio->card.func.unbind = f_audio_unbind;
+ audio->card.func.set_alt = f_audio_set_alt;
+ audio->card.func.get_alt = f_audio_get_alt;
+ audio->card.func.setup = f_audio_setup;
+ audio->card.func.disable = f_audio_disable;
+ audio->card.func.free_func = f_audio_free;
+
+ control_selector_init(audio);
+
+ INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+ return &audio->card.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(uac1_legacy, f_audio_alloc_inst, f_audio_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bryan Wu");
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6903d02a933f..59de3f246f42 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -13,18 +13,11 @@
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
+#include "u_audio.h"
#include "u_uac2.h"
-/* Keep everyone on toes */
-#define USB_XFERS 2
-
/*
* The driver implements a simple UAC_2 topology.
* USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
@@ -54,504 +47,23 @@
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
-static const char *uac2_name = "snd_uac2";
-
-struct uac2_req {
- struct uac2_rtd_params *pp; /* parent param */
- struct usb_request *req;
-};
-
-struct uac2_rtd_params {
- struct snd_uac2_chip *uac2; /* parent chip */
- bool ep_enabled; /* if the ep is enabled */
- /* Size of the ring buffer */
- size_t dma_bytes;
- unsigned char *dma_area;
-
- struct snd_pcm_substream *ss;
-
- /* Ring buffer */
- ssize_t hw_ptr;
-
- void *rbuf;
-
- size_t period_size;
-
- unsigned max_psize;
- struct uac2_req ureq[USB_XFERS];
-
- spinlock_t lock;
-};
-
-struct snd_uac2_chip {
- struct platform_device pdev;
- struct platform_driver pdrv;
-
- struct uac2_rtd_params p_prm;
- struct uac2_rtd_params c_prm;
-
- struct snd_card *card;
- struct snd_pcm *pcm;
-
- /* timekeeping for the playback endpoint */
- unsigned int p_interval;
- unsigned int p_residue;
-
- /* pre-calculated values for playback iso completion */
- unsigned int p_pktsize;
- unsigned int p_pktsize_residue;
- unsigned int p_framesize;
-};
-
-#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
-#define PRD_SIZE_MAX PAGE_SIZE
-#define MIN_PERIODS 4
-
-static struct snd_pcm_hardware uac2_pcm_hardware = {
- .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
- | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
- | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
- .buffer_bytes_max = BUFF_SIZE_MAX,
- .period_bytes_max = PRD_SIZE_MAX,
- .periods_min = MIN_PERIODS,
-};
-
-struct audio_dev {
- u8 ac_intf, ac_alt;
- u8 as_out_intf, as_out_alt;
- u8 as_in_intf, as_in_alt;
-
- struct usb_ep *in_ep, *out_ep;
- struct usb_function func;
-
- /* The ALSA Sound Card it represents on the USB-Client side */
- struct snd_uac2_chip uac2;
+struct f_uac2 {
+ struct g_audio g_audio;
+ u8 ac_intf, as_in_intf, as_out_intf;
+ u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
};
-static inline
-struct audio_dev *func_to_agdev(struct usb_function *f)
-{
- return container_of(f, struct audio_dev, func);
-}
-
-static inline
-struct audio_dev *uac2_to_agdev(struct snd_uac2_chip *u)
+static inline struct f_uac2 *func_to_uac2(struct usb_function *f)
{
- return container_of(u, struct audio_dev, uac2);
+ return container_of(f, struct f_uac2, g_audio.func);
}
static inline
-struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p)
-{
- return container_of(p, struct snd_uac2_chip, pdev);
-}
-
-static inline
-struct f_uac2_opts *agdev_to_uac2_opts(struct audio_dev *agdev)
+struct f_uac2_opts *g_audio_to_uac2_opts(struct g_audio *agdev)
{
return container_of(agdev->func.fi, struct f_uac2_opts, func_inst);
}
-static inline
-uint num_channels(uint chanmask)
-{
- uint num = 0;
-
- while (chanmask) {
- num += (chanmask & 1);
- chanmask >>= 1;
- }
-
- return num;
-}
-
-static void
-agdev_iso_complete(struct usb_ep *ep, struct usb_request *req)
-{
- unsigned pending;
- unsigned long flags;
- unsigned int hw_ptr;
- bool update_alsa = false;
- int status = req->status;
- struct uac2_req *ur = req->context;
- struct snd_pcm_substream *substream;
- struct uac2_rtd_params *prm = ur->pp;
- struct snd_uac2_chip *uac2 = prm->uac2;
-
- /* i/f shutting down */
- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
- return;
-
- /*
- * We can't really do much about bad xfers.
- * Afterall, the ISOCH xfers could fail legitimately.
- */
- if (status)
- pr_debug("%s: iso_complete status(%d) %d/%d\n",
- __func__, status, req->actual, req->length);
-
- substream = prm->ss;
-
- /* Do nothing if ALSA isn't active */
- if (!substream)
- goto exit;
-
- spin_lock_irqsave(&prm->lock, flags);
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /*
- * For each IN packet, take the quotient of the current data
- * rate and the endpoint's interval as the base packet size.
- * If there is a residue from this division, add it to the
- * residue accumulator.
- */
- req->length = uac2->p_pktsize;
- uac2->p_residue += uac2->p_pktsize_residue;
-
- /*
- * Whenever there are more bytes in the accumulator than we
- * need to add one more sample frame, increase this packet's
- * size and decrease the accumulator.
- */
- if (uac2->p_residue / uac2->p_interval >= uac2->p_framesize) {
- req->length += uac2->p_framesize;
- uac2->p_residue -= uac2->p_framesize *
- uac2->p_interval;
- }
-
- req->actual = req->length;
- }
-
- pending = prm->hw_ptr % prm->period_size;
- pending += req->actual;
- if (pending >= prm->period_size)
- update_alsa = true;
-
- hw_ptr = prm->hw_ptr;
- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
-
- spin_unlock_irqrestore(&prm->lock, flags);
-
- /* Pack USB load in ALSA ring buffer */
- pending = prm->dma_bytes - hw_ptr;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (unlikely(pending < req->actual)) {
- memcpy(req->buf, prm->dma_area + hw_ptr, pending);
- memcpy(req->buf + pending, prm->dma_area,
- req->actual - pending);
- } else {
- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
- }
- } else {
- if (unlikely(pending < req->actual)) {
- memcpy(prm->dma_area + hw_ptr, req->buf, pending);
- memcpy(prm->dma_area, req->buf + pending,
- req->actual - pending);
- } else {
- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
- }
- }
-
-exit:
- if (usb_ep_queue(ep, req, GFP_ATOMIC))
- dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__);
-
- if (update_alsa)
- snd_pcm_period_elapsed(substream);
-
- return;
-}
-
-static int
-uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
- unsigned long flags;
- int err = 0;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- spin_lock_irqsave(&prm->lock, flags);
-
- /* Reset */
- prm->hw_ptr = 0;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- prm->ss = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- prm->ss = NULL;
- break;
- default:
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&prm->lock, flags);
-
- /* Clear buffer after Play stops */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
- memset(prm->rbuf, 0, prm->max_psize * USB_XFERS);
-
- return err;
-}
-
-static snd_pcm_uframes_t uac2_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- return bytes_to_frames(substream->runtime, prm->hw_ptr);
-}
-
-static int uac2_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
- int err;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- err = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (err >= 0) {
- prm->dma_bytes = substream->runtime->dma_bytes;
- prm->dma_area = substream->runtime->dma_area;
- prm->period_size = params_period_bytes(hw_params);
- }
-
- return err;
-}
-
-static int uac2_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- prm->dma_area = NULL;
- prm->dma_bytes = 0;
- prm->period_size = 0;
-
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int uac2_pcm_open(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct audio_dev *audio_dev;
- struct f_uac2_opts *opts;
- int p_ssize, c_ssize;
- int p_srate, c_srate;
- int p_chmask, c_chmask;
-
- audio_dev = uac2_to_agdev(uac2);
- opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
- p_ssize = opts->p_ssize;
- c_ssize = opts->c_ssize;
- p_srate = opts->p_srate;
- c_srate = opts->c_srate;
- p_chmask = opts->p_chmask;
- c_chmask = opts->c_chmask;
- uac2->p_residue = 0;
-
- runtime->hw = uac2_pcm_hardware;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- spin_lock_init(&uac2->p_prm.lock);
- runtime->hw.rate_min = p_srate;
- switch (p_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
- runtime->hw.channels_min = num_channels(p_chmask);
- runtime->hw.period_bytes_min = 2 * uac2->p_prm.max_psize
- / runtime->hw.periods_min;
- } else {
- spin_lock_init(&uac2->c_prm.lock);
- runtime->hw.rate_min = c_srate;
- switch (c_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
- runtime->hw.channels_min = num_channels(c_chmask);
- runtime->hw.period_bytes_min = 2 * uac2->c_prm.max_psize
- / runtime->hw.periods_min;
- }
-
- runtime->hw.rate_max = runtime->hw.rate_min;
- runtime->hw.channels_max = runtime->hw.channels_min;
-
- snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-
- return 0;
-}
-
-/* ALSA cries without these function pointers */
-static int uac2_pcm_null(struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
-static struct snd_pcm_ops uac2_pcm_ops = {
- .open = uac2_pcm_open,
- .close = uac2_pcm_null,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uac2_pcm_hw_params,
- .hw_free = uac2_pcm_hw_free,
- .trigger = uac2_pcm_trigger,
- .pointer = uac2_pcm_pointer,
- .prepare = uac2_pcm_null,
-};
-
-static int snd_uac2_probe(struct platform_device *pdev)
-{
- struct snd_uac2_chip *uac2 = pdev_to_uac2(pdev);
- struct snd_card *card;
- struct snd_pcm *pcm;
- struct audio_dev *audio_dev;
- struct f_uac2_opts *opts;
- int err;
- int p_chmask, c_chmask;
-
- audio_dev = uac2_to_agdev(uac2);
- opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
- p_chmask = opts->p_chmask;
- c_chmask = opts->c_chmask;
-
- /* Choose any slot, with no id */
- err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
- if (err < 0)
- return err;
-
- uac2->card = card;
-
- /*
- * Create first PCM device
- * Create a substream only for non-zero channel streams
- */
- err = snd_pcm_new(uac2->card, "UAC2 PCM", 0,
- p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
- if (err < 0)
- goto snd_fail;
-
- strcpy(pcm->name, "UAC2 PCM");
- pcm->private_data = uac2;
-
- uac2->pcm = pcm;
-
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac2_pcm_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac2_pcm_ops);
-
- strcpy(card->driver, "UAC2_Gadget");
- strcpy(card->shortname, "UAC2_Gadget");
- sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
-
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
-
- err = snd_card_register(card);
- if (!err) {
- platform_set_drvdata(pdev, card);
- return 0;
- }
-
-snd_fail:
- snd_card_free(card);
-
- uac2->pcm = NULL;
- uac2->card = NULL;
-
- return err;
-}
-
-static int snd_uac2_remove(struct platform_device *pdev)
-{
- struct snd_card *card = platform_get_drvdata(pdev);
-
- if (card)
- return snd_card_free(card);
-
- return 0;
-}
-
-static void snd_uac2_release(struct device *dev)
-{
- dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
-}
-
-static int alsa_uac2_init(struct audio_dev *agdev)
-{
- struct snd_uac2_chip *uac2 = &agdev->uac2;
- int err;
-
- uac2->pdrv.probe = snd_uac2_probe;
- uac2->pdrv.remove = snd_uac2_remove;
- uac2->pdrv.driver.name = uac2_name;
-
- uac2->pdev.id = 0;
- uac2->pdev.name = uac2_name;
- uac2->pdev.dev.release = snd_uac2_release;
-
- /* Register snd_uac2 driver */
- err = platform_driver_register(&uac2->pdrv);
- if (err)
- return err;
-
- /* Register snd_uac2 device */
- err = platform_device_register(&uac2->pdev);
- if (err)
- platform_driver_unregister(&uac2->pdrv);
-
- return err;
-}
-
-static void alsa_uac2_exit(struct audio_dev *agdev)
-{
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- platform_driver_unregister(&uac2->pdrv);
- platform_device_unregister(&uac2->pdev);
-}
-
-
/* --------- USB Function Interface ------------- */
enum {
@@ -939,30 +451,6 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static inline void
-free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
-{
- struct snd_uac2_chip *uac2 = prm->uac2;
- int i;
-
- if (!prm->ep_enabled)
- return;
-
- prm->ep_enabled = false;
-
- for (i = 0; i < USB_XFERS; i++) {
- if (prm->ureq[i].req) {
- usb_ep_dequeue(ep, prm->ureq[i].req);
- usb_ep_free_request(ep, prm->ureq[i].req);
- prm->ureq[i].req = NULL;
- }
- }
-
- if (usb_ep_disable(ep))
- dev_err(&uac2->pdev.dev,
- "%s:%d Error!\n", __func__, __LINE__);
-}
-
static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
enum usb_device_speed speed, bool is_playback)
@@ -1007,12 +495,11 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
struct usb_composite_dev *cdev = cfg->cdev;
struct usb_gadget *gadget = cdev->gadget;
- struct device *dev = &uac2->pdev.dev;
- struct uac2_rtd_params *prm;
+ struct device *dev = &gadget->dev;
struct f_uac2_opts *uac2_opts;
struct usb_string *us;
int ret;
@@ -1061,8 +548,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
iad_desc.bFirstInterface = ret;
std_ac_if_desc.bInterfaceNumber = ret;
- agdev->ac_intf = ret;
- agdev->ac_alt = 0;
+ uac2->ac_intf = ret;
+ uac2->ac_alt = 0;
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
@@ -1071,8 +558,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
- agdev->as_out_intf = ret;
- agdev->as_out_alt = 0;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
@@ -1081,23 +568,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_in_if0_desc.bInterfaceNumber = ret;
std_as_in_if1_desc.bInterfaceNumber = ret;
- agdev->as_in_intf = ret;
- agdev->as_in_alt = 0;
-
- agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
- if (!agdev->out_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
- }
-
- agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
- if (!agdev->in_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
- }
-
- uac2->p_prm.uac2 = uac2;
- uac2->c_prm.uac2 = uac2;
+ uac2->as_in_intf = ret;
+ uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
@@ -1128,6 +600,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
}
+ agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+ if (!agdev->out_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+ if (!agdev->in_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
+ hs_epin_desc.wMaxPacketSize);
+ agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
+ hs_epout_desc.wMaxPacketSize);
+
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
@@ -1135,47 +624,34 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
if (ret)
return ret;
- prm = &agdev->uac2.c_prm;
- prm->max_psize = hs_epout_desc.wMaxPacketSize;
- prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
- if (!prm->rbuf) {
- prm->max_psize = 0;
- goto err_free_descs;
- }
+ agdev->gadget = gadget;
- prm = &agdev->uac2.p_prm;
- prm->max_psize = hs_epin_desc.wMaxPacketSize;
- prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
- if (!prm->rbuf) {
- prm->max_psize = 0;
- goto err;
- }
-
- ret = alsa_uac2_init(agdev);
+ agdev->params.p_chmask = uac2_opts->p_chmask;
+ agdev->params.p_srate = uac2_opts->p_srate;
+ agdev->params.p_ssize = uac2_opts->p_ssize;
+ agdev->params.c_chmask = uac2_opts->c_chmask;
+ agdev->params.c_srate = uac2_opts->c_srate;
+ agdev->params.c_ssize = uac2_opts->c_ssize;
+ agdev->params.req_number = uac2_opts->req_number;
+ ret = g_audio_setup(agdev, "UAC2 PCM", "UAC2_Gadget");
if (ret)
- goto err;
+ goto err_free_descs;
return 0;
-err:
- kfree(agdev->uac2.p_prm.rbuf);
- kfree(agdev->uac2.c_prm.rbuf);
err_free_descs:
usb_free_all_descriptors(fn);
- return -EINVAL;
+ agdev->gadget = NULL;
+ return ret;
}
static int
afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = fn->config->cdev;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
struct usb_gadget *gadget = cdev->gadget;
- struct device *dev = &uac2->pdev.dev;
- struct usb_request *req;
- struct usb_ep *ep;
- struct uac2_rtd_params *prm;
- int req_len, i;
+ struct device *dev = &gadget->dev;
+ int ret = 0;
/* No i/f has more than 2 alt settings */
if (alt > 1) {
@@ -1183,7 +659,7 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
return -EINVAL;
}
- if (intf == agdev->ac_intf) {
+ if (intf == uac2->ac_intf) {
/* Control I/f has only 1 AltSetting - 0 */
if (alt) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
@@ -1192,96 +668,42 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
return 0;
}
- if (intf == agdev->as_out_intf) {
- ep = agdev->out_ep;
- prm = &uac2->c_prm;
- config_ep_by_speed(gadget, fn, ep);
- agdev->as_out_alt = alt;
- req_len = prm->max_psize;
- } else if (intf == agdev->as_in_intf) {
- struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev);
- unsigned int factor, rate;
- struct usb_endpoint_descriptor *ep_desc;
-
- ep = agdev->in_ep;
- prm = &uac2->p_prm;
- config_ep_by_speed(gadget, fn, ep);
- agdev->as_in_alt = alt;
-
- /* pre-calculate the playback endpoint's interval */
- if (gadget->speed == USB_SPEED_FULL) {
- ep_desc = &fs_epin_desc;
- factor = 1000;
- } else {
- ep_desc = &hs_epin_desc;
- factor = 8000;
- }
-
- /* pre-compute some values for iso_complete() */
- uac2->p_framesize = opts->p_ssize *
- num_channels(opts->p_chmask);
- rate = opts->p_srate * uac2->p_framesize;
- uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
- prm->max_psize);
+ if (intf == uac2->as_out_intf) {
+ uac2->as_out_alt = alt;
- if (uac2->p_pktsize < prm->max_psize)
- uac2->p_pktsize_residue = rate % uac2->p_interval;
+ if (alt)
+ ret = u_audio_start_capture(&uac2->g_audio);
else
- uac2->p_pktsize_residue = 0;
+ u_audio_stop_capture(&uac2->g_audio);
+ } else if (intf == uac2->as_in_intf) {
+ uac2->as_in_alt = alt;
- req_len = uac2->p_pktsize;
- uac2->p_residue = 0;
+ if (alt)
+ ret = u_audio_start_playback(&uac2->g_audio);
+ else
+ u_audio_stop_playback(&uac2->g_audio);
} else {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
- if (alt == 0) {
- free_ep(prm, ep);
- return 0;
- }
-
- prm->ep_enabled = true;
- usb_ep_enable(ep);
-
- for (i = 0; i < USB_XFERS; i++) {
- if (!prm->ureq[i].req) {
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req == NULL)
- return -ENOMEM;
-
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
-
- req->zero = 0;
- req->context = &prm->ureq[i];
- req->length = req_len;
- req->complete = agdev_iso_complete;
- req->buf = prm->rbuf + i * prm->max_psize;
- }
-
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- }
-
- return 0;
+ return ret;
}
static int
afunc_get_alt(struct usb_function *fn, unsigned intf)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- if (intf == agdev->ac_intf)
- return agdev->ac_alt;
- else if (intf == agdev->as_out_intf)
- return agdev->as_out_alt;
- else if (intf == agdev->as_in_intf)
- return agdev->as_in_alt;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
+
+ if (intf == uac2->ac_intf)
+ return uac2->ac_alt;
+ else if (intf == uac2->as_out_intf)
+ return uac2->as_out_alt;
+ else if (intf == uac2->as_in_intf)
+ return uac2->as_in_alt;
else
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d Invalid Interface %d!\n",
__func__, __LINE__, intf);
@@ -1291,22 +713,19 @@ afunc_get_alt(struct usb_function *fn, unsigned intf)
static void
afunc_disable(struct usb_function *fn)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- free_ep(&uac2->p_prm, agdev->in_ep);
- agdev->as_in_alt = 0;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
- free_ep(&uac2->c_prm, agdev->out_ep);
- agdev->as_out_alt = 0;
+ uac2->as_in_alt = 0;
+ uac2->as_out_alt = 0;
+ u_audio_stop_capture(&uac2->g_audio);
+ u_audio_stop_playback(&uac2->g_audio);
}
static int
in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts;
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1316,7 +735,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
int value = -EOPNOTSUPP;
int p_srate, c_srate;
- opts = agdev_to_uac2_opts(agdev);
+ opts = g_audio_to_uac2_opts(agdev);
p_srate = opts->p_srate;
c_srate = opts->c_srate;
@@ -1335,7 +754,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
*(u8 *)req->buf = 1;
value = min_t(unsigned, w_length, 1);
} else {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
@@ -1347,8 +766,7 @@ static int
in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts;
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1359,7 +777,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
int value = -EOPNOTSUPP;
int p_srate, c_srate;
- opts = agdev_to_uac2_opts(agdev);
+ opts = g_audio_to_uac2_opts(agdev);
p_srate = opts->p_srate;
c_srate = opts->c_srate;
@@ -1378,7 +796,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
} else {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
@@ -1413,13 +831,13 @@ out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
static int
setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
u16 w_index = le16_to_cpu(cr->wIndex);
u8 intf = w_index & 0xff;
- if (intf != agdev->ac_intf) {
- dev_err(&uac2->pdev.dev,
+ if (intf != uac2->ac_intf) {
+ dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
return -EOPNOTSUPP;
}
@@ -1436,8 +854,7 @@ static int
afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_composite_dev *cdev = fn->config->cdev;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct usb_request *req = cdev->req;
u16 w_length = le16_to_cpu(cr->wLength);
int value = -EOPNOTSUPP;
@@ -1449,14 +866,15 @@ afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE)
value = setup_rq_inf(fn, cr);
else
- dev_err(&uac2->pdev.dev, "%s:%d Error!\n", __func__, __LINE__);
+ dev_err(&agdev->gadget->dev, "%s:%d Error!\n",
+ __func__, __LINE__);
if (value >= 0) {
req->length = value;
req->zero = value < w_length;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
req->status = 0;
}
@@ -1529,6 +947,7 @@ UAC2_ATTRIBUTE(p_ssize);
UAC2_ATTRIBUTE(c_chmask);
UAC2_ATTRIBUTE(c_srate);
UAC2_ATTRIBUTE(c_ssize);
+UAC2_ATTRIBUTE(req_number);
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
@@ -1537,6 +956,7 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_c_chmask,
&f_uac2_opts_attr_c_srate,
&f_uac2_opts_attr_c_ssize,
+ &f_uac2_opts_attr_req_number,
NULL,
};
@@ -1574,15 +994,16 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->c_chmask = UAC2_DEF_CCHMASK;
opts->c_srate = UAC2_DEF_CSRATE;
opts->c_ssize = UAC2_DEF_CSSIZE;
+ opts->req_number = UAC2_DEF_REQ_NUM;
return &opts->func_inst;
}
static void afunc_free(struct usb_function *f)
{
- struct audio_dev *agdev;
+ struct g_audio *agdev;
struct f_uac2_opts *opts;
- agdev = func_to_agdev(f);
+ agdev = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac2_opts, func_inst);
kfree(agdev);
mutex_lock(&opts->lock);
@@ -1592,26 +1013,21 @@ static void afunc_free(struct usb_function *f)
static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
{
- struct audio_dev *agdev = func_to_agdev(f);
- struct uac2_rtd_params *prm;
+ struct g_audio *agdev = func_to_g_audio(f);
- alsa_uac2_exit(agdev);
-
- prm = &agdev->uac2.p_prm;
- kfree(prm->rbuf);
-
- prm = &agdev->uac2.c_prm;
- kfree(prm->rbuf);
+ g_audio_cleanup(agdev);
usb_free_all_descriptors(f);
+
+ agdev->gadget = NULL;
}
static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
{
- struct audio_dev *agdev;
+ struct f_uac2 *uac2;
struct f_uac2_opts *opts;
- agdev = kzalloc(sizeof(*agdev), GFP_KERNEL);
- if (agdev == NULL)
+ uac2 = kzalloc(sizeof(*uac2), GFP_KERNEL);
+ if (uac2 == NULL)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_uac2_opts, func_inst);
@@ -1619,16 +1035,16 @@ static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
++opts->refcnt;
mutex_unlock(&opts->lock);
- agdev->func.name = "uac2_func";
- agdev->func.bind = afunc_bind;
- agdev->func.unbind = afunc_unbind;
- agdev->func.set_alt = afunc_set_alt;
- agdev->func.get_alt = afunc_get_alt;
- agdev->func.disable = afunc_disable;
- agdev->func.setup = afunc_setup;
- agdev->func.free_func = afunc_free;
+ uac2->g_audio.func.name = "uac2_func";
+ uac2->g_audio.func.bind = afunc_bind;
+ uac2->g_audio.func.unbind = afunc_unbind;
+ uac2->g_audio.func.set_alt = afunc_set_alt;
+ uac2->g_audio.func.get_alt = afunc_get_alt;
+ uac2->g_audio.func.disable = afunc_disable;
+ uac2->g_audio.func.setup = afunc_setup;
+ uac2->g_audio.func.free_func = afunc_free;
- return &agdev->func;
+ return &uac2->g_audio.func;
}
DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 4dba794a6ad5..1d13d79d5070 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -538,14 +538,11 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
*/
retval = 0;
if (*params->filter) {
- params->state = RNDIS_DATA_INITIALIZED;
- netif_carrier_on(params->dev);
- if (netif_running(params->dev))
- netif_wake_queue(params->dev);
+ pr_debug("%s(): disable flow control\n", __func__);
+ rndis_flow_control(params, false);
} else {
- params->state = RNDIS_INITIALIZED;
- netif_carrier_off(params->dev);
- netif_stop_queue(params->dev);
+ pr_err("%s(): enable flow control\n", __func__);
+ rndis_flow_control(params, true);
}
break;
@@ -595,10 +592,11 @@ static int rndis_init_response(struct rndis_params *params,
+ sizeof(struct ethhdr)
+ sizeof(struct rndis_packet_msg_type)
+ 22));
- resp->PacketAlignmentFactor = cpu_to_le32(0);
+ resp->PacketAlignmentFactor = cpu_to_le32(params->pkt_alignment_factor);
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
+ params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
params->resp_avail(params->v);
return 0;
}
@@ -801,7 +799,7 @@ EXPORT_SYMBOL_GPL(rndis_set_host_mac);
*/
int rndis_msg_parser(struct rndis_params *params, u8 *buf)
{
- u32 MsgType, MsgLength;
+ u32 MsgType, MsgLength, major, minor, max_transfer_size;
__le32 *tmp;
if (!buf)
@@ -824,16 +822,36 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
case RNDIS_MSG_INIT:
pr_debug("%s: RNDIS_MSG_INIT\n",
__func__);
+ tmp++; /* to get RequestID */
+ major = get_unaligned_le32(tmp++);
+ minor = get_unaligned_le32(tmp++);
+ max_transfer_size = get_unaligned_le32(tmp++);
+
+ params->host_rndis_major_ver = major;
+ params->host_rndis_minor_ver = minor;
+ params->dl_max_xfer_size = max_transfer_size;
+
+	pr_debug("%s(): RNDIS host version: major %d, minor %d\n",
+			__func__, major, minor);
+	pr_debug("%s(): DL max transfer size:%x\n", __func__,
+			max_transfer_size);
+
params->state = RNDIS_INITIALIZED;
return rndis_init_response(params, (rndis_init_msg_type *)buf);
case RNDIS_MSG_HALT:
pr_debug("%s: RNDIS_MSG_HALT\n",
__func__);
- params->state = RNDIS_UNINITIALIZED;
- if (params->dev) {
- netif_carrier_off(params->dev);
- netif_stop_queue(params->dev);
+ if (params->state == RNDIS_DATA_INITIALIZED) {
+ if (params->flow_ctrl_enable) {
+ params->flow_ctrl_enable(true, params);
+ } else {
+ if (params->dev) {
+ netif_carrier_off(params->dev);
+ netif_stop_queue(params->dev);
+ }
+ }
+ params->state = RNDIS_UNINITIALIZED;
}
return 0;
@@ -885,7 +903,8 @@ static inline void rndis_put_nr(int nr)
ida_simple_remove(&rndis_ida, nr);
}
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+ void (*flow_ctrl_enable)(bool enable, struct rndis_params *params))
{
struct rndis_params *params;
int i;
@@ -929,6 +948,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->state = RNDIS_UNINITIALIZED;
params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
params->resp_avail = resp_avail;
+ params->flow_ctrl_enable = flow_ctrl_enable;
params->v = v;
INIT_LIST_HEAD(&(params->resp_queue));
pr_debug("%s: configNr = %d\n", __func__, i);
@@ -1007,6 +1027,18 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
}
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->dl_max_xfer_size;
+}
+
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->ul_max_xfer_size;
+}
+
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
{
pr_debug("%s:\n", __func__);
@@ -1014,6 +1046,47 @@ void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
params->max_pkt_per_xfer = max_pkt_per_xfer;
}
+/**
+ * rndis_flow_control - enable/disable flow control on the RNDIS data path
+ * @params: RNDIS network parameters
+ * @enable_flow_control: true to throttle the data path, false to resume it
+ *
+ * In HW-accelerated mode this stops/starts the endless transfers; otherwise
+ * it pauses/resumes the RNDIS network interface (carrier and TX queue).
+ */
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control)
+{
+ if (!params) {
+ pr_err("%s: failed, params NULL\n", __func__);
+ return;
+ }
+
+ pr_debug("%s(): params->state:%x\n", __func__, params->state);
+
+ if (enable_flow_control) {
+ if (params->state == RNDIS_DATA_INITIALIZED) {
+ if (params->flow_ctrl_enable) {
+ params->flow_ctrl_enable(enable_flow_control, params);
+ } else {
+ netif_carrier_off(params->dev);
+ netif_stop_queue(params->dev);
+ }
+ }
+ params->state = RNDIS_INITIALIZED;
+ } else {
+ if (params->state != RNDIS_DATA_INITIALIZED) {
+ if (params->flow_ctrl_enable) {
+ params->flow_ctrl_enable(enable_flow_control, params);
+ } else {
+ netif_carrier_on(params->dev);
+ if (netif_running(params->dev))
+ netif_wake_queue(params->dev);
+ }
+ }
+ params->state = RNDIS_DATA_INITIALIZED;
+ }
+}
+
void rndis_add_hdr(struct sk_buff *skb)
{
struct rndis_packet_msg_type *header;
@@ -1159,6 +1232,19 @@ int rndis_rm_hdr(struct gether *port,
}
EXPORT_SYMBOL_GPL(rndis_rm_hdr);
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+ u8 pkt_alignment_factor)
+{
+ pr_debug("%s:\n", __func__);
+
+ if (!params) {
+ pr_err("%s: failed, params NULL\n", __func__);
+ return;
+ }
+
+ params->pkt_alignment_factor = pkt_alignment_factor;
+}
+
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static int rndis_proc_show(struct seq_file *m, void *v)
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 310cac3f088e..3d130b0576fc 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -192,14 +192,23 @@ typedef struct rndis_params
u32 vendorID;
u8 max_pkt_per_xfer;
const char *vendorDescr;
+ u8 pkt_alignment_factor;
void (*resp_avail)(void *v);
+ void (*flow_ctrl_enable)(bool enable,
+ struct rndis_params *params);
+
void *v;
struct list_head resp_queue;
+ u32 host_rndis_major_ver;
+ u32 host_rndis_minor_ver;
+ u32 ul_max_xfer_size;
+ u32 dl_max_xfer_size;
} rndis_params;
/* RNDIS Message parser and other useless functions */
int rndis_msg_parser(struct rndis_params *params, u8 *buf);
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v);
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+ void (*flow_ctrl_enable)(bool enable, struct rndis_params *params));
void rndis_deregister(struct rndis_params *params);
int rndis_set_param_dev(struct rndis_params *params, struct net_device *dev,
u16 *cdc_filter);
@@ -208,6 +217,8 @@ int rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
int rndis_set_param_medium(struct rndis_params *params, u32 medium,
u32 speed);
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params);
void rndis_add_hdr(struct sk_buff *skb);
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
struct sk_buff_head *list);
@@ -219,5 +230,8 @@ int rndis_signal_connect(struct rndis_params *params);
int rndis_signal_disconnect(struct rndis_params *params);
int rndis_state(struct rndis_params *params);
extern void rndis_set_host_mac(struct rndis_params *params, const u8 *addr);
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control);
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+ u8 pkt_alignment_factor);
#endif /* _LINUX_RNDIS_H */
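
The data-path throttling that gen_ndis_set_resp() and the RNDIS_MSG_HALT
handler used to open-code is now funnelled through rndis_flow_control(), and
rndis_register() gained an optional flow-control callback for HW-accelerated
data paths. A minimal sketch of a caller follows; the my_* names are
hypothetical and it assumes rndis_register() keeps its usual ERR_PTR return
convention:

	#include <linux/err.h>
	#include "rndis.h"

	struct my_rndis_dev {
		struct rndis_params *params;
	};

	static void my_resp_avail(void *v)
	{
		/* notify the host that an encapsulated response is waiting */
	}

	static void my_flow_ctrl(bool enable, struct rndis_params *params)
	{
		/*
		 * enable == true asks the data path to stop (e.g. halt the
		 * endless transfers of a HW-accelerated design); enable ==
		 * false resumes it.  Registering NULL instead of this
		 * callback makes rndis_flow_control() fall back to toggling
		 * the carrier and TX queue of params->dev.
		 */
	}

	static int my_bind(struct my_rndis_dev *dev)
	{
		dev->params = rndis_register(my_resp_avail, dev, my_flow_ctrl);
		if (IS_ERR(dev->params))
			return PTR_ERR(dev->params);

		rndis_set_max_pkt_xfer(dev->params, 10);	/* example values */
		rndis_set_pkt_alignment_factor(dev->params, 2);
		return 0;
	}
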
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
new file mode 100644
index 000000000000..435f0614d572
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -0,0 +1,645 @@
+/*
+ * u_audio.c -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * Sound card implementation was cut-and-pasted with changes
+ * from f_uac2.c and has:
+ * Copyright (C) 2011
+ * Yadwinder Singh (yadi.brar01@gmail.com)
+ * Jaswinder Singh (jaswinder.singh@linaro.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "u_audio.h"
+
+#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
+#define PRD_SIZE_MAX PAGE_SIZE
+#define MIN_PERIODS 4
+
+struct uac_req {
+ struct uac_rtd_params *pp; /* parent param */
+ struct usb_request *req;
+};
+
+/* Runtime data params for one stream */
+struct uac_rtd_params {
+ struct snd_uac_chip *uac; /* parent chip */
+ bool ep_enabled; /* if the ep is enabled */
+
+ struct snd_pcm_substream *ss;
+
+ /* Ring buffer */
+ ssize_t hw_ptr;
+
+ void *rbuf;
+
+ unsigned max_psize; /* MaxPacketSize of endpoint */
+ struct uac_req *ureq;
+
+ spinlock_t lock;
+};
+
+struct snd_uac_chip {
+ struct g_audio *audio_dev;
+
+ struct uac_rtd_params p_prm;
+ struct uac_rtd_params c_prm;
+
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+
+ /* timekeeping for the playback endpoint */
+ unsigned int p_interval;
+ unsigned int p_residue;
+
+ /* pre-calculated values for playback iso completion */
+ unsigned int p_pktsize;
+ unsigned int p_pktsize_residue;
+ unsigned int p_framesize;
+};
+
+static const struct snd_pcm_hardware uac_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
+ | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
+ | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
+ .buffer_bytes_max = BUFF_SIZE_MAX,
+ .period_bytes_max = PRD_SIZE_MAX,
+ .periods_min = MIN_PERIODS,
+};
+
+static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ unsigned pending;
+ unsigned long flags, flags2;
+ unsigned int hw_ptr;
+ int status = req->status;
+ struct uac_req *ur = req->context;
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+ struct uac_rtd_params *prm = ur->pp;
+ struct snd_uac_chip *uac = prm->uac;
+
+ /* i/f shutting down */
+ if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+ return;
+
+ /*
+ * We can't really do much about bad xfers.
+	 * After all, the ISOCH xfers could fail legitimately.
+ */
+ if (status)
+ pr_debug("%s: iso_complete status(%d) %d/%d\n",
+ __func__, status, req->actual, req->length);
+
+ substream = prm->ss;
+
+ /* Do nothing if ALSA isn't active */
+ if (!substream)
+ goto exit;
+
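+	/*
+	 * Hold the PCM stream lock while touching the runtime ring buffer so
+	 * the stream cannot be stopped or closed underneath us; the
+	 * snd_pcm_running() check bails out if it already has been.
+	 */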
+ snd_pcm_stream_lock_irqsave(substream, flags2);
+
+ runtime = substream->runtime;
+ if (!runtime || !snd_pcm_running(substream)) {
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ goto exit;
+ }
+
+ spin_lock_irqsave(&prm->lock, flags);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /*
+ * For each IN packet, take the quotient of the current data
+ * rate and the endpoint's interval as the base packet size.
+ * If there is a residue from this division, add it to the
+ * residue accumulator.
+ */
+ req->length = uac->p_pktsize;
+ uac->p_residue += uac->p_pktsize_residue;
+
+ /*
+ * Whenever there are more bytes in the accumulator than we
+ * need to add one more sample frame, increase this packet's
+ * size and decrease the accumulator.
+ */
+ if (uac->p_residue / uac->p_interval >= uac->p_framesize) {
+ req->length += uac->p_framesize;
+ uac->p_residue -= uac->p_framesize *
+ uac->p_interval;
+ }
+
+ req->actual = req->length;
+ }
+
+ hw_ptr = prm->hw_ptr;
+
+ spin_unlock_irqrestore(&prm->lock, flags);
+
+ /* Pack USB load in ALSA ring buffer */
+ pending = runtime->dma_bytes - hw_ptr;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (unlikely(pending < req->actual)) {
+ memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+ memcpy(req->buf + pending, runtime->dma_area,
+ req->actual - pending);
+ } else {
+ memcpy(req->buf, runtime->dma_area + hw_ptr,
+ req->actual);
+ }
+ } else {
+ if (unlikely(pending < req->actual)) {
+ memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+ memcpy(runtime->dma_area, req->buf + pending,
+ req->actual - pending);
+ } else {
+ memcpy(runtime->dma_area + hw_ptr, req->buf,
+ req->actual);
+ }
+ }
+
+ spin_lock_irqsave(&prm->lock, flags);
+ /* update hw_ptr after data is copied to memory */
+ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+ hw_ptr = prm->hw_ptr;
+ spin_unlock_irqrestore(&prm->lock, flags);
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+ if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+ snd_pcm_period_elapsed(substream);
+
+exit:
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ dev_err(uac->card->dev, "%d Error!\n", __LINE__);
+}
+
+static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+ struct uac_rtd_params *prm;
+ struct g_audio *audio_dev;
+ struct uac_params *params;
+ unsigned long flags;
+ int err = 0;
+
+ audio_dev = uac->audio_dev;
+ params = &audio_dev->params;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ prm = &uac->p_prm;
+ else
+ prm = &uac->c_prm;
+
+ spin_lock_irqsave(&prm->lock, flags);
+
+ /* Reset */
+ prm->hw_ptr = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ prm->ss = substream;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ prm->ss = NULL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&prm->lock, flags);
+
+ /* Clear buffer after Play stops */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
+ memset(prm->rbuf, 0, prm->max_psize * params->req_number);
+
+ return err;
+}
+
+static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+ struct uac_rtd_params *prm;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ prm = &uac->p_prm;
+ else
+ prm = &uac->c_prm;
+
+ return bytes_to_frames(substream->runtime, prm->hw_ptr);
+}
+
+static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int uac_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct g_audio *audio_dev;
+ struct uac_params *params;
+ int p_ssize, c_ssize;
+ int p_srate, c_srate;
+ int p_chmask, c_chmask;
+
+ audio_dev = uac->audio_dev;
+ params = &audio_dev->params;
+ p_ssize = params->p_ssize;
+ c_ssize = params->c_ssize;
+ p_srate = params->p_srate;
+ c_srate = params->c_srate;
+ p_chmask = params->p_chmask;
+ c_chmask = params->c_chmask;
+ uac->p_residue = 0;
+
+ runtime->hw = uac_pcm_hardware;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ spin_lock_init(&uac->p_prm.lock);
+ runtime->hw.rate_min = p_srate;
+ switch (p_ssize) {
+ case 3:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ }
+ runtime->hw.channels_min = num_channels(p_chmask);
+ runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize
+ / runtime->hw.periods_min;
+ } else {
+ spin_lock_init(&uac->c_prm.lock);
+ runtime->hw.rate_min = c_srate;
+ switch (c_ssize) {
+ case 3:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ }
+ runtime->hw.channels_min = num_channels(c_chmask);
+ runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize
+ / runtime->hw.periods_min;
+ }
+
+ runtime->hw.rate_max = runtime->hw.rate_min;
+ runtime->hw.channels_max = runtime->hw.channels_min;
+
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+ return 0;
+}
+
+/* ALSA cries without these function pointers */
+static int uac_pcm_null(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+static const struct snd_pcm_ops uac_pcm_ops = {
+ .open = uac_pcm_open,
+ .close = uac_pcm_null,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = uac_pcm_hw_params,
+ .hw_free = uac_pcm_hw_free,
+ .trigger = uac_pcm_trigger,
+ .pointer = uac_pcm_pointer,
+ .prepare = uac_pcm_null,
+};
+
+static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+{
+ struct snd_uac_chip *uac = prm->uac;
+ struct g_audio *audio_dev;
+ struct uac_params *params;
+ int i;
+
+ if (!prm->ep_enabled)
+ return;
+
+ prm->ep_enabled = false;
+
+ audio_dev = uac->audio_dev;
+ params = &audio_dev->params;
+
+ for (i = 0; i < params->req_number; i++) {
+ if (prm->ureq[i].req) {
+ usb_ep_dequeue(ep, prm->ureq[i].req);
+ usb_ep_free_request(ep, prm->ureq[i].req);
+ prm->ureq[i].req = NULL;
+ }
+ }
+
+ if (usb_ep_disable(ep))
+ dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+}
+
+
+int u_audio_start_capture(struct g_audio *audio_dev)
+{
+ struct snd_uac_chip *uac = audio_dev->uac;
+ struct usb_gadget *gadget = audio_dev->gadget;
+ struct device *dev = &gadget->dev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ struct uac_rtd_params *prm;
+ struct uac_params *params = &audio_dev->params;
+ int req_len, i, ret;
+
+ ep = audio_dev->out_ep;
+ prm = &uac->c_prm;
+ ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+ if (ret)
+ return ret;
+
+ req_len = prm->max_psize;
+
+ prm->ep_enabled = true;
+ usb_ep_enable(ep);
+
+ for (i = 0; i < params->req_number; i++) {
+ if (!prm->ureq[i].req) {
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (req == NULL)
+ return -ENOMEM;
+
+ prm->ureq[i].req = req;
+ prm->ureq[i].pp = prm;
+
+ req->zero = 0;
+ req->context = &prm->ureq[i];
+ req->length = req_len;
+ req->complete = u_audio_iso_complete;
+ req->buf = prm->rbuf + i * prm->max_psize;
+ }
+
+ if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_capture);
+
+void u_audio_stop_capture(struct g_audio *audio_dev)
+{
+ struct snd_uac_chip *uac = audio_dev->uac;
+
+ free_ep(&uac->c_prm, audio_dev->out_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_capture);
+
+int u_audio_start_playback(struct g_audio *audio_dev)
+{
+ struct snd_uac_chip *uac = audio_dev->uac;
+ struct usb_gadget *gadget = audio_dev->gadget;
+ struct device *dev = &gadget->dev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ struct uac_rtd_params *prm;
+ struct uac_params *params = &audio_dev->params;
+ unsigned int factor, rate;
+ const struct usb_endpoint_descriptor *ep_desc;
+ int req_len, i, ret;
+
+ ep = audio_dev->in_ep;
+ prm = &uac->p_prm;
+ ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+ if (ret)
+ return ret;
+
+ ep_desc = ep->desc;
+
+ /* pre-calculate the playback endpoint's interval */
+ if (gadget->speed == USB_SPEED_FULL)
+ factor = 1000;
+ else
+ factor = 8000;
+
+ /* pre-compute some values for iso_complete() */
+ uac->p_framesize = params->p_ssize *
+ num_channels(params->p_chmask);
+ rate = params->p_srate * uac->p_framesize;
+ uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
+ uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+ prm->max_psize);
+
+ if (uac->p_pktsize < prm->max_psize)
+ uac->p_pktsize_residue = rate % uac->p_interval;
+ else
+ uac->p_pktsize_residue = 0;
+
+ req_len = uac->p_pktsize;
+ uac->p_residue = 0;
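+
+	/*
+	 * Worked example (illustrative numbers): 44.1 kHz stereo S16 audio at
+	 * 1000 packets/s gives p_framesize = 4, rate = 176400 B/s,
+	 * p_pktsize = 176 and p_pktsize_residue = 400.  The iso completion
+	 * handler then adds 400 to p_residue per packet and, once
+	 * p_residue / p_interval >= p_framesize (every 10th packet here),
+	 * sends one extra 4-byte frame, keeping the long-run average at
+	 * exactly 176.4 bytes per packet.
+	 */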
+
+ prm->ep_enabled = true;
+ usb_ep_enable(ep);
+
+ for (i = 0; i < params->req_number; i++) {
+ if (!prm->ureq[i].req) {
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (req == NULL)
+ return -ENOMEM;
+
+ prm->ureq[i].req = req;
+ prm->ureq[i].pp = prm;
+
+ req->zero = 0;
+ req->context = &prm->ureq[i];
+ req->length = req_len;
+ req->complete = u_audio_iso_complete;
+ req->buf = prm->rbuf + i * prm->max_psize;
+ }
+
+ if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_playback);
+
+void u_audio_stop_playback(struct g_audio *audio_dev)
+{
+ struct snd_uac_chip *uac = audio_dev->uac;
+
+ free_ep(&uac->p_prm, audio_dev->in_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_playback);
+
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ const char *card_name)
+{
+ struct snd_uac_chip *uac;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct uac_params *params;
+ int p_chmask, c_chmask;
+ int err;
+
+ if (!g_audio)
+ return -EINVAL;
+
+ uac = kzalloc(sizeof(*uac), GFP_KERNEL);
+ if (!uac)
+ return -ENOMEM;
+ g_audio->uac = uac;
+ uac->audio_dev = g_audio;
+
+ params = &g_audio->params;
+ p_chmask = params->p_chmask;
+ c_chmask = params->c_chmask;
+
+ if (c_chmask) {
+ struct uac_rtd_params *prm = &uac->c_prm;
+
+ uac->c_prm.uac = uac;
+ prm->max_psize = g_audio->out_ep_maxpsize;
+
+ prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+ GFP_KERNEL);
+ if (!prm->ureq) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+ GFP_KERNEL);
+ if (!prm->rbuf) {
+ prm->max_psize = 0;
+ err = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ if (p_chmask) {
+ struct uac_rtd_params *prm = &uac->p_prm;
+
+ uac->p_prm.uac = uac;
+ prm->max_psize = g_audio->in_ep_maxpsize;
+
+ prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+ GFP_KERNEL);
+ if (!prm->ureq) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+ GFP_KERNEL);
+ if (!prm->rbuf) {
+ prm->max_psize = 0;
+ err = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ /* Choose any slot, with no id */
+ err = snd_card_new(&g_audio->gadget->dev,
+ -1, NULL, THIS_MODULE, 0, &card);
+ if (err < 0)
+ goto fail;
+
+ uac->card = card;
+
+ /*
+ * Create first PCM device
+ * Create a substream only for non-zero channel streams
+ */
+ err = snd_pcm_new(uac->card, pcm_name, 0,
+ p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
+ if (err < 0)
+ goto snd_fail;
+
+ strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
+ pcm->private_data = uac;
+ uac->pcm = pcm;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
+
+ strlcpy(card->driver, card_name, sizeof(card->driver));
+ strlcpy(card->shortname, card_name, sizeof(card->shortname));
+ sprintf(card->longname, "%s %i", card_name, card->dev->id);
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+
+ err = snd_card_register(card);
+
+ if (!err)
+ return 0;
+
+snd_fail:
+ snd_card_free(card);
+fail:
+ kfree(uac->p_prm.ureq);
+ kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.rbuf);
+ kfree(uac->c_prm.rbuf);
+ kfree(uac);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(g_audio_setup);
+
+void g_audio_cleanup(struct g_audio *g_audio)
+{
+ struct snd_uac_chip *uac;
+ struct snd_card *card;
+
+ if (!g_audio || !g_audio->uac)
+ return;
+
+ uac = g_audio->uac;
+ card = uac->card;
+ if (card)
+ snd_card_free(card);
+
+ kfree(uac->p_prm.ureq);
+ kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.rbuf);
+ kfree(uac->c_prm.rbuf);
+ kfree(uac);
+}
+EXPORT_SYMBOL_GPL(g_audio_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget \"ALSA sound card\" utilities");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
new file mode 100644
index 000000000000..07e13784cbb8
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -0,0 +1,95 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/usb/composite.h>
+
+struct uac_params {
+ /* playback */
+ int p_chmask; /* channel mask */
+ int p_srate; /* rate in Hz */
+ int p_ssize; /* sample size */
+
+ /* capture */
+ int c_chmask; /* channel mask */
+ int c_srate; /* rate in Hz */
+ int c_ssize; /* sample size */
+
+ int req_number; /* number of preallocated requests */
+};
+
+struct g_audio {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ /* Max packet size for all in_ep possible speeds */
+ unsigned int in_ep_maxpsize;
+ /* Max packet size for all out_ep possible speeds */
+ unsigned int out_ep_maxpsize;
+
+ /* The ALSA Sound Card it represents on the USB-Client side */
+ struct snd_uac_chip *uac;
+
+ struct uac_params params;
+};
+
+static inline struct g_audio *func_to_g_audio(struct usb_function *f)
+{
+ return container_of(f, struct g_audio, func);
+}
+
+static inline uint num_channels(uint chanmask)
+{
+ uint num = 0;
+
+ while (chanmask) {
+ num += (chanmask & 1);
+ chanmask >>= 1;
+ }
+
+ return num;
+}
+
+/*
+ * g_audio_setup - initialize one virtual ALSA sound card
+ * @g_audio: struct with filled params, in_ep_maxpsize, out_ep_maxpsize
+ * @pcm_name: the id string for a PCM instance of this sound card
+ * @card_name: name of this soundcard
+ *
+ * This sets up the single virtual ALSA sound card that may be exported by a
+ * gadget driver using this framework.
+ *
+ * Context: may sleep
+ *
+ * Returns zero on success, or a negative error on failure.
+ */
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ const char *card_name);
+void g_audio_cleanup(struct g_audio *g_audio);
+
+int u_audio_start_capture(struct g_audio *g_audio);
+void u_audio_stop_capture(struct g_audio *g_audio);
+int u_audio_start_playback(struct g_audio *g_audio);
+void u_audio_stop_playback(struct g_audio *g_audio);
+
+#endif /* __U_AUDIO_H */
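
The expected call flow for a function driver built on this header, condensed
from the f_uac2 conversion earlier in this patch; the my_* wrappers and the
parameter values are illustrative only:

	#include "u_audio.h"

	static int my_func_bind(struct g_audio *ga)
	{
		/*
		 * ga->gadget, ga->in_ep/out_ep and the *_ep_maxpsize fields
		 * must already be set (f_uac2 fills them from
		 * usb_ep_autoconfig() and the endpoint descriptors).
		 */
		ga->params.p_chmask = 0x3;	/* example: stereo playback */
		ga->params.p_srate = 48000;
		ga->params.p_ssize = 2;
		ga->params.c_chmask = 0x3;
		ga->params.c_srate = 48000;
		ga->params.c_ssize = 2;
		ga->params.req_number = 2;

		return g_audio_setup(ga, "My PCM", "My_Gadget");
	}

	static int my_func_set_alt(struct g_audio *ga, bool playback, unsigned alt)
	{
		if (alt)
			return playback ? u_audio_start_playback(ga)
					: u_audio_start_capture(ga);

		if (playback)
			u_audio_stop_playback(ga);
		else
			u_audio_stop_capture(ga);
		return 0;
	}

	static void my_func_unbind(struct g_audio *ga)
	{
		g_audio_cleanup(ga);	/* frees the ALSA card and buffers */
	}
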
diff --git a/drivers/usb/gadget/function/u_bam.c b/drivers/usb/gadget/function/u_bam.c
new file mode 100644
index 000000000000..7947bb76f512
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam.c
@@ -0,0 +1,2521 @@
+/* Copyright (c) 2011-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <soc/qcom/smd.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+
+#include <soc/qcom/bam_dmux.h>
+
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/usb_bam.h>
+
+#include "usb_gadget_xport.h"
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS 2
+#define BAM2BAM_N_PORTS 4
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static int n_bam2bam_ports;
+static unsigned n_tx_req_queued;
+
+static unsigned bam_ch_ids[BAM_N_PORTS] = {
+ BAM_DMUX_USB_RMNET_0,
+ BAM_DMUX_USB_DPL
+};
+
+static char bam_ch_names[BAM_N_PORTS][BAM_DMUX_CH_NAME_MAX_LEN];
+
+static const enum ipa_client_type usb_prod[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_PROD, IPA_CLIENT_USB2_PROD,
+ IPA_CLIENT_USB3_PROD, IPA_CLIENT_USB4_PROD
+};
+static const enum ipa_client_type usb_cons[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_CONS, IPA_CLIENT_USB2_CONS,
+ IPA_CLIENT_USB3_CONS, IPA_CLIENT_USB4_CONS
+};
+
+#define BAM_PENDING_PKTS_LIMIT 220
+#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
+#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
+#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
+#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
+
+#define BAM_MUX_HDR 8
+
+#define BAM_MUX_RX_Q_SIZE 128
+#define BAM_MUX_TX_Q_SIZE 200
+#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+
+#define DL_INTR_THRESHOLD 20
+#define BAM_PENDING_BYTES_LIMIT (50 * BAM_MUX_RX_REQ_SIZE)
+#define BAM_PENDING_BYTES_FCTRL_EN_TSHOLD (BAM_PENDING_BYTES_LIMIT / 3)
+
+/* Extra buffer size to allocate for tx */
+#define EXTRA_ALLOCATION_SIZE_U_BAM 128
+
+static unsigned int bam_pending_pkts_limit = BAM_PENDING_PKTS_LIMIT;
+module_param(bam_pending_pkts_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_limit = BAM_PENDING_BYTES_LIMIT;
+module_param(bam_pending_bytes_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_fctrl_en_thold =
+ BAM_PENDING_BYTES_FCTRL_EN_TSHOLD;
+module_param(bam_pending_bytes_fctrl_en_thold, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
+module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
+module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
+module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
+module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
+module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned long bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
+module_param(bam_mux_rx_req_size, ulong, S_IRUGO);
+
+static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
+module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
+
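+/*
+ * All of the thresholds and queue sizes above are module parameters; those
+ * marked S_IWUSR can be tuned at runtime via
+ * /sys/module/<module>/parameters/.
+ */
+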
+#define BAM_CH_OPENED BIT(0)
+#define BAM_CH_READY BIT(1)
+#define BAM_CH_WRITE_INPROGRESS BIT(2)
+
+enum u_bam_event_type {
+ U_BAM_DISCONNECT_E = 0,
+ U_BAM_CONNECT_E,
+ U_BAM_SUSPEND_E,
+ U_BAM_RESUME_E
+};
+
+struct sys2ipa_sw {
+ void *teth_priv;
+ ipa_notify_cb teth_cb;
+};
+
+struct bam_ch_info {
+ unsigned long flags;
+ unsigned id;
+
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_q;
+
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+ struct sk_buff_head rx_skb_idle;
+
+ struct gbam_port *port;
+ struct work_struct write_tobam_w;
+ struct work_struct write_tohost_w;
+
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params ipa_params;
+
+ /* added to support sys to ipa sw UL path */
+ struct sys2ipa_sw ul_params;
+ enum usb_bam_pipe_type src_pipe_type;
+ enum usb_bam_pipe_type dst_pipe_type;
+
+ /* stats */
+ unsigned int pending_pkts_with_bam;
+ unsigned int pending_bytes_with_bam;
+ unsigned int tohost_drp_cnt;
+ unsigned int tomodem_drp_cnt;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ unsigned long to_modem;
+ unsigned long to_host;
+ unsigned int rx_flow_control_disable;
+ unsigned int rx_flow_control_enable;
+ unsigned int rx_flow_control_triggered;
+ unsigned int max_num_pkts_pending_with_bam;
+ unsigned int max_bytes_pending_with_bam;
+ unsigned int delayed_bam_mux_write_done;
+ unsigned long skb_expand_cnt;
+};
+
+struct gbam_port {
+ bool is_connected;
+ enum u_bam_event_type last_event;
+ unsigned port_num;
+ spinlock_t port_lock_ul;
+ spinlock_t port_lock_dl;
+ spinlock_t port_lock;
+
+ struct grmnet *port_usb;
+ struct usb_gadget *gadget;
+
+ struct bam_ch_info data_ch;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+};
+
+static struct bam_portmaster {
+ struct gbam_port *port;
+ struct platform_driver pdrv;
+} bam_ports[BAM_N_PORTS];
+
+struct u_bam_data_connect_info {
+ u32 usb_bam_pipe_idx;
+ u32 peer_pipe_idx;
+ unsigned long usb_bam_handle;
+};
+
+struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
+static void gbam_start_rx(struct gbam_port *port);
+static void gbam_start_endless_rx(struct gbam_port *port);
+static void gbam_start_endless_tx(struct gbam_port *port);
+static void gbam_notify(void *p, int event, unsigned long data);
+static void gbam_data_write_tobam(struct work_struct *w);
+
+/*---------------misc functions---------------- */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK", __func__,
+ ep, head, num, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_debug("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+
+static inline dma_addr_t gbam_get_dma_from_skb(struct sk_buff *skb)
+{
+ return *((dma_addr_t *)(skb->cb));
+}
+
+/* This function should be called with port_lock_ul lock held */
+static struct sk_buff *gbam_alloc_skb_from_pool(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t skb_buf_dma_addr;
+ struct usb_gadget *gadget;
+
+ if (!port)
+ return NULL;
+
+ d = &port->data_ch;
+ if (!d)
+ return NULL;
+
+ if (d->rx_skb_idle.qlen == 0) {
+ /*
+		 * If the skb idle pool is empty, allow allocation of more
+		 * skbs so the pool is enlarged dynamically when needed.
+		 * In steady state this dynamic allocation stops once the
+		 * pool reaches its optimal size.
+ */
+ pr_debug("%s: allocate skb\n", __func__);
+ skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+
+ if (!skb)
+ goto alloc_exit;
+
+ skb_reserve(skb, BAM_MUX_HDR);
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+
+ gadget = port->port_usb->gadget;
+
+ skb_buf_dma_addr =
+ dma_map_single(&gadget->dev, skb->data,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+ pr_err("%s: Could not DMA map SKB buffer\n",
+ __func__);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+ } else {
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+
+
+ memcpy(skb->cb, &skb_buf_dma_addr,
+ sizeof(skb_buf_dma_addr));
+
+ } else {
+ pr_debug("%s: pull skb from pool\n", __func__);
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ if (skb_headroom(skb) < BAM_MUX_HDR)
+ skb_reserve(skb, BAM_MUX_HDR);
+ }
+
+alloc_exit:
+ return skb;
+}
+
+/* This function should be called with port_lock_ul lock held */
+static void gbam_free_skb_to_pool(struct gbam_port *port, struct sk_buff *skb)
+{
+ struct bam_ch_info *d;
+
+ if (!port)
+ return;
+ d = &port->data_ch;
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
+static void gbam_free_rx_skb_idle_list(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port)
+ return;
+ d = &port->data_ch;
+
+ gadget = port->port_usb->gadget;
+
+ while (d->rx_skb_idle.qlen > 0) {
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ dma_addr = gbam_get_dma_from_skb(skb);
+
+ if (gadget && dma_addr != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, dma_addr,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+ dma_addr = DMA_ERROR_CODE;
+ memcpy(skb->cb, &dma_addr,
+ sizeof(dma_addr));
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/*----- sys2bam towards the IPA --------------- */
+static void gbam_ipa_sys2bam_notify_cb(void *priv, enum ipa_dp_evt_type event,
+ unsigned long data)
+{
+ struct sys2ipa_sw *ul = (struct sys2ipa_sw *)priv;
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+
+ switch (event) {
+ case IPA_WRITE_DONE:
+ d = container_of(ul, struct bam_ch_info, ul_params);
+ port = container_of(d, struct gbam_port, data_ch);
+ /* call into bam_demux functionality that'll recycle the data */
+ gbam_notify(port, BAM_DMUX_WRITE_DONE, data);
+ break;
+ case IPA_RECEIVE:
+		/* call the callback provided by the tethering driver's init
+		 * function (which was also given to ipa_connect)
+ */
+ if (ul->teth_cb)
+ ul->teth_cb(ul->teth_priv, event, data);
+ break;
+ default:
+ /* unexpected event */
+ pr_err("%s: unexpected event %d\n", __func__, event);
+ break;
+ }
+}
+
+
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
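+/*
+ * Write DL data to the USB host: pull skbs off tx_skb_q, attach them to
+ * idle IN requests and queue them on the IN endpoint. Takes
+ * port_lock_dl internally.
+ */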
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+ unsigned long flags;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ int ret;
+ int tail_room = 0;
+ int extra_alloc = 0;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ ep = port->port_usb->in;
+
+ while (!list_empty(&d->tx_idle)) {
+ skb = __skb_dequeue(&d->tx_skb_q);
+ if (!skb)
+ break;
+
+ /*
+		 * Some UDCs require a few extra bytes in the TX buffer due to
+		 * hardware requirements. Check if the extra bytes are already
+		 * available; otherwise allocate a new buffer with room for
+		 * them and copy the data over.
+ */
+ if (port->gadget->extra_buf_alloc)
+ extra_alloc = EXTRA_ALLOCATION_SIZE_U_BAM;
+ tail_room = skb_tailroom(skb);
+ if (tail_room < extra_alloc) {
+ pr_debug("%s: tail_room %d less than %d\n", __func__,
+ tail_room, extra_alloc);
+ new_skb = skb_copy_expand(skb, 0, extra_alloc -
+ tail_room, GFP_ATOMIC);
+ if (!new_skb) {
+ pr_err("skb_copy_expand failed\n");
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ d->skb_expand_cnt++;
+ }
+
+ req = list_first_entry(&d->tx_idle,
+ struct usb_request,
+ list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+ n_tx_req_queued++;
+ if (n_tx_req_queued == dl_intr_threshold) {
+ req->no_interrupt = 0;
+ n_tx_req_queued = 0;
+ } else {
+ req->no_interrupt = 1;
+ }
+
+ /* Send ZLP in case packet length is multiple of maxpacksize */
+ req->zero = 1;
+
+ list_del(&req->list);
+
+ spin_unlock(&port->port_lock_dl);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock_dl);
+ if (ret) {
+ pr_err("%s: usb epIn failed with %d\n", __func__, ret);
+ list_add(&req->list, &d->tx_idle);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ d->to_host++;
+ }
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+static void gbam_write_data_tohost_w(struct work_struct *w)
+{
+ struct bam_ch_info *d;
+ struct gbam_port *port;
+
+ d = container_of(w, struct bam_ch_info, write_tohost_w);
+ port = d->port;
+
+ gbam_write_data_tohost(port);
+}
+
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ pr_debug("%s: p:%pK#%d d:%pK skb_len:%d\n", __func__,
+ port, port->port_num, d, skb->len);
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
+ d->tohost_drp_cnt++;
+		if (printk_ratelimit())
+ pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+ __func__, d->tohost_drp_cnt);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ __skb_queue_tail(&d->tx_skb_q, skb);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+ gbam_write_data_tohost(port);
+}
+
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ gbam_free_skb_to_pool(port, skb);
+
+ pr_debug("%s:port:%pK d:%pK tom:%lu ppkt:%u pbytes:%u pno:%d\n",
+ __func__,
+ port, d, d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ /*
+	 * If BAM does not have much pending data, push new data directly from
+	 * this write-done notification to avoid underruns due to wq latency
+ */
+ if (d->pending_bytes_with_bam <= bam_pending_bytes_fctrl_en_thold) {
+ gbam_data_write_tobam(&d->write_tobam_w);
+ } else {
+ d->delayed_bam_mux_write_done++;
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+}
+
+/* This function should be called with port_lock_ul spinlock acquired */
+static bool gbam_ul_bam_limit_reached(struct bam_ch_info *data_ch)
+{
+ unsigned int curr_pending_pkts = data_ch->pending_pkts_with_bam;
+ unsigned int curr_pending_bytes = data_ch->pending_bytes_with_bam;
+ struct sk_buff *skb;
+
+ if (curr_pending_pkts >= bam_pending_pkts_limit)
+ return true;
+
+ /* check if next skb length doesn't exceed pending_bytes_limit */
+ skb = skb_peek(&data_ch->rx_skb_q);
+ if (!skb)
+ return false;
+
+ if ((curr_pending_bytes + skb->len) > bam_pending_bytes_limit)
+ return true;
+ else
+ return false;
+}
+
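+/*
+ * UL worker: drain rx_skb_q towards the modem, either through IPA
+ * (sys2bam UL) or through BAM DMUX, while honouring the pending packet
+ * and byte limits. Restarts RX once the queue falls below the
+ * flow-control disable threshold.
+ */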
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+ int qlen;
+
+ d = container_of(w, struct bam_ch_info, write_tobam_w);
+ port = d->port;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+ /* Bail out if already in progress */
+ if (test_bit(BAM_CH_WRITE_INPROGRESS, &d->flags)) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ set_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+
+ while (!gbam_ul_bam_limit_reached(d) &&
+ (d->trans != USB_GADGET_XPORT_BAM2BAM_IPA ||
+ usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx))) {
+ skb = __skb_dequeue(&d->rx_skb_q);
+ if (!skb)
+ break;
+
+ d->pending_pkts_with_bam++;
+ d->pending_bytes_with_bam += skb->len;
+ d->to_modem++;
+
+ pr_debug("%s: port:%pK d:%pK tom:%lu ppkts:%u pbytes:%u pno:%d\n",
+ __func__, port, d,
+ d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ dma_addr_t skb_dma_addr;
+ struct ipa_tx_meta ipa_meta = {0x0};
+
+ skb_dma_addr = gbam_get_dma_from_skb(skb);
+ if (skb_dma_addr != DMA_ERROR_CODE) {
+ ipa_meta.dma_address = skb_dma_addr;
+ ipa_meta.dma_address_valid = true;
+ }
+
+ ret = ipa_tx_dp(usb_prod[port->port_num],
+ skb,
+ &ipa_meta);
+ } else {
+ ret = msm_bam_dmux_write(d->id, skb);
+ }
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ pr_debug("%s: write error:%d\n", __func__, ret);
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ d->to_modem--;
+ d->tomodem_drp_cnt++;
+ gbam_free_skb_to_pool(port, skb);
+ break;
+ }
+ if (d->pending_pkts_with_bam > d->max_num_pkts_pending_with_bam)
+ d->max_num_pkts_pending_with_bam =
+ d->pending_pkts_with_bam;
+ if (d->pending_bytes_with_bam > d->max_bytes_pending_with_bam)
+ d->max_bytes_pending_with_bam =
+ d->pending_bytes_with_bam;
+ }
+
+ qlen = d->rx_skb_q.qlen;
+
+ clear_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (qlen < bam_mux_rx_fctrl_dis_thld) {
+ if (d->rx_flow_control_triggered) {
+ d->rx_flow_control_disable++;
+ d->rx_flow_control_triggered = 0;
+ }
+ gbam_start_rx(port);
+ }
+}
+/*-------------------------------------------------------------*/
+
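+/*
+ * IN (DL) completion: free the skb, return the request to tx_idle and
+ * schedule further DL writes.
+ */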
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ pr_err("%s: data tx ep error %d\n",
+ __func__, status);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ if (!port)
+ return;
+
+ spin_lock(&port->port_lock_dl);
+ d = &port->data_ch;
+ list_add_tail(&req->list, &d->tx_idle);
+ spin_unlock(&port->port_lock_dl);
+
+ queue_work(gbam_wq, &d->write_tohost_w);
+}
+
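+/*
+ * OUT (UL) completion: hand the received skb to the UL worker and
+ * re-submit the request with a fresh skb, applying UL flow control.
+ */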
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ req->buf = 0;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+		if (printk_ratelimit())
+			pr_err("%s: %s response error %d, %d/%d\n",
+				__func__, ep->name, status,
+				req->actual, req->length);
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ break;
+ }
+
+ spin_lock(&port->port_lock_ul);
+
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ if ((d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
+ !usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx)) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+
+	/* TODO: Handle flow control gracefully by having
+	 * a callback mechanism from the bam driver
+ */
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
+ if (!d->rx_flow_control_triggered) {
+ d->rx_flow_control_triggered = 1;
+ d->rx_flow_control_enable++;
+ }
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ spin_unlock(&port->port_lock_ul);
+
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+
+ if (printk_ratelimit())
+ pr_err("%s: data rx enqueue err %d\n",
+ __func__, status);
+
+ spin_lock(&port->port_lock_ul);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ }
+}
+
+static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s status: %d\n", __func__, status);
+}
+
+static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s status: %d\n", __func__, status);
+}
+
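+/*
+ * Queue OUT requests: attach skbs from the idle pool to all idle rx
+ * requests and submit them, unless UL flow control is in effect.
+ */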
+static void gbam_start_rx(struct gbam_port *port)
+{
+ struct usb_request *req;
+ struct bam_ch_info *d;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb || !port->port_usb->out) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+
+ while (port->port_usb && !list_empty(&d->rx_idle)) {
+
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
+ break;
+
+ req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb)
+ break;
+
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ gbam_free_skb_to_pool(port, skb);
+
+ if (printk_ratelimit())
+ pr_err("%s: rx queue failed %d\n",
+ __func__, ret);
+
+ if (port->port_usb)
+ list_add(&req->list, &d->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+static void gbam_start_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ struct usb_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_start_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ struct usb_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_stop_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ unsigned long flags;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->rx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+static void gbam_stop_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ int status;
+ unsigned long flags;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->tx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+
+/*
+ * This function configures the data FIFO for the given connection index
+ * when the pipe uses a bam2bam configuration.
+ */
+static void configure_data_fifo(enum usb_ctrl bam_type, u8 idx,
+ struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ if (pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+ }
+}
+
+
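+/*
+ * Start/stop callbacks registered with the USB BAM driver from the
+ * suspend path; they restart or halt data flow in the given pipe
+ * direction around suspend/resume.
+ */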
+static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct gbam_port *port = param;
+ struct usb_gadget *gadget = NULL;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (port == NULL) {
+ pr_err("%s: port is NULL\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb == NULL) {
+ pr_err("%s: port_usb is NULL, disconnected\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ gadget = port->port_usb->gadget;
+ d = &port->data_ch;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (gadget == NULL) {
+ pr_err("%s: gadget is NULL\n", __func__);
+ return;
+ }
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gbam_start_endless_rx(port);
+ else {
+ gbam_start_rx(port);
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+ } else {
+ if (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ }
+ gbam_start_endless_tx(port);
+ }
+}
+
+static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct gbam_port *port = param;
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ /*
+ * Only handling BAM2BAM, as there is no equivalent to
+ * gbam_stop_endless_rx() for the SYS2BAM use case
+ */
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gbam_stop_endless_rx(port);
+ } else {
+ gbam_stop_endless_tx(port);
+ }
+}
+
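+/*
+ * Allocate the usb_request pool for one direction of the data path:
+ * IN (DL) when 'in' is true, otherwise OUT (UL).
+ */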
+static int _gbam_start_io(struct gbam_port *port, bool in)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct usb_ep *ep;
+ struct list_head *idle;
+ unsigned queue_size;
+ spinlock_t *spinlock;
+ void (*ep_complete)(struct usb_ep *, struct usb_request *);
+
+ if (in)
+ spinlock = &port->port_lock_dl;
+ else
+ spinlock = &port->port_lock_ul;
+
+ spin_lock_irqsave(spinlock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(spinlock, flags);
+ return -EBUSY;
+ }
+
+ if (in) {
+ ep = port->port_usb->in;
+ idle = &port->data_ch.tx_idle;
+ queue_size = bam_mux_tx_q_size;
+ ep_complete = gbam_epin_complete;
+ } else {
+ ep = port->port_usb->out;
+ if (!ep)
+ goto out;
+ idle = &port->data_ch.rx_idle;
+ queue_size = bam_mux_rx_q_size;
+ ep_complete = gbam_epout_complete;
+ }
+
+ ret = gbam_alloc_requests(ep, idle, queue_size, ep_complete,
+ GFP_ATOMIC);
+out:
+ spin_unlock_irqrestore(spinlock, flags);
+ if (ret)
+ pr_err("%s: allocation failed\n", __func__);
+
+ return ret;
+}
+
+static void gbam_start_io(struct gbam_port *port)
+{
+ unsigned long flags;
+
+ pr_debug("%s: port:%pK\n", __func__, port);
+
+ if (_gbam_start_io(port, true))
+ return;
+
+ if (_gbam_start_io(port, false)) {
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (port->port_usb)
+ gbam_free_requests(port->port_usb->in,
+ &port->data_ch.tx_idle);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ /* queue out requests */
+ gbam_start_rx(port);
+}
+
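+/*
+ * BAM DMUX event callback: dispatch receive/write-done events to the
+ * data path and record the rx request size advertised by the mux.
+ */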
+static void gbam_notify(void *p, int event, unsigned long data)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+
+ if (port == NULL)
+ pr_err("BAM DMUX notifying after channel close\n");
+
+ switch (event) {
+ case BAM_DMUX_RECEIVE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_recv_cb(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+ case BAM_DMUX_WRITE_DONE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_write_done(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+	case BAM_DMUX_TRANSMIT_SIZE:
+		if (port) {
+			d = &port->data_ch;
+			if (test_bit(BAM_CH_OPENED, &d->flags))
+				pr_warn("%s: BAM channel already opened\n",
+					__func__);
+		}
+ bam_mux_rx_req_size = data;
+ pr_debug("%s rx_req_size: %lu", __func__, bam_mux_rx_req_size);
+ break;
+ }
+}
+
+static void gbam_free_rx_buffers(struct gbam_port *port)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct bam_ch_info *d;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ if (!port->port_usb || !port->port_usb->out)
+ goto free_rx_buf_out;
+
+ d = &port->data_ch;
+ gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+ while ((skb = __skb_dequeue(&d->rx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+ gbam_free_rx_skb_idle_list(port);
+
+free_rx_buf_out:
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+static void gbam_free_tx_buffers(struct gbam_port *port)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct bam_ch_info *d;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+
+ if (!port->port_usb)
+ goto free_tx_buf_out;
+
+ d = &port->data_ch;
+ gbam_free_requests(port->port_usb->in, &d->tx_idle);
+
+ while ((skb = __skb_dequeue(&d->tx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+free_tx_buf_out:
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+static void gbam_free_buffers(struct gbam_port *port)
+{
+ gbam_free_rx_buffers(port);
+ gbam_free_tx_buffers(port);
+}
+
+static void gbam_disconnect_work(struct work_struct *w)
+{
+ struct gbam_port *port =
+ container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ if (!test_bit(BAM_CH_OPENED, &d->flags)) {
+ pr_err("%s: Bam channel is not opened\n", __func__);
+ goto exit;
+ }
+
+ msm_bam_dmux_close(d->id);
+ clear_bit(BAM_CH_OPENED, &d->flags);
+exit:
+ return;
+}
+
+static void gbam2bam_disconnect_work(struct work_struct *w)
+{
+ struct gbam_port *port =
+ container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->is_connected) {
+ pr_debug("%s: Port already disconnected. Bailing out.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->is_connected = false;
+ d = &port->data_ch;
+
+ /*
+	 * Unlock the port here and not at the end of this work,
+	 * because we do not want to run the usb_bam, ipa and teth
+	 * bridge logic in atomic context or wait needlessly. Either
+	 * way, other works will not fire until this work ends, and
+	 * event functions (such as bam_data_connect) will not
+	 * interfere while the lower layers connect pipes, etc.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret)
+ pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
+ __func__, ret);
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+ teth_bridge_disconnect(d->ipa_params.src_client);
+ /*
+ * Decrement usage count which was incremented upon cable
+ * connect or cable disconnect in suspended state
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+ }
+}
+
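+/*
+ * BAM DMUX connect worker: open the mux channel and start I/O once the
+ * channel is ready.
+ */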
+static void gbam_connect_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+ struct bam_ch_info *d = &port->data_ch;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (!test_bit(BAM_CH_READY, &d->flags)) {
+ pr_err("%s: Bam channel is not ready\n", __func__);
+ return;
+ }
+
+ ret = msm_bam_dmux_open(d->id, port, gbam_notify);
+ if (ret) {
+ pr_err("%s: unable open bam ch:%d err:%d\n",
+ __func__, d->id, ret);
+ return;
+ }
+
+ set_bit(BAM_CH_OPENED, &d->flags);
+
+ gbam_start_io(port);
+
+ pr_debug("%s: done\n", __func__);
+}
+
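+/*
+ * BAM2BAM/IPA connect worker: allocate BAM FIFOs, configure the
+ * endpoints for SPS transfers, connect the UL and DL pipes to IPA,
+ * attach the tethering bridge and finally queue the endless IN/OUT
+ * requests.
+ */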
+static void gbam2bam_connect_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+ struct usb_gadget *gadget = NULL;
+ struct teth_bridge_connect_params connect_params;
+ struct teth_bridge_init_params teth_bridge_params;
+ struct bam_ch_info *d;
+ u32 sps_params;
+ int ret;
+ unsigned long flags, flags_ul;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (port->last_event == U_BAM_DISCONNECT_E) {
+		pr_debug("%s: Port is about to be disconnected. Bailing out.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->is_connected = true;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+ spin_lock(&port->port_lock_dl);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ gadget = port->port_usb->gadget;
+ if (!gadget) {
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb.gadget is NULL, exiting\n", __func__);
+ return;
+ }
+ d = &port->data_ch;
+
+ /*
+	 * Unlock the port here and not at the end of this work,
+	 * because we do not want to run the usb_bam, ipa and teth
+	 * bridge logic in atomic context or wait needlessly. Either
+	 * way, other works will not fire until this work ends, and
+	 * event functions (such as bam_data_connect) will not
+	 * interfere while the lower layers connect pipes, etc.
+ */
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ d->ipa_params.usb_connection_speed = gadget->speed;
+
+ /*
+ * Invalidate prod and cons client handles from previous
+ * disconnect.
+ */
+ d->ipa_params.cons_clnt_hdl = -1;
+ d->ipa_params.prod_clnt_hdl = -1;
+
+ if (usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.src_idx,
+ &d->src_pipe_type) ||
+ usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.dst_idx,
+ &d->dst_pipe_type)) {
+ pr_err("%s:usb_bam_get_pipe_type() failed\n", __func__);
+ return;
+ }
+ if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
+ pr_err("%s: no software preparation for DL not using bam2bam\n",
+ __func__);
+ return;
+ }
+
+ usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);
+ gadget->bam2bam_func_enabled = true;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (!port || !port->port_usb) {
+		pr_debug("%s: cable is disconnected.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+ goto free_fifos;
+ }
+ if (gadget_is_dwc3(gadget)) {
+ /* Configure for RX */
+ configure_data_fifo(d->usb_bam_type, d->src_connection_idx,
+ port->port_usb->out, d->src_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | MSM_PRODUCER |
+ d->src_pipe_idx;
+ d->rx_req->length = 32*1024;
+ d->rx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->out, d->rx_req);
+
+ /* Configure for TX */
+ configure_data_fifo(d->usb_bam_type, d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | d->dst_pipe_idx;
+ d->tx_req->length = 32*1024;
+ d->tx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->in, d->tx_req);
+
+ } else {
+ /* Configure for RX */
+ sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ d->rx_req->udc_priv = sps_params;
+
+ /* Configure for TX */
+ sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ d->tx_req->length = 32*1024;
+ d->tx_req->udc_priv = sps_params;
+
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ teth_bridge_params.client = d->ipa_params.src_client;
+ ret = teth_bridge_init(&teth_bridge_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_init() failed\n", __func__);
+ goto ep_unconfig;
+ }
+
+ /* Support for UL using system-to-IPA */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ul_params.teth_priv =
+ teth_bridge_params.private_data;
+ d->ul_params.teth_cb =
+ teth_bridge_params.usb_notify_cb;
+ d->ipa_params.notify = gbam_ipa_sys2bam_notify_cb;
+ d->ipa_params.priv = &d->ul_params;
+ d->ipa_params.reset_pipe_after_lpm = false;
+
+ } else {
+ d->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ d->ipa_params.priv =
+ teth_bridge_params.private_data;
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ }
+ d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ d->ipa_params.skip_ep_cfg = teth_bridge_params.skip_ep_cfg;
+ d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto ep_unconfig;
+ }
+
+ /* Remove support for UL using system-to-IPA towards DL */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ipa_params.notify = d->ul_params.teth_cb;
+ d->ipa_params.priv = d->ul_params.teth_priv;
+ }
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ else
+ d->ipa_params.reset_pipe_after_lpm = false;
+ d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto ep_unconfig;
+ }
+
+ gqti_ctrl_update_ipa_pipes(port->port_usb, port->port_num,
+ d->ipa_params.ipa_prod_ep_idx,
+ d->ipa_params.ipa_cons_ep_idx);
+
+ connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
+ connect_params.client_type = d->ipa_params.src_client;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n", __func__);
+ goto ep_unconfig;
+ }
+
+ /* queue in & out requests */
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ gbam_start_endless_rx(port);
+ } else {
+ /* The use-case of UL (OUT) ports using sys2bam is based on
+ * partial reuse of the system-to-bam_demux code. The following
+ * lines perform the branching out of the standard bam2bam flow
+ * on the USB side of the UL channel
+ */
+ if (_gbam_start_io(port, false)) {
+ pr_err("%s: _gbam_start_io failed\n", __func__);
+ return;
+ }
+ gbam_start_rx(port);
+ }
+ gbam_start_endless_tx(port);
+
+ pr_debug("%s: done\n", __func__);
+ return;
+
+ep_unconfig:
+ if (gadget_is_dwc3(gadget)) {
+ msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->out);
+ }
+free_fifos:
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+
+}
+
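+/*
+ * Remote-wakeup callback registered with the USB BAM driver while
+ * suspended; issues a gadget wakeup on behalf of the peer.
+ */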
+static int gbam_wake_cb(void *param)
+{
+ struct gbam_port *port = (struct gbam_port *)param;
+ struct usb_gadget *gadget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENODEV;
+ }
+
+ gadget = port->port_usb->gadget;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ return usb_gadget_wakeup(gadget);
+}
+
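+/*
+ * Suspend worker: register the wake and start/stop callbacks with the
+ * USB BAM driver and, for the IPA transport, initiate the BAM/IPA
+ * suspend handshake before allowing the gadget parent to enter lpm.
+ */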
+static void gbam2bam_suspend_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
+ struct bam_ch_info *d;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if ((port->last_event == U_BAM_DISCONNECT_E) ||
+ (port->last_event == U_BAM_RESUME_E)) {
+ pr_debug("%s: Port is about to disconnect/resume. Bail out\n",
+ __func__);
+ goto exit;
+ }
+
+ d = &port->data_ch;
+
+ ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+ gbam_wake_cb, port);
+ if (ret) {
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ goto exit;
+ }
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_start_stop_cbs(d->usb_bam_type,
+ d->dst_connection_idx, gbam_start, gbam_stop, port);
+
+ /*
+ * release lock here because gbam_start() or
+ * gbam_stop() called from usb_bam_suspend()
+ * re-acquires port lock.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+
+exit:
+ /*
+ * Decrement usage count after IPA handshake is done to allow gadget
+ * parent to go to lpm. This counter was incremented upon cable connect
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void gbam2bam_resume_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
+ struct bam_ch_info *d;
+ struct usb_gadget *gadget = NULL;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s: resume work started\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (port->last_event == U_BAM_DISCONNECT_E || !port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n",
+ __func__);
+ goto exit;
+ }
+
+ d = &port->data_ch;
+ gadget = port->port_usb->gadget;
+
+ ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+ NULL, NULL);
+ if (ret) {
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ goto exit;
+ }
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ if (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_data_fifo(d->usb_bam_type,
+ d->src_connection_idx,
+ port->port_usb->out, d->src_pipe_type);
+ configure_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ msm_dwc3_reset_dbm_ep(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+ usb_bam_resume(d->usb_bam_type, &d->ipa_params);
+ }
+
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* BAM data channel ready, allow attempt to open */
+static int gbam_data_ch_probe(struct platform_device *pdev)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int i;
+ unsigned long flags;
+ bool do_work = false;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ d = &port->data_ch;
+
+ if (!strncmp(bam_ch_names[i], pdev->name,
+ BAM_DMUX_CH_NAME_MAX_LEN)) {
+ set_bit(BAM_CH_READY, &d->flags);
+
+ /* if usb is online, try opening bam_ch */
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (port->port_usb)
+ do_work = true;
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (do_work)
+ queue_work(gbam_wq, &port->connect_w);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* BAM data channel went inactive, so close it */
+static int gbam_data_ch_remove(struct platform_device *pdev)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct usb_ep *ep_in = NULL;
+ struct usb_ep *ep_out = NULL;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ for (i = 0; i < n_bam_ports; i++) {
+ if (!strncmp(bam_ch_names[i], pdev->name,
+ BAM_DMUX_CH_NAME_MAX_LEN)) {
+ port = bam_ports[i].port;
+ d = &port->data_ch;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (port->port_usb) {
+ ep_in = port->port_usb->in;
+ ep_out = port->port_usb->out;
+ }
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (ep_in)
+ usb_ep_fifo_flush(ep_in);
+ if (ep_out)
+ usb_ep_fifo_flush(ep_out);
+
+ gbam_free_buffers(port);
+
+ msm_bam_dmux_close(d->id);
+
+ /* bam dmux will free all pending skbs */
+ d->pending_pkts_with_bam = 0;
+ d->pending_bytes_with_bam = 0;
+
+ clear_bit(BAM_CH_READY, &d->flags);
+ clear_bit(BAM_CH_OPENED, &d->flags);
+ }
+ }
+
+ return 0;
+}
+
+static void gbam_port_free(int portno)
+{
+ struct gbam_port *port = bam_ports[portno].port;
+ struct platform_driver *pdrv = &bam_ports[portno].pdrv;
+
+ kfree(port);
+ platform_driver_unregister(pdrv);
+}
+
+static void gbam2bam_port_free(int portno)
+{
+ struct gbam_port *port = bam2bam_ports[portno];
+
+ kfree(port);
+}
+
+static int gbam_port_alloc(int portno)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct platform_driver *pdrv;
+
+ port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ /* port initialization */
+ port->is_connected = false;
+ spin_lock_init(&port->port_lock_ul);
+ spin_lock_init(&port->port_lock_dl);
+ spin_lock_init(&port->port_lock);
+ INIT_WORK(&port->connect_w, gbam_connect_work);
+ INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
+
+ /* data ch */
+ d = &port->data_ch;
+ d->port = port;
+ INIT_LIST_HEAD(&d->tx_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+ INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+ INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
+ skb_queue_head_init(&d->tx_skb_q);
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ d->id = bam_ch_ids[portno];
+
+ bam_ports[portno].port = port;
+
+ scnprintf(bam_ch_names[portno], BAM_DMUX_CH_NAME_MAX_LEN,
+ "bam_dmux_ch_%d", bam_ch_ids[portno]);
+ pdrv = &bam_ports[portno].pdrv;
+ pdrv->probe = gbam_data_ch_probe;
+ pdrv->remove = gbam_data_ch_remove;
+ pdrv->driver.name = bam_ch_names[portno];
+ pdrv->driver.owner = THIS_MODULE;
+
+ platform_driver_register(pdrv);
+ pr_debug("%s: port:%pK portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+static int gbam2bam_port_alloc(int portno)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+
+ port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->port_num = portno;
+
+ /* port initialization */
+ port->is_connected = false;
+ spin_lock_init(&port->port_lock_ul);
+ spin_lock_init(&port->port_lock_dl);
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, gbam2bam_connect_work);
+ INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+ INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+ INIT_WORK(&port->resume_w, gbam2bam_resume_work);
+
+ /* data ch */
+ d = &port->data_ch;
+ d->port = port;
+ d->ipa_params.src_client = usb_prod[portno];
+ d->ipa_params.dst_client = usb_cons[portno];
+ bam2bam_ports[portno] = port;
+
+ /* UL workaround requirements */
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+ INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+
+ pr_debug("%s: port:%pK portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ char *buf;
+ unsigned long flags;
+ int ret;
+ int i;
+ int temp = 0;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ if (!port)
+ continue;
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+
+ d = &port->data_ch;
+
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "#PORT:%d port:%pK data_ch:%pK#\n"
+ "dpkts_to_usbhost: %lu\n"
+ "dpkts_to_modem: %lu\n"
+ "dpkts_pwith_bam: %u\n"
+ "dbytes_pwith_bam: %u\n"
+ "to_usbhost_dcnt: %u\n"
+ "tomodem__dcnt: %u\n"
+ "rx_flow_control_disable_count: %u\n"
+ "rx_flow_control_enable_count: %u\n"
+ "rx_flow_control_triggered: %u\n"
+ "max_num_pkts_pending_with_bam: %u\n"
+ "max_bytes_pending_with_bam: %u\n"
+ "delayed_bam_mux_write_done: %u\n"
+ "tx_buf_len: %u\n"
+ "rx_buf_len: %u\n"
+ "data_ch_open: %d\n"
+ "data_ch_ready: %d\n"
+ "skb_expand_cnt: %lu\n",
+ i, port, &port->data_ch,
+ d->to_host, d->to_modem,
+ d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam,
+ d->tohost_drp_cnt, d->tomodem_drp_cnt,
+ d->rx_flow_control_disable,
+ d->rx_flow_control_enable,
+ d->rx_flow_control_triggered,
+ d->max_num_pkts_pending_with_bam,
+ d->max_bytes_pending_with_bam,
+ d->delayed_bam_mux_write_done,
+ d->tx_skb_q.qlen, d->rx_skb_q.qlen,
+ test_bit(BAM_CH_OPENED, &d->flags),
+ test_bit(BAM_CH_READY, &d->flags),
+ d->skb_expand_cnt);
+
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < n_bam_ports; i++) {
+ port = bam_ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+
+ d = &port->data_ch;
+
+ d->to_host = 0;
+ d->to_modem = 0;
+ d->pending_pkts_with_bam = 0;
+ d->pending_bytes_with_bam = 0;
+ d->tohost_drp_cnt = 0;
+ d->tomodem_drp_cnt = 0;
+ d->rx_flow_control_disable = 0;
+ d->rx_flow_control_enable = 0;
+ d->rx_flow_control_triggered = 0;
+ d->max_num_pkts_pending_with_bam = 0;
+ d->max_bytes_pending_with_bam = 0;
+ d->delayed_bam_mux_write_done = 0;
+ d->skb_expand_cnt = 0;
+
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ }
+ return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+ .read = gbam_read_stats,
+ .write = gbam_reset_stats,
+};
+
+struct dentry *gbam_dent;
+static void gbam_debugfs_init(void)
+{
+ struct dentry *dfile;
+
+ if (gbam_dent)
+ return;
+
+ gbam_dent = debugfs_create_dir("usb_rmnet", 0);
+ if (!gbam_dent || IS_ERR(gbam_dent))
+ return;
+
+ dfile = debugfs_create_file("status", 0444, gbam_dent, 0,
+ &gbam_stats_ops);
+ if (!dfile || IS_ERR(dfile)) {
+ debugfs_remove(gbam_dent);
+ gbam_dent = NULL;
+ return;
+ }
+}
+static void gbam_debugfs_remove(void)
+{
+ debugfs_remove_recursive(gbam_dent);
+}
+#else
+static inline void gbam_debugfs_init(void) {}
+static inline void gbam_debugfs_remove(void) {}
+#endif
+
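+/*
+ * Called on cable disconnect: free buffers, disable the endpoints,
+ * release the endless requests and schedule the transport specific
+ * disconnect work.
+ */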
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+ struct gbam_port *port;
+ unsigned long flags, flags_ul, flags_dl;
+ struct bam_ch_info *d;
+
+ pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, port_num);
+
+ if (trans == USB_GADGET_XPORT_BAM2BAM) {
+ pr_err("%s: invalid xport#%d\n", __func__, trans);
+ return;
+ }
+ if (trans == USB_GADGET_XPORT_BAM_DMUX &&
+ port_num >= n_bam_ports) {
+ pr_err("%s: invalid bam portno#%d\n",
+ __func__, port_num);
+ return;
+ }
+
+ if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
+ port_num >= n_bam2bam_ports) {
+ pr_err("%s: invalid bam2bam portno#%d\n",
+ __func__, port_num);
+ return;
+ }
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return;
+ }
+ if (trans == USB_GADGET_XPORT_BAM_DMUX)
+ port = bam_ports[port_num].port;
+ else
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+ /* Already disconnected due to suspend with remote wake disabled */
+ if (port->last_event == U_BAM_DISCONNECT_E) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ /*
+ * Suspend with remote wakeup enabled. Increment usage
+ * count when disconnect happens in suspended state.
+ * Corresponding decrement happens in the end of this
+ * function if IPA handshake is already done or it is done
+ * in disconnect work after finishing IPA handshake.
+ */
+ if (port->last_event == U_BAM_SUSPEND_E)
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ port->port_usb = gr;
+
+ if (trans == USB_GADGET_XPORT_BAM_DMUX)
+ gbam_free_buffers(port);
+ else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ gbam_free_rx_buffers(port);
+
+ spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+ spin_lock(&port->port_lock_dl);
+ port->port_usb = 0;
+ n_tx_req_queued = 0;
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+ /* disable endpoints */
+ if (gr->out) {
+ usb_ep_disable(gr->out);
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+ if (d->rx_req) {
+ usb_ep_free_request(gr->out, d->rx_req);
+ d->rx_req = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ }
+ }
+ usb_ep_disable(gr->in);
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ spin_lock_irqsave(&port->port_lock_dl, flags_dl);
+ if (d->tx_req) {
+ usb_ep_free_request(gr->in, d->tx_req);
+ d->tx_req = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock_dl, flags_dl);
+ }
+
+ /*
+	 * Set the endless flag to false as the USB endpoint is already
+	 * disabled.
+ */
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gr->in->endless = false;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM && gr->out)
+ gr->out->endless = false;
+ }
+
+ gr->in->driver_data = NULL;
+ if (gr->out)
+ gr->out->driver_data = NULL;
+
+ port->last_event = U_BAM_DISCONNECT_E;
+ /* Disable usb irq for CI gadget. It will be enabled in
+ * usb_bam_disconnect_pipe() after disconnecting all pipes
+ * and USB BAM reset is done.
+ */
+ if (!gadget_is_dwc3(port->gadget) &&
+ (trans == USB_GADGET_XPORT_BAM2BAM_IPA))
+ msm_usb_irq_disable(true);
+
+ queue_work(gbam_wq, &port->disconnect_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
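+/*
+ * Called on cable connect: bind the gadget data interface to a BAM
+ * port, allocate the endless requests (IPA transport), enable the
+ * endpoints and schedule the transport specific connect work.
+ */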
+int gbam_connect(struct grmnet *gr, u8 port_num,
+ enum transport_type trans, u8 src_connection_idx,
+ u8 dst_connection_idx)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ int ret;
+ unsigned long flags, flags_ul;
+
+ pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, port_num);
+
+ if (!gr) {
+ pr_err("%s: grmnet port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!gr->gadget) {
+ pr_err("%s: gadget handle not passed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (trans == USB_GADGET_XPORT_BAM2BAM) {
+ pr_err("%s: invalid xport#%d\n", __func__, trans);
+ return -EINVAL;
+ }
+
+ if (trans == USB_GADGET_XPORT_BAM_DMUX && port_num >= n_bam_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return -ENODEV;
+ }
+
+ if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ && port_num >= n_bam2bam_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return -ENODEV;
+ }
+
+ if (trans == USB_GADGET_XPORT_BAM_DMUX)
+ port = bam_ports[port_num].port;
+ else
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+ d->trans = trans;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+ spin_lock(&port->port_lock_dl);
+ port->port_usb = gr;
+ port->gadget = port->port_usb->gadget;
+
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->rx_req = usb_ep_alloc_request(port->port_usb->out,
+ GFP_ATOMIC);
+ if (!d->rx_req) {
+ pr_err("%s: RX request allocation failed\n", __func__);
+ d->rx_req = NULL;
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENOMEM;
+ }
+
+ d->rx_req->context = port;
+ d->rx_req->complete = gbam_endless_rx_complete;
+ d->rx_req->length = 0;
+ d->rx_req->no_interrupt = 1;
+
+ d->tx_req = usb_ep_alloc_request(port->port_usb->in,
+ GFP_ATOMIC);
+ if (!d->tx_req) {
+ pr_err("%s: TX request allocation failed\n", __func__);
+ d->tx_req = NULL;
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+ d->rx_req = NULL;
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENOMEM;
+ }
+
+ d->tx_req->context = port;
+ d->tx_req->complete = gbam_endless_tx_complete;
+ d->tx_req->length = 0;
+ d->tx_req->no_interrupt = 1;
+ }
+
+ if (d->trans == USB_GADGET_XPORT_BAM_DMUX) {
+ d->to_host = 0;
+ d->to_modem = 0;
+ d->pending_pkts_with_bam = 0;
+ d->pending_bytes_with_bam = 0;
+ d->tohost_drp_cnt = 0;
+ d->tomodem_drp_cnt = 0;
+ d->rx_flow_control_disable = 0;
+ d->rx_flow_control_enable = 0;
+ d->rx_flow_control_triggered = 0;
+ d->max_num_pkts_pending_with_bam = 0;
+ d->max_bytes_pending_with_bam = 0;
+ d->delayed_bam_mux_write_done = 0;
+ }
+
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->src_connection_idx = src_connection_idx;
+ d->dst_connection_idx = dst_connection_idx;
+ d->usb_bam_type = usb_bam_get_bam_type(gr->gadget->name);
+ d->ipa_params.src_pipe = &(d->src_pipe_idx);
+ d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params.src_idx = src_connection_idx;
+ d->ipa_params.dst_idx = dst_connection_idx;
+
+ /*
+		 * Query the pipe type from the usb_bam driver using the
+		 * IPA src/dst index. It is set to either BAM2BAM or
+		 * SYS2BAM.
+ */
+ if (usb_bam_get_pipe_type(d->usb_bam_type,
+ d->ipa_params.src_idx, &d->src_pipe_type) ||
+ usb_bam_get_pipe_type(d->usb_bam_type,
+ d->ipa_params.dst_idx, &d->dst_pipe_type)) {
+ pr_err("%s:usb_bam_get_pipe_type() failed\n",
+ __func__);
+ ret = -EINVAL;
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+ d->rx_req = NULL;
+ usb_ep_free_request(port->port_usb->in, d->tx_req);
+ d->tx_req = NULL;
+ goto exit;
+ }
+ /*
+		 * Check the pipe_type. If it is BAM2BAM, the Xfer complete
+		 * and Xfer not ready interrupts must be disabled for that
+		 * particular endpoint. Hence set the endless flag, which the
+		 * UDC driver takes into account while enabling the USB
+		 * endpoint.
+ */
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = true;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = true;
+ }
+
+ ret = usb_ep_enable(gr->in);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:IN ep:%pK",
+ __func__, gr->in);
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+ d->rx_req = NULL;
+ usb_ep_free_request(port->port_usb->in, d->tx_req);
+ d->tx_req = NULL;
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = false;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = false;
+ goto exit;
+ }
+ gr->in->driver_data = port;
+
+ /*
+ * DPL traffic is routed through BAM-DMUX on some targets.
+ * DPL function has only 1 IN endpoint. Add out endpoint
+ * checks for BAM-DMUX transport.
+ */
+ if (gr->out) {
+ ret = usb_ep_enable(gr->out);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:OUT ep:%pK",
+ __func__, gr->out);
+ gr->in->driver_data = 0;
+ usb_ep_disable(gr->in);
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+ d->rx_req = NULL;
+ usb_ep_free_request(port->port_usb->in, d->tx_req);
+ d->tx_req = NULL;
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = false;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = false;
+ goto exit;
+ }
+ gr->out->driver_data = port;
+ }
+
+ port->last_event = U_BAM_CONNECT_E;
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(gbam_wq, &port->connect_w);
+
+ ret = 0;
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return ret;
+}
+
+int gbam_setup(unsigned int no_bam_port)
+{
+ int i;
+ int ret;
+ int bam_port_start = n_bam_ports;
+ int total_bam_ports = bam_port_start + no_bam_port;
+
+ pr_debug("%s: requested BAM ports:%d\n", __func__, no_bam_port);
+
+ if (!no_bam_port || total_bam_ports > BAM_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, no_bam_port);
+ return -EINVAL;
+ }
+
+ if (!gbam_wq) {
+ gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!gbam_wq) {
+ pr_err("%s: Unable to create workqueue gbam_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ for (i = bam_port_start; i < (bam_port_start + no_bam_port); i++) {
+ n_bam_ports++;
+ pr_debug("gbam_port_alloc called for %d\n", i);
+ ret = gbam_port_alloc(i);
+ if (ret) {
+ n_bam_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_bam_ports;
+ }
+ }
+
+ gbam_debugfs_init();
+
+ return bam_port_start;
+
+free_bam_ports:
+ for (i = 0; i < n_bam_ports; i++)
+ gbam_port_free(i);
+ destroy_workqueue(gbam_wq);
+
+ return ret;
+}
+
+int gbam2bam_setup(unsigned int no_bam2bam_port)
+{
+ int i;
+ int ret;
+ int bam2bam_port_start = n_bam2bam_ports;
+ int total_bam2bam_ports = bam2bam_port_start + no_bam2bam_port;
+
+ pr_debug("%s: requested BAM2BAM ports:%d\n", __func__, no_bam2bam_port);
+
+ if (!no_bam2bam_port || total_bam2bam_ports > BAM2BAM_N_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, no_bam2bam_port);
+ return -EINVAL;
+ }
+
+ if (!gbam_wq) {
+ gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!gbam_wq) {
+ pr_err("%s: Unable to create workqueue gbam_wq\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ for (i = bam2bam_port_start; i < (bam2bam_port_start +
+ no_bam2bam_port); i++) {
+ n_bam2bam_ports++;
+ ret = gbam2bam_port_alloc(i);
+ if (ret) {
+ n_bam2bam_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_bam2bam_ports;
+ }
+ }
+
+ gbam_debugfs_init();
+
+ return bam2bam_port_start;
+
+free_bam2bam_ports:
+ for (i = 0; i < n_bam2bam_ports; i++)
+ gbam2bam_port_free(i);
+ destroy_workqueue(gbam_wq);
+
+ return ret;
+}
+
+void gbam_cleanup(void)
+{
+ gbam_debugfs_remove();
+}
+
+void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return;
+
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ pr_debug("%s: suspended port %d\n", __func__, port_num);
+
+ port->last_event = U_BAM_SUSPEND_E;
+ queue_work(gbam_wq, &port->suspend_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return;
+
+ port = bam2bam_ports[port_num];
+
+ if (!port) {
+ pr_err("%s: NULL port", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+ port->last_event = U_BAM_RESUME_E;
+ /*
+ * Increment usage count here to disallow gadget parent suspend.
+ * This counter will decrement after IPA handshake is done in
+ * disconnect work (due to cable disconnect) or in bam_disconnect
+ * in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(gbam_wq, &port->resume_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+ struct usb_ep *out)
+{
+ struct grmnet *gr;
+
+ gr = kzalloc(sizeof(*gr), GFP_ATOMIC);
+ if (!gr)
+ return -ENOMEM;
+ gr->in = in;
+ gr->out = out;
+ gr->gadget = g;
+
+ return gbam_connect(gr, 0, USB_GADGET_XPORT_BAM_DMUX, 0, 0);
+}
+
+void gbam_mbim_disconnect(void)
+{
+ struct gbam_port *port = bam_ports[0].port;
+ struct grmnet *gr = port->port_usb;
+
+ if (!gr) {
+ pr_err("%s: port_usb is NULL\n", __func__);
+ return;
+ }
+
+ gbam_disconnect(gr, 0, USB_GADGET_XPORT_BAM_DMUX);
+ kfree(gr);
+}
+
+int gbam_mbim_setup(void)
+{
+ int ret = 0;
+
+ /*
+ * MBIM requires only 1 USB_GADGET_XPORT_BAM_DMUX
+ * port. The port is always 0 and is shared
+ * between RMNET and MBIM.
+ */
+ if (!n_bam_ports)
+ ret = gbam_setup(1);
+
+ return ret;
+}
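The three gbam_mbim_* helpers above are meant to be driven by the MBIM function driver: gbam_mbim_setup() once before use (it allocates the shared BAM-DMUX port 0 on the first call only), gbam_mbim_connect() when the data interface is activated, and gbam_mbim_disconnect() when it is torn down. A minimal usage sketch follows; the f_my_mbim structure and its fields are illustrative placeholders, not code added by this patch.

static int my_mbim_enable_data(struct f_my_mbim *mbim)
{
	int ret;

	/* allocates BAM-DMUX port 0 on the first call; later calls
	 * return immediately because n_bam_ports is already non-zero
	 */
	ret = gbam_mbim_setup();
	if (ret < 0)
		return ret;

	/* hand the already-claimed bulk endpoints to the shared port */
	return gbam_mbim_connect(mbim->gadget, mbim->in_ep, mbim->out_ep);
}

static void my_mbim_disable_data(struct f_my_mbim *mbim)
{
	gbam_mbim_disconnect();
}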
diff --git a/drivers/usb/gadget/function/u_bam_data.c b/drivers/usb/gadget/function/u_bam_data.c
new file mode 100644
index 000000000000..56bb5724ea52
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.c
@@ -0,0 +1,2109 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/usb/gadget.h>
+
+#include <linux/usb_bam.h>
+
+#include "u_bam_data.h"
+
+#define BAM_DATA_RX_Q_SIZE 128
+#define BAM_DATA_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+#define BAM_DATA_PENDING_LIMIT 220
+
+#define SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT 1
+#define SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD 500
+#define SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD 300
+
+static unsigned int bam_ipa_rx_fctrl_support = SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(bam_ipa_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_ipa_rx_fctrl_en_thld = SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD;
+module_param(bam_ipa_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_ipa_rx_fctrl_dis_thld = SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD;
+module_param(bam_ipa_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+static struct workqueue_struct *bam_data_wq;
+static int n_bam2bam_data_ports;
+
+unsigned int bam_data_rx_q_size = BAM_DATA_RX_Q_SIZE;
+module_param(bam_data_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_data_mux_rx_req_size = BAM_DATA_MUX_RX_REQ_SIZE;
+module_param(bam_data_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+#define SPS_PARAMS_SPS_MODE BIT(5)
+#define SPS_PARAMS_TBE BIT(6)
+#define MSM_VENDOR_ID BIT(16)
+
+struct rndis_data_ch_info {
+	/* downlink (device -> host) side configuration */
+	u32 dl_max_transfer_size;
+	/* uplink (host -> device) side configuration */
+ u32 ul_max_transfer_size;
+ u32 ul_max_packets_number;
+ bool ul_aggregation_enable;
+ u32 prod_clnt_hdl;
+ u32 cons_clnt_hdl;
+ void *priv;
+};
+
+struct sys2ipa_sw_data {
+ void *teth_priv;
+ ipa_notify_cb teth_cb;
+};
+
+struct bam_data_ch_info {
+ unsigned long flags;
+ unsigned id;
+
+ struct bam_data_port *port;
+ struct work_struct write_tobam_w;
+
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ enum function_type func_type;
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params ipa_params;
+
+ /* UL workaround parameters */
+ struct sys2ipa_sw_data ul_params;
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+ int total_skb;
+ int freed_skb;
+ int freed_rx_reqs;
+ int alloc_rx_reqs;
+ struct sk_buff_head rx_skb_idle;
+ enum usb_bam_pipe_type src_pipe_type;
+ enum usb_bam_pipe_type dst_pipe_type;
+ unsigned int pending_with_bam;
+ int rx_buffer_size;
+
+ unsigned int rx_flow_control_disable;
+ unsigned int rx_flow_control_enable;
+ unsigned int rx_flow_control_triggered;
+	/*
+	 * Used in the RNDIS/ECM network-interface based design to
+	 * indicate that the pipe connect notification has been sent
+	 * to ecm_ipa/rndis_ipa.
+	 */
+ atomic_t pipe_connect_notified;
+ bool tx_req_dequeued;
+ bool rx_req_dequeued;
+};
+
+enum u_bam_data_event_type {
+ U_BAM_DATA_DISCONNECT_E = 0,
+ U_BAM_DATA_CONNECT_E,
+ U_BAM_DATA_SUSPEND_E,
+ U_BAM_DATA_RESUME_E
+};
+
+struct bam_data_port {
+ bool is_ipa_connected;
+ enum u_bam_data_event_type last_event;
+ unsigned port_num;
+ spinlock_t port_lock;
+ unsigned int ref_count;
+ struct data_port *port_usb;
+ struct usb_gadget *gadget;
+ struct bam_data_ch_info data_ch;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+};
+struct usb_bam_data_connect_info {
+ u32 usb_bam_pipe_idx;
+ u32 peer_pipe_idx;
+ u32 usb_bam_handle;
+};
+
+struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
+static struct rndis_data_ch_info rndis_data;
+
+static void bam2bam_data_suspend_work(struct work_struct *w);
+static void bam2bam_data_resume_work(struct work_struct *w);
+static void bam_data_free_reqs(struct bam_data_port *port);
+
+/*----- sys2bam towards the IPA (UL workaround) --------------- */
+
+static int bam_data_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct bam_data_port *port = ep->driver_data;
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK", __func__,
+ ep, head, num, cb);
+
+ if (d->alloc_rx_reqs) {
+ pr_err("%s(): reqs are already allocated.\n", __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ d->alloc_rx_reqs++;
+ req->complete = cb;
+ list_add_tail(&req->list, head);
+ }
+
+ return 0;
+}
+
+static inline dma_addr_t bam_data_get_dma_from_skb(struct sk_buff *skb)
+{
+ return *((dma_addr_t *)(skb->cb));
+}
+
+/* Must be called with port_lock held */
+static struct sk_buff *bam_data_alloc_skb_from_pool(
+ struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb = NULL;
+ dma_addr_t skb_buf_dma_addr;
+ struct data_port *data_port;
+ struct usb_gadget *gadget;
+
+ if (!port)
+ return NULL;
+ d = &port->data_ch;
+ if (!d)
+ return NULL;
+
+ if (d->rx_skb_idle.qlen == 0) {
+		/*
+		 * If the skb idle pool is empty, allocate a new skb and
+		 * thereby enlarge the pool dynamically. In steady state
+		 * this allocation stops once the pool has reached its
+		 * optimal size.
+		 */
+ pr_debug("%s: allocate skb\n", __func__);
+ skb = alloc_skb(d->rx_buffer_size + BAM_MUX_HDR, GFP_ATOMIC);
+ if (!skb) {
+ pr_err("%s: alloc skb failed\n", __func__);
+ goto alloc_exit;
+ }
+
+ d->total_skb++;
+ skb_reserve(skb, BAM_MUX_HDR);
+
+ data_port = port->port_usb;
+ if (data_port && data_port->cdev && data_port->cdev->gadget) {
+ gadget = data_port->cdev->gadget;
+
+ skb_buf_dma_addr =
+ dma_map_single(&gadget->dev, skb->data,
+ d->rx_buffer_size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+ pr_err("%s: Could not DMA map SKB buffer\n",
+ __func__);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+ } else {
+ pr_err("%s: Could not DMA map SKB buffer\n", __func__);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+ }
+
+ memcpy(skb->cb, &skb_buf_dma_addr,
+ sizeof(skb_buf_dma_addr));
+
+ } else {
+ pr_debug("%s: pull skb from pool\n", __func__);
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ }
+
+alloc_exit:
+ return skb;
+}
+
+static void bam_data_free_skb_to_pool(
+ struct bam_data_port *port,
+ struct sk_buff *skb)
+{
+ struct bam_data_ch_info *d;
+
+ if (!port) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ d = &port->data_ch;
+ if (!d) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
+static void bam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct bam_data_port *port = p;
+ struct bam_data_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ bam_data_free_skb_to_pool(port, skb);
+
+ d->pending_with_bam--;
+
+ pr_debug("%s: port:%pK d:%pK pbam:%u, pno:%d\n", __func__,
+ port, d, d->pending_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ queue_work(bam_data_wq, &d->write_tobam_w);
+}
+
+static void bam_data_ipa_sys2bam_notify_cb(void *priv,
+ enum ipa_dp_evt_type event, unsigned long data)
+{
+ struct sys2ipa_sw_data *ul = (struct sys2ipa_sw_data *)priv;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ switch (event) {
+ case IPA_WRITE_DONE:
+ d = container_of(ul, struct bam_data_ch_info, ul_params);
+ port = container_of(d, struct bam_data_port, data_ch);
+		/* recycle the skb back into the rx pool */
+ bam_data_write_done(port, (struct sk_buff *)(data));
+ break;
+ case IPA_RECEIVE:
+		/* call the callback provided by the tethering driver's init
+		 * function (the one that was passed to ipa_connect)
+		 */
+ if (ul->teth_cb)
+ ul->teth_cb(ul->teth_priv, event, data);
+ break;
+ default:
+ /* unexpected event */
+ pr_err("%s: unexpected event %d\n", __func__, event);
+ break;
+ }
+}
+
+
+static void bam_data_start_rx(struct bam_data_port *port)
+{
+ struct usb_request *req;
+ struct bam_data_ch_info *d;
+ struct usb_ep *ep;
+ int ret;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (!port->port_usb)
+ return;
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ while (port->port_usb && !list_empty(&d->rx_idle)) {
+
+ if (bam_ipa_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld)
+ break;
+
+ req = list_first_entry(&d->rx_idle, struct usb_request, list);
+ skb = bam_data_alloc_skb_from_pool(port);
+ if (!skb)
+ break;
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->dma = bam_data_get_dma_from_skb(skb);
+ req->length = d->rx_buffer_size;
+
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ bam_data_free_skb_to_pool(port, skb);
+
+ pr_err("%s: rx queue failed %d\n", __func__, ret);
+
+ if (port->port_usb)
+ list_add(&req->list, &d->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct bam_data_port *port = ep->driver_data;
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+ unsigned long flags;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ spin_lock_irqsave(&port->port_lock, flags);
+ bam_data_free_skb_to_pool(port, skb);
+ d->freed_rx_reqs++;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ req->buf = 0;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ pr_err("%s: %s response error %d, %d/%d\n", __func__,
+ ep->name, status, req->actual, req->length);
+ spin_lock_irqsave(&port->port_lock, flags);
+ bam_data_free_skb_to_pool(port, skb);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ break;
+ }
+
+ spin_lock(&port->port_lock);
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ if (!usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx)) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ pr_err_ratelimited("usb bam prod is not granted.\n");
+ return;
+ }
+ queue_work(bam_data_wq, &d->write_tobam_w);
+ }
+
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld) {
+ if (!d->rx_flow_control_triggered) {
+ d->rx_flow_control_triggered = 1;
+ d->rx_flow_control_enable++;
+ }
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ return;
+ }
+
+ skb = bam_data_alloc_skb_from_pool(port);
+ if (!skb) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ return;
+ }
+ spin_unlock(&port->port_lock);
+
+ req->buf = skb->data;
+ req->dma = bam_data_get_dma_from_skb(skb);
+ req->length = d->rx_buffer_size;
+
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ pr_err_ratelimited("%s: data rx enqueue err %d\n",
+ __func__, status);
+ spin_lock(&port->port_lock);
+ bam_data_free_skb_to_pool(port, skb);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock);
+ }
+}
+/* Must be called with port_lock held. */
+static int bam_data_sys2bam_alloc_req(struct bam_data_port *port, bool in)
+{
+ int ret;
+ struct usb_ep *ep;
+ struct list_head *idle;
+ unsigned queue_size;
+ void (*ep_complete)(struct usb_ep *, struct usb_request *);
+
+ if (!port->port_usb)
+ return -EBUSY;
+ if (in)
+ return -ENODEV;
+
+ ep = port->port_usb->out;
+ idle = &port->data_ch.rx_idle;
+ queue_size = bam_data_rx_q_size;
+ ep_complete = bam_data_epout_complete;
+
+ ret = bam_data_alloc_requests(ep, idle, queue_size, ep_complete,
+ GFP_ATOMIC);
+ if (ret)
+ pr_err("%s: allocation failed\n", __func__);
+
+ return ret;
+}
+
+static void bam_data_write_toipa(struct work_struct *w)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb;
+ int ret;
+ int qlen;
+ unsigned long flags;
+ dma_addr_t skb_dma_addr;
+ struct ipa_tx_meta ipa_meta = {0x0};
+
+ d = container_of(w, struct bam_data_ch_info, write_tobam_w);
+ port = d->port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ while (d->pending_with_bam < BAM_PENDING_PKTS_LIMIT &&
+ usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx)) {
+ skb = __skb_dequeue(&d->rx_skb_q);
+ if (!skb)
+ break;
+
+ d->pending_with_bam++;
+
+ pr_debug("%s: port:%pK d:%pK pbam:%u pno:%d\n", __func__,
+ port, d, d->pending_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ skb_dma_addr = bam_data_get_dma_from_skb(skb);
+ if (skb_dma_addr != DMA_ERROR_CODE) {
+ ipa_meta.dma_address = skb_dma_addr;
+ ipa_meta.dma_address_valid = true;
+ }
+
+ ret = ipa_tx_dp(IPA_CLIENT_USB_PROD, skb, &ipa_meta);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ pr_debug_ratelimited("%s: write error:%d\n",
+ __func__, ret);
+ d->pending_with_bam--;
+ bam_data_free_skb_to_pool(port, skb);
+ break;
+ }
+ }
+
+ qlen = d->rx_skb_q.qlen;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (qlen < bam_ipa_rx_fctrl_dis_thld) {
+ if (d->rx_flow_control_triggered) {
+ d->rx_flow_control_disable++;
+ d->rx_flow_control_triggered = 0;
+ }
+ bam_data_start_rx(port);
+ }
+
+}
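Taken together, bam_data_epout_complete() and bam_data_write_toipa() implement a hysteresis on the rx_skb_q depth: completed OUT requests are parked on rx_idle once the queue reaches the enable threshold, and RX is restarted only after the worker drains it below the disable threshold. The decision reduces to the two small helpers below, given as an illustrative sketch only (the driver keeps the two halves inline in the completion and worker paths above, using the module parameters declared at the top of this file):

/* sketch: true when the completion path should park the OUT request
 * instead of re-queueing it, given the current backlog depth
 */
static inline bool ul_backlog_should_throttle(unsigned int qlen)
{
	return bam_ipa_rx_fctrl_support &&
		qlen >= bam_ipa_rx_fctrl_en_thld;
}

/* sketch: true when the worker path may restart RX */
static inline bool ul_backlog_can_resume(unsigned int qlen)
{
	return qlen < bam_ipa_rx_fctrl_dis_thld;
}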
+
+/*------------data_path----------------------------*/
+
+static void bam_data_endless_rx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_endless_tx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_start_endless_rx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->rx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
+static void bam_data_start_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->tx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
+static void bam_data_stop_endless_rx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ d->rx_req_dequeued = true;
+
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam_data_stop_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ d->tx_req_dequeued = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->tx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+static void bam2bam_free_rx_skb_idle_list(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+ if (!d) {
+ pr_err("%s(): port->data_ch is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb) {
+ pr_err("%s(): port->port_usb is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb->cdev) {
+ pr_err("port->port_usb->cdev is NULL");
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (!gadget) {
+ pr_err("%s(): gadget is NULL.\n", __func__);
+ return;
+ }
+
+ while (d->rx_skb_idle.qlen > 0) {
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ dma_addr = gbam_get_dma_from_skb(skb);
+
+ if (gadget && dma_addr != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, dma_addr,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+ dma_addr = DMA_ERROR_CODE;
+ memcpy(skb->cb, &dma_addr, sizeof(dma_addr));
+ }
+ dev_kfree_skb_any(skb);
+ d->freed_skb++;
+ }
+
+ pr_debug("%s(): Freed %d SKBs from rx_skb_idle queue\n", __func__,
+ d->freed_skb);
+}
+
+/*
+ * bam_data_ipa_disconnect() - Perform USB IPA function level disconnect
+ * @d: per-port data structure (struct bam_data_ch_info)
+ *
+ * Call the IPA rndis/ecm/mbim disconnect APIs only if their init
+ * counterparts have already been performed.
+ * MBIM: teth_bridge_connect() is a no-op and teth_bridge_init() is
+ * called in atomic context on cable connect, so no check is needed
+ * there. pipe_connect_notified is used for the RNDIS/ECM drivers
+ * because of their different design, which relies on the network
+ * interface created by the IPA driver.
+ */
+static void bam_data_ipa_disconnect(struct bam_data_ch_info *d)
+{
+ pr_debug("%s(): pipe_connect_notified:%d\n",
+ __func__, atomic_read(&d->pipe_connect_notified));
+	/*
+	 * If pipe_connect_notified is set, perform the disconnect part
+	 * and clear it atomically.
+	 */
+ if (atomic_xchg(&d->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ if (d->func_type == USB_FUNC_ECM) {
+ priv = ecm_qc_get_ipa_priv();
+ ecm_ipa_disconnect(priv);
+ } else if (d->func_type == USB_FUNC_RNDIS) {
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ pr_debug("%s(): net interface is disconnected.\n", __func__);
+ }
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ pr_debug("%s(): teth_bridge() disconnected\n", __func__);
+ teth_bridge_disconnect(d->ipa_params.src_client);
+ }
+}
+
+static void bam2bam_data_disconnect_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, disconnect_w);
+ struct bam_data_ch_info *d;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->is_ipa_connected) {
+ pr_debug("%s: Already disconnected. Bailing out.\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+
+	/*
+	 * Unlock the port here rather than at the end of this work,
+	 * because we do not want to run the usb_bam, IPA and
+	 * teth_bridge logic in atomic context or wait needlessly.
+	 * Other works will not fire until this work finishes anyway,
+	 * and event functions (such as bam_data_connect) will not
+	 * interfere while the lower layers connect the pipes, etc.
+	 */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret)
+ pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+
+	/*
+	 * NOTE: the USB and IPA BAM pipes must be disconnected before
+	 * calling the IPA tethered-function disconnect API. That API
+	 * deletes the dependency graph with IPA RM, which would result
+	 * in IPA not pulling data even though data is pending on the
+	 * USB BAM producer pipe.
+	 */
+ bam_data_ipa_disconnect(d);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->is_ipa_connected = false;
+
+ /*
+ * Decrement usage count which was incremented
+ * upon cable connect or cable disconnect in suspended state.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("Disconnect workqueue done (port %pK)\n", port);
+}
+/*
+ * Configure the USB data FIFO for the given connection index, using the
+ * BAM2BAM connection information (no-op for other pipe types).
+ */
+static void configure_usb_data_fifo(enum usb_ctrl bam_type,
+ u8 idx, struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ if (pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+ }
+}
+
+/* Start RX transfers according to pipe_type */
+static inline void bam_data_start_rx_transfers(struct bam_data_ch_info *d,
+ struct bam_data_port *port)
+{
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ bam_data_start_endless_rx(port);
+ else
+ bam_data_start_rx(port);
+}
+
+static void bam2bam_data_connect_work(struct work_struct *w)
+{
+ struct bam_data_port *port = container_of(w, struct bam_data_port,
+ connect_w);
+ struct teth_bridge_connect_params connect_params;
+ struct teth_bridge_init_params teth_bridge_params;
+ struct bam_data_ch_info *d;
+ struct data_port *d_port;
+ struct usb_gadget *gadget = NULL;
+ u32 sps_params;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s: Connect workqueue started", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+ d_port = port->port_usb;
+
+ if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+ pr_debug("%s: Port is about to disconnect. Bail out.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (d_port && d_port->cdev)
+ gadget = d_port->cdev->gadget;
+
+ if (!gadget) {
+ pr_err("%s: NULL gadget\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (!port->port_usb) {
+ pr_err("port_usb is NULL");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (!port->port_usb->out) {
+ pr_err("port_usb->out (bulk out ep) is NULL");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+	/*
+	 * Check whether connect_w has already run; it can be scheduled
+	 * twice during RNDIS resume, since explicit flow control starts
+	 * the data transfers after bam_data_connect().
+	 */
+ if (port->is_ipa_connected) {
+ pr_debug("IPA connect is already done & Transfers started\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ return;
+ }
+
+ d->ipa_params.usb_connection_speed = gadget->speed;
+ d->ipa_params.cons_clnt_hdl = -1;
+ d->ipa_params.prod_clnt_hdl = -1;
+
+ if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: no software preparation for DL not using bam2bam\n",
+ __func__);
+ return;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+		pr_err("Disconnected. port_usb is NULL\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto free_fifos;
+ }
+
+ if (gadget_is_dwc3(gadget)) {
+ /* Configure RX */
+ configure_usb_data_fifo(d->usb_bam_type,
+ d->src_connection_idx,
+ port->port_usb->out, d->src_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ | MSM_PRODUCER | d->src_pipe_idx;
+ d->rx_req->length = 32*1024;
+ d->rx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->out, d->rx_req);
+
+ /* Configure TX */
+ configure_usb_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ | d->dst_pipe_idx;
+ d->tx_req->length = 32*1024;
+ d->tx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->in, d->tx_req);
+
+ } else {
+ /* Configure RX */
+ sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
+ MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
+ d->rx_req->udc_priv = sps_params;
+
+ /* Configure TX */
+ sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
+ MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
+ d->tx_req->udc_priv = sps_params;
+ }
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ teth_bridge_params.client = d->ipa_params.src_client;
+ ret = teth_bridge_init(&teth_bridge_params);
+ if (ret) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s:teth_bridge_init() failed\n",
+ __func__);
+ goto free_fifos;
+ }
+ d->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ d->ipa_params.priv =
+ teth_bridge_params.private_data;
+ d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ d->ipa_params.skip_ep_cfg =
+ teth_bridge_params.skip_ep_cfg;
+ }
+ d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
+ }
+
+ if (d->func_type == USB_FUNC_RNDIS) {
+ d->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+ d->ipa_params.priv = rndis_qc_get_ipa_priv();
+ d->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ }
+
+ /* Support for UL using system-to-IPA */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ul_params.teth_cb = d->ipa_params.notify;
+ d->ipa_params.notify =
+ bam_data_ipa_sys2bam_notify_cb;
+ d->ul_params.teth_priv = d->ipa_params.priv;
+ d->ipa_params.priv = &d->ul_params;
+ d->ipa_params.reset_pipe_after_lpm = false;
+ } else {
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto free_fifos;
+ }
+ gadget->bam2bam_func_enabled = true;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s:%d: Port is being disconnected.\n",
+ __func__, __LINE__);
+ goto disconnect_ipa;
+ }
+
+ d_port->ipa_consumer_ep = d->ipa_params.ipa_cons_ep_idx;
+
+	/*
+	 * Restore the tethering callback/priv before setting up the DL
+	 * direction; the sys2ipa workaround applies only to the UL path.
+	 */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ipa_params.notify = d->ul_params.teth_cb;
+ d->ipa_params.priv = d->ul_params.teth_priv;
+ }
+
+ d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
+ }
+ if (d->func_type == USB_FUNC_RNDIS) {
+ d->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+ d->ipa_params.priv = rndis_qc_get_ipa_priv();
+ d->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ }
+
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ } else {
+ d->ipa_params.reset_pipe_after_lpm = false;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto disconnect_ipa;
+ }
+
+ /*
+ * Cable might have been disconnected after releasing the
+ * spinlock and re-enabling IRQs. Hence check again.
+ */
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s:%d: Port is being disconnected.\n",
+			__func__, __LINE__);
+ goto disconnect_ipa;
+ }
+
+ port->is_ipa_connected = true;
+
+ d_port->ipa_producer_ep = d->ipa_params.ipa_prod_ep_idx;
+ pr_debug("%s(): ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+ __func__, d_port->ipa_producer_ep,
+ d_port->ipa_consumer_ep);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ connect_params.ipa_usb_pipe_hdl =
+ d->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl =
+ d->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode =
+ TETH_TETHERING_MODE_MBIM;
+ connect_params.client_type = d->ipa_params.src_client;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n",
+ __func__);
+ return;
+ }
+ }
+
+ if (d->func_type == USB_FUNC_ECM) {
+ ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
+ d->ipa_params.prod_clnt_hdl,
+ d->ipa_params.priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ }
+
+ if (d->func_type == USB_FUNC_RNDIS) {
+ rndis_data.prod_clnt_hdl =
+ d->ipa_params.prod_clnt_hdl;
+ rndis_data.cons_clnt_hdl =
+ d->ipa_params.cons_clnt_hdl;
+ rndis_data.priv = d->ipa_params.priv;
+
+ pr_debug("ul_max_transfer_size:%d\n",
+ rndis_data.ul_max_transfer_size);
+ pr_debug("ul_max_packets_number:%d\n",
+ rndis_data.ul_max_packets_number);
+ pr_debug("dl_max_transfer_size:%d\n",
+ rndis_data.dl_max_transfer_size);
+
+ ret = rndis_ipa_pipe_connect_notify(
+ rndis_data.cons_clnt_hdl,
+ rndis_data.prod_clnt_hdl,
+ rndis_data.ul_max_transfer_size,
+ rndis_data.ul_max_packets_number,
+ rndis_data.dl_max_transfer_size,
+ rndis_data.priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ }
+ atomic_set(&d->pipe_connect_notified, 1);
+
+ /* Don't queue the transfers yet, only after network stack is up */
+ if (d->func_type == USB_FUNC_RNDIS || d->func_type == USB_FUNC_ECM) {
+ pr_debug("%s: Not starting now, waiting for network notify",
+ __func__);
+ return;
+ }
+
+ /* queue in & out requests */
+ bam_data_start_rx_transfers(d, port);
+ bam_data_start_endless_tx(port);
+
+ pr_debug("Connect workqueue done (port %pK)", port);
+ return;
+
+disconnect_ipa:
+ /* let disconnect work take care of ipa disconnect */
+ port->is_ipa_connected = true;
+ return;
+
+free_fifos:
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+}
+
+/*
+ * Called when IPA notifies us that the network interface is up.
+ * Starts the transfers on the bulk endpoints (as an optimization,
+ * the pipes and BAM with IPA are already connected).
+ */
+void bam_data_start_rx_tx(u8 port_num)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+ unsigned long flags;
+
+ pr_debug("%s: Triggered: starting tx, rx", __func__);
+
+ /* queue in & out requests */
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s: port is NULL, can't start tx, rx", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ d = &port->data_ch;
+
+ if (!port->port_usb || !port->port_usb->in->driver_data
+ || !port->port_usb->out->driver_data) {
+ pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+ goto out;
+ }
+
+ if (!d->rx_req || !d->tx_req) {
+ pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+ d->rx_req, d->tx_req);
+ goto out;
+ }
+ if (!port->is_ipa_connected) {
+ pr_debug("%s: pipes are disconnected", __func__);
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* queue in & out requests */
+ pr_debug("%s: Starting rx", __func__);
+ bam_data_start_rx_transfers(d, port);
+
+ pr_debug("%s: Starting tx", __func__);
+ bam_data_start_endless_tx(port);
+
+ return;
+out:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+inline int u_bam_data_func_to_port(enum function_type func, u8 func_port)
+{
+ if (func >= USB_NUM_FUNCS || func_port >= PORTS_PER_FUNC) {
+ pr_err("func=%d and func_port=%d are an illegal combination\n",
+ func, func_port);
+ return -EINVAL;
+ }
+ return (PORTS_PER_FUNC * func) + func_port;
+}
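The mapping above simply flattens (function type, per-function port) into an index into the bam2bam_data_ports[] table, PORTS_PER_FUNC entries per function type. A small usage sketch (illustrative only; the constants come from the u_bam_data/f_qc_rndis headers):

	int port_num;

	/* the RNDIS data path uses its first per-function port */
	port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
					   RNDIS_QC_ACTIVE_PORT);
	if (port_num < 0)
		return;		/* rejected: out-of-range combination */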
+
+static int bam2bam_data_port_alloc(int portno)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ if (bam2bam_data_ports[portno] != NULL) {
+ pr_debug("port %d already allocated.\n", portno);
+ return 0;
+ }
+
+ port = kzalloc(sizeof(struct bam_data_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ bam2bam_data_ports[portno] = port;
+ d = &port->data_ch;
+ d->port = port;
+
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+ INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+ INIT_WORK(&d->write_tobam_w, bam_data_write_toipa);
+ return 0;
+}
+
+void u_bam_data_start_rndis_ipa(void)
+{
+ int port_num;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ pr_debug("%s\n", __func__);
+
+ port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
+ RNDIS_QC_ACTIVE_PORT);
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+
+ if (!atomic_read(&d->pipe_connect_notified)) {
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work due to cable disconnect
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(bam_data_wq, &port->connect_w);
+ } else {
+ pr_debug("%s: Transfers already started?\n", __func__);
+ }
+}
+
+void u_bam_data_stop_rndis_ipa(void)
+{
+ int port_num;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ pr_debug("%s\n", __func__);
+
+ port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
+ RNDIS_QC_ACTIVE_PORT);
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+
+ if (atomic_read(&d->pipe_connect_notified)) {
+ rndis_ipa_reset_trigger();
+ bam_data_stop_endless_tx(port);
+ bam_data_stop_endless_rx(port);
+ queue_work(bam_data_wq, &port->disconnect_w);
+ }
+}
+
+void bam_data_flow_control_enable(bool enable)
+{
+ if (enable)
+ u_bam_data_stop_rndis_ipa();
+ else
+ u_bam_data_start_rndis_ipa();
+}
+
+static void bam_data_free_reqs(struct bam_data_port *port)
+{
+
+ struct list_head *head;
+ struct usb_request *req;
+
+ if (port->data_ch.src_pipe_type != USB_BAM_PIPE_SYS2BAM)
+ return;
+
+ head = &port->data_ch.rx_idle;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(port->port_usb->out, req);
+ port->data_ch.freed_rx_reqs++;
+ }
+}
+
+void bam_data_disconnect(struct data_port *gr, enum function_type func,
+ u8 dev_port_num)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ int port_num;
+
+ port_num = u_bam_data_func_to_port(func, dev_port_num);
+ if (port_num < 0) {
+ pr_err("invalid bam2bam portno#%d\n", port_num);
+ return;
+ }
+
+ pr_debug("dev:%pK port number:%d\n", gr, port_num);
+
+ if (!gr) {
+ pr_err("data port is null\n");
+ return;
+ }
+
+ port = bam2bam_data_ports[port_num];
+
+ if (!port) {
+ pr_err("port %u is NULL", port_num);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+ /* Already disconnected due to suspend with remote wake disabled */
+ if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+	/*
+	 * Suspend with remote wakeup enabled. Increment the usage
+	 * count when the disconnect happens in the suspended state.
+	 * The corresponding decrement happens either at the end of
+	 * this function, if the IPA handshake is already done, or in
+	 * the disconnect work after the IPA handshake finishes.
+	 * For RNDIS, if connect_w has not yet been triggered by
+	 * rndis_flow_control, skip pm_runtime_get because suspend_w
+	 * would have bailed out without a runtime_get. Restrict this
+	 * check to RNDIS to handle the (rare) case where connect_w is
+	 * already scheduled but has not executed yet.
+	 */
+ if (port->last_event == U_BAM_DATA_SUSPEND_E &&
+ (d->func_type != USB_FUNC_RNDIS || port->is_ipa_connected))
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ if (port->port_usb) {
+ port->port_usb->ipa_consumer_ep = -1;
+ port->port_usb->ipa_producer_ep = -1;
+
+ if (port->port_usb->in && port->port_usb->in->driver_data) {
+
+ /*
+ * Disable endpoints.
+ * Unlocking is needed since disabling the eps might
+ * stop active transfers and therefore the request
+ * complete function will be called, where we try
+ * to obtain the spinlock as well.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_ep_disable(port->port_usb->out);
+ if (d->rx_req) {
+ usb_ep_free_request(port->port_usb->out,
+ d->rx_req);
+ d->rx_req = NULL;
+ }
+
+ usb_ep_disable(port->port_usb->in);
+ if (d->tx_req) {
+ usb_ep_free_request(port->port_usb->in,
+ d->tx_req);
+ d->tx_req = NULL;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /* Only for SYS2BAM mode related UL workaround */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+
+ pr_debug("SKBs_RX_Q: freed:%d\n",
+ d->rx_skb_q.qlen);
+ while ((skb = __skb_dequeue(&d->rx_skb_q)))
+ dev_kfree_skb_any(skb);
+
+ bam2bam_free_rx_skb_idle_list(port);
+ pr_debug("SKBs: allocated:%d freed:%d\n",
+ d->total_skb, d->freed_skb);
+ pr_debug("rx_reqs: allocated:%d freed:%d\n",
+ d->alloc_rx_reqs, d->freed_rx_reqs);
+
+ /* reset all skb/reqs related statistics */
+ d->total_skb = 0;
+ d->freed_skb = 0;
+ d->freed_rx_reqs = 0;
+ d->alloc_rx_reqs = 0;
+ }
+
+			/*
+			 * Clear the endless flag, as the USB endpoint
+			 * is already disabled.
+			 */
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = false;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = false;
+
+ port->port_usb->in->driver_data = NULL;
+ port->port_usb->out->driver_data = NULL;
+
+ port->port_usb = NULL;
+ }
+ }
+
+ port->last_event = U_BAM_DATA_DISCONNECT_E;
+ /* Disable usb irq for CI gadget. It will be enabled in
+ * usb_bam_disconnect_pipe() after disconnecting all pipes
+ * and USB BAM reset is done.
+ */
+ if (!gadget_is_dwc3(port->gadget))
+ msm_usb_irq_disable(true);
+
+ queue_work(bam_data_wq, &port->disconnect_w);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+ u8 dev_port_num, enum function_type func)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+ int ret, port_num;
+ unsigned long flags;
+	int src_connection_idx, dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ if (!gr) {
+ pr_err("data port is null\n");
+ return -ENODEV;
+ }
+
+ port_num = u_bam_data_func_to_port(func, dev_port_num);
+ if (port_num < 0) {
+ pr_err("invalid portno#%d\n", port_num);
+ return -EINVAL;
+ }
+
+ if (trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ pr_err("invalid xport#%d\n", trans);
+ return -EINVAL;
+ }
+
+ pr_debug("dev:%pK port#%d\n", gr, port_num);
+
+ usb_bam_type = usb_bam_get_bam_type(gr->cdev->gadget->name);
+
+ src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+ dev_port_num);
+ dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+ IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+ dev_port_num);
+ if (src_connection_idx < 0 || dst_connection_idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
+		return -EINVAL;
+ }
+
+ port = bam2bam_data_ports[port_num];
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ port->port_usb = gr;
+ port->gadget = gr->cdev->gadget;
+ d = &port->data_ch;
+ d->src_connection_idx = src_connection_idx;
+ d->dst_connection_idx = dst_connection_idx;
+ d->usb_bam_type = usb_bam_type;
+
+ d->trans = trans;
+ d->func_type = func;
+ d->rx_buffer_size = (gr->rx_buffer_size ? gr->rx_buffer_size :
+ bam_mux_rx_req_size);
+
+ if (usb_bam_type == HSIC_CTRL) {
+ d->ipa_params.src_client = IPA_CLIENT_HSIC1_PROD;
+ d->ipa_params.dst_client = IPA_CLIENT_HSIC1_CONS;
+ } else {
+ d->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+ d->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+ }
+
+ pr_debug("%s(): rx_buffer_size:%d\n", __func__, d->rx_buffer_size);
+ d->ipa_params.src_pipe = &(d->src_pipe_idx);
+ d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params.src_idx = src_connection_idx;
+ d->ipa_params.dst_idx = dst_connection_idx;
+ d->rx_flow_control_disable = 0;
+ d->rx_flow_control_enable = 0;
+ d->rx_flow_control_triggered = 0;
+
+	/*
+	 * Query the pipe type (BAM2BAM or SYS2BAM) from the usb_bam
+	 * driver using the IPA src/dst connection index.
+	 */
+ if (usb_bam_get_pipe_type(usb_bam_type, d->ipa_params.src_idx,
+ &d->src_pipe_type) ||
+ usb_bam_get_pipe_type(usb_bam_type, d->ipa_params.dst_idx,
+ &d->dst_pipe_type)) {
+ pr_err("usb_bam_get_pipe_type() failed\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	/*
+	 * Check the pipe type. For BAM2BAM pipes the Xfer-complete and
+	 * Xfer-not-ready interrupts must be disabled for that endpoint,
+	 * so set the endless flag accordingly; the UDC driver honours
+	 * it when enabling the endpoint.
+	 */
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->in->endless = true;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ port->port_usb->out->endless = true;
+
+ ret = usb_ep_enable(gr->in);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:IN ep:%pK", gr->in);
+ goto exit;
+ }
+
+ gr->in->driver_data = port;
+
+ ret = usb_ep_enable(gr->out);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:OUT ep:%pK", gr->out);
+ goto disable_in_ep;
+ }
+
+ gr->out->driver_data = port;
+
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+
+ /* UL workaround requirements */
+ skb_queue_head_init(&d->rx_skb_q);
+ skb_queue_head_init(&d->rx_skb_idle);
+ INIT_LIST_HEAD(&d->rx_idle);
+
+ ret = bam_data_sys2bam_alloc_req(port, false);
+ if (ret) {
+ pr_err("%s: sys2bam_alloc_req failed(%d)",
+ __func__, ret);
+ goto disable_out_ep;
+ }
+ }
+
+ d->rx_req = usb_ep_alloc_request(port->port_usb->out,
+ GFP_ATOMIC);
+	if (!d->rx_req) {
+		pr_err("%s: failed to allocate rx_req\n", __func__);
+		ret = -ENOMEM;
+		goto bam_data_free;
+	}
+ d->rx_req->context = port;
+ d->rx_req->complete = bam_data_endless_rx_complete;
+ d->rx_req->length = 0;
+ d->rx_req->no_interrupt = 1;
+
+ d->tx_req = usb_ep_alloc_request(port->port_usb->in,
+ GFP_ATOMIC);
+	if (!d->tx_req) {
+		pr_err("%s: failed to allocate tx_req\n", __func__);
+		ret = -ENOMEM;
+		goto ep_out_req_free;
+	}
+
+ d->tx_req->context = port;
+ d->tx_req->complete = bam_data_endless_tx_complete;
+ d->tx_req->length = 0;
+ d->tx_req->no_interrupt = 1;
+
+ gr->out->driver_data = port;
+
+ port->last_event = U_BAM_DATA_CONNECT_E;
+
+ /* Wait for host to enable flow_control */
+ if (d->func_type == USB_FUNC_RNDIS) {
+ ret = 0;
+ goto exit;
+ }
+
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ queue_work(bam_data_wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return 0;
+
+ep_out_req_free:
+ usb_ep_free_request(port->port_usb->out, d->rx_req);
+bam_data_free:
+ bam_data_free_reqs(port);
+disable_out_ep:
+ gr->out->driver_data = 0;
+ usb_ep_disable(gr->out);
+disable_in_ep:
+ gr->in->driver_data = 0;
+ usb_ep_disable(gr->in);
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return ret;
+}
+
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port)
+{
+ int i;
+ int ret;
+
+ pr_debug("requested %d BAM2BAM ports", no_bam2bam_port);
+
+ if (!no_bam2bam_port || no_bam2bam_port > PORTS_PER_FUNC ||
+ func >= USB_NUM_FUNCS) {
+ pr_err("Invalid num of ports count:%d or function type:%d\n",
+ no_bam2bam_port, func);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < no_bam2bam_port; i++) {
+ n_bam2bam_data_ports++;
+ ret = bam2bam_data_port_alloc(u_bam_data_func_to_port(func, i));
+ if (ret) {
+ n_bam2bam_data_ports--;
+ pr_err("Failed to alloc port:%d\n", i);
+ goto free_bam_ports;
+ }
+ }
+
+ pr_debug("n_bam2bam_data_ports:%d\n", n_bam2bam_data_ports);
+
+ if (bam_data_wq) {
+ pr_debug("bam_data is already setup.");
+ return 0;
+ }
+
+ bam_data_wq = alloc_workqueue("k_bam_data",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!bam_data_wq) {
+ pr_err("Failed to create workqueue\n");
+ ret = -ENOMEM;
+ goto free_bam_ports;
+ }
+
+ return 0;
+
+free_bam_ports:
+ for (i = 0; i < n_bam2bam_data_ports; i++) {
+ kfree(bam2bam_data_ports[i]);
+ bam2bam_data_ports[i] = NULL;
+ if (bam_data_wq) {
+ destroy_workqueue(bam_data_wq);
+ bam_data_wq = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static int bam_data_wake_cb(void *param)
+{
+ int ret;
+ struct bam_data_port *port = (struct bam_data_port *)param;
+ struct data_port *d_port = port->port_usb;
+ struct usb_gadget *gadget;
+ struct usb_function *func;
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ if (!d_port) {
+ pr_err("FAILED: d_port == NULL");
+ return -ENODEV;
+ }
+
+ if (!d_port->cdev) {
+ pr_err("FAILED: d_port->cdev == NULL");
+ return -ENODEV;
+ }
+
+ gadget = d_port->cdev->gadget;
+ if (!gadget) {
+ pr_err("FAILED: d_port->cdev->gadget == NULL");
+ return -ENODEV;
+ }
+
+ func = d_port->func;
+
+	/*
+	 * In Super-Speed mode, remote wakeup is not allowed for a suspended
+	 * function that the host has not permitted to issue Function Remote
+	 * Wakeup.
+	 * Note: we deviate from the USB 3.0 spec here and allow non-suspended
+	 * functions to issue remote wakeup even if the host did not permit
+	 * it, in order to support hosts that are not fully USB 3.0 compliant.
+	 */
+ if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended))
+ ret = usb_func_wakeup(func);
+ else
+ ret = usb_gadget_wakeup(gadget);
+
+ if ((ret == -EBUSY) || (ret == -EAGAIN))
+ pr_debug("Remote wakeup is delayed due to LPM exit.\n");
+ else if (ret)
+ pr_err("Failed to wake up the USB core. ret=%d.\n", ret);
+
+ return ret;
+}
+
+static void bam_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct bam_data_port *port = param;
+ struct data_port *d_port = port->port_usb;
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_gadget *gadget;
+
+ if (!d_port || !d_port->cdev || !d_port->cdev->gadget) {
+ pr_err("%s:d_port,cdev or gadget is NULL\n", __func__);
+ return;
+ }
+ if (port->last_event != U_BAM_DATA_RESUME_E) {
+ pr_err("%s: Port state changed since resume. Bail out.\n",
+ __func__);
+ return;
+ }
+
+ gadget = d_port->cdev->gadget;
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ bam_data_start_endless_rx(port);
+ else {
+ bam_data_start_rx(port);
+ queue_work(bam_data_wq, &d->write_tobam_w);
+ }
+ } else {
+ if (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ }
+ bam_data_start_endless_tx(port);
+ }
+
+}
+
+static void bam_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct bam_data_port *port = param;
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ /*
+ * Only handling BAM2BAM, as there is no equivalent to
+ * bam_data_stop_endless_rx() for the SYS2BAM use case
+ */
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ bam_data_stop_endless_rx(port);
+ } else {
+ bam_data_stop_endless_tx(port);
+ }
+}
+
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled)
+{
+ struct bam_data_port *port;
+ unsigned long flags;
+ int port_num;
+
+ port_num = u_bam_data_func_to_port(func, dev_port_num);
+ if (port_num < 0) {
+ pr_err("invalid bam2bam portno#%d\n", port_num);
+ return;
+ }
+
+ pr_debug("%s: suspended port %d\n", __func__, port_num);
+
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ /* suspend with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /*
+ * When remote wakeup is disabled, IPA BAM is disconnected
+ * because it cannot send new data until the USB bus is resumed.
+ * Endpoint descriptors info is saved before it gets reset by
+ * the BAM disconnect API. This lets us restore this info when
+ * the USB bus is resumed.
+ */
+ port_usb->in_ep_desc_backup = port_usb->in->desc;
+ port_usb->out_ep_desc_backup = port_usb->out->desc;
+
+ pr_debug("in_ep_desc_backup = %pK, out_ep_desc_backup = %pK",
+ port_usb->in_ep_desc_backup,
+ port_usb->out_ep_desc_backup);
+
+ bam_data_disconnect(port_usb, func, dev_port_num);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->last_event = U_BAM_DATA_SUSPEND_E;
+ queue_work(bam_data_wq, &port->suspend_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled)
+{
+ struct bam_data_port *port;
+ unsigned long flags;
+ int port_num;
+
+ port_num = u_bam_data_func_to_port(func, dev_port_num);
+ if (port_num < 0) {
+ pr_err("invalid bam2bam portno#%d\n", port_num);
+ return;
+ }
+
+ pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+ port = bam2bam_data_ports[port_num];
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ /* resume with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /* Restore endpoint descriptors info. */
+ port_usb->in->desc = port_usb->in_ep_desc_backup;
+ port_usb->out->desc = port_usb->out_ep_desc_backup;
+
+ pr_debug("in_ep_desc_backup = %pK, out_ep_desc_backup = %pK",
+ port_usb->in_ep_desc_backup,
+ port_usb->out_ep_desc_backup);
+
+ bam_data_connect(port_usb, port->data_ch.trans,
+ dev_port_num, func);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->last_event = U_BAM_DATA_RESUME_E;
+
+ /*
+ * Increment usage count here to disallow gadget
+ * parent suspend. This counter will decrement
+ * after IPA handshake is done in disconnect work
+ * (due to cable disconnect) or in bam_data_disconnect
+ * in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(bam_data_wq, &port->resume_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void bam_data_flush_workqueue(void)
+{
+ pr_debug("%s(): Flushing workqueue\n", __func__);
+ flush_workqueue(bam_data_wq);
+}
+
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, suspend_w);
+ struct bam_data_ch_info *d;
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ d = &port->data_ch;
+
+	/* For RNDIS, the host enables flow control, which invokes connect_w.
+	 * If that is delayed, suspend_w may run before connect_w. In that
+	 * case connect_w might never start at all, e.g. if the cable is
+	 * disconnected or the host changes configuration (RNDIS --> MBIM).
+	 * Don't do a runtime_put here since there was no _get yet, and
+	 * detect this condition on disconnect to avoid an extra
+	 * pm_runtime_get in the SUSPEND --> DISCONNECT scenario.
+	 */
+ if (!port->is_ipa_connected) {
+ pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if ((port->last_event == U_BAM_DATA_DISCONNECT_E) ||
+ (port->last_event == U_BAM_DATA_RESUME_E)) {
+ pr_debug("%s: Port is about to disconnect/resume. Bail out.\n",
+ __func__);
+ goto exit;
+ }
+
+ ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+ bam_data_wake_cb, port);
+ if (ret) {
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ goto exit;
+ }
+
+ usb_bam_register_start_stop_cbs(d->usb_bam_type, d->dst_connection_idx,
+ bam_data_start, bam_data_stop,
+ port);
+
+ /*
+ * release lock here because bam_data_start() or
+ * bam_data_stop() called from usb_bam_suspend()
+ * re-acquires port lock.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+exit:
+ /*
+ * Decrement usage count after IPA handshake is done
+ * to allow gadget parent to go to lpm. This counter was
+ * incremented upon cable connect.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, resume_w);
+ struct bam_data_ch_info *d;
+ struct data_port *d_port;
+ struct usb_gadget *gadget;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb) {
+ pr_err("port->port_usb is NULL");
+ goto exit;
+ }
+
+ if (!port->port_usb->cdev) {
+		pr_err("port->port_usb->cdev is NULL");
+ goto exit;
+ }
+
+ if (!port->port_usb->cdev->gadget) {
+		pr_err("port->port_usb->cdev->gadget is NULL");
+ goto exit;
+ }
+
+ d = &port->data_ch;
+ d_port = port->port_usb;
+ gadget = d_port->cdev->gadget;
+
+ pr_debug("%s: resume work started\n", __func__);
+
+ if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+ pr_debug("%s: Port is about to disconnect. Bail out.\n",
+ __func__);
+ goto exit;
+ }
+
+ ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+ NULL, NULL);
+ if (ret) {
+ pr_err("%s(): Failed to un-register BAM wake callback.\n",
+ __func__);
+ goto exit;
+ }
+
+ /*
+ * If usb_req was dequeued as part of bus suspend then
+ * corresponding DBM IN and OUT EPs should also be reset.
+	 * There is a possibility that usb_bam may not have dequeued the
+ * request in case of quick back to back usb bus suspend resume.
+ */
+ if (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget)) {
+ if (d->tx_req_dequeued) {
+ configure_usb_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ msm_dwc3_reset_dbm_ep(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+ if (d->rx_req_dequeued) {
+ configure_usb_data_fifo(d->usb_bam_type,
+ d->src_connection_idx,
+ port->port_usb->out, d->src_pipe_type);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ msm_dwc3_reset_dbm_ep(port->port_usb->out);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+ }
+ d->tx_req_dequeued = false;
+ d->rx_req_dequeued = false;
+ usb_bam_resume(d->usb_bam_type, &d->ipa_params);
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void u_bam_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+	if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data.dl_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void u_bam_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+ if (!max_packets_number) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+
+ rndis_data.ul_max_packets_number = max_packets_number;
+
+ if (max_packets_number > 1)
+ rndis_data.ul_aggregation_enable = true;
+ else
+ rndis_data.ul_aggregation_enable = false;
+
+ pr_debug("%s(): ul_aggregation enable:%d\n", __func__,
+ rndis_data.ul_aggregation_enable);
+ pr_debug("%s(): ul_max_packets_number:%d\n", __func__,
+ max_packets_number);
+}
+
+void u_bam_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data.ul_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
diff --git a/drivers/usb/gadget/function/u_bam_data.h b/drivers/usb/gadget/function/u_bam_data.h
new file mode 100644
index 000000000000..e3acbd0c56a0
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include "usb_gadget_xport.h"
+
+enum function_type {
+ USB_FUNC_ECM,
+ USB_FUNC_MBIM,
+ USB_FUNC_RNDIS,
+ USB_NUM_FUNCS,
+};
+
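+/* One bam2bam data port is allocated per accelerated function type. */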
+#define PORTS_PER_FUNC 1
+#define BAM2BAM_DATA_N_PORTS (USB_NUM_FUNCS * PORTS_PER_FUNC)
+
+struct data_port {
+ struct usb_composite_dev *cdev;
+ struct usb_function *func;
+ struct usb_ep *in;
+ int rx_buffer_size;
+ struct usb_ep *out;
+ int ipa_consumer_ep;
+ int ipa_producer_ep;
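+	/* endpoint descriptors saved across bus suspend when remote
+	 * wakeup is disabled
+	 */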
+ const struct usb_endpoint_descriptor *in_ep_desc_backup;
+ const struct usb_endpoint_descriptor *out_ep_desc_backup;
+};
+
+void bam_data_disconnect(struct data_port *gr, enum function_type func,
+ u8 dev_port_num);
+
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+ u8 dev_port_num, enum function_type func);
+
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port);
+
+void bam_data_flush_workqueue(void);
+
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_flow_control_enable(bool enable);
+
+void u_bam_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void u_bam_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void u_bam_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void u_bam_data_start_rndis_ipa(void);
+
+void u_bam_data_stop_rndis_ipa(void);
+
+void bam_data_start_rx_tx(u8 port_num);
+
+int u_bam_data_func_to_port(enum function_type func, u8 func_port);
+#endif /* __U_BAM_DATA_H */
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
new file mode 100644
index 000000000000..013c54da0d0a
--- /dev/null
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+
+#include "u_rmnet.h"
+#include "f_qdss.h"
+
+#define RMNET_CTRL_QTI_NAME "rmnet_ctrl"
+#define DPL_CTRL_QTI_NAME "dpl_ctrl"
+/*
+ * Use the size of the gadget's QTI control name. Currently the RMNET and
+ * DPL gadgets use QTI as the control transport, so QTI_CTRL_NAME_LEN is
+ * derived from the RMNET ctrl name, which is the longer of the two.
+ */
+#define QTI_CTRL_NAME_LEN (sizeof(RMNET_CTRL_QTI_NAME)+2)
+
+struct qti_ctrl_port {
+ void *port_usb;
+ char name[QTI_CTRL_NAME_LEN];
+ struct miscdevice ctrl_device;
+
+ bool is_open;
+ int index;
+ unsigned intf;
+ int ipa_prod_idx;
+ int ipa_cons_idx;
+ enum peripheral_ep_type ep_type;
+
+ atomic_t connected;
+ atomic_t line_state;
+
+ atomic_t open_excl;
+ atomic_t read_excl;
+ atomic_t write_excl;
+ atomic_t ioctl_excl;
+
+ wait_queue_head_t read_wq;
+
+ struct list_head cpkt_req_q;
+
+ spinlock_t lock;
+ enum qti_port_type port_type;
+ unsigned host_to_modem;
+ unsigned copied_to_modem;
+ unsigned copied_from_modem;
+ unsigned modem_to_host;
+ unsigned drp_cpkt_cnt;
+};
+static struct qti_ctrl_port *ctrl_port[QTI_NUM_PORTS];
+
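+/*
+ * Simple try-lock built on an atomic counter: the first caller gets the
+ * resource, concurrent callers get -EBUSY. Used to serialize open, read,
+ * write and ioctl on the control device.
+ */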
+static inline int qti_ctrl_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1)
+ return 0;
+ atomic_dec(excl);
+ return -EBUSY;
+}
+
+static inline void qti_ctrl_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+static struct rmnet_ctrl_pkt *alloc_rmnet_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct rmnet_ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void free_rmnet_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+
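+/*
+ * Queue a zero length control packet so that a blocked reader wakes up
+ * and re-checks the connection/line state.
+ */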
+static void qti_ctrl_queue_notify(struct qti_ctrl_port *port)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt = NULL;
+
+ pr_debug("%s: Queue empty packet for QTI for port%d",
+ __func__, port->index);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (!port->is_open) {
+ pr_err("%s: rmnet ctrl file handler %pK is not open",
+ __func__, port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ list_add_tail(&cpkt->list, &port->cpkt_req_q);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pr_debug("%s: Wake up read queue", __func__);
+ wake_up(&port->read_wq);
+}
+
+static int gqti_ctrl_send_cpkt_tomodem(enum qti_port_type qport,
+ void *buf, size_t len)
+{
+ unsigned long flags;
+ struct qti_ctrl_port *port;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ if (len > MAX_QTI_PKT_SIZE) {
+ pr_err("given pkt size too big:%zu > max_pkt_size:%d\n",
+ len, MAX_QTI_PKT_SIZE);
+ return -EINVAL;
+ }
+
+ if (qport >= QTI_NUM_PORTS) {
+ pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+ return -ENODEV;
+ }
+ port = ctrl_port[qport];
+ cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
+ if (IS_ERR(cpkt)) {
+ pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(cpkt->buf, buf, len);
+ cpkt->len = len;
+
+ pr_debug("%s: port type:%d: Add to cpkt_req_q packet with len = %zu\n",
+ __func__, port->port_type, len);
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* drop cpkt if port is not open */
+ if (!port->is_open) {
+ pr_debug("rmnet file handler %pK(index=%d) is not open",
+ port, port->index);
+ port->drp_cpkt_cnt++;
+ spin_unlock_irqrestore(&port->lock, flags);
+ free_rmnet_ctrl_pkt(cpkt);
+ return 0;
+ }
+
+ list_add_tail(&cpkt->list, &port->cpkt_req_q);
+ port->host_to_modem++;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* wakeup read thread */
+ pr_debug("%s: Wake up read queue", __func__);
+ wake_up(&port->read_wq);
+
+ return 0;
+}
+
+static void
+gqti_ctrl_notify_modem(void *gptr, enum qti_port_type qport, int val)
+{
+ struct qti_ctrl_port *port;
+
+ if (qport >= QTI_NUM_PORTS) {
+ pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+ return;
+ }
+ port = ctrl_port[qport];
+ atomic_set(&port->line_state, val);
+
+ /* send 0 len pkt to qti to notify state change */
+ qti_ctrl_queue_notify(port);
+}
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
+{
+ struct qti_ctrl_port *port;
+ struct grmnet *g_rmnet = NULL;
+ unsigned long flags;
+
+ pr_debug("%s: port type:%d gadget:%pK\n", __func__, qport, gr);
+ if (qport >= QTI_NUM_PORTS) {
+ pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+ return -ENODEV;
+ }
+
+ port = ctrl_port[qport];
+ if (!port) {
+ pr_err("%s: gadget port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->port_type = qport;
+ port->ep_type = DATA_EP_TYPE_HSUSB;
+ port->intf = intf;
+
+ if (gr) {
+ port->port_usb = gr;
+ g_rmnet = (struct grmnet *)gr;
+ g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
+ g_rmnet->notify_modem = gqti_ctrl_notify_modem;
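+		/* DPL reports the line as active as soon as it connects */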
+ if (port->port_type == QTI_PORT_DPL)
+ atomic_set(&port->line_state, 1);
+ } else {
+ spin_unlock_irqrestore(&port->lock, flags);
+ pr_err("%s(): Port is used without port type.\n", __func__);
+ return -ENODEV;
+ }
+
+ port->host_to_modem = 0;
+ port->copied_to_modem = 0;
+ port->copied_from_modem = 0;
+ port->modem_to_host = 0;
+ port->drp_cpkt_cnt = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ atomic_set(&port->connected, 1);
+ wake_up(&port->read_wq);
+ if (port->port_usb && g_rmnet && g_rmnet->connect)
+ g_rmnet->connect(port->port_usb);
+
+ return 0;
+}
+
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
+{
+ struct qti_ctrl_port *port;
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+ struct grmnet *g_rmnet = NULL;
+
+ pr_debug("%s: gadget:%pK\n", __func__, gr);
+
+ if (qport >= QTI_NUM_PORTS) {
+ pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+ return;
+ }
+
+ port = ctrl_port[qport];
+ if (!port) {
+ pr_err("%s: gadget port is null\n", __func__);
+ return;
+ }
+
+ atomic_set(&port->connected, 0);
+ atomic_set(&port->line_state, 0);
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* reset ipa eps to -1 */
+ port->ipa_prod_idx = -1;
+ port->ipa_cons_idx = -1;
+ port->port_usb = NULL;
+
+ if (gr) {
+ g_rmnet = (struct grmnet *)gr;
+ g_rmnet->send_encap_cmd = NULL;
+ g_rmnet->notify_modem = NULL;
+ } else {
+ pr_err("%s(): unrecognized gadget type(%d).\n",
+ __func__, port->port_type);
+ }
+
+ while (!list_empty(&port->cpkt_req_q)) {
+ cpkt = list_first_entry(&port->cpkt_req_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ free_rmnet_ctrl_pkt(cpkt);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* send 0 len pkt to qti to notify state change */
+ qti_ctrl_queue_notify(port);
+}
+
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+ u32 ipa_prod, u32 ipa_cons)
+{
+ struct qti_ctrl_port *port;
+
+ if (qport >= QTI_NUM_PORTS) {
+ pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+ return;
+ }
+
+ port = ctrl_port[qport];
+ port->ipa_prod_idx = ipa_prod;
+ port->ipa_cons_idx = ipa_cons;
+
+}
+
+
+static int qti_ctrl_open(struct inode *ip, struct file *fp)
+{
+ unsigned long flags;
+ struct qti_ctrl_port *port = container_of(fp->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+
+ pr_debug("Open rmnet_ctrl_qti device file name=%s(index=%d)\n",
+ port->name, port->index);
+
+ if (qti_ctrl_lock(&port->open_excl)) {
+ pr_err("Already opened\n");
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->is_open = true;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static int qti_ctrl_release(struct inode *ip, struct file *fp)
+{
+ unsigned long flags;
+ struct qti_ctrl_port *port = container_of(fp->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+
+ pr_debug("Close rmnet control file");
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->is_open = false;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ qti_ctrl_unlock(&port->open_excl);
+
+ return 0;
+}
+
+static ssize_t
+qti_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+ struct qti_ctrl_port *port = container_of(fp->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+ struct rmnet_ctrl_pkt *cpkt = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ pr_debug("%s: Enter(%zu)\n", __func__, count);
+
+ if (count > MAX_QTI_PKT_SIZE) {
+ pr_err("Buffer size is too big %zu, should be at most %d\n",
+ count, MAX_QTI_PKT_SIZE);
+ return -EINVAL;
+ }
+
+ if (qti_ctrl_lock(&port->read_excl)) {
+ pr_err("Previous reading is not finished yet\n");
+ return -EBUSY;
+ }
+
+ /* block until a new packet is available */
+ do {
+ spin_lock_irqsave(&port->lock, flags);
+ if (!list_empty(&port->cpkt_req_q))
+ break;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pr_debug("%s: Requests list is empty. Wait.\n", __func__);
+ ret = wait_event_interruptible(port->read_wq,
+ !list_empty(&port->cpkt_req_q));
+ if (ret < 0) {
+ pr_debug("Waiting failed\n");
+ qti_ctrl_unlock(&port->read_excl);
+ return -ERESTARTSYS;
+ }
+ } while (1);
+
+ cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
+ list);
+ list_del(&cpkt->list);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (cpkt->len > count) {
+ pr_err("cpkt size too big:%d > buf size:%zu\n",
+ cpkt->len, count);
+ qti_ctrl_unlock(&port->read_excl);
+ free_rmnet_ctrl_pkt(cpkt);
+ return -ENOMEM;
+ }
+
+ pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
+
+
+ qti_ctrl_unlock(&port->read_excl);
+
+ ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+ if (ret) {
+ pr_err("copy_to_user failed: err %d\n", ret);
+ ret = -EFAULT;
+ } else {
+ pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
+ ret = cpkt->len;
+ port->copied_to_modem++;
+ }
+
+ free_rmnet_ctrl_pkt(cpkt);
+
+ return ret;
+}
+
+static ssize_t
+qti_ctrl_write(struct file *fp, const char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct qti_ctrl_port *port = container_of(fp->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+ void *kbuf;
+ unsigned long flags;
+ int ret = 0;
+ struct grmnet *g_rmnet = NULL;
+
+ pr_debug("%s: Enter(%zu) port_index=%d", __func__, count, port->index);
+
+ if (!count) {
+ pr_debug("zero length ctrl pkt\n");
+ return -EINVAL;
+ }
+
+ if (count > MAX_QTI_PKT_SIZE) {
+ pr_debug("given pkt size too big:%zu > max_pkt_size:%d\n",
+ count, MAX_QTI_PKT_SIZE);
+ return -EINVAL;
+ }
+
+ if (qti_ctrl_lock(&port->write_excl)) {
+ pr_err("Previous writing not finished yet\n");
+ return -EBUSY;
+ }
+
+ if (!atomic_read(&port->connected)) {
+ pr_debug("USB cable not connected\n");
+ qti_ctrl_unlock(&port->write_excl);
+ return -EPIPE;
+ }
+
+ kbuf = kmalloc(count, GFP_KERNEL);
+ if (!kbuf) {
+ qti_ctrl_unlock(&port->write_excl);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret) {
+ pr_err("copy_from_user failed err:%d\n", ret);
+ kfree(kbuf);
+ qti_ctrl_unlock(&port->write_excl);
+ return -EFAULT;
+ }
+ port->copied_from_modem++;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->port_usb) {
+ if (port->port_type == QTI_PORT_RMNET) {
+ g_rmnet = (struct grmnet *)port->port_usb;
+ } else {
+ spin_unlock_irqrestore(&port->lock, flags);
+ pr_err("%s(): unrecognized gadget type(%d).\n",
+ __func__, port->port_type);
+			kfree(kbuf);
+			qti_ctrl_unlock(&port->write_excl);
+			return -EINVAL;
+ }
+
+ if (g_rmnet && g_rmnet->send_cpkt_response) {
+ ret = g_rmnet->send_cpkt_response(port->port_usb,
+ kbuf, count);
+ if (ret)
+				pr_err("failed to send ctrl packet, err %d\n", ret);
+ port->modem_to_host++;
+ } else {
+ pr_err("send_cpkt_response callback is NULL\n");
+ ret = -EINVAL;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ kfree(kbuf);
+ qti_ctrl_unlock(&port->write_excl);
+
+ pr_debug("%s: Exit(%zu)", __func__, count);
+ return (ret) ? ret : count;
+}
+
+static long qti_ctrl_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+ struct qti_ctrl_port *port = container_of(fp->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+ struct grmnet *gr = NULL;
+ struct ep_info info;
+ int val, ret = 0;
+
+ pr_debug("%s: Received command %d for port type:%d\n",
+ __func__, cmd, port->port_type);
+
+ if (qti_ctrl_lock(&port->ioctl_excl))
+ return -EBUSY;
+
+ switch (cmd) {
+ case QTI_CTRL_MODEM_OFFLINE:
+ if (port && (port->port_type == QTI_PORT_DPL)) {
+ pr_err("%s(): Modem Offline not handled\n", __func__);
+ goto exit_ioctl;
+ }
+
+ if (port && port->port_usb)
+ gr = port->port_usb;
+
+ if (gr && gr->disconnect)
+ gr->disconnect(gr);
+ break;
+ case QTI_CTRL_MODEM_ONLINE:
+ if (port && (port->port_type == QTI_PORT_DPL)) {
+ pr_err("%s(): Modem Online not handled\n", __func__);
+ goto exit_ioctl;
+ }
+
+ if (port && port->port_usb)
+ gr = port->port_usb;
+
+ if (gr && gr->connect)
+ gr->connect(gr);
+ break;
+ case QTI_CTRL_GET_LINE_STATE:
+ val = atomic_read(&port->line_state);
+ ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+ if (ret) {
+ pr_err("copying to user space failed");
+ ret = -EFAULT;
+ }
+ pr_debug("%s: Sent line_state: %d for port type:%d\n", __func__,
+ atomic_read(&port->line_state), port->port_type);
+ break;
+ case QTI_CTRL_EP_LOOKUP:
+
+ pr_debug("%s(): EP_LOOKUP for port type:%d\n", __func__,
+ port->port_type);
+ val = atomic_read(&port->connected);
+ if (!val) {
+ pr_err_ratelimited("EP_LOOKUP failed: not connected\n");
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (port->ipa_prod_idx == -1 && port->ipa_cons_idx == -1) {
+ pr_err_ratelimited("EP_LOOKUP ipa pipes not updated\n");
+ ret = -EAGAIN;
+ break;
+ }
+
+ info.ph_ep_info.ep_type = port->ep_type;
+ info.ph_ep_info.peripheral_iface_id = port->intf;
+ info.ipa_ep_pair.cons_pipe_num = port->ipa_cons_idx;
+ info.ipa_ep_pair.prod_pipe_num = port->ipa_prod_idx;
+
+ pr_debug("%s(): port type:%d ep_type:%d intf:%d\n",
+ __func__, port->port_type, info.ph_ep_info.ep_type,
+ info.ph_ep_info.peripheral_iface_id);
+
+ pr_debug("%s(): ipa_cons_idx:%d ipa_prod_idx:%d\n",
+ __func__, info.ipa_ep_pair.cons_pipe_num,
+ info.ipa_ep_pair.prod_pipe_num);
+
+ ret = copy_to_user((void __user *)arg, &info,
+ sizeof(info));
+ if (ret) {
+ pr_err("copying to user space failed");
+ ret = -EFAULT;
+ }
+ break;
+ default:
+ pr_err("wrong parameter");
+ ret = -EINVAL;
+ }
+
+exit_ioctl:
+ qti_ctrl_unlock(&port->ioctl_excl);
+
+ return ret;
+}
+
+static unsigned int qti_ctrl_poll(struct file *file, poll_table *wait)
+{
+ struct qti_ctrl_port *port = container_of(file->private_data,
+ struct qti_ctrl_port,
+ ctrl_device);
+ unsigned long flags;
+ unsigned int mask = 0;
+
+ if (!port) {
+ pr_err("%s on a NULL device\n", __func__);
+ return POLLERR;
+ }
+
+ poll_wait(file, &port->read_wq, wait);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (!list_empty(&port->cpkt_req_q)) {
+ mask |= POLLIN | POLLRDNORM;
+ pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return mask;
+}
+
+static int qti_ctrl_read_stats(struct seq_file *s, void *unused)
+{
+ struct qti_ctrl_port *port = s->private;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < QTI_NUM_PORTS; i++) {
+ port = ctrl_port[i];
+ if (!port)
+ continue;
+ spin_lock_irqsave(&port->lock, flags);
+
+ seq_printf(s, "\n#PORT:%d port: %pK\n", i, port);
+ seq_printf(s, "name: %s\n", port->name);
+ seq_printf(s, "host_to_modem: %d\n",
+ port->host_to_modem);
+ seq_printf(s, "copied_to_modem: %d\n",
+ port->copied_to_modem);
+ seq_printf(s, "copied_from_modem: %d\n",
+ port->copied_from_modem);
+ seq_printf(s, "modem_to_host: %d\n",
+ port->modem_to_host);
+ seq_printf(s, "cpkt_drp_cnt: %d\n",
+ port->drp_cpkt_cnt);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return 0;
+}
+
+static int qti_ctrl_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qti_ctrl_read_stats, inode->i_private);
+}
+
+static ssize_t qti_ctrl_reset_stats(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct qti_ctrl_port *port = s->private;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < QTI_NUM_PORTS; i++) {
+ port = ctrl_port[i];
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->host_to_modem = 0;
+ port->copied_to_modem = 0;
+ port->copied_from_modem = 0;
+ port->modem_to_host = 0;
+ port->drp_cpkt_cnt = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+ return count;
+}
+
+const struct file_operations qti_ctrl_stats_ops = {
+ .open = qti_ctrl_stats_open,
+ .read = seq_read,
+ .write = qti_ctrl_reset_stats,
+};
+
+static struct dentry *qti_ctrl_dent;
+static void qti_ctrl_debugfs_init(void)
+{
+ struct dentry *qti_ctrl_dfile;
+
+ qti_ctrl_dent = debugfs_create_dir("usb_qti", 0);
+ if (IS_ERR(qti_ctrl_dent))
+ return;
+
+ qti_ctrl_dfile =
+ debugfs_create_file("status", 0444, qti_ctrl_dent, 0,
+ &qti_ctrl_stats_ops);
+ if (!qti_ctrl_dfile || IS_ERR(qti_ctrl_dfile))
+ debugfs_remove(qti_ctrl_dent);
+}
+
+static void qti_ctrl_debugfs_exit(void)
+{
+ debugfs_remove_recursive(qti_ctrl_dent);
+}
+
+/* file operations for rmnet device /dev/rmnet_ctrl */
+static const struct file_operations qti_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = qti_ctrl_open,
+ .release = qti_ctrl_release,
+ .read = qti_ctrl_read,
+ .write = qti_ctrl_write,
+ .unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = qti_ctrl_ioctl,
+#endif
+ .poll = qti_ctrl_poll,
+};
+/* file operations for DPL device /dev/dpl_ctrl */
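+/* DPL is read-only from user space, so no write handler is provided */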
+static const struct file_operations dpl_qti_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = qti_ctrl_open,
+ .release = qti_ctrl_release,
+ .read = qti_ctrl_read,
+ .write = NULL,
+ .unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = qti_ctrl_ioctl,
+#endif
+ .poll = qti_ctrl_poll,
+};
+
+int gqti_ctrl_init(void)
+{
+ int ret, i, sz = QTI_CTRL_NAME_LEN;
+ struct qti_ctrl_port *port = NULL;
+
+ for (i = 0; i < QTI_NUM_PORTS; i++) {
+ port = kzalloc(sizeof(struct qti_ctrl_port), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ goto fail_init;
+ }
+
+ INIT_LIST_HEAD(&port->cpkt_req_q);
+ spin_lock_init(&port->lock);
+
+ atomic_set(&port->open_excl, 0);
+ atomic_set(&port->read_excl, 0);
+ atomic_set(&port->write_excl, 0);
+ atomic_set(&port->ioctl_excl, 0);
+ atomic_set(&port->connected, 0);
+ atomic_set(&port->line_state, 0);
+
+ init_waitqueue_head(&port->read_wq);
+
+ ctrl_port[i] = port;
+ port->index = i;
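+		/* -1 means IPA pipe indices not yet assigned (see EP_LOOKUP) */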
+ port->ipa_prod_idx = -1;
+ port->ipa_cons_idx = -1;
+
+ if (i == QTI_PORT_RMNET)
+ strlcat(port->name, RMNET_CTRL_QTI_NAME, sz);
+ else if (i == QTI_PORT_DPL)
+ strlcat(port->name, DPL_CTRL_QTI_NAME, sz);
+ else
+ snprintf(port->name, sz, "%s%d",
+ RMNET_CTRL_QTI_NAME, i);
+
+ port->ctrl_device.name = port->name;
+ if (i == QTI_PORT_DPL)
+ port->ctrl_device.fops = &dpl_qti_ctrl_fops;
+ else
+ port->ctrl_device.fops = &qti_ctrl_fops;
+ port->ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&port->ctrl_device);
+ if (ret) {
+ pr_err("rmnet control driver failed to register");
+ goto fail_init;
+ }
+ }
+ qti_ctrl_debugfs_init();
+ return ret;
+
+fail_init:
+ for (i--; i >= 0; i--) {
+ misc_deregister(&ctrl_port[i]->ctrl_device);
+ kfree(ctrl_port[i]);
+ ctrl_port[i] = NULL;
+ }
+ return ret;
+}
+
+void gqti_ctrl_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < QTI_NUM_PORTS; i++) {
+ misc_deregister(&ctrl_port[i]->ctrl_device);
+ kfree(ctrl_port[i]);
+ ctrl_port[i] = NULL;
+ }
+ qti_ctrl_debugfs_exit();
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 000000000000..d9a0b0e0b271
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,1401 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "u_data_ipa.h"
+#include "u_rmnet.h"
+
+struct ipa_data_ch_info {
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+ unsigned long flags;
+ unsigned id;
+ enum ipa_func_type func_type;
+ bool is_connected;
+ unsigned port_num;
+ spinlock_t port_lock;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+ struct gadget_ipa_port *port_usb;
+ struct usb_gadget *gadget;
+ atomic_t pipe_connect_notified;
+ struct usb_bam_connect_ipa_params ipa_params;
+};
+
+struct rndis_data_ch_info {
+	/* downlink (device -> host) side configuration */
+	u32 dl_max_transfer_size;
+	/* uplink (host -> device) side configuration */
+ u32 ul_max_transfer_size;
+ u32 ul_max_packets_number;
+ bool ul_aggregation_enable;
+ u32 prod_clnt_hdl;
+ u32 cons_clnt_hdl;
+ void *priv;
+};
+
+static struct workqueue_struct *ipa_data_wq;
+struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happen
+ * @req: USB endless request
+ *
+ * This completion is called when an endless (TX/RX) transfer is terminated,
+ * i.e. in the disconnect or suspend case.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ pr_debug("%s: endless complete for(%s) with status: %d\n",
+ __func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - configure USB endpoint and
+ * queue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX)
+ *
+ * It is used to queue an endless TX/RX request with the UDC driver.
+ * It sets the required DBM endpoint configuration before queueing the
+ * endless TX/RX request.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+ unsigned long flags;
+ int status;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || (in && !port->tx_req)
+ || (!in && !port->rx_req)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): port_usb/req is NULL.\n", __func__);
+ return;
+ }
+
+ if (in)
+ ep = port->port_usb->in;
+ else
+ ep = port->port_usb->out;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (in) {
+ pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+ status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing endless TX_REQ, %d\n", status);
+ } else {
+ pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+ status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing endless RX_REQ, %d\n", status);
+ }
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX
+ *
+ * It is used to terminate and dequeue an endless TX/RX request with the UDC
+ * driver.
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+ unsigned long flags;
+ int status;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || (in && !port->tx_req)
+ || (!in && !port->rx_req)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): port_usb/req is NULL.\n", __func__);
+ return;
+ }
+
+ if (in)
+ ep = port->port_usb->in;
+ else
+ ep = port->port_usb->out;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (in) {
+ pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+ status = usb_ep_dequeue(ep, port->tx_req);
+ if (status)
+ pr_err("error dequeueing endless TX_REQ, %d\n", status);
+ } else {
+ pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+ status = usb_ep_dequeue(ep, port->rx_req);
+ if (status)
+ pr_err("error dequeueing endless RX_REQ, %d\n", status);
+ }
+}
+
+/*
+ * Called when IPA notifies us that the network interface is up.
+ * Starts the transfers on the bulk endpoints.
+ * (For optimization reasons, the pipes and BAM connection with IPA are
+ * already set up at this point.)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_ep *epin, *epout;
+
+ pr_debug("%s: Triggered: starting tx, rx", __func__);
+ /* queue in & out requests */
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL, can't start tx, rx", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb || !port->port_usb->in ||
+ !port->port_usb->out) {
+ pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (!port->rx_req || !port->tx_req) {
+ pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+ port->rx_req, port->tx_req);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ if (!port->is_connected) {
+ pr_debug("%s: pipes are disconnected", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ epout = port->port_usb->out;
+ epin = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* queue in & out requests */
+ pr_debug("%s: Starting rx", __func__);
+ if (epout)
+ ipa_data_start_endless_xfer(port, false);
+
+ pr_debug("%s: Starting tx", __func__);
+ if (epin)
+ ipa_data_start_endless_xfer(port, true);
+}
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * It is scheduled from the ipa_data_disconnect() API when a particular
+ * function is disabled due to a USB disconnect or a USB composition switch.
+ * It disconnects the USB BAM and IPA BAM pipes and initiates the USB IPA BAM
+ * pipe handshake for the USB disconnect sequence. Due to the handshake
+ * operation and the SPS related APIs involved, this functionality can't be
+ * used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ disconnect_w);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("Already disconnected.\n");
+ return;
+ }
+ port->is_connected = false;
+ pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+ port->ipa_params.prod_clnt_hdl,
+ port->ipa_params.cons_clnt_hdl);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+ if (ret)
+ pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ /*
+		 * NOTE: the USB and IPA BAM related pipes must be disconnected
+		 * before calling the IPA tethered function's disconnect API.
+		 * That API deletes the dependency graph with IPA RM, which
+		 * would result in IPA not pulling data even though data is
+		 * pending on the USB BAM producer pipe.
+ */
+ if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ }
+
+ if (port->ipa_params.prod_clnt_hdl)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ if (port->ipa_params.cons_clnt_hdl)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+
+ if (port->func_type == USB_IPA_FUNC_RMNET)
+ teth_bridge_disconnect(port->ipa_params.src_client);
+ /*
+ * Decrement usage count which was incremented
+ * upon cable connect or cable disconnect in suspended state.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @func: IPA function type of the function driver that is being disabled
+ *
+ * It is called from atomic context by the gadget driver when a particular
+ * function is disabled due to a USB cable disconnect or a USB composition
+ * switch. It restores normal USB endpoint operation and disables the USB
+ * endpoints used for the accelerated path.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_gadget *gadget = NULL;
+
+ pr_debug("dev:%pK port number:%d\n", gp, func);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("port %u is NULL", func);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb) {
+ gadget = port->port_usb->cdev->gadget;
+ port->port_usb->ipa_consumer_ep = -1;
+ port->port_usb->ipa_producer_ep = -1;
+
+ if (port->port_usb->in) {
+ /*
+ * Disable endpoints.
+ * Unlocking is needed since disabling the eps might
+ * stop active transfers and therefore the request
+ * complete function will be called, where we try
+ * to obtain the spinlock as well.
+ */
+ msm_ep_unconfig(port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_ep_disable(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->tx_req) {
+ usb_ep_free_request(port->port_usb->in,
+ port->tx_req);
+ port->tx_req = NULL;
+ }
+ port->port_usb->in->endless = false;
+ }
+
+ if (port->port_usb->out) {
+ msm_ep_unconfig(port->port_usb->out);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_ep_disable(port->port_usb->out);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->rx_req) {
+ usb_ep_free_request(port->port_usb->out,
+ port->rx_req);
+ port->rx_req = NULL;
+ }
+ port->port_usb->out->endless = false;
+ }
+
+ port->port_usb = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @bam_type: USB BAM type
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures the USB BAM data FIFO using the pipe configuration
+ * fetched for the provided index. It needs to be used before starting an
+ * endless transfer.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+ struct sps_mem_buffer data_fifo = {0};
+ u32 usb_bam_pipe_idx;
+
+ get_bam2bam_connection_info(bam_type, idx,
+ &usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+ msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+ usb_bam_pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * It is scheduled from the ipa_data_connect() API for a function that uses
+ * the USB IPA accelerated path. It allocates endless requests for the USB
+ * endpoints (tx/rx), configures the USB endpoints for the accelerated path,
+ * connects the USB BAM and IPA BAM pipes, and initiates the USB IPA BAM pipe
+ * handshake for the connect sequence.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ connect_w);
+ struct gadget_ipa_port *gport;
+ struct usb_gadget *gadget = NULL;
+ struct teth_bridge_connect_params connect_params;
+ struct teth_bridge_init_params teth_bridge_params;
+ u32 sps_params;
+ int ret;
+ unsigned long flags;
+ bool is_ipa_disconnected = true;
+
+ pr_debug("%s: Connect workqueue started\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ pr_err("%s(): port_usb is NULL.\n", __func__);
+ return;
+ }
+
+ gport = port->port_usb;
+ if (gport && gport->cdev)
+ gadget = gport->cdev->gadget;
+
+ if (!gadget) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ pr_err("%s: gport is NULL.\n", __func__);
+ return;
+ }
+
+ /*
+	 * Check whether connect_w has already run; it can be invoked twice
+	 * during RNDIS resume, since explicit flow control is used to start
+	 * data transfers after ipa_data_connect().
+ */
+ if (port->is_connected) {
+ pr_debug("IPA connect is already done & Transfers started\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ return;
+ }
+
+ gport->ipa_consumer_ep = -1;
+ gport->ipa_producer_ep = -1;
+
+ port->is_connected = true;
+
+	/* update IPA parameters here */
+ port->ipa_params.usb_connection_speed = gadget->speed;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.skip_ep_cfg = true;
+ port->ipa_params.keep_ipa_awake = true;
+ port->ipa_params.cons_clnt_hdl = -1;
+ port->ipa_params.prod_clnt_hdl = -1;
+
+ if (gport->out) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_alloc_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || port->rx_req == NULL) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
+ __func__);
+ goto out;
+ }
+
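+		/*
+		 * Pass the SPS/BAM pipe configuration to the UDC through
+		 * udc_priv and set a 32KB length for the endless OUT request.
+		 */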
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ | MSM_PRODUCER | port->src_pipe_idx;
+ port->rx_req->length = 32*1024;
+ port->rx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->src_connection_idx,
+ port->port_usb->out);
+ ret = msm_ep_config(gport->out, port->rx_req);
+ if (ret) {
+ pr_err("msm_ep_config() failed for OUT EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto out;
+ }
+ }
+
+ if (gport->in) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_alloc_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || port->tx_req == NULL) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
+ __func__);
+ goto unconfig_msm_ep_out;
+ }
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ port->dst_pipe_idx;
+ port->tx_req->length = 32*1024;
+ port->tx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, gport->in);
+ ret = msm_ep_config(gport->in, port->tx_req);
+ if (ret) {
+ pr_err("msm_ep_config() failed for IN EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto unconfig_msm_ep_out;
+ }
+ }
+
+ if (port->func_type == USB_IPA_FUNC_RMNET) {
+ teth_bridge_params.client = port->ipa_params.src_client;
+ ret = teth_bridge_init(&teth_bridge_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_init() failed\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto unconfig_msm_ep_in;
+ }
+ }
+
+ /*
+	 * Perform the following operations for the OUT transfer:
+	 * 1. Connect the USB BAM pipe with the IPA BAM pipe.
+	 * 2. Update the USB endpoint related information using SPS params.
+	 * 3. Configure the USB endpoint/DBM accordingly.
+	 * 4. Override the USB ep queue functionality for endless transfer.
+ */
+ if (gport->out) {
+ pr_debug("configure bam ipa connect for USB OUT\n");
+ port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ } else if (port->func_type == USB_IPA_FUNC_RMNET) {
+ port->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ port->ipa_params.priv =
+ teth_bridge_params.private_data;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ port->ipa_params.skip_ep_cfg =
+ teth_bridge_params.skip_ep_cfg;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(port->usb_bam_type,
+ &port->ipa_params);
+ if (ret) {
+ pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ is_ipa_disconnected = false;
+ /* check if USB cable is disconnected or not */
+ if (!port->port_usb) {
+ pr_debug("%s:%d: cable is disconnected.\n",
+ __func__, __LINE__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto disconnect_usb_bam_ipa_out;
+ }
+
+ gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+ }
+
+ if (gport->in) {
+ pr_debug("configure bam ipa connect for USB IN\n");
+ port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ } else if (port->func_type == USB_IPA_FUNC_RMNET) {
+ port->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ port->ipa_params.priv =
+ teth_bridge_params.private_data;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ port->ipa_params.skip_ep_cfg =
+ teth_bridge_params.skip_ep_cfg;
+ }
+
+ if (port->func_type == USB_IPA_FUNC_DPL)
+ port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(port->usb_bam_type,
+ &port->ipa_params);
+ if (ret) {
+ pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ is_ipa_disconnected = false;
+ /* check if USB cable is disconnected or not */
+ if (!port->port_usb) {
+ pr_debug("%s:%d: cable is disconnected.\n",
+ __func__, __LINE__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto disconnect_usb_bam_ipa_out;
+ }
+
+ gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ rndis_data->prod_clnt_hdl =
+ port->ipa_params.prod_clnt_hdl;
+ rndis_data->cons_clnt_hdl =
+ port->ipa_params.cons_clnt_hdl;
+ rndis_data->priv = port->ipa_params.priv;
+
+ pr_debug("ul_max_transfer_size:%d\n",
+ rndis_data->ul_max_transfer_size);
+ pr_debug("ul_max_packets_number:%d\n",
+ rndis_data->ul_max_packets_number);
+ pr_debug("dl_max_transfer_size:%d\n",
+ rndis_data->dl_max_transfer_size);
+
+ ret = rndis_ipa_pipe_connect_notify(
+ rndis_data->cons_clnt_hdl,
+ rndis_data->prod_clnt_hdl,
+ rndis_data->ul_max_transfer_size,
+ rndis_data->ul_max_packets_number,
+ rndis_data->dl_max_transfer_size,
+ rndis_data->priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ atomic_set(&port->pipe_connect_notified, 1);
+ } else if (port->func_type == USB_IPA_FUNC_RMNET ||
+ port->func_type == USB_IPA_FUNC_DPL) {
+		/* For RmNet and DPL, report the updated IPA pipes to QTI */
+ enum qti_port_type qti_port_type = port->func_type ==
+ USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
+ gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
+ gport->ipa_producer_ep, gport->ipa_consumer_ep);
+ }
+
+ if (port->func_type == USB_IPA_FUNC_RMNET) {
+ connect_params.ipa_usb_pipe_hdl =
+ port->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl =
+ port->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode =
+ TETH_TETHERING_MODE_RMNET;
+ connect_params.client_type =
+ port->ipa_params.src_client;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n", __func__);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ }
+
+ pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+ gport->ipa_producer_ep,
+ gport->ipa_consumer_ep);
+
+ pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+ port->src_connection_idx, port->dst_connection_idx);
+
+ /* Don't queue the transfers yet, only after network stack is up */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ pr_debug("%s: Not starting now, waiting for network notify",
+ __func__);
+ return;
+ }
+
+ if (gport->out)
+ ipa_data_start_endless_xfer(port, false);
+ if (gport->in)
+ ipa_data_start_endless_xfer(port, true);
+
+ pr_debug("Connect workqueue done (port %pK)", port);
+ return;
+
+disconnect_usb_bam_ipa_out:
+ if (!is_ipa_disconnected) {
+ usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+ is_ipa_disconnected = true;
+ }
+ if (port->func_type == USB_IPA_FUNC_RMNET)
+ teth_bridge_disconnect(port->ipa_params.src_client);
+unconfig_msm_ep_in:
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb && gport->in)
+ msm_ep_unconfig(port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+unconfig_msm_ep_out:
+ if (gport->in)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb && gport->out)
+ msm_ep_unconfig(port->port_usb->out);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+ if (gport->out)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->is_connected = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @func: IPA function type used by the accelerated function
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * It is called from the accelerated function driver (from set_alt()) to
+ * initiate the USB BAM IPA connection. This API enables the accelerated
+ * endpoints and schedules connect_work(), which establishes the USB IPA BAM
+ * communication.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ u8 src_connection_idx, u8 dst_connection_idx)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ int ret = 0;
+
+ pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+ gp, func, src_connection_idx, dst_connection_idx);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid portno#%d\n", func);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (!gp) {
+ pr_err("gadget port is null\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ port = ipa_data_ports[func];
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gp;
+ port->gadget = gp->cdev->gadget;
+
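+	/*
+	 * Allocate the endless TX/RX requests used for the accelerated data
+	 * path; they are queued once and stay active until explicitly
+	 * dequeued on disconnect or suspend.
+	 */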
+ if (gp->out) {
+ port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC);
+ if (!port->rx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: failed to allocate rx_req\n", __func__);
+ goto err;
+ }
+ port->rx_req->context = port;
+ port->rx_req->complete = ipa_data_endless_complete;
+ port->rx_req->length = 0;
+ port->rx_req->no_interrupt = 1;
+ }
+
+ if (gp->in) {
+ port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC);
+ if (!port->tx_req) {
+ pr_err("%s: failed to allocate tx_req\n", __func__);
+ goto free_rx_req;
+ }
+ port->tx_req->context = port;
+ port->tx_req->complete = ipa_data_endless_complete;
+ port->tx_req->length = 0;
+ port->tx_req->no_interrupt = 1;
+ }
+ port->src_connection_idx = src_connection_idx;
+ port->dst_connection_idx = dst_connection_idx;
+ port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+ port->ipa_params.src_pipe = &(port->src_pipe_idx);
+ port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+ port->ipa_params.src_idx = src_connection_idx;
+ port->ipa_params.dst_idx = dst_connection_idx;
+
+ /*
+	 * Disable the "Xfer complete" and "Xfer not ready" interrupts by
+	 * setting the endless flag, which the UDC driver checks before
+	 * enabling these interrupts. With this set, these interrupts won't be
+	 * enabled for the selected endpoints.
+ */
+ if (port->port_usb->in) {
+ port->port_usb->in->endless = true;
+ ret = usb_ep_enable(port->port_usb->in);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:IN ep:%pK",
+ port->port_usb->in);
+ usb_ep_free_request(port->port_usb->in, port->tx_req);
+ port->tx_req = NULL;
+ port->port_usb->in->endless = false;
+ goto err_usb_in;
+ }
+ }
+
+ if (port->port_usb->out) {
+ port->port_usb->out->endless = true;
+ ret = usb_ep_enable(port->port_usb->out);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:OUT ep:%pK",
+ port->port_usb->out);
+ usb_ep_free_request(port->port_usb->out, port->rx_req);
+ port->rx_req = NULL;
+ port->port_usb->out->endless = false;
+ goto err_usb_out;
+ }
+ }
+
+ /* Wait for host to enable flow_control */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = 0;
+ return ret;
+ }
+
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ queue_work(ipa_data_wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return ret;
+
+err_usb_out:
+ if (port->port_usb->in) {
+ usb_ep_disable(port->port_usb->in);
+ port->port_usb->in->endless = false;
+ }
+err_usb_in:
+ if (gp->in && port->tx_req) {
+ usb_ep_free_request(gp->in, port->tx_req);
+ port->tx_req = NULL;
+ }
+free_rx_req:
+ if (gp->out && port->rx_req) {
+ usb_ep_free_request(gp->out, port->rx_req);
+ port->rx_req = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+ pr_debug("%s(): failed with error:%d\n", __func__, ret);
+ return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is used to restart the USB endless transfer on USB bus resume.
+ * For the USB consumer case it restarts the endless RX transfer, whereas
+ * for the USB producer case it resets the DBM endpoint and restarts the
+ * endless TX transfer.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct ipa_data_ch_info *port = param;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+ pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ pr_debug("%s(): start endless RX\n", __func__);
+ ipa_data_start_endless_xfer(port, false);
+ } else {
+ pr_debug("%s(): start endless TX\n", __func__);
+ if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, port->port_usb->in);
+ }
+ ipa_data_start_endless_xfer(port, true);
+ }
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is used to stop the endless Tx/Rx transfers as part of USB bus
+ * suspend.
+ */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct ipa_data_ch_info *port = param;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+ pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ pr_debug("%s(): stop endless RX transfer\n", __func__);
+ ipa_data_stop_endless_xfer(port, false);
+ } else {
+ pr_debug("%s(): stop endless TX transfer\n", __func__);
+ ipa_data_stop_endless_xfer(port, true);
+ }
+}
+
+void ipa_data_flush_workqueue(void)
+{
+ pr_debug("%s(): Flushing workqueue\n", __func__);
+ flush_workqueue(ipa_data_wq);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type used by the function
+ * @remote_wakeup_enabled: true if remote wakeup is enabled on the bus
+ *
+ * It is used to initiate the USB BAM IPA suspend sequence on USB bus
+ * suspend.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+ pr_debug("%s: suspended port %d\n", __func__, func);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ /* suspend with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /*
+ * When remote wakeup is disabled, IPA BAM is disconnected
+ * because it cannot send new data until the USB bus is resumed.
+ * Endpoint descriptors info is saved before it gets reset by
+ * the BAM disconnect API. This lets us restore this info when
+ * the USB bus is resumed.
+ */
+ if (gp->in) {
+ gp->in_ep_desc_backup = gp->in->desc;
+ pr_debug("in_ep_desc_backup = %pK\n",
+ gp->in_ep_desc_backup);
+ }
+ if (gp->out) {
+ gp->out_ep_desc_backup = gp->out->desc;
+ pr_debug("out_ep_desc_backup = %pK\n",
+ gp->out_ep_desc_backup);
+ }
+ ipa_data_disconnect(gp, func);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->suspend_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								suspend_w);
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: suspend started\n", __func__);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+	/* In case of RNDIS, the host enables flow control, which invokes
+	 * connect_w. If that is delayed, suspend_w may run before connect_w.
+	 * In this scenario connect_w may never start at all, e.g. if the cable
+	 * gets disconnected or the host changes configuration (RNDIS --> MBIM).
+	 * For these cases don't do a runtime_put, as there was no _get yet, and
+	 * detect this condition on disconnect to avoid an extra pm_runtime_get
+	 * for the SUSPEND --> DISCONNECT scenario.
+	 */
+ if (!port->is_connected) {
+ pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ret = usb_bam_register_wake_cb(port->usb_bam_type,
+ port->dst_connection_idx, NULL, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+								__func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+ usb_bam_register_start_stop_cbs(port->usb_bam_type,
+ port->dst_connection_idx, ipa_data_start,
+ ipa_data_stop, port);
+ /*
+ * release lock here because bam_data_start() or
+ * bam_data_stop() called from usb_bam_suspend()
+ * re-acquires port lock.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Decrement usage count after IPA handshake is done
+ * to allow gadget parent to go to lpm. This counter was
+ * incremented upon cable connect.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @func: USB gadget function type
+ * @remote_wakeup_enabled: true if remote wakeup is enabled
+ *
+ * Initiates USB BAM IPA resume handling on USB bus resume.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_gadget *gadget = NULL;
+ u8 src_connection_idx = 0;
+ u8 dst_connection_idx = 0;
+ enum usb_ctrl usb_bam_type;
+
+ pr_debug("dev:%pK port number:%d\n", gp, func);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("port %u is NULL", func);
+ return;
+ }
+
+ gadget = gp->cdev->gadget;
+ /* resume with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ /* Restore endpoint descriptors info. */
+ if (gp->in) {
+ gp->in->desc = gp->in_ep_desc_backup;
+ pr_debug("in_ep_desc_backup = %pK\n",
+ gp->in_ep_desc_backup);
+ dst_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (gp->out) {
+ gp->out->desc = gp->out_ep_desc_backup;
+ pr_debug("out_ep_desc_backup = %pK\n",
+ gp->out_ep_desc_backup);
+ src_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ ipa_data_connect(gp, func,
+ src_connection_idx, dst_connection_idx);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Increment usage count here to disallow gadget
+ * parent suspend. This counter will decrement
+ * after IPA handshake is done in disconnect work
+ * (due to cable disconnect) or in bam_data_disconnect
+ * in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->resume_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								resume_w);
+ struct usb_gadget *gadget;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !port->port_usb->cdev) {
+ pr_err("port->port_usb or cdev is NULL");
+ goto exit;
+ }
+
+ if (!port->port_usb->cdev->gadget) {
+ pr_err("port->port_usb->cdev->gadget is NULL");
+ goto exit;
+ }
+
+ pr_debug("%s: resume started\n", __func__);
+ gadget = port->port_usb->cdev->gadget;
+ if (!gadget) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): Gadget is NULL.\n", __func__);
+ return;
+ }
+
+ ret = usb_bam_register_wake_cb(port->usb_bam_type,
+ port->dst_connection_idx, NULL, NULL);
+ if (ret) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ return;
+ }
+
+ if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_fifo(port->usb_bam_type, port->src_connection_idx,
+ port->port_usb->out);
+ configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+ port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ msm_dwc3_reset_dbm_ep(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+ }
+
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate IPA USB Port structure
+ * @func: USB gadget function type
+ *
+ * Used by a USB function driver to allocate an IPA data port for the
+ * USB IPA accelerated data path.
+ *
+ * Return: 0 on success, otherwise a negative errno.
+ */
+static int ipa_data_port_alloc(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port = NULL;
+
+ if (ipa_data_ports[func] != NULL) {
+ pr_debug("port %d already allocated.\n", func);
+ return 0;
+ }
+
+ port = kzalloc(sizeof(struct ipa_data_ch_info), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ipa_data_ports[func] = port;
+
+ pr_debug("port:%pK with portno:%d allocated\n", port, func);
+ return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @func: USB gadget function type
+ *
+ * Used by a USB function driver to select which BAM2BAM IPA port the
+ * function wants to use.
+ */
+void ipa_data_port_select(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port = NULL;
+
+ pr_debug("portno:%d\n", func);
+
+ port = ipa_data_ports[func];
+ port->port_num = func;
+ port->is_connected = false;
+
+ spin_lock_init(&port->port_lock);
+
+ if (!work_pending(&port->connect_w))
+ INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+ if (!work_pending(&port->disconnect_w))
+ INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
+ port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+ port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+ port->func_type = func;
+}
+
+void ipa_data_free(enum ipa_func_type func)
+{
+ pr_debug("freeing %d IPA BAM port", func);
+
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
+ if (func == USB_IPA_FUNC_RNDIS)
+ kfree(rndis_data);
+ if (ipa_data_wq) {
+ destroy_workqueue(ipa_data_wq);
+ ipa_data_wq = NULL;
+ }
+}
+
+/**
+ * ipa_data_setup() - setup BAM2BAM IPA port
+ *
+ * Each USB function that wants to use a BAM2BAM IPA port counts the
+ * IPA ports it needs and initializes them at bind_config() time in the
+ * android gadget driver.
+ *
+ * Return: 0 on success, otherwise a negative errno.
+ */
+int ipa_data_setup(enum ipa_func_type func)
+{
+ int ret;
+
+ pr_debug("requested %d IPA BAM port", func);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("Invalid num of ports count:%d\n", func);
+ return -EINVAL;
+ }
+
+ ret = ipa_data_port_alloc(func);
+ if (ret) {
+ pr_err("Failed to alloc port:%d\n", func);
+ return ret;
+ }
+
+ if (func == USB_IPA_FUNC_RNDIS) {
+ rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+ if (!rndis_data) {
+			pr_err("%s: failed to allocate rndis_data\n",
+				__func__);
+			ret = -ENOMEM;
+			goto free_ipa_ports;
+ }
+ }
+ if (ipa_data_wq) {
+ pr_debug("ipa_data_wq is already setup.");
+ return 0;
+ }
+
+ ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!ipa_data_wq) {
+ pr_err("Failed to create workqueue\n");
+ ret = -ENOMEM;
+ goto free_rndis_data;
+ }
+
+ return 0;
+
+free_rndis_data:
+ if (func == USB_IPA_FUNC_RNDIS)
+ kfree(rndis_data);
+free_ipa_ports:
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
+
+ return ret;
+}
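A minimal usage sketch (hypothetical caller, not part of this patch; the init
function name is an assumption) of how a function driver pairs ipa_data_setup()
with ipa_data_port_select() before later calling ipa_data_connect() from its
set_alt() path:

	/* e.g. in the function driver's instance-allocation path */
	static int example_ipa_func_init(void)
	{
		int ret;

		ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);  /* allocates the port and workqueue */
		if (ret)
			return ret;

		/* initialize work items, lock and IPA client params for this port */
		ipa_data_port_select(USB_IPA_FUNC_RNDIS);
		return 0;
	}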
+
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->ul_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->dl_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+ if (!max_packets_number) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+
+ rndis_data->ul_max_packets_number = max_packets_number;
+
+ if (max_packets_number > 1)
+ rndis_data->ul_aggregation_enable = true;
+ else
+ rndis_data->ul_aggregation_enable = false;
+
+ pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+ __func__, rndis_data->ul_aggregation_enable,
+ max_packets_number);
+}
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (atomic_read(&port->pipe_connect_notified)) {
+ pr_debug("%s: Transfers already started?\n", __func__);
+ return;
+ }
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work due to cable disconnect
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->connect_w);
+}
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (!atomic_read(&port->pipe_connect_notified))
+ return;
+
+ rndis_ipa_reset_trigger();
+ ipa_data_stop_endless_xfer(port, true);
+ ipa_data_stop_endless_xfer(port, false);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb) {
+ msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->out);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 000000000000..17dccbc4cf16
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,119 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
+enum ipa_func_type {
+ USB_IPA_FUNC_ECM,
+ USB_IPA_FUNC_MBIM,
+ USB_IPA_FUNC_RMNET,
+ USB_IPA_FUNC_RNDIS,
+ USB_IPA_FUNC_DPL,
+ USB_IPA_NUM_FUNCS,
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
+
+struct gadget_ipa_port {
+ struct usb_composite_dev *cdev;
+ struct usb_function *func;
+ int rx_buffer_size;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ int ipa_consumer_ep;
+ int ipa_producer_ep;
+ const struct usb_endpoint_descriptor *in_ep_desc_backup;
+ const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+};
+
+struct ipa_function_bind_info {
+ struct usb_string *string_defs;
+ int data_str_idx;
+ struct usb_interface_descriptor *data_desc;
+ struct usb_endpoint_descriptor *fs_in_desc;
+ struct usb_endpoint_descriptor *fs_out_desc;
+ struct usb_endpoint_descriptor *fs_notify_desc;
+ struct usb_endpoint_descriptor *hs_in_desc;
+ struct usb_endpoint_descriptor *hs_out_desc;
+ struct usb_endpoint_descriptor *hs_notify_desc;
+ struct usb_endpoint_descriptor *ss_in_desc;
+ struct usb_endpoint_descriptor *ss_out_desc;
+ struct usb_endpoint_descriptor *ss_notify_desc;
+
+ struct usb_descriptor_header **fs_desc_hdr;
+ struct usb_descriptor_header **hs_desc_hdr;
+ struct usb_descriptor_header **ss_desc_hdr;
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN 40
+
+struct f_rndis_qc_opts {
+ struct usb_function_instance func_inst;
+ struct f_rndis_qc *rndis;
+ u32 vendor_id;
+ const char *manufacturer;
+ struct net_device *net;
+ int refcnt;
+};
+
+struct f_rmnet_opts {
+ struct usb_function_instance func_inst;
+ struct f_rmnet *dev;
+ int refcnt;
+};
+
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ u8 src_connection_idx, u8 dst_connection_idx);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+ u32 ipa_prod, u32 ipa_cons);
+#endif
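As a rough illustration of how the suspend/resume helpers above are meant to be
driven (a hypothetical function-driver sketch, not part of this patch; struct
f_example, func_to_example() and the remote_wakeup_allowed flag are assumptions):

	static void example_func_suspend(struct usb_function *f)
	{
		struct f_example *dev = func_to_example(f);

		ipa_data_suspend(&dev->ipa_port, USB_IPA_FUNC_RNDIS,
				 dev->remote_wakeup_allowed);
	}

	static void example_func_resume(struct usb_function *f)
	{
		struct f_example *dev = func_to_example(f);

		ipa_data_resume(&dev->ipa_port, USB_IPA_FUNC_RNDIS,
				dev->remote_wakeup_allowed);
	}

With remote wakeup disabled, the helpers fall back to a full disconnect/connect
of the IPA BAM, restoring the endpoint descriptors saved at suspend time.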
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 0a0eeffc9438..34a337888788 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1075,6 +1075,9 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
struct eth_dev *dev;
u8 new_addr[ETH_ALEN];
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
@@ -1087,6 +1090,9 @@ int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
@@ -1097,6 +1103,9 @@ int gether_set_host_addr(struct net_device *net, const char *host_addr)
struct eth_dev *dev;
u8 new_addr[ETH_ALEN];
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
if (get_ether_addr(host_addr, new_addr))
return -EINVAL;
@@ -1109,6 +1118,9 @@ int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return get_ether_addr_str(dev->host_mac, host_addr, len);
}
@@ -1141,6 +1153,9 @@ void gether_set_qmult(struct net_device *net, unsigned qmult)
{
struct eth_dev *dev;
+ if (!net)
+ return;
+
dev = netdev_priv(net);
dev->qmult = qmult;
}
@@ -1150,6 +1165,9 @@ unsigned gether_get_qmult(struct net_device *net)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return dev->qmult;
}
@@ -1157,6 +1175,9 @@ EXPORT_SYMBOL_GPL(gether_get_qmult);
int gether_get_ifname(struct net_device *net, char *name, int len)
{
+ if (!net)
+ return -ENODEV;
+
rtnl_lock();
strlcpy(name, netdev_name(net), len);
rtnl_unlock();
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 4f47289fcf7c..0468459a5c0f 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -35,6 +35,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -48,6 +53,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -70,6 +80,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -83,6 +98,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -105,6 +125,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
unsigned qmult; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
qmult = gether_get_qmult(opts->net); \
mutex_unlock(&opts->lock); \
@@ -118,6 +143,11 @@
u8 val; \
int ret; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
@@ -144,6 +174,11 @@ out: \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+		pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 60139854e0b1..6e6318c94e93 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -176,6 +176,9 @@ struct ffs_data {
struct usb_request *ep0req; /* P: mutex */
struct completion ep0req_completion; /* P: mutex */
+ struct completion epin_completion;
+ struct completion epout_completion;
+
/* reference counter */
atomic_t ref;
/* how many files are opened (EP0 and others) */
diff --git a/drivers/usb/gadget/function/u_qc_ether.c b/drivers/usb/gadget/function/u_qc_ether.c
new file mode 100644
index 000000000000..bacaf52f42d9
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.c
@@ -0,0 +1,454 @@
+/*
+ * u_qc_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used. Each end of the link uses one address. The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link. (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ *
+ * These utilities are based on the Ethernet-over-USB link layer utilities
+ * and contain the MSM-specific implementation.
+ */
+
+#define UETH__VERSION "29-May-2008"
+
+struct eth_qc_dev {
+ /* lock is held while accessing port_usb
+ * or updating its backlink port_usb->ioport
+ */
+ spinlock_t lock;
+ struct qc_gether *port_usb;
+
+ struct net_device *net;
+ struct usb_gadget *gadget;
+
+ unsigned header_len;
+
+ bool zlp;
+ u8 host_mac[ETH_ALEN];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+ printk(level "%s: " fmt, (d)->net->name, ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+ xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define DBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG DBG
+#else
+#define VDBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+ xprintk(dev, KERN_ERR, fmt, ## args)
+#define INFO(dev, fmt, args...) \
+ xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+static int ueth_qc_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ int status = 0;
+
+ /* don't change MTU on "live" link (peer won't know) */
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ status = -EBUSY;
+ else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+ status = -ERANGE;
+ else
+ net->mtu = new_mtu;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return status;
+}
+
+static void eth_qc_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *p)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+
+ strlcpy(p->driver, "g_qc_ether", sizeof(p->driver));
+ strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+ strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+}
+
+static const struct ethtool_ops qc_ethtool_ops = {
+ .get_drvinfo = eth_qc_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static netdev_tx_t eth_qc_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ return NETDEV_TX_OK;
+}
+
+static int eth_qc_open(struct net_device *net)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ struct qc_gether *link;
+
+ DBG(dev, "%s\n", __func__);
+ if (netif_carrier_ok(dev->net)) {
+ /* Force the netif to send the RTM_NEWLINK event
+		 * that is used to notify about the USB cable status.
+ */
+ netif_carrier_off(dev->net);
+ netif_carrier_on(dev->net);
+ netif_wake_queue(dev->net);
+ }
+
+ spin_lock_irq(&dev->lock);
+ link = dev->port_usb;
+ if (link && link->open)
+ link->open(link);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int eth_qc_stop(struct net_device *net)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ struct qc_gether *link = dev->port_usb;
+
+ VDBG(dev, "%s\n", __func__);
+ netif_stop_queue(net);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb && link->close)
+ link->close(link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *qc_dev_addr;
+module_param(qc_dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_dev_addr, "QC Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *qc_host_addr;
+module_param(qc_host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_host_addr, "QC Host Ethernet Address");
+
+static int get_qc_ether_addr(const char *str, u8 *dev_addr)
+{
+ if (str) {
+ unsigned i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = hex_to_bin(*str++) << 4;
+ num |= hex_to_bin(*str++);
+ dev_addr[i] = num;
+ }
+ if (is_valid_ether_addr(dev_addr))
+ return 0;
+ }
+ random_ether_addr(dev_addr);
+ return 1;
+}
+
+static const struct net_device_ops eth_qc_netdev_ops = {
+ .ndo_open = eth_qc_open,
+ .ndo_stop = eth_qc_stop,
+ .ndo_start_xmit = eth_qc_start_xmit,
+ .ndo_change_mtu = ueth_qc_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct device_type qc_gadget_type = {
+ .name = "gadget",
+};
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
+{
+ if (get_qc_ether_addr(qc_dev_addr, dev_mac))
+ pr_debug("using random dev_mac ethernet address\n");
+ if (get_qc_ether_addr(qc_host_addr, host_mac))
+ pr_debug("using random host_mac ethernet address\n");
+}
+
+/**
+ * gether_qc_setup - initialize one ethernet-over-usb link
+ * @g: gadget to associate with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+ return gether_qc_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_qc_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname)
+{
+ struct eth_qc_dev *dev;
+ struct net_device *net;
+ int status;
+
+ net = alloc_etherdev(sizeof(*dev));
+ if (!net)
+ return -ENOMEM;
+
+ dev = netdev_priv(net);
+ spin_lock_init(&dev->lock);
+
+ /* network device setup */
+ dev->net = net;
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+ if (get_qc_ether_addr(qc_dev_addr, net->dev_addr))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "self");
+ if (get_qc_ether_addr(qc_host_addr, dev->host_mac))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "host");
+
+ if (ethaddr)
+ ether_addr_copy(ethaddr, dev->host_mac);
+
+ net->netdev_ops = &eth_qc_netdev_ops;
+ net->ethtool_ops = &qc_ethtool_ops;
+
+ netif_carrier_off(net);
+
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &qc_gadget_type);
+
+ status = register_netdev(net);
+ if (status < 0) {
+ dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+ free_netdev(net);
+ } else {
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ }
+
+ return status;
+}
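A hedged lifecycle sketch for this API (hypothetical function-driver code, not
part of this patch; the "rndis"/"rndis0" names are only an example): set up the
netdev once at bind time, then flip the link from the function's
set_alt()/disable() paths:

	/* gadget bind: create the net device ("rndis0") and learn the host MAC */
	u8 host_mac[ETH_ALEN];
	int ret = gether_qc_setup_name(cdev->gadget, host_mac, "rndis");

	/* set_alt(): endpoints enabled, 'link' is the function's struct qc_gether */
	struct net_device *net = gether_qc_connect_name(link, "rndis0", true);

	/* disable()/unbind: drop carrier, then free the net device */
	gether_qc_disconnect_name(link, "rndis0");
	gether_qc_cleanup_name("rndis0");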
+
+/**
+ * gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_qc_setup().
+ */
+void gether_qc_cleanup_name(const char *netname)
+{
+ struct net_device *net_dev;
+
+ /* Extract the eth_qc_dev from the net device */
+ net_dev = dev_get_by_name(&init_net, netname);
+
+ if (net_dev) {
+ dev_put(net_dev);
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+ }
+}
+
+struct net_device *gether_qc_get_net(const char *netname)
+{
+ struct net_device *net_dev;
+
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Decrement net_dev refcount as it was incremented in
+ * dev_get_by_name().
+ */
+ dev_put(net_dev);
+ return net_dev;
+}
+/**
+ * gether_qc_connect_name - notify network layer that USB link
+ * is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ * current device speed, and any framing wrapper(s) set up.
+ * @netname: name for network device (for example, "usb")
+ * Context: irqs blocked
+ * @netif_enable: if true, net interface will be turned on
+ *
+ * This is called to let the network layer know the connection
+ * is active ("carrier detect").
+ */
+struct net_device *gether_qc_connect_name(struct qc_gether *link,
+ const char *netname, bool netif_enable)
+{
+ struct net_device *net_dev;
+ struct eth_qc_dev *dev;
+
+ /* Extract the eth_qc_dev from the net device */
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ dev_put(net_dev);
+ dev = netdev_priv(net_dev);
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ dev->zlp = link->is_zlp_ok;
+ dev->header_len = link->header_len;
+
+ spin_lock(&dev->lock);
+ dev->port_usb = link;
+ link->ioport = dev;
+ if (netif_running(dev->net)) {
+ if (link->open)
+ link->open(link);
+ } else {
+ if (link->close)
+ link->close(link);
+ }
+ spin_unlock(&dev->lock);
+
+ if (netif_enable) {
+ netif_carrier_on(dev->net);
+ if (netif_running(dev->net))
+ netif_wake_queue(dev->net);
+ }
+
+ return dev->net;
+}
+
+/**
+ * gether_qc_disconnect_name - notify network layer that USB
+ * link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ */
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname)
+{
+ struct net_device *net_dev;
+ struct eth_qc_dev *dev;
+
+ /* Extract the eth_qc_dev from the net device */
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return;
+
+ dev_put(net_dev);
+ dev = netdev_priv(net_dev);
+
+ if (!dev)
+ return;
+
+ DBG(dev, "%s\n", __func__);
+
+ netif_stop_queue(dev->net);
+ netif_carrier_off(dev->net);
+
+ spin_lock(&dev->lock);
+ dev->port_usb = NULL;
+ link->ioport = NULL;
+ spin_unlock(&dev->lock);
+}
diff --git a/drivers/usb/gadget/function/u_qc_ether.h b/drivers/usb/gadget/function/u_qc_ether.h
new file mode 100644
index 000000000000..c5706edf8d2f
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.h
@@ -0,0 +1,101 @@
+/*
+ * u_qc_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __U_QC_ETHER_H
+#define __U_QC_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration. When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ *
+ * This interface is based on the Ethernet-over-USB link layer utilities
+ * and contains the MSM-specific implementation.
+ */
+
+struct qc_gether {
+ struct usb_function func;
+
+ /* updated by gether_{connect,disconnect} */
+ struct eth_qc_dev *ioport;
+
+ /* endpoints handle full and/or high speeds */
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ bool is_zlp_ok;
+
+ u16 cdc_filter;
+
+ /* hooks for added framing, as needed for RNDIS and EEM. */
+ u32 header_len;
+
+ struct sk_buff *(*wrap)(struct qc_gether *port,
+ struct sk_buff *skb);
+ int (*unwrap)(struct qc_gether *port,
+ struct sk_buff *skb,
+ struct sk_buff_head *list);
+
+ /* called on network open/close */
+ void (*open)(struct qc_gether *);
+ void (*close)(struct qc_gether *);
+};
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_qc_cleanup_name(const char *netname);
+/* variant of gether_setup that allows customizing network device name */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_qc_connect_name(struct qc_gether *link,
+ const char *netname, bool netif_enable);
+struct net_device *gether_qc_get_net(const char *netname);
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
+
+/* each configuration may bind one instance of an ethernet link */
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name);
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer,
+ u8 maxPktPerXfer, u8 pkt_alignment_factor,
+ char *xport_name);
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
+#endif /* __U_QC_ETHER_H */
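For illustration only (a hypothetical RNDIS-style function; everything except
the struct qc_gether members is an assumption), a function driver embeds
struct qc_gether and fills the framing hooks and link callbacks before handing
it to gether_qc_connect_name():

	struct f_example_qc {
		struct qc_gether	port;	/* USB side of the link */
		/* function-specific state follows */
	};

	dev->port.header_len = EXAMPLE_HEADER_LEN;	/* per-packet framing header size */
	dev->port.wrap = example_wrap;			/* add per-packet framing on TX */
	dev->port.unwrap = example_unwrap;		/* strip framing on RX */
	dev->port.open = example_link_open;		/* netdev brought up */
	dev->port.close = example_link_close;		/* netdev taken down */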
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
new file mode 100644
index 000000000000..0ef1e2ab34be
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb_bam.h>
+
+#include "f_qdss.h"
+
+static int alloc_sps_req(struct usb_ep *data_ep)
+{
+ struct usb_request *req = NULL;
+ struct f_qdss *qdss = data_ep->driver_data;
+ u32 sps_params = 0;
+
+	pr_debug("%s\n", __func__);
+
+ req = usb_ep_alloc_request(data_ep, GFP_ATOMIC);
+ if (!req) {
+ pr_err("usb_ep_alloc_request failed\n");
+ return -ENOMEM;
+ }
+
+ req->length = 32*1024;
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ qdss->bam_info.usb_bam_pipe_idx;
+ req->udc_priv = sps_params;
+ qdss->endless_req = req;
+
+ return 0;
+}
+
+static int init_data(struct usb_ep *ep);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable)
+{
+ enum usb_ctrl usb_bam_type;
+ int res = 0;
+ int idx;
+ struct usb_qdss_bam_connect_info bam_info;
+ struct usb_gadget *gadget;
+
+ pr_debug("set_qdss_data_connection\n");
+
+ if (!qdss) {
+ pr_err("%s: qdss ptr is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ gadget = qdss->gadget;
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+
+ bam_info = qdss->bam_info;
+ /* There is only one qdss pipe, so the pipe number can be set to 0 */
+ idx = usb_bam_get_connection_idx(usb_bam_type, QDSS_P_BAM,
+ PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, 0);
+ if (idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
+ return idx;
+ }
+
+ if (enable) {
+ usb_bam_alloc_fifos(usb_bam_type, idx);
+ bam_info.data_fifo =
+ kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
+ if (!bam_info.data_fifo) {
+ pr_err("qdss_data_connection: memory alloc failed\n");
+ usb_bam_free_fifos(usb_bam_type, idx);
+ return -ENOMEM;
+ }
+ get_bam2bam_connection_info(usb_bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, bam_info.data_fifo, NULL);
+
+ alloc_sps_req(qdss->port.data);
+ msm_data_fifo_config(qdss->port.data,
+ bam_info.data_fifo->phys_base,
+ bam_info.data_fifo->size,
+ bam_info.usb_bam_pipe_idx);
+ init_data(qdss->port.data);
+
+ res = usb_bam_connect(usb_bam_type, idx,
+ &(bam_info.usb_bam_pipe_idx));
+ } else {
+ kfree(bam_info.data_fifo);
+ res = usb_bam_disconnect_pipe(usb_bam_type, idx);
+ if (res)
+ pr_err("usb_bam_disconnection error\n");
+ usb_bam_free_fifos(usb_bam_type, idx);
+ }
+
+ return res;
+}
+
+static int init_data(struct usb_ep *ep)
+{
+ struct f_qdss *qdss = ep->driver_data;
+ int res = 0;
+
+ pr_debug("init_data\n");
+
+ res = msm_ep_config(ep, qdss->endless_req);
+ if (res)
+ pr_err("msm_ep_config failed\n");
+
+ return res;
+}
+
+int uninit_data(struct usb_ep *ep)
+{
+ int res = 0;
+
+	pr_debug("uninit_data\n");
+
+ res = msm_ep_unconfig(ep);
+ if (res)
+ pr_err("msm_ep_unconfig failed\n");
+
+ return res;
+}
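A minimal sketch (hypothetical caller, not part of this patch) of how f_qdss is
expected to drive the helpers above when its data path is brought up and torn
down:

	/* data interface activated (e.g. from set_alt), qdss->port.data already claimed */
	ret = set_qdss_data_connection(qdss, 1);	/* allocate FIFOs, connect the BAM pipe */

	/* data interface disabled or cable disconnected */
	set_qdss_data_connection(qdss, 0);		/* disconnect pipe, free FIFOs */
	uninit_data(qdss->port.data);			/* undo msm_ep_config() */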
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 000000000000..e0843794b594
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct rmnet_ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+enum qti_port_type {
+ QTI_PORT_RMNET,
+ QTI_PORT_DPL,
+ QTI_NUM_PORTS
+};
+
+
+struct grmnet {
+ /* to usb host, aka laptop, windows pc etc. Will
+ * be filled by usb driver of rmnet functionality
+ */
+ int (*send_cpkt_response)(void *g, void *buf, size_t len);
+
+ /* to modem, and to be filled by driver implementing
+ * control function
+ */
+ int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+ void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+
+ void (*disconnect)(struct grmnet *g);
+ void (*connect)(struct grmnet *g);
+};
+
+enum ctrl_client {
+ FRMNET_CTRL_CLIENT,
+ GPS_CTRL_CLIENT,
+
+ NR_CTRL_CLIENTS
+};
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf);
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport);
+int gqti_ctrl_init(void);
+void gqti_ctrl_cleanup(void);
+#endif /* __U_RMNET_H*/
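A hedged sketch (hypothetical f_rmnet-side code, not part of this patch;
frmnet_send_cpkt_response() and the dev fields are assumptions) of the split
described in the comments above: the USB function fills the host-facing
callback and connects to the QTI control port, which in turn fills
send_encap_cmd()/notify_modem() and calls connect()/disconnect():

	/* on interface activation in the rmnet function driver */
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	ret = gqti_ctrl_connect(&dev->port, QTI_PORT_RMNET, dev->ifc_id);

	/* on function disable */
	gqti_ctrl_disconnect(&dev->port, QTI_PORT_RMNET);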
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 58a699cfa458..d5fcd3e5f02d 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
*
* This code also borrows from usbserial.c, which is
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
@@ -27,6 +28,8 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
#include "u_serial.h"
@@ -77,9 +80,13 @@
* next layer of buffering. For TX that's a circular buffer; for RX
* consider it a NOP. A third layer is provided by the TTY code.
*/
-#define QUEUE_SIZE 16
+#define TX_QUEUE_SIZE 8
+#define TX_BUF_SIZE 4096
#define WRITE_BUF_SIZE 8192 /* TX only */
+#define RX_QUEUE_SIZE 8
+#define RX_BUF_SIZE 4096
+
/* circular buffer */
struct gs_buf {
unsigned buf_size;
@@ -106,7 +113,7 @@ struct gs_port {
int read_allocated;
struct list_head read_queue;
unsigned n_read;
- struct tasklet_struct push;
+ struct work_struct push;
struct list_head write_pool;
int write_started;
@@ -118,6 +125,10 @@ struct gs_port {
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+ unsigned long nbytes_from_host;
+ unsigned long nbytes_to_tty;
+ unsigned long nbytes_from_tty;
+ unsigned long nbytes_to_host;
};
static struct portmaster {
@@ -125,6 +136,7 @@ static struct portmaster {
struct gs_port *port;
} ports[MAX_U_SERIAL_PORTS];
+static struct workqueue_struct *gserial_wq;
#define GS_CLOSE_TIMEOUT 15 /* seconds */
@@ -360,26 +372,50 @@ __releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
- struct list_head *pool = &port->write_pool;
+ struct list_head *pool;
struct usb_ep *in;
int status = 0;
+ static long prev_len;
bool do_tty_wake = false;
- if (!port->port_usb)
- return status;
+ if (!port || !port->port_usb) {
+		pr_err("Error - port or port->port_usb is NULL\n");
+ return -EIO;
+ }
- in = port->port_usb->in;
+ pool = &port->write_pool;
+ in = port->port_usb->in;
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
- if (port->write_started >= QUEUE_SIZE)
+ if (port->write_started >= TX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
- len = gs_send_packet(port, req->buf, in->maxpacket);
+ len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
if (len == 0) {
+ /* Queue zero length packet explicitly to make it
+ * work with UDCs which don't support req->zero flag
+ */
+ if (prev_len && (prev_len % in->maxpacket == 0)) {
+ req->length = 0;
+ list_del(&req->list);
+ spin_unlock(&port->port_lock);
+ status = usb_ep_queue(in, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+ if (!port->port_usb) {
+ gs_free_req(in, req);
+ break;
+ }
+ if (status) {
+ printk(KERN_ERR "%s: %s err %d\n",
+ __func__, "queue", status);
+ list_add(&req->list, pool);
+ }
+ prev_len = 0;
+ }
wake_up_interruptible(&port->drain_wait);
break;
}
@@ -387,7 +423,6 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
- req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
port->port_num, len, *((u8 *)req->buf),
@@ -405,6 +440,16 @@ __acquires(&port->port_lock)
status = usb_ep_queue(in, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
port->write_busy = false;
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ do_tty_wake = false;
+ gs_free_req(in, req);
+ break;
+ }
if (status) {
pr_debug("%s: %s %s err %d\n",
@@ -413,11 +458,10 @@ __acquires(&port->port_lock)
break;
}
- port->write_started++;
+ prev_len = req->length;
+ port->nbytes_from_tty += req->length;
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
+ port->write_started++;
}
if (do_tty_wake && port->port.tty)
@@ -434,8 +478,17 @@ __releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
- struct list_head *pool = &port->read_pool;
- struct usb_ep *out = port->port_usb->out;
+ struct list_head *pool;
+ struct usb_ep *out;
+ unsigned started = 0;
+
+ if (!port || !port->port_usb) {
+		pr_err("Error - port or port->port_usb is NULL\n");
+ return -EIO;
+ }
+
+ pool = &port->read_pool;
+ out = port->port_usb->out;
while (!list_empty(pool)) {
struct usb_request *req;
@@ -447,12 +500,12 @@ __acquires(&port->port_lock)
if (!tty)
break;
- if (port->read_started >= QUEUE_SIZE)
+ if (port->read_started >= RX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
list_del(&req->list);
- req->length = out->maxpacket;
+ req->length = RX_BUF_SIZE;
/* drop lock while we call out; the controller driver
* may need to call us back (e.g. for disconnect)
@@ -461,6 +514,17 @@ __acquires(&port->port_lock)
status = usb_ep_queue(out, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ started = 0;
+ gs_free_req(out, req);
+ break;
+ }
+
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", out->name, status);
@@ -468,10 +532,6 @@ __acquires(&port->port_lock)
break;
}
port->read_started++;
-
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
}
return port->read_started;
}
@@ -486,9 +546,9 @@ __acquires(&port->port_lock)
* So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
* can be buffered before the TTY layer's buffers (currently 64 KB).
*/
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *w)
{
- struct gs_port *port = (void *)_port;
+ struct gs_port *port = container_of(w, struct gs_port, push);
struct tty_struct *tty;
struct list_head *queue = &port->read_queue;
bool disconnect = false;
@@ -538,6 +598,7 @@ static void gs_rx_push(unsigned long _port)
count = tty_insert_flip_string(&port->port, packet,
size);
+ port->nbytes_to_tty += count;
if (count)
do_push = true;
if (count != size) {
@@ -566,13 +627,13 @@ static void gs_rx_push(unsigned long _port)
* this time around, there may be trouble unless there's an
* implicit tty_unthrottle() call on its way...
*
- * REVISIT we should probably add a timer to keep the tasklet
+ * REVISIT we should probably add a timer to keep the work queue
* from starving ... but it's not clear that case ever happens.
*/
if (!list_empty(queue) && tty) {
if (!test_bit(TTY_THROTTLED, &tty->flags)) {
if (do_push)
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
else
pr_warn("ttyGS%d: RX not scheduled?\n",
port->port_num);
@@ -589,19 +650,23 @@ static void gs_rx_push(unsigned long _port)
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
/* Queue all received data until the tty layer is ready for it. */
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_from_host += req->actual;
list_add_tail(&req->list, &port->read_queue);
- tasklet_schedule(&port->push);
- spin_unlock(&port->port_lock);
+ queue_work(gserial_wq, &port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host += req->actual;
list_add(&req->list, &port->write_pool);
port->write_started--;
@@ -613,7 +678,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
/* FALL THROUGH */
case 0:
/* normal completion */
- gs_start_tx(port);
+ if (port->port_usb)
+ gs_start_tx(port);
break;
case -ESHUTDOWN:
@@ -622,7 +688,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
break;
}
- spin_unlock(&port->port_lock);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
@@ -640,19 +706,20 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
}
static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int queue_size, int req_size,
void (*fn)(struct usb_ep *, struct usb_request *),
int *allocated)
{
int i;
struct usb_request *req;
- int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
+ int n = allocated ? queue_size - *allocated : queue_size;
/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
* do quite that many this time, don't fail ... we just won't
* be as speedy as we might otherwise be.
*/
for (i = 0; i < n; i++) {
- req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ req = gs_alloc_req(ep, req_size, GFP_ATOMIC);
if (!req)
return list_empty(head) ? -ENOMEM : 0;
req->complete = fn;
@@ -674,23 +741,32 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
*/
static int gs_start_io(struct gs_port *port)
{
- struct list_head *head = &port->read_pool;
- struct usb_ep *ep = port->port_usb->out;
+ struct list_head *head;
+ struct usb_ep *ep;
int status;
unsigned started;
+ if (!port || !port->port_usb) {
+		pr_err("Error - port or port->port_usb is NULL\n");
+ return -EIO;
+ }
+
+ head = &port->read_pool;
+ ep = port->port_usb->out;
+
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
- status = gs_alloc_requests(ep, head, gs_read_complete,
- &port->read_allocated);
+ status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
+ gs_read_complete, &port->read_allocated);
if (status)
return status;
status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
+ TX_QUEUE_SIZE, TX_BUF_SIZE,
gs_write_complete, &port->write_allocated);
if (status) {
gs_free_requests(ep, head, &port->read_allocated);
@@ -701,6 +777,9 @@ static int gs_start_io(struct gs_port *port)
port->n_read = 0;
started = gs_start_rx(port);
+ if (!port->port_usb)
+ return -EIO;
+
if (started) {
gs_start_tx(port);
/* Unblock any pending writes into our circular buffer, in case
@@ -785,7 +864,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (status) {
- pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
+ pr_debug("gs_open: ttyGS%d (%pK,%pK) no buffer\n",
port->port_num, tty, file);
port->openclose = false;
goto exit_unlock_port;
@@ -815,7 +894,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
gser->connect(gser);
}
- pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
+ pr_debug("gs_open: ttyGS%d (%pK,%pK)\n", port->port_num, tty, file);
status = 0;
@@ -851,7 +930,8 @@ static void gs_close(struct tty_struct *tty, struct file *file)
goto exit;
}
- pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
+ pr_debug("gs_close: ttyGS%d (%pK,%pK) ...\n",
+ port->port_num, tty, file);
/* mark port as closing but in use; we can drop port lock
* and sleep if necessary
@@ -877,7 +957,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
- * let the push tasklet fire again until we're re-opened.
+ * let the push work queue fire again until we're re-opened.
*/
if (gser == NULL)
gs_buf_free(&port->port_write_buf);
@@ -888,7 +968,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
port->openclose = false;
- pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
+ pr_debug("gs_close: ttyGS%d (%pK,%pK) done!\n",
port->port_num, tty, file);
wake_up(&port->close_wait);
@@ -902,7 +982,10 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
unsigned long flags;
int status;
- pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
+ if (!port)
+ return 0;
+
+ pr_vdebug("gs_write: ttyGS%d (%pK) writing %d bytes\n",
port->port_num, tty, count);
spin_lock_irqsave(&port->port_lock, flags);
@@ -922,7 +1005,9 @@ static int gs_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
int status;
- pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n",
+ if (!port)
+ return 0;
+	pr_vdebug("gs_put_char: (%d,%pK) char=0x%x, called from %ps\n",
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
@@ -937,7 +1022,9 @@ static void gs_flush_chars(struct tty_struct *tty)
struct gs_port *port = tty->driver_data;
unsigned long flags;
- pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+ if (!port)
+ return;
+ pr_vdebug("gs_flush_chars: (%d,%pK)\n", port->port_num, tty);
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
@@ -951,12 +1038,14 @@ static int gs_write_room(struct tty_struct *tty)
unsigned long flags;
int room = 0;
+ if (!port)
+ return 0;
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
room = gs_buf_space_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
+ pr_vdebug("gs_write_room: (%d,%pK) room=%d\n",
port->port_num, tty, room);
return room;
@@ -972,7 +1061,7 @@ static int gs_chars_in_buffer(struct tty_struct *tty)
chars = gs_buf_data_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+ pr_vdebug("gs_chars_in_buffer: (%d,%pK) chars=%d\n",
port->port_num, tty, chars);
return chars;
@@ -984,13 +1073,20 @@ static void gs_unthrottle(struct tty_struct *tty)
struct gs_port *port = tty->driver_data;
unsigned long flags;
+ /*
+ * tty's driver data is set to NULL during port close. Nothing
+ * to do here.
+ */
+ if (!port)
+ return;
+
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb) {
/* Kickstart read queue processing. We don't do xon/xoff,
* rts/cts, or other handshaking with the host, but if the
* read queue backs up enough we'll be NAKing OUT packets.
*/
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1002,6 +1098,8 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
int status = 0;
struct gserial *gser;
+ if (!port)
+ return 0;
pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
port->port_num, duration);
@@ -1014,6 +1112,83 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
return status;
}
+static int gs_tiocmget(struct tty_struct *tty)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ unsigned int result = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (!gser) {
+ result = -ENODEV;
+ goto fail;
+ }
+
+ if (gser->get_dtr)
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+
+ if (gser->get_rts)
+ result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
+
+ if (gser->serial_state & TIOCM_CD)
+ result |= TIOCM_CD;
+
+ if (gser->serial_state & TIOCM_RI)
+ result |= TIOCM_RI;
+
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return result;
+}
+
+static int gs_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ int status = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+
+ if (!gser) {
+ status = -ENODEV;
+ goto fail;
+ }
+
+ if (set & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state |= TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 1);
+ }
+ }
+
+ if (clear & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state &= ~TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 0);
+ }
+ }
+
+ if (set & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state |= TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 1);
+ }
+ }
+
+ if (clear & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state &= ~TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 0);
+ }
+ }
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return status;
+}
+
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
@@ -1024,6 +1199,8 @@ static const struct tty_operations gs_tty_ops = {
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
+ .tiocmget = gs_tiocmget,
+ .tiocmset = gs_tiocmset,
};
/*-------------------------------------------------------------------------*/
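[Note, not part of the patch: with gs_tiocmget/gs_tiocmset wired into gs_tty_ops above, the modem-control bits become reachable from userspace through the standard termios ioctls. A minimal sketch, assuming a gadget-side test program and the usual /dev/ttyGS0 node:]

/* Hypothetical userspace test: read and toggle modem-control bits on ttyGS0. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int bits = 0;
	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, TIOCMGET, &bits) == 0)	/* ends up in gs_tiocmget() */
		printf("DTR=%d RTS=%d CD=%d RI=%d\n",
		       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS),
		       !!(bits & TIOCM_CD), !!(bits & TIOCM_RI));

	bits = TIOCM_RI;
	ioctl(fd, TIOCMBIS, &bits);	/* tty core calls gs_tiocmset(set = TIOCM_RI) */
	ioctl(fd, TIOCMBIC, &bits);	/* ... then gs_tiocmset(clear = TIOCM_RI) */

	close(fd);
	return 0;
}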
@@ -1053,7 +1230,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
init_waitqueue_head(&port->drain_wait);
init_waitqueue_head(&port->close_wait);
- tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+ INIT_WORK(&port->push, gs_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
@@ -1068,6 +1245,129 @@ out:
return ret;
}
+#if defined(CONFIG_DEBUG_FS)
+
+#define BUF_SIZE 512
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ struct tty_struct *tty;
+ struct gserial *gser;
+ char *buf;
+ unsigned long flags;
+ int i = 0;
+ int ret;
+ int result = 0;
+
+ if (!ui_dev)
+ return -EINVAL;
+
+ tty = ui_dev->port.tty;
+ gser = ui_dev->port_usb;
+
+ buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
+ (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
+ (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
+
+ if (tty)
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "tty_flags: %lu\n", tty->flags);
+
+	if (gser && gser->get_dtr) {
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "DTR_status: %d\n", result);
+ }
+
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ unsigned long flags;
+
+ if (!ui_dev)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+ ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
+ ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+ return count;
+}
+
+static int serial_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+const struct file_operations debug_rst_ops = {
+ .open = serial_debug_open,
+ .write = debug_write_reset,
+};
+
+const struct file_operations debug_adb_ops = {
+ .open = serial_debug_open,
+ .read = debug_read_status,
+};
+
+struct dentry *gs_dent;
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
+{
+ char buf[48];
+
+ if (!ui_dev)
+ return;
+
+ snprintf(buf, 48, "usb_serial%d", port_num);
+ gs_dent = debugfs_create_dir(buf, 0);
+ if (!gs_dent || IS_ERR(gs_dent))
+ return;
+
+ debugfs_create_file("readstatus", 0444, gs_dent, ui_dev,
+ &debug_adb_ops);
+ debugfs_create_file("reset", S_IRUGO | S_IWUSR,
+ gs_dent, ui_dev, &debug_rst_ops);
+}
+
+static void usb_debugfs_remove(void)
+{
+ debugfs_remove_recursive(gs_dent);
+}
+#else
+static inline void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
+static inline void usb_debugfs_remove(void) {}
+#endif
+
static int gs_closed(struct gs_port *port)
{
int cond;
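[Note, not part of the patch: with CONFIG_DEBUG_FS enabled, the per-port byte counters exported above appear under debugfs as usb_serial<N>/readstatus and usb_serial<N>/reset. A small sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug:]

/* Hypothetical sketch: dump and clear the ttyGS0 counters via debugfs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/usb_serial0/readstatus", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);	/* nbytes_from_host, nbytes_to_tty, ... */
		}
		close(fd);
	}

	fd = open("/sys/kernel/debug/usb_serial0/reset", O_WRONLY);
	if (fd >= 0) {
		write(fd, "1", 1);	/* any write zeroes all four counters */
		close(fd);
	}
	return 0;
}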
@@ -1080,7 +1380,7 @@ static int gs_closed(struct gs_port *port)
static void gserial_free_port(struct gs_port *port)
{
- tasklet_kill(&port->push);
+ cancel_work_sync(&port->push);
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
WARN_ON(port->port_usb != NULL);
@@ -1286,6 +1586,9 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
+ port->nbytes_from_host = port->nbytes_to_tty =
+ port->nbytes_from_tty = port->nbytes_to_host = 0;
+
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
@@ -1305,7 +1608,8 @@ static int userial_init(void)
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_RESET_TERMIOS;
gs_tty_driver->init_termios = tty_std_termios;
/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
@@ -1321,6 +1625,12 @@ static int userial_init(void)
for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
mutex_init(&ports[i].lock);
+ gserial_wq = create_singlethread_workqueue("k_gserial");
+ if (!gserial_wq) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* export the driver ... */
status = tty_register_driver(gs_tty_driver);
if (status) {
@@ -1329,6 +1639,9 @@ static int userial_init(void)
goto fail;
}
+ for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
+ usb_debugfs_init(ports[i].port, i);
+
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
MAX_U_SERIAL_PORTS,
(MAX_U_SERIAL_PORTS == 1) ? "" : "s");
@@ -1336,6 +1649,8 @@ static int userial_init(void)
return status;
fail:
put_tty_driver(gs_tty_driver);
+ if (gserial_wq)
+ destroy_workqueue(gserial_wq);
gs_tty_driver = NULL;
return status;
}
@@ -1343,6 +1658,8 @@ module_init(userial_init);
static void userial_cleanup(void)
{
+ usb_debugfs_remove();
+ destroy_workqueue(gserial_wq);
tty_unregister_driver(gs_tty_driver);
put_tty_driver(gs_tty_driver);
gs_tty_driver = NULL;
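[Note, not part of the patch: the tasklet-to-workqueue conversion above moves gs_rx_push into process context on the dedicated k_gserial queue, so it may sleep while pushing data to the tty. A self-contained sketch of the same life cycle, with hypothetical names:]

/* Hypothetical module mirroring the workqueue pattern introduced above. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

struct demo_port {
	struct work_struct push;
};

static struct demo_port demo_port;

static void demo_rx_push(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, push);

	/* runs in process context, unlike the old tasklet handler */
	(void)port;
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("k_demo");	/* cf. k_gserial */
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_port.push, demo_rx_push);	/* cf. gs_port_alloc() */
	queue_work(demo_wq, &demo_port.push);		/* cf. gs_unthrottle() */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_port.push);	/* cf. gserial_free_port() */
	destroy_workqueue(demo_wq);		/* cf. userial_cleanup() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");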
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index c20210c0babd..50c801cd16d2 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -45,11 +45,21 @@ struct gserial {
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+ u16 serial_state;
+
+	/* control signal callbacks */
+ unsigned int (*get_dtr)(struct gserial *p);
+ unsigned int (*get_rts)(struct gserial *p);
/* notification callbacks */
void (*connect)(struct gserial *p);
void (*disconnect)(struct gserial *p);
int (*send_break)(struct gserial *p, int duration);
+ unsigned int (*send_carrier_detect)(struct gserial *p, unsigned int);
+ unsigned int (*send_ring_indicator)(struct gserial *p, unsigned int);
+ int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+	/* notification of control-bit changes to the modem */
+ void (*notify_modem)(void *gser, u8 portno, int ctrl_bits);
};
/* utilities to allocate/free request and buffer */
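[Note, not part of the patch: a serial function driver that wants to honour the new hooks fills them in on its embedded struct gserial before gserial_connect(). The driver type and its state below are hypothetical; only the gserial fields come from this header.]

/* Hypothetical function-driver wiring for the new control-signal callbacks. */
#include <linux/kernel.h>
#include <linux/termios.h>
#include "u_serial.h"

struct f_demo_serial {
	struct gserial port;		/* embedded, as in the existing functions */
	u16 host_ctrl_bits;		/* last DTR/RTS state received from the host */
};

static unsigned int demo_get_dtr(struct gserial *gser)
{
	struct f_demo_serial *demo = container_of(gser, struct f_demo_serial, port);

	return !!(demo->host_ctrl_bits & TIOCM_DTR);	/* reported by gs_tiocmget() */
}

static unsigned int demo_send_ring_indicator(struct gserial *gser, unsigned int on)
{
	/* queue a serial-state notification toward the host here */
	return 0;
}

static void demo_wire_signals(struct f_demo_serial *demo)
{
	demo->port.get_dtr = demo_get_dtr;
	demo->port.send_ring_indicator = demo_send_ring_indicator;
	/* get_rts, send_carrier_detect and notify_modem follow the same pattern */
}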
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 5c2ac8e8456d..3317d3222184 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,82 +1,41 @@
/*
- * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ * u_uac1.h - Utility definitions for UAC1 function
*
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
*
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
-#ifndef __U_AUDIO_H
-#define __U_AUDIO_H
+#ifndef __U_UAC1_H
+#define __U_UAC1_H
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/usb/audio.h>
#include <linux/usb/composite.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
-#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
-#define FILE_CONTROL "/dev/snd/controlC0"
-
#define UAC1_OUT_EP_MAX_PACKET_SIZE 200
-#define UAC1_REQ_COUNT 256
-#define UAC1_AUDIO_BUF_SIZE 48000
-
-/*
- * This represents the USB side of an audio card device, managed by a USB
- * function which provides control and stream interfaces.
- */
-
-struct gaudio_snd_dev {
- struct gaudio *card;
- struct file *filp;
- struct snd_pcm_substream *substream;
- int access;
- int format;
- int channels;
- int rate;
-};
-
-struct gaudio {
- struct usb_function func;
- struct usb_gadget *gadget;
+#define UAC1_DEF_CCHMASK 0x3
+#define UAC1_DEF_CSRATE 48000
+#define UAC1_DEF_CSSIZE 2
+#define UAC1_DEF_PCHMASK 0x3
+#define UAC1_DEF_PSRATE 48000
+#define UAC1_DEF_PSSIZE 2
+#define UAC1_DEF_REQ_NUM 8
- /* ALSA sound device interfaces */
- struct gaudio_snd_dev control;
- struct gaudio_snd_dev playback;
- struct gaudio_snd_dev capture;
-
- /* TODO */
-};
struct f_uac1_opts {
struct usb_function_instance func_inst;
- int req_buf_size;
- int req_count;
- int audio_buf_size;
- char *fn_play;
- char *fn_cap;
- char *fn_cntl;
+ int c_chmask;
+ int c_srate;
+ int c_ssize;
+ int p_chmask;
+ int p_srate;
+ int p_ssize;
+ int req_number;
unsigned bound:1;
- unsigned fn_play_alloc:1;
- unsigned fn_cap_alloc:1;
- unsigned fn_cntl_alloc:1;
+
struct mutex lock;
int refcnt;
};
-int gaudio_setup(struct gaudio *card);
-void gaudio_cleanup(struct gaudio *the_card);
-
-size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
-int u_audio_get_playback_channels(struct gaudio *card);
-int u_audio_get_playback_rate(struct gaudio *card);
-
-#endif /* __U_AUDIO_H */
+#endif /* __U_UAC1_H */
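[Note, not part of the patch: the rewritten header describes the streams by channel mask, rate and sample size instead of PCM device paths. Purely illustrative arithmetic, showing what the playback defaults above imply (standalone userspace C, hypothetical):]

/* Illustrative only: bandwidth implied by the UAC1 playback defaults. */
#include <stdio.h>

int main(void)
{
	int chmask = 0x3;	/* UAC1_DEF_PCHMASK: bits 0 and 1 set -> stereo */
	int srate = 48000;	/* UAC1_DEF_PSRATE */
	int ssize = 2;		/* UAC1_DEF_PSSIZE: 16-bit samples */
	int channels = __builtin_popcount(chmask);

	/* 2 ch * 2 B * 48000 Hz = 192000 B/s, i.e. 192 B per 1 ms USB frame */
	printf("channels=%d bytes/s=%d bytes/frame=%d\n",
	       channels, channels * ssize * srate,
	       channels * ssize * srate / 1000);
	return 0;
}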
diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index c78c84138a28..4d32492ae084 100644
--- a/drivers/usb/gadget/function/u_uac1.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -18,7 +18,7 @@
#include <linux/random.h>
#include <linux/syscalls.h>
-#include "u_uac1.h"
+#include "u_uac1_legacy.h"
/*
* This component encapsulates the ALSA devices for USB audio gadget
@@ -205,10 +205,11 @@ static int gaudio_open_snd_dev(struct gaudio *card)
{
struct snd_pcm_file *pcm_file;
struct gaudio_snd_dev *snd;
- struct f_uac1_opts *opts;
+ struct f_uac1_legacy_opts *opts;
char *fn_play, *fn_cap, *fn_cntl;
- opts = container_of(card->func.fi, struct f_uac1_opts, func_inst);
+ opts = container_of(card->func.fi, struct f_uac1_legacy_opts,
+ func_inst);
fn_play = opts->fn_play;
fn_cap = opts->fn_cap;
fn_cntl = opts->fn_cntl;
@@ -266,18 +267,24 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
/* Close control device */
snd = &gau->control;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
/* Close PCM playback device and setup substream */
snd = &gau->playback;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
/* Close PCM capture device and setup substream */
snd = &gau->capture;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
return 0;
}
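[Note, not part of the patch: the added snd->filp = NULL assignments make a repeated gaudio_close_snd_dev() call harmless instead of double-closing the file. The same idiom in isolation, with a hypothetical helper name:]

/* Hypothetical helper showing the close-and-clear idiom used above. */
static void demo_close_once(struct gaudio_snd_dev *snd)
{
	if (snd->filp) {
		filp_close(snd->filp, NULL);
		snd->filp = NULL;	/* later calls become a no-op */
	}
}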
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
new file mode 100644
index 000000000000..d715b1af56a4
--- /dev/null
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -0,0 +1,82 @@
+/*
+ * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_UAC1_LEGACY_H
+#define __U_UAC1_LEGACY_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
+#define FILE_CONTROL "/dev/snd/controlC0"
+
+#define UAC1_OUT_EP_MAX_PACKET_SIZE 200
+#define UAC1_REQ_COUNT 256
+#define UAC1_AUDIO_BUF_SIZE 48000
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+ struct gaudio *card;
+ struct file *filp;
+ struct snd_pcm_substream *substream;
+ int access;
+ int format;
+ int channels;
+ int rate;
+};
+
+struct gaudio {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+
+ /* ALSA sound device interfaces */
+ struct gaudio_snd_dev control;
+ struct gaudio_snd_dev playback;
+ struct gaudio_snd_dev capture;
+
+ /* TODO */
+};
+
+struct f_uac1_legacy_opts {
+ struct usb_function_instance func_inst;
+ int req_buf_size;
+ int req_count;
+ int audio_buf_size;
+ char *fn_play;
+ char *fn_cap;
+ char *fn_cntl;
+ unsigned bound:1;
+ unsigned fn_play_alloc:1;
+ unsigned fn_cap_alloc:1;
+ unsigned fn_cntl_alloc:1;
+ struct mutex lock;
+ int refcnt;
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(struct gaudio *the_card);
+
+size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
+int u_audio_get_playback_channels(struct gaudio *card);
+int u_audio_get_playback_rate(struct gaudio *card);
+
+#endif /* __U_UAC1_LEGACY_H */
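[Note, not part of the patch: the legacy utility API preserved in this header is consumed by the legacy UAC1 function roughly as sketched below; error handling is trimmed, and the helper name and silence buffer are hypothetical.]

/* Hypothetical consumer of the legacy gaudio API declared above. */
#include <linux/printk.h>
#include "u_uac1_legacy.h"

static int demo_play_one_packet(struct gaudio *card)
{
	char frame[UAC1_OUT_EP_MAX_PACKET_SIZE] = { 0 };	/* one packet of silence */
	int status;

	status = gaudio_setup(card);	/* opens the ALSA control/playback/capture nodes */
	if (status < 0)
		return status;

	pr_info("uac1: rate=%d channels=%d\n",
		u_audio_get_playback_rate(card),
		u_audio_get_playback_channels(card));

	u_audio_playback(card, frame, sizeof(frame));	/* push audio toward ALSA */

	gaudio_cleanup(card);
	return 0;
}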
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 78dd37279bd4..19eeb83538a5 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -24,6 +24,7 @@
#define UAC2_DEF_CCHMASK 0x3
#define UAC2_DEF_CSRATE 64000
#define UAC2_DEF_CSSIZE 2
+#define UAC2_DEF_REQ_NUM 2
struct f_uac2_opts {
struct usb_function_instance func_inst;
@@ -33,6 +34,7 @@ struct f_uac2_opts {
int c_chmask;
int c_srate;
int c_ssize;
+ int req_number;
bool bound;
struct mutex lock;