summaryrefslogtreecommitdiff
path: root/drivers/usb
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/usb')
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/common/common.c1
-rw-r--r--drivers/usb/core/config.c9
-rw-r--r--drivers/usb/core/driver.c13
-rw-r--r--drivers/usb/core/generic.c38
-rw-r--r--drivers/usb/core/hcd.c61
-rw-r--r--drivers/usb/core/hub.c72
-rw-r--r--drivers/usb/core/message.c157
-rw-r--r--drivers/usb/core/notify.c31
-rw-r--r--drivers/usb/core/usb.c48
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc3/Makefile3
-rw-r--r--drivers/usb/dwc3/core.c546
-rw-r--r--drivers/usb/dwc3/core.h222
-rw-r--r--drivers/usb/dwc3/dbm.c643
-rw-r--r--drivers/usb/dwc3/dbm.h75
-rw-r--r--drivers/usb/dwc3/debug.h20
-rw-r--r--drivers/usb/dwc3/debugfs.c683
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c4356
-rw-r--r--drivers/usb/dwc3/ep0.c187
-rw-r--r--drivers/usb/dwc3/gadget.c1261
-rw-r--r--drivers/usb/dwc3/gadget.h29
-rw-r--r--drivers/usb/dwc3/host.c23
-rw-r--r--drivers/usb/dwc3/io.h4
-rw-r--r--drivers/usb/dwc3/trace.h4
-rw-r--r--drivers/usb/gadget/Kconfig101
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/composite.c352
-rw-r--r--drivers/usb/gadget/configfs.c73
-rw-r--r--drivers/usb/gadget/debug.c133
-rw-r--r--drivers/usb/gadget/debug.h55
-rw-r--r--drivers/usb/gadget/epautoconf.c39
-rw-r--r--drivers/usb/gadget/function/Makefile19
-rw-r--r--drivers/usb/gadget/function/f_accessory.c168
-rw-r--r--drivers/usb/gadget/function/f_acm.c2
-rw-r--r--drivers/usb/gadget/function/f_audio_source.c75
-rw-r--r--drivers/usb/gadget/function/f_ccid.c1176
-rw-r--r--drivers/usb/gadget/function/f_ccid.h83
-rw-r--r--drivers/usb/gadget/function/f_cdev.c1847
-rw-r--r--drivers/usb/gadget/function/f_diag.c1116
-rw-r--r--drivers/usb/gadget/function/f_fs.c892
-rw-r--r--drivers/usb/gadget/function/f_gsi.c3302
-rw-r--r--drivers/usb/gadget/function/f_gsi.h1374
-rw-r--r--drivers/usb/gadget/function/f_hid.c304
-rw-r--r--drivers/usb/gadget/function/f_loopback.c6
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c49
-rw-r--r--drivers/usb/gadget/function/f_mbim.c2147
-rw-r--r--drivers/usb/gadget/function/f_midi.c62
-rw-r--r--drivers/usb/gadget/function/f_mtp.c664
-rw-r--r--drivers/usb/gadget/function/f_ncm.c141
-rw-r--r--drivers/usb/gadget/function/f_obex.c2
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_qc_ecm.c1166
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c1552
-rw-r--r--drivers/usb/gadget/function/f_qdss.c1187
-rw-r--r--drivers/usb/gadget/function/f_qdss.h77
-rw-r--r--drivers/usb/gadget/function/f_rmnet.c1271
-rw-r--r--drivers/usb/gadget/function/f_rndis.c8
-rw-r--r--drivers/usb/gadget/function/f_serial.c451
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c6
-rw-r--r--drivers/usb/gadget/function/f_uac1.c967
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c1022
-rw-r--r--drivers/usb/gadget/function/f_uac2.c808
-rw-r--r--drivers/usb/gadget/function/rndis.c114
-rw-r--r--drivers/usb/gadget/function/rndis.h16
-rw-r--r--drivers/usb/gadget/function/u_audio.c645
-rw-r--r--drivers/usb/gadget/function/u_audio.h95
-rw-r--r--drivers/usb/gadget/function/u_bam.c2521
-rw-r--r--drivers/usb/gadget/function/u_bam_data.c2109
-rw-r--r--drivers/usb/gadget/function/u_bam_data.h71
-rw-r--r--drivers/usb/gadget/function/u_ctrl_qti.c826
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.c1401
-rw-r--r--drivers/usb/gadget/function/u_data_ipa.h119
-rw-r--r--drivers/usb/gadget/function/u_ether.c21
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h35
-rw-r--r--drivers/usb/gadget/function/u_fs.h3
-rw-r--r--drivers/usb/gadget/function/u_qc_ether.c454
-rw-r--r--drivers/usb/gadget/function/u_qc_ether.h101
-rw-r--r--drivers/usb/gadget/function/u_qdss.c128
-rw-r--r--drivers/usb/gadget/function/u_rmnet.h61
-rw-r--r--drivers/usb/gadget/function/u_serial.c419
-rw-r--r--drivers/usb/gadget/function/u_serial.h10
-rw-r--r--drivers/usb/gadget/function/u_uac1.h87
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c (renamed from drivers/usb/gadget/function/u_uac1.c)19
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h82
-rw-r--r--drivers/usb/gadget/function/u_uac2.h2
-rw-r--r--drivers/usb/gadget/legacy/Kconfig15
-rw-r--r--drivers/usb/gadget/legacy/audio.c56
-rw-r--r--drivers/usb/gadget/u_f.c7
-rw-r--r--drivers/usb/gadget/u_f.h3
-rw-r--r--drivers/usb/gadget/udc/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-dbg.c22
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-q.c8
-rw-r--r--drivers/usb/host/ehci-sched.c22
-rw-r--r--drivers/usb/host/ohci-dbg.c10
-rw-r--r--drivers/usb/host/ohci-hcd.c6
-rw-r--r--drivers/usb/host/ohci-mem.c2
-rw-r--r--drivers/usb/host/ohci-q.c10
-rw-r--r--drivers/usb/host/uhci-debug.c12
-rw-r--r--drivers/usb/host/uhci-q.c10
-rw-r--r--drivers/usb/host/xhci-dbg.c72
-rw-r--r--drivers/usb/host/xhci-hub.c227
-rw-r--r--drivers/usb/host/xhci-mem.c424
-rw-r--r--drivers/usb/host/xhci-plat.c227
-rw-r--r--drivers/usb/host/xhci-ring.c199
-rw-r--r--drivers/usb/host/xhci-trace.h4
-rw-r--r--drivers/usb/host/xhci.c134
-rw-r--r--drivers/usb/host/xhci.h17
-rw-r--r--drivers/usb/misc/Kconfig34
-rw-r--r--drivers/usb/misc/Makefile3
-rw-r--r--drivers/usb/misc/diag_ipc_bridge.c859
-rw-r--r--drivers/usb/misc/ehset.c304
-rw-r--r--drivers/usb/misc/ks_bridge.c1155
-rw-r--r--drivers/usb/misc/lvstest.c48
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/pd/Kconfig25
-rw-r--r--drivers/usb/pd/Makefile6
-rw-r--r--drivers/usb/pd/policy_engine.c4038
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c914
-rw-r--r--drivers/usb/pd/usbpd.h106
-rw-r--r--drivers/usb/phy/Kconfig40
-rw-r--r--drivers/usb/phy/Makefile4
-rw-r--r--drivers/usb/phy/phy-msm-hsusb.c858
-rw-r--r--drivers/usb/phy/phy-msm-qusb-v2.c1141
-rw-r--r--drivers/usb/phy/phy-msm-qusb.c1543
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c842
-rw-r--r--drivers/usb/phy/phy-msm-ssusb.c595
-rw-r--r--drivers/usb/phy/phy-msm-usb.c4
130 files changed, 51893 insertions, 2661 deletions
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 8ed451dd651e..1edaaf65c560 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -106,6 +106,8 @@ source "drivers/usb/chipidea/Kconfig"
source "drivers/usb/isp1760/Kconfig"
+source "drivers/usb/pd/Kconfig"
+
comment "USB port drivers"
if USB
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d5c57f1e98fd..a0712e28c7d8 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -61,3 +61,5 @@ obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += common/
obj-$(CONFIG_USBIP_CORE) += usbip/
+
+obj-$(CONFIG_USB_PD) += pd/
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index a00bfb93acc3..ddc6bfb02164 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -34,6 +34,7 @@ const char *usb_otg_state_string(enum usb_otg_state state)
[OTG_STATE_B_PERIPHERAL] = "b_peripheral",
[OTG_STATE_B_WAIT_ACON] = "b_wait_acon",
[OTG_STATE_B_HOST] = "b_host",
+ [OTG_STATE_B_SUSPEND] = "b_suspend",
};
if (state < 0 || state >= ARRAY_SIZE(names))
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index eca8d04cfb3e..ae2ea73cb24e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1052,6 +1052,15 @@ int usb_get_bos_descriptor(struct usb_device *dev)
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
+ break;
+ case USB_CAP_TYPE_CONFIG_SUMMARY:
+ /* one such desc per configuration */
+ if (!dev->bos->num_config_summary_desc)
+ dev->bos->config_summary =
+ (struct usb_config_summary_descriptor *)buffer;
+
+ dev->bos->num_config_summary_desc++;
+ break;
default:
break;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 654199c6a36c..8089e5820be4 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1454,6 +1454,9 @@ int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
+ if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
+ return 0;
+
unbind_no_pm_drivers_interfaces(udev);
/* From now on we are sure all drivers support suspend/resume
@@ -1483,6 +1486,16 @@ int usb_resume(struct device *dev, pm_message_t msg)
struct usb_device *udev = to_usb_device(dev);
int status;
+ /*
+ * Some buses would like to keep their devices in suspend
+ * state after system resume. Their resume happen when
+ * a remote wakeup is detected or interface driver start
+ * I/O. And in the case when the system is restoring from
+ * hibernation, make sure all the devices are resumed.
+ */
+ if (udev->bus->skip_resume && msg.event != PM_EVENT_RESTORE)
+ return 0;
+
/* For all calls, take the device back to full power and
* tell the PM core in case it was autosuspended previously.
* Unbind the interfaces that will need rebinding later,
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index a5240b4d7ab9..619e5446cbe8 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,8 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -40,6 +42,36 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+ int desc_cnt, func_cnt, numfunc;
+ int num_cfg_desc;
+ struct usb_config_summary_descriptor *conf_summary;
+
+ if (!bos || !bos->config_summary)
+ goto done;
+
+ conf_summary = bos->config_summary;
+ num_cfg_desc = bos->num_config_summary_desc;
+
+ for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+ numfunc = conf_summary->bNumFunctions;
+ for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+ /* look for BADD 3.0 */
+ if (conf_summary->cs_info[func_cnt].bClass ==
+ USB_CLASS_AUDIO &&
+ conf_summary->cs_info[func_cnt].bProtocol ==
+ UAC_VERSION_3 &&
+ conf_summary->cs_info[func_cnt].bSubClass !=
+ FULL_ADC_PROFILE)
+ return conf_summary->bConfigurationValue;
+ }
+ }
+
+done:
+ return -EINVAL;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -130,7 +162,6 @@ int usb_choose_configuration(struct usb_device *udev)
best = c;
break;
}
-
/* If all the remaining configs are vendor-specific,
* choose the first one. */
else if (!best)
@@ -143,7 +174,10 @@ int usb_choose_configuration(struct usb_device *udev)
insufficient_power, plural(insufficient_power));
if (best) {
- i = best->desc.bConfigurationValue;
+ /* choose usb audio class preferred config if available */
+ i = usb_audio_max_rev_config(udev->bos);
+ if (i < 0)
+ i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
i, num_configs, plural(num_configs));
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 99c146f4b6b5..4740c307e02d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2205,8 +2205,65 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
return hcd->driver->get_frame_number (hcd);
}
+int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+ unsigned intr_num)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HCD_RH_RUNNING(hcd))
+ return 0;
+
+ return hcd->driver->sec_event_ring_setup(hcd, intr_num);
+}
+
+int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+ unsigned intr_num)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HCD_RH_RUNNING(hcd))
+ return 0;
+
+ return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
+}
+
/*-------------------------------------------------------------------------*/
+dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+ unsigned intr_num)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HCD_RH_RUNNING(hcd))
+ return 0;
+
+ return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+}
+
+dma_addr_t
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HCD_RH_RUNNING(hcd))
+ return 0;
+
+ return hcd->driver->get_dcba_dma_addr(hcd, udev);
+}
+
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HCD_RH_RUNNING(hcd))
+ return 0;
+
+ return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+}
+
#ifdef CONFIG_PM
int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
@@ -2462,6 +2519,7 @@ void usb_hc_died (struct usb_hcd *hcd)
}
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
/* Make sure that the other roothub is also deallocated. */
+ usb_atomic_notify_dead_bus(&hcd->self);
}
EXPORT_SYMBOL_GPL (usb_hc_died);
@@ -2947,6 +3005,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
+ /* handle any pending hub events before XHCI stops */
+ usb_flush_hub_wq();
+
mutex_lock(&usb_bus_list_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6910a6d7c63e..f017d6bd8263 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -36,6 +36,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+extern int deny_new_usb;
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -48,6 +50,11 @@ static void hub_event(struct work_struct *work);
/* synchronize hub-port add/remove and peering operations */
DEFINE_MUTEX(usb_port_peer_mutex);
+static bool skip_extended_resume_delay = 1;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+ "removes extra delay added to finish bus resume");
+
/* cycle leds on hubs that aren't blinking for attention */
static bool blinkenlights = 0;
module_param(blinkenlights, bool, S_IRUGO);
@@ -622,6 +629,12 @@ void usb_kick_hub_wq(struct usb_device *hdev)
kick_hub_wq(hub);
}
+void usb_flush_hub_wq(void)
+{
+ flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
@@ -1685,47 +1698,6 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
hdev = interface_to_usbdev(intf);
/*
- * Set default autosuspend delay as 0 to speedup bus suspend,
- * based on the below considerations:
- *
- * - Unlike other drivers, the hub driver does not rely on the
- * autosuspend delay to provide enough time to handle a wakeup
- * event, and the submitted status URB is just to check future
- * change on hub downstream ports, so it is safe to do it.
- *
- * - The patch might cause one or more auto supend/resume for
- * below very rare devices when they are plugged into hub
- * first time:
- *
- * devices having trouble initializing, and disconnect
- * themselves from the bus and then reconnect a second
- * or so later
- *
- * devices just for downloading firmware, and disconnects
- * themselves after completing it
- *
- * For these quite rare devices, their drivers may change the
- * autosuspend delay of their parent hub in the probe() to one
- * appropriate value to avoid the subtle problem if someone
- * does care it.
- *
- * - The patch may cause one or more auto suspend/resume on
- * hub during running 'lsusb', but it is probably too
- * infrequent to worry about.
- *
- * - Change autosuspend delay of hub can avoid unnecessary auto
- * suspend timer for hub, also may decrease power consumption
- * of USB bus.
- *
- * - If user has indicated to prevent autosuspend by passing
- * usbcore.autosuspend = -1 then keep autosuspend disabled.
- */
-#ifdef CONFIG_PM
- if (hdev->dev.power.autosuspend_delay >= 0)
- pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
-#endif
-
- /*
* Hubs have proper suspend/resume support, except for root hubs
* where the controller driver doesn't have bus_suspend and
* bus_resume methods.
@@ -3423,7 +3395,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* drive resume for USB_RESUME_TIMEOUT msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
- msleep(USB_RESUME_TIMEOUT);
+ if (!skip_extended_resume_delay)
+ usleep_range(USB_RESUME_TIMEOUT * 1000,
+ (USB_RESUME_TIMEOUT + 1) * 1000);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
@@ -3446,7 +3420,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
/* TRSMRCY = 10 msec */
- msleep(10);
+ usleep_range(10000, 10500);
}
if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
@@ -4378,6 +4352,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
int devnum = udev->devnum;
+ char *error_event[] = {
+ "USB_DEVICE_ERROR=Device_No_Response", NULL };
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
@@ -4553,6 +4529,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (r != -ENODEV)
dev_err(&udev->dev, "device descriptor read/64, error %d\n",
r);
+ kobject_uevent_env(&udev->parent->dev.kobj,
+ KOBJ_CHANGE, error_event);
retval = -EMSGSIZE;
continue;
}
@@ -4605,6 +4583,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
dev_err(&udev->dev,
"device descriptor read/8, error %d\n",
retval);
+ kobject_uevent_env(&udev->parent->dev.kobj,
+ KOBJ_CHANGE, error_event);
if (retval >= 0)
retval = -EMSGSIZE;
} else {
@@ -4820,6 +4800,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
+
+ if (deny_new_usb) {
+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
+ goto done;
+ }
+
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e568325cb6e1..f41cb37adb07 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -12,6 +12,7 @@
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
+#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <asm/byteorder.h>
@@ -2039,3 +2040,159 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
+
+/**
+ * cdc_parse_cdc_header - parse the extra headers present in CDC devices
+ * @hdr: the place to put the results of the parsing
+ * @intf: the interface for which parsing is requested
+ * @buffer: pointer to the extra headers to be parsed
+ * @buflen: length of the extra headers
+ *
+ * This evaluates the extra headers present in CDC devices which
+ * bind the interfaces for data and control and provide details
+ * about the capabilities of the device.
+ *
+ * Return: number of descriptors parsed or -EINVAL
+ * if the header is contradictory beyond salvage
+ */
+
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ struct usb_interface *intf,
+ u8 *buffer,
+ int buflen)
+{
+ /* duplicates are ignored */
+ struct usb_cdc_union_desc *union_header = NULL;
+
+ /* duplicates are not tolerated */
+ struct usb_cdc_header_desc *header = NULL;
+ struct usb_cdc_ether_desc *ether = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
+ struct usb_cdc_mdlm_desc *desc = NULL;
+
+ unsigned int elength;
+ int cnt = 0;
+
+ memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
+ hdr->phonet_magic_present = false;
+ while (buflen > 0) {
+ elength = buffer[0];
+ if (!elength) {
+ dev_err(&intf->dev, "skipping garbage byte\n");
+ elength = 1;
+ goto next_desc;
+ }
+ if ((buflen < elength) || (elength < 3)) {
+ dev_err(&intf->dev, "invalid descriptor buffer length\n");
+ break;
+ }
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
+ dev_err(&intf->dev, "skipping garbage\n");
+ goto next_desc;
+ }
+
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+ if (elength < sizeof(struct usb_cdc_union_desc))
+ goto next_desc;
+ if (union_header) {
+ dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
+ goto next_desc;
+ }
+ union_header = (struct usb_cdc_union_desc *)buffer;
+ break;
+ case USB_CDC_COUNTRY_TYPE:
+ if (elength < sizeof(struct usb_cdc_country_functional_desc))
+ goto next_desc;
+ hdr->usb_cdc_country_functional_desc =
+ (struct usb_cdc_country_functional_desc *)buffer;
+ break;
+ case USB_CDC_HEADER_TYPE:
+ if (elength != sizeof(struct usb_cdc_header_desc))
+ goto next_desc;
+ if (header)
+ return -EINVAL;
+ header = (struct usb_cdc_header_desc *)buffer;
+ break;
+ case USB_CDC_ACM_TYPE:
+ if (elength < sizeof(struct usb_cdc_acm_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_acm_descriptor =
+ (struct usb_cdc_acm_descriptor *)buffer;
+ break;
+ case USB_CDC_ETHERNET_TYPE:
+ if (elength != sizeof(struct usb_cdc_ether_desc))
+ goto next_desc;
+ if (ether)
+ return -EINVAL;
+ ether = (struct usb_cdc_ether_desc *)buffer;
+ break;
+ case USB_CDC_CALL_MANAGEMENT_TYPE:
+ if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_call_mgmt_descriptor =
+ (struct usb_cdc_call_mgmt_descriptor *)buffer;
+ break;
+ case USB_CDC_DMM_TYPE:
+ if (elength < sizeof(struct usb_cdc_dmm_desc))
+ goto next_desc;
+ hdr->usb_cdc_dmm_desc =
+ (struct usb_cdc_dmm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_TYPE:
+ if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ goto next_desc;
+ if (desc)
+ return -EINVAL;
+ desc = (struct usb_cdc_mdlm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ goto next_desc;
+ if (detail)
+ return -EINVAL;
+ detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
+ break;
+ case USB_CDC_NCM_TYPE:
+ if (elength < sizeof(struct usb_cdc_ncm_desc))
+ goto next_desc;
+ hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_desc))
+ goto next_desc;
+
+ hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_EXTENDED_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
+ break;
+ hdr->usb_cdc_mbim_extended_desc =
+ (struct usb_cdc_mbim_extended_desc *)buffer;
+ break;
+ case CDC_PHONET_MAGIC_NUMBER:
+ hdr->phonet_magic_present = true;
+ break;
+ default:
+ /*
+ * there are LOTS more CDC descriptors that
+ * could legitimately be found here.
+ */
+ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
+ buffer[2], elength);
+ goto next_desc;
+ }
+ cnt++;
+next_desc:
+ buflen -= elength;
+ buffer += elength;
+ }
+ hdr->usb_cdc_union_desc = union_header;
+ hdr->usb_cdc_header_desc = header;
+ hdr->usb_cdc_mdlm_detail_desc = detail;
+ hdr->usb_cdc_mdlm_desc = desc;
+ hdr->usb_cdc_ether_desc = ether;
+ return cnt;
+}
+
+EXPORT_SYMBOL(cdc_parse_cdc_header);
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2e..af91b1e7146c 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -17,6 +17,7 @@
#include "usb.h"
static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list);
/**
* usb_register_notify - register a notifier callback whenever a usb change happens
@@ -67,3 +68,33 @@ void usb_notify_remove_bus(struct usb_bus *ubus)
{
blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
}
+
+/**
+ * usb_register_atomic_notify - register a atomic notifier callback whenever a
+ * HC dies
+ * @nb: pointer to the atomic notifier block for the callback events.
+ *
+ */
+void usb_register_atomic_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_register_atomic_notify);
+
+/**
+ * usb_unregister_atomic_notify - unregister a atomic notifier callback
+ * @nb: pointer to the notifier block for the callback events.
+ *
+ */
+void usb_unregister_atomic_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify);
+
+
+void usb_atomic_notify_dead_bus(struct usb_bus *ubus)
+{
+ atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED,
+ ubus);
+}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 415e9cf407ba..ebb53165df42 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -754,6 +754,54 @@ int usb_get_current_frame_number(struct usb_device *dev)
}
EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
+int usb_sec_event_ring_setup(struct usb_device *dev,
+ unsigned intr_num)
+{
+ if (dev->state == USB_STATE_NOTATTACHED)
+ return 0;
+
+ return usb_hcd_sec_event_ring_setup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_setup);
+
+int usb_sec_event_ring_cleanup(struct usb_device *dev,
+ unsigned intr_num)
+{
+ return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
+
+dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+ unsigned intr_num)
+{
+ if (dev->state == USB_STATE_NOTATTACHED)
+ return 0;
+
+ return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
+{
+ if (dev->state == USB_STATE_NOTATTACHED)
+ return 0;
+
+ return usb_hcd_get_dcba_dma_addr(dev);
+}
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+ struct usb_host_endpoint *ep)
+{
+ if (dev->state == USB_STATE_NOTATTACHED)
+ return 0;
+
+ return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+}
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+
/*-------------------------------------------------------------------*/
/*
* __usb_get_extra_descriptor() finds a descriptor of specific type in the
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 462a00c749b8..6447ea618a08 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -184,6 +184,7 @@ extern void usb_notify_add_device(struct usb_device *udev);
extern void usb_notify_remove_device(struct usb_device *udev);
extern void usb_notify_add_bus(struct usb_bus *ubus);
extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus);
extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
struct usb_hub_descriptor *desc);
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index acc951d46c27..389936296141 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,5 +1,6 @@
# define_trace.h needs to know how to find our header
CFLAGS_trace.o := -I$(src)
+CFLAGS_dwc3-msm.o := -Idrivers/usb/host -Idrivers/base/power
obj-$(CONFIG_USB_DWC3) += dwc3.o
@@ -37,5 +38,5 @@ obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
-obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o dwc3-msm.o dbm.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 591bc3f7be76..3191825710af 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -35,6 +35,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/irq.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -50,6 +51,20 @@
/* -------------------------------------------------------------------------- */
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+ if (suspend)
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ else
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+}
+
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
u32 reg;
@@ -57,35 +72,74 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ /*
+ * Set this bit so that device attempts three more times at SS, even
+ * if it failed previously to operate in SS mode.
+ */
+ reg |= DWC3_GCTL_U2RSTECN;
+ reg &= ~(DWC3_GCTL_SOFITPSYNC);
+ reg &= ~(DWC3_GCTL_PWRDNSCALEMASK);
+ reg |= DWC3_GCTL_PWRDNSCALE(2);
+ reg |= DWC3_GCTL_U2EXIT_LFPS;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ if (mode == DWC3_GCTL_PRTCAP_OTG || mode == DWC3_GCTL_PRTCAP_HOST) {
+ /*
+ * Allow ITP generated off of ref clk based counter instead
+ * of UTMI/ULPI clk based counter, when superspeed only is
+ * active so that UTMI/ULPI PHY can be suspened.
+ *
+ * Starting with revision 2.50A, GFLADJ_REFCLK_LPM_SEL is used
+ * instead.
+ */
+ if (dwc->revision < DWC3_REVISION_250A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_SOFITPSYNC;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ } else {
+ reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+ }
+ }
}
/**
- * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * Peforms initialization of HS and SS PHYs.
+ * If used as a part of POR or init sequence it is recommended
+ * that we should perform hard reset of the PHYs prior to invoking
+ * this function.
* @dwc: pointer to our context structure
- */
-static int dwc3_core_soft_reset(struct dwc3 *dwc)
+*/
+static int dwc3_init_usb_phys(struct dwc3 *dwc)
{
- u32 reg;
int ret;
- /* Before Resetting PHY, put Core in Reset */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ /* Bring up PHYs */
+ ret = usb_phy_init(dwc->usb2_phy);
+ if (ret) {
+ pr_err("%s: usb_phy_init(dwc->usb2_phy) returned %d\n",
+ __func__, ret);
+ return ret;
+ }
- /* Assert USB3 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ if (dwc->maximum_speed == USB_SPEED_HIGH)
+ goto generic_phy_init;
- /* Assert USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ ret = usb_phy_init(dwc->usb3_phy);
+ if (ret == -EBUSY) {
+ /*
+ * Setting Max speed as high when USB3 PHY initialiation
+ * is failing and USB superspeed can't be supported.
+ */
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ } else if (ret) {
+ pr_err("%s: usb_phy_init(dwc->usb3_phy) returned %d\n",
+ __func__, ret);
+ return ret;
+ }
- usb_phy_init(dwc->usb2_phy);
- usb_phy_init(dwc->usb3_phy);
+generic_phy_init:
ret = phy_init(dwc->usb2_generic_phy);
if (ret < 0)
return ret;
@@ -95,24 +149,45 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
phy_exit(dwc->usb2_generic_phy);
return ret;
}
- mdelay(100);
- /* Clear USB3 PHY reset */
+ return 0;
+}
+
+/**
+ * dwc3_core_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_core_reset(struct dwc3 *dwc)
+{
+ int ret;
+ u32 reg;
+
+ /* Reset PHYs */
+ usb_phy_reset(dwc->usb2_phy);
+
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ usb_phy_reset(dwc->usb3_phy);
+
+ /* Initialize PHYs */
+ ret = dwc3_init_usb_phys(dwc);
+ if (ret) {
+ pr_err("%s: dwc3_init_phys returned %d\n",
+ __func__, ret);
+ return ret;
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ reg &= ~DWC3_GUSB3PIPECTL_DELAYP1TRANS;
- /* Clear USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ /* core exits U1/U2/U3 only in PHY power state P1/P2/P3 respectively */
+ if (dwc->revision <= DWC3_REVISION_310A)
+ reg |= DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX;
- mdelay(100);
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
- /* After PHYs are stable we can take Core out of reset state */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg &= ~DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT, 0);
+
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT, 0);
return 0;
}
@@ -190,7 +265,7 @@ static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
* otherwise ERR_PTR(errno).
*/
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
- unsigned length)
+ unsigned length, enum event_buf_type type)
{
struct dwc3_event_buffer *evt;
@@ -200,6 +275,7 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
evt->dwc = dwc;
evt->length = length;
+ evt->type = type;
evt->buf = dma_alloc_coherent(dwc->dev, length,
&evt->dma, GFP_KERNEL);
if (!evt->buf)
@@ -234,26 +310,40 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
*/
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
- int num;
- int i;
+ int i;
+ int j = 0;
- num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
- dwc->num_event_buffers = num;
+ dwc->num_event_buffers = dwc->num_normal_event_buffers +
+ dwc->num_gsi_event_buffers;
- dwc->ev_buffs = devm_kzalloc(dwc->dev, sizeof(*dwc->ev_buffs) * num,
+ dwc->ev_buffs = devm_kzalloc(dwc->dev,
+ sizeof(*dwc->ev_buffs) * dwc->num_event_buffers,
GFP_KERNEL);
if (!dwc->ev_buffs)
return -ENOMEM;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < dwc->num_normal_event_buffers; i++) {
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc3_alloc_one_event_buffer(dwc, length,
+ EVT_BUF_TYPE_NORMAL);
+ if (IS_ERR(evt)) {
+ dev_err(dwc->dev, "can't allocate event buffer\n");
+ return PTR_ERR(evt);
+ }
+ dwc->ev_buffs[j++] = evt;
+ }
+
+ for (i = 0; i < dwc->num_gsi_event_buffers; i++) {
struct dwc3_event_buffer *evt;
- evt = dwc3_alloc_one_event_buffer(dwc, length);
+ evt = dwc3_alloc_one_event_buffer(dwc, length,
+ EVT_BUF_TYPE_GSI);
if (IS_ERR(evt)) {
dev_err(dwc->dev, "can't allocate event buffer\n");
return PTR_ERR(evt);
}
- dwc->ev_buffs[i] = evt;
+ dwc->ev_buffs[j++] = evt;
}
return 0;
@@ -265,25 +355,40 @@ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_event_buffers_setup(struct dwc3 *dwc)
+int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
int n;
for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
- dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
+ dev_dbg(dwc->dev, "Event buf %pK dma %08llx length %d\n",
evt->buf, (unsigned long long) evt->dma,
evt->length);
+ memset(evt->buf, 0, evt->length);
+
evt->lpos = 0;
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
lower_32_bits(evt->dma));
- dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
- upper_32_bits(evt->dma));
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
- DWC3_GEVNTSIZ_SIZE(evt->length));
+
+ if (evt->type == EVT_BUF_TYPE_NORMAL) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
+ } else {
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
+ DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
+ DWC3_GEVENT_TYPE_GSI) |
+ DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n));
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
+ DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
+ ((evt->length) & 0xffff));
+ }
+
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
}
@@ -529,7 +634,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
{
u32 hwparams4 = dwc->hwparams.hwparams4;
u32 reg;
@@ -559,16 +664,28 @@ static int dwc3_core_init(struct dwc3 *dwc)
/* Handle USB2.0-only core configuration */
if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
+ if (dwc->max_hw_supp_speed == USB_SPEED_SUPER) {
+ dwc->max_hw_supp_speed = USB_SPEED_HIGH;
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
+ }
}
- /* issue device SoftReset too */
- ret = dwc3_soft_reset(dwc);
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval && (dwc->revision == DWC3_REVISION_300A))
+ dwc->imod_interval = 1;
+
+ ret = dwc3_core_reset(dwc);
if (ret)
goto err0;
- ret = dwc3_core_soft_reset(dwc);
+ /* issue device SoftReset too */
+ ret = dwc3_soft_reset(dwc);
if (ret)
goto err0;
@@ -639,6 +756,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_core_num_eps(dwc);
+ /*
+ * Disable clock gating to work around a known HW bug that causes the
+ * internal RAM clock to get stuck when entering low power modes.
+ */
+ if (dwc->disable_clk_gating) {
+ dev_dbg(dwc->dev, "Disabling controller clock gating.\n");
+ reg |= DWC3_GCTL_DSBLCLKGTNG;
+ }
+
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
ret = dwc3_alloc_scratch_buffers(dwc);
@@ -649,6 +775,17 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err2;
+ /*
+ * clear Elastic buffer mode in GUSB3PIPECTL(0) register, otherwise
+ * it results in high link errors and could cause SS mode transfer
+ * failure.
+ */
+ if (!dwc->nominal_elastic_buffer) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg &= ~DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
return 0;
err2:
@@ -743,38 +880,16 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
static int dwc3_core_init_mode(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
- int ret;
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- ret = dwc3_gadget_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
- return ret;
- }
break;
case USB_DR_MODE_HOST:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
- ret = dwc3_host_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
break;
case USB_DR_MODE_OTG:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
- ret = dwc3_host_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- ret = dwc3_gadget_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
- return ret;
- }
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -801,13 +916,109 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
/* do nothing */
break;
}
+}
+
+/* XHCI reset, resets other CORE registers as well, re-init those */
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc)
+{
+ dwc3_core_init(dwc);
+ dwc3_gadget_restart(dwc);
+}
+
+static void (*notify_event)(struct dwc3 *, unsigned, unsigned);
+void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned, unsigned))
+{
+ notify_event = notify;
+}
+EXPORT_SYMBOL(dwc3_set_notifier);
+
+int dwc3_notify_event(struct dwc3 *dwc, unsigned event, unsigned value)
+{
+ int ret = 0;
+
+ if (dwc->notify_event)
+ dwc->notify_event(dwc, event, value);
+ else
+ ret = -ENODEV;
+
+ return ret;
+}
+EXPORT_SYMBOL(dwc3_notify_event);
+
+int dwc3_core_pre_init(struct dwc3 *dwc)
+{
+ int ret;
+
+ dwc3_cache_hwparams(dwc);
+
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
+ if (!dwc->ev_buffs) {
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+ if (ret) {
+ dev_err(dwc->dev, "failed to allocate event buffers\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ ret = dwc3_core_init(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to initialize core\n");
+ goto err2;
+ }
+
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err3;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err4;
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err5;
+ }
- /* de-assert DRVVBUS for HOST and OTG mode */
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ ret = dwc3_core_init_mode(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set mode with dwc3 core\n");
+ goto err6;
+ }
+
+ return ret;
+
+err6:
+ dwc3_event_buffers_cleanup(dwc);
+err5:
+ phy_power_off(dwc->usb3_generic_phy);
+err4:
+ phy_power_off(dwc->usb2_generic_phy);
+err3:
+ dwc3_core_exit(dwc);
+err2:
+ dwc3_free_event_buffers(dwc);
+err1:
+ dwc3_ulpi_exit(dwc);
+err0:
+ return ret;
}
#define DWC3_ALIGN_MASK (16 - 1)
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return ((dwc3_is_usb3(dwc) &&
+ dwc->revision >= DWC3_REVISION_300A) ||
+ (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -818,7 +1029,8 @@ static int dwc3_probe(struct platform_device *pdev)
u8 tx_de_emphasis;
u8 hird_threshold;
u32 fladj = 0;
-
+ u32 num_evt_buffs;
+ int irq;
int ret;
void __iomem *regs;
@@ -832,6 +1044,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->mem = mem;
dwc->dev = dev;
+ dwc->notify_event = notify_event;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "missing IRQ\n");
@@ -842,12 +1055,27 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->xhci_resources[1].flags = res->flags;
dwc->xhci_resources[1].name = res->name;
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+
+ /* will be enabled in dwc3_msm_resume() */
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
+ dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ return -ENODEV;
+ }
+
+ dwc->irq = irq;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing memory resource\n");
return -ENODEV;
}
+ dwc->reg_phys = res->start;
dwc->xhci_resources[0].start = res->start;
dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
DWC3_XHCI_REGS_END;
@@ -882,6 +1110,7 @@ static int dwc3_probe(struct platform_device *pdev)
hird_threshold = 12;
dwc->maximum_speed = usb_get_maximum_speed(dev);
+ dwc->max_hw_supp_speed = dwc->maximum_speed;
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->has_lpm_erratum = device_property_read_bool(dev,
@@ -930,8 +1159,32 @@ static int dwc3_probe(struct platform_device *pdev)
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&fladj);
+ dwc->nominal_elastic_buffer = device_property_read_bool(dev,
+ "snps,nominal-elastic-buffer");
+ dwc->usb3_u1u2_disable = device_property_read_bool(dev,
+ "snps,usb3-u1u2-disable");
+ dwc->disable_clk_gating = device_property_read_bool(dev,
+ "snps,disable-clk-gating");
+ dwc->enable_bus_suspend = device_property_read_bool(dev,
+ "snps,bus-suspend-enable");
+
+ dwc->num_normal_event_buffers = 1;
+ ret = device_property_read_u32(dev,
+ "snps,num-normal-evt-buffs", &num_evt_buffs);
+ if (!ret)
+ dwc->num_normal_event_buffers = num_evt_buffs;
+
+ ret = device_property_read_u32(dev,
+ "snps,num-gsi-evt-buffs", &dwc->num_gsi_event_buffers);
+
+ if (dwc->enable_bus_suspend) {
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ pm_runtime_use_autosuspend(dev);
+ }
+
if (pdata) {
dwc->maximum_speed = pdata->maximum_speed;
+ dwc->max_hw_supp_speed = dwc->maximum_speed;
dwc->has_lpm_erratum = pdata->has_lpm_erratum;
if (pdata->lpm_nyet_threshold)
lpm_nyet_threshold = pdata->lpm_nyet_threshold;
@@ -965,7 +1218,7 @@ static int dwc3_probe(struct platform_device *pdev)
/* default to superspeed if no maximum_speed passed */
if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
- dwc->maximum_speed = USB_SPEED_SUPER;
+ dwc->max_hw_supp_speed = dwc->maximum_speed = USB_SPEED_SUPER;
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -973,104 +1226,84 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
+ init_waitqueue_head(&dwc->wait_linkstate);
platform_set_drvdata(pdev, dwc);
- dwc3_cache_hwparams(dwc);
-
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
-
ret = dwc3_core_get_phy(dwc);
if (ret)
goto err0;
spin_lock_init(&dwc->lock);
- if (!dev->dma_mask) {
- dev->dma_mask = dev->parent->dma_mask;
- dev->dma_parms = dev->parent->dma_parms;
- dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+ dev->dma_mask = dev->parent->dma_mask;
+ dev->dma_parms = dev->parent->dma_parms;
+ dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+
+ dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
+ if (!dwc->dwc_wq) {
+ pr_err("%s: Unable to create workqueue dwc_wq\n", __func__);
+ return -ENOMEM;
}
+ INIT_WORK(&dwc->bh_work, dwc3_bh_work);
+
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
- ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
- if (ret) {
- dev_err(dwc->dev, "failed to allocate event buffers\n");
- ret = -ENOMEM;
- goto err1;
- }
-
if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
dwc->dr_mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
- if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) {
dwc->dr_mode = USB_DR_MODE_OTG;
-
- ret = dwc3_core_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize core\n");
- goto err1;
+ dwc->is_drd = true;
}
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc, fladj);
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err2;
-
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err3;
+ /* Hardcode number of eps */
+ dwc->num_in_eps = 16;
+ dwc->num_out_eps = 16;
- ret = dwc3_event_buffers_setup(dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err4;
+ if (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto err0;
+ }
}
- ret = dwc3_core_init_mode(dwc);
- if (ret)
- goto err5;
+ if (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_HOST) {
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ goto err_gadget;
+ }
+ }
ret = dwc3_debugfs_init(dwc);
if (ret) {
dev_err(dev, "failed to initialize debugfs\n");
- goto err6;
+ goto err_host;
}
pm_runtime_allow(dev);
return 0;
-err6:
- dwc3_core_exit_mode(dwc);
-
-err5:
- dwc3_event_buffers_cleanup(dwc);
-
-err4:
- phy_power_off(dwc->usb3_generic_phy);
-
-err3:
- phy_power_off(dwc->usb2_generic_phy);
-
-err2:
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
-
-err1:
- dwc3_free_event_buffers(dwc);
- dwc3_ulpi_exit(dwc);
-
+err_host:
+ if (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_HOST)
+ dwc3_host_exit(dwc);
+err_gadget:
+ if (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_PERIPHERAL)
+ dwc3_gadget_exit(dwc);
err0:
/*
* restore res->start back to its original value so that, in case the
@@ -1078,6 +1311,7 @@ err0:
* memory region the next time probe is called.
*/
res->start -= DWC3_GLOBALS_REGS_START;
+ destroy_workqueue(dwc->dwc_wq);
return ret;
}
@@ -1099,14 +1333,14 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_event_buffers_cleanup(dwc);
dwc3_free_event_buffers(dwc);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
+ destroy_workqueue(dwc->dwc_wq);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1119,6 +1353,10 @@ static int dwc3_suspend(struct device *dev)
struct dwc3 *dwc = dev_get_drvdata(dev);
unsigned long flags;
+ /* Check if platform glue driver handling PM, if not then handle here */
+ if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT, 0))
+ return 0;
+
spin_lock_irqsave(&dwc->lock, flags);
switch (dwc->dr_mode) {
@@ -1151,6 +1389,10 @@ static int dwc3_resume(struct device *dev)
unsigned long flags;
int ret;
+ /* Check if platform glue driver handling PM, if not then handle here */
+ if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0))
+ return 0;
+
pinctrl_pm_select_default_state(dev);
usb_phy_init(dwc->usb3_phy);
@@ -1193,8 +1435,26 @@ err_usb2phy_init:
return ret;
}
+static int dwc3_pm_restore(struct device *dev)
+{
+ /*
+ * Set the core as runtime active to prevent the runtime
+ * PM ops being called before the PM restore is completed.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops dwc3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .suspend = dwc3_suspend,
+ .resume = dwc3_resume,
+ .freeze = dwc3_suspend,
+ .thaw = dwc3_pm_restore,
+ .poweroff = dwc3_suspend,
+ .restore = dwc3_pm_restore,
};
#define DWC3_PM_OPS &(dwc3_dev_pm_ops)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c19250bc550c..2e56d167ba05 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -26,6 +26,8 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -57,12 +59,15 @@
#define DWC3_DEVICE_EVENT_WAKEUP 4
#define DWC3_DEVICE_EVENT_HIBER_REQ 5
#define DWC3_DEVICE_EVENT_EOPF 6
+/* For version 2.30a and above */
+#define DWC3_DEVICE_EVENT_SUSPEND 6
#define DWC3_DEVICE_EVENT_SOF 7
#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9
#define DWC3_DEVICE_EVENT_CMD_CMPL 10
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB (1 << 31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -125,6 +130,11 @@
#define DWC3_GEVNTSIZ(n) (0xc408 + (n * 0x10))
#define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10))
+#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
+#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
+#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
+#define DWC3_GEVENT_TYPE_GSI 0x3
+
#define DWC3_GHWPARAMS8 0xc600
#define DWC3_GFLADJ 0xc630
@@ -141,6 +151,8 @@
#define DWC3_DEPCMDPAR0(n) (0xc808 + (n * 0x10))
#define DWC3_DEPCMD(n) (0xc80c + (n * 0x10))
+#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4))
+
/* OTG Registers */
#define DWC3_OCFG 0xcc00
#define DWC3_OCTL 0xcc04
@@ -150,9 +162,16 @@
/* Bit fields */
+/* Global SoC Bus Configuration Register 1 */
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT_MASK (0x0f << 8)
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT(n) ((n) << 8)
+
/* Global Configuration Register */
#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
+#define DWC3_GCTL_PWRDNSCALEMASK (0xFFF80000)
#define DWC3_GCTL_U2RSTECN (1 << 16)
+#define DWC3_GCTL_SOFITPSYNC (1 << 10)
+#define DWC3_GCTL_U2EXIT_LFPS (1 << 2)
#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6)
#define DWC3_GCTL_CLK_BUS (0)
#define DWC3_GCTL_CLK_PIPE (1)
@@ -174,8 +193,15 @@
#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+/* Global User Control Register */
+#define DWC3_GUCTL_REFCLKPER (0x3FF << 22)
+
+/* Global Debug LTSSM Register */
+#define DWC3_GDBGLTSSM_LINKSTATE_MASK (0xF << 22)
+
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
#define DWC3_GUSB2PHYCFG_ULPI_UTMI (1 << 4)
#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
@@ -191,6 +217,7 @@
/* Global USB3 PIPE Control Register */
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX (1 << 27)
#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
@@ -201,6 +228,8 @@
#define DWC3_GUSB3PIPECTL_RX_DETOPOLL (1 << 8)
#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3)
#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
+#define DWC3_GUSB3PIPECTL_DELAYP1TRANS (1 << 18)
+#define DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE (1 << 0)
/* Global TX Fifo Size Register */
#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
@@ -244,6 +273,12 @@
#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+/* Global Frame Length Adjustment Register */
+#define DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1 (1 << 31)
+#define DWC3_GFLADJ_REFCLK_240MHZ_DECR (0x7F << 24)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL (1 << 23)
+#define DWC3_GFLADJ_REFCLK_FLADJ (0x3FFF << 8)
+
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -309,6 +344,8 @@
#define DWC3_DEVTEN_ERRTICERREN (1 << 9)
#define DWC3_DEVTEN_SOFEN (1 << 7)
#define DWC3_DEVTEN_EOPFEN (1 << 6)
+/* For version 2.30a and above */
+#define DWC3_DEVTEN_SUSPEND (1 << 6)
#define DWC3_DEVTEN_HIBERNATIONREQEVTEN (1 << 5)
#define DWC3_DEVTEN_WKUPEVTEN (1 << 4)
#define DWC3_DEVTEN_ULSTCNGEN (1 << 3)
@@ -349,6 +386,7 @@
#define DWC3_DGCMD_SET_LMP 0x01
#define DWC3_DGCMD_SET_PERIODIC_PAR 0x02
#define DWC3_DGCMD_XMIT_FUNCTION 0x03
+#define DWC3_DGCMD_XMIT_DEV 0x07
/* These apply for core versions 1.94a and later */
#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO 0x04
@@ -401,10 +439,20 @@
#define DWC3_DEPCMD_TYPE_BULK 2
#define DWC3_DEPCMD_TYPE_INTR 3
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
/* Structures */
struct dwc3_trb;
+enum event_buf_type {
+ EVT_BUF_TYPE_NORMAL,
+ EVT_BUF_TYPE_GSI
+};
+
/**
* struct dwc3_event_buffer - Software event buffer representation
* @buf: _THE_ buffer
@@ -418,6 +466,7 @@ struct dwc3_trb;
struct dwc3_event_buffer {
void *buf;
unsigned length;
+ enum event_buf_type type;
unsigned int lpos;
unsigned int count;
unsigned int flags;
@@ -429,6 +478,36 @@ struct dwc3_event_buffer {
struct dwc3 *dwc;
};
+struct dwc3_gadget_events {
+ unsigned int disconnect;
+ unsigned int reset;
+ unsigned int connect;
+ unsigned int wakeup;
+ unsigned int link_status_change;
+ unsigned int eopf;
+ unsigned int suspend;
+ unsigned int sof;
+ unsigned int erratic_error;
+ unsigned int overflow;
+ unsigned int vendor_dev_test_lmp;
+ unsigned int cmdcmplt;
+ unsigned int unknown_event;
+};
+
+struct dwc3_ep_events {
+ unsigned int xfercomplete;
+ unsigned int xfernotready;
+ unsigned int control_data;
+ unsigned int control_status;
+ unsigned int xferinprogress;
+ unsigned int rxtxfifoevent;
+ unsigned int streamevent;
+ unsigned int epcmdcomplete;
+ unsigned int cmdcmplt;
+ unsigned int unknown_event;
+ unsigned int total;
+};
+
#define DWC3_EP_FLAG_STALLED (1 << 0)
#define DWC3_EP_FLAG_WEDGED (1 << 1)
@@ -443,8 +522,10 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @request_list: list of requests for this endpoint
* @req_queued: list of requests on this ep which have TRBs setup
+ * @trb_dma_pool: dma pool used to get aligned trb memory pool
* @trb_pool: array of transaction buffers
* @trb_pool_dma: dma address of @trb_pool
+ * @num_trbs: num of trbs in the trb dma pool
* @free_slot: next slot which is going to be used
* @busy_slot: first slot which is owned by HW
* @desc: usb_endpoint_descriptor pointer
@@ -454,18 +535,25 @@ struct dwc3_event_buffer {
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
+ * @current_uf: Current uf received through last event parameter
* @interval: the interval on which the ISOC transfer is started
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
* @stream_capable: true when streams are enabled
+ * @dbg_ep_events: different events counter for endpoint
+ * @dbg_ep_events_diff: differential events counter for endpoint
+ * @dbg_ep_events_ts: timestamp for previous event counters
+ * @fifo_depth: allocated TXFIFO depth
*/
struct dwc3_ep {
struct usb_ep endpoint;
struct list_head request_list;
struct list_head req_queued;
+ struct dma_pool *trb_dma_pool;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
+ u32 num_trbs;
u32 free_slot;
u32 busy_slot;
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -486,12 +574,17 @@ struct dwc3_ep {
u8 number;
u8 type;
u8 resource_index;
+ u16 current_uf;
u32 interval;
char name[20];
unsigned direction:1;
unsigned stream_capable:1;
+ struct dwc3_ep_events dbg_ep_events;
+ struct dwc3_ep_events dbg_ep_events_diff;
+ struct timespec dbg_ep_events_ts;
+ int fifo_depth;
};
enum dwc3_phy {
@@ -643,6 +736,18 @@ struct dwc3_scratchpad_array {
__le64 dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS];
};
+#define DWC3_CONTROLLER_ERROR_EVENT 0
+#define DWC3_CONTROLLER_RESET_EVENT 1
+#define DWC3_CONTROLLER_POST_RESET_EVENT 2
+#define DWC3_CORE_PM_SUSPEND_EVENT 3
+#define DWC3_CORE_PM_RESUME_EVENT 4
+#define DWC3_CONTROLLER_CONNDONE_EVENT 5
+#define DWC3_CONTROLLER_NOTIFY_OTG_EVENT 6
+#define DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT 7
+#define DWC3_CONTROLLER_RESTART_USB_SESSION 8
+#define DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER 9
+
+#define MAX_INTR_STATS 10
/**
* struct dwc3 - representation of our controller
* @ctrl_req: usb control request which is used for ep0
@@ -663,10 +768,12 @@ struct dwc3_scratchpad_array {
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
+ * @reg_phys: physical base address of dwc3 core register address space
* @nr_scratch: number of scratch buffers
* @num_event_buffers: calculated number of event buffers
* @u1u2: only used on revisions <1.83a for workaround
- * @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @maximum_speed: maximum speed to operate as requested by sw
+ * @max_hw_supp_speed: maximum speed supported by hw design
* @revision: revision register contents
* @dr_mode: requested mode of operation
* @usb2_phy: pointer to USB2 PHY
@@ -708,7 +815,6 @@ struct dwc3_scratchpad_array {
* @is_fpga: true when we are using the FPGA board
* @needs_fifo_resize: not all users might want fifo resizing, flag it
* @pullups_connected: true when Run/Stop bit is set
- * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @start_config_issued: true when StartConfig command has been issued
* @three_stage_setup: set if we perform a three phase setup
@@ -731,6 +837,24 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @is_drd: device supports dual-role or not
+ * @err_evt_seen: previous event in queue was erratic error
+ * @usb3_u1u2_disable: if true, disable U1U2 low power modes in Superspeed mode.
+ * @in_lpm: indicates if controller is in low power mode (no clocks)
+ * @tx_fifo_size: Available RAM size for TX fifo allocation
+ * @irq: irq number
+ * @bh: tasklet which handles the interrupt
+ * @irq_cnt: total irq count
+ * @last_irq_cnt: last irq count
+ * @bh_completion_time: time taken for tasklet completion
+ * @bh_handled_evt_cnt: no. of events handled by tasklet per interrupt
+ * @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
+ * @wait_linkstate: waitqueue for waiting LINK to move into required state
+ * @vbus_draw: current to be drawn from USB
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
+ * @create_reg_debugfs: create debugfs entry to allow dwc3 register dump
+ * @last_fifo_depth: total TXFIFO depth of all enabled USB IN/INT endpoints
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -769,6 +893,7 @@ struct dwc3 {
void __iomem *regs;
size_t regs_size;
+ phys_addr_t reg_phys;
enum usb_dr_mode dr_mode;
@@ -778,8 +903,13 @@ struct dwc3 {
u32 nr_scratch;
u32 num_event_buffers;
+ u32 num_normal_event_buffers;
+ u32 num_gsi_event_buffers;
+
+ u32 u1;
u32 u1u2;
u32 maximum_speed;
+ u32 max_hw_supp_speed;
/*
* All 3.1 IP version constants are greater than the 3.0 IP
@@ -809,6 +939,8 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_300A 0x5533300a
+#define DWC3_REVISION_310A 0x5533310a
/*
* NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
@@ -816,6 +948,7 @@ struct dwc3 {
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_USB31)
+#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -845,6 +978,9 @@ struct dwc3 {
const char *hsphy_interface;
+ void (*notify_event)(struct dwc3 *, unsigned, unsigned);
+ struct work_struct wakeup_work;
+
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
@@ -854,7 +990,6 @@ struct dwc3 {
unsigned is_fpga:1;
unsigned needs_fifo_resize:1;
unsigned pullups_connected:1;
- unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
@@ -873,6 +1008,50 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+
+ unsigned is_drd:1;
+ /* Indicate if the gadget was powered by the otg driver */
+ unsigned vbus_active:1;
+ /* Indicate if software connect was issued by the usb_gadget_driver */
+ unsigned softconnect:1;
+ unsigned nominal_elastic_buffer:1;
+ unsigned err_evt_seen:1;
+ unsigned usb3_u1u2_disable:1;
+ /* Indicate if need to disable controller internal clkgating */
+ unsigned disable_clk_gating:1;
+ unsigned enable_bus_suspend:1;
+
+ struct dwc3_gadget_events dbg_gadget_events;
+
+ atomic_t in_lpm;
+ int tx_fifo_size;
+ bool b_suspend;
+ unsigned vbus_draw;
+
+ u16 imod_interval;
+
+ struct workqueue_struct *dwc_wq;
+ struct work_struct bh_work;
+
+ /* IRQ timing statistics */
+ int irq;
+ unsigned long irq_cnt;
+ unsigned long last_irq_cnt;
+ unsigned long ep_cmd_timeout_cnt;
+ unsigned bh_completion_time[MAX_INTR_STATS];
+ unsigned bh_handled_evt_cnt[MAX_INTR_STATS];
+ unsigned bh_dbg_index;
+ ktime_t irq_start_time[MAX_INTR_STATS];
+ ktime_t t_pwr_evt_irq;
+ unsigned irq_completion_time[MAX_INTR_STATS];
+ unsigned irq_event_count[MAX_INTR_STATS];
+ unsigned irq_dbg_index;
+
+ unsigned long l1_remote_wakeup_cnt;
+
+ wait_queue_head_t wait_linkstate;
+ bool create_reg_debugfs;
+ int last_fifo_depth;
};
/* -------------------------------------------------------------------------- */
@@ -1022,7 +1201,21 @@ struct dwc3_gadget_ep_cmd_params {
/* prototypes */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+ return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+/* check whether we are on the DWC_usb31 core */
+static inline bool dwc3_is_usb31(struct dwc3 *dwc)
+{
+ return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
+bool dwc3_has_imod(struct dwc3 *dwc);
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
@@ -1037,17 +1230,22 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
+void dwc3_gadget_restart(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
int dwc3_gadget_get_link_state(struct dwc3 *dwc);
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_gadget_exit(struct dwc3 *dwc)
{ }
+static inline void dwc3_gadget_restart(struct dwc3 *dwc)
+{ }
static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{ return 0; }
static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
@@ -1062,6 +1260,10 @@ static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
int cmd, u32 param)
{ return 0; }
+static inline void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+{ }
+static inline void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+{ }
#endif
/* power management interface */
@@ -1080,6 +1282,7 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc)
}
#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
+
#if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
int dwc3_ulpi_init(struct dwc3 *dwc);
void dwc3_ulpi_exit(struct dwc3 *dwc);
@@ -1090,4 +1293,15 @@ static inline void dwc3_ulpi_exit(struct dwc3 *dwc)
{ }
#endif
+
+int dwc3_core_init(struct dwc3 *dwc);
+int dwc3_core_pre_init(struct dwc3 *dwc);
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc);
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend);
+
+extern void dwc3_set_notifier(
+ void (*notify)(struct dwc3 *dwc3, unsigned event, unsigned value));
+extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned event, unsigned value);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
new file mode 100644
index 000000000000..cc7fb4026fb8
--- /dev/null
+++ b/drivers/usb/dwc3/dbm.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "dbm.h"
+
+/**
+* USB DBM Hardware registers.
+*
+*/
+enum dbm_reg {
+ DBM_EP_CFG,
+ DBM_DATA_FIFO,
+ DBM_DATA_FIFO_SIZE,
+ DBM_DATA_FIFO_EN,
+ DBM_GEVNTADR,
+ DBM_GEVNTSIZ,
+ DBM_DBG_CNFG,
+ DBM_HW_TRB0_EP,
+ DBM_HW_TRB1_EP,
+ DBM_HW_TRB2_EP,
+ DBM_HW_TRB3_EP,
+ DBM_PIPE_CFG,
+ DBM_DISABLE_UPDXFER,
+ DBM_SOFT_RESET,
+ DBM_GEN_CFG,
+ DBM_GEVNTADR_LSB,
+ DBM_GEVNTADR_MSB,
+ DBM_DATA_FIFO_LSB,
+ DBM_DATA_FIFO_MSB,
+ DBM_DATA_FIFO_ADDR_EN,
+ DBM_DATA_FIFO_SIZE_EN,
+};
+
+struct dbm_reg_data {
+ u32 offset;
+ unsigned int ep_mult;
+};
+
+#define DBM_1_4_NUM_EP 4
+#define DBM_1_5_NUM_EP 8
+
+struct dbm {
+ void __iomem *base;
+ const struct dbm_reg_data *reg_table;
+
+ struct device *dev;
+ struct list_head head;
+
+ int dbm_num_eps;
+ u8 ep_num_mapping[DBM_1_5_NUM_EP];
+ bool dbm_reset_ep_after_lpm;
+
+ bool is_1p4;
+};
+
+static const struct dbm_reg_data dbm_1_4_regtable[] = {
+ [DBM_EP_CFG] = { 0x0000, 0x4 },
+ [DBM_DATA_FIFO] = { 0x0010, 0x4 },
+ [DBM_DATA_FIFO_SIZE] = { 0x0020, 0x4 },
+ [DBM_DATA_FIFO_EN] = { 0x0030, 0x0 },
+ [DBM_GEVNTADR] = { 0x0034, 0x0 },
+ [DBM_GEVNTSIZ] = { 0x0038, 0x0 },
+ [DBM_DBG_CNFG] = { 0x003C, 0x0 },
+ [DBM_HW_TRB0_EP] = { 0x0040, 0x4 },
+ [DBM_HW_TRB1_EP] = { 0x0050, 0x4 },
+ [DBM_HW_TRB2_EP] = { 0x0060, 0x4 },
+ [DBM_HW_TRB3_EP] = { 0x0070, 0x4 },
+ [DBM_PIPE_CFG] = { 0x0080, 0x0 },
+ [DBM_SOFT_RESET] = { 0x0084, 0x0 },
+ [DBM_GEN_CFG] = { 0x0088, 0x0 },
+ [DBM_GEVNTADR_LSB] = { 0x0098, 0x0 },
+ [DBM_GEVNTADR_MSB] = { 0x009C, 0x0 },
+ [DBM_DATA_FIFO_LSB] = { 0x00A0, 0x8 },
+ [DBM_DATA_FIFO_MSB] = { 0x00A4, 0x8 },
+};
+
+static const struct dbm_reg_data dbm_1_5_regtable[] = {
+ [DBM_EP_CFG] = { 0x0000, 0x4 },
+ [DBM_DATA_FIFO] = { 0x0280, 0x4 },
+ [DBM_DATA_FIFO_SIZE] = { 0x0080, 0x4 },
+ [DBM_DATA_FIFO_EN] = { 0x026C, 0x0 },
+ [DBM_GEVNTADR] = { 0x0270, 0x0 },
+ [DBM_GEVNTSIZ] = { 0x0268, 0x0 },
+ [DBM_DBG_CNFG] = { 0x0208, 0x0 },
+ [DBM_HW_TRB0_EP] = { 0x0220, 0x4 },
+ [DBM_HW_TRB1_EP] = { 0x0230, 0x4 },
+ [DBM_HW_TRB2_EP] = { 0x0240, 0x4 },
+ [DBM_HW_TRB3_EP] = { 0x0250, 0x4 },
+ [DBM_PIPE_CFG] = { 0x0274, 0x0 },
+ [DBM_DISABLE_UPDXFER] = { 0x0298, 0x0 },
+ [DBM_SOFT_RESET] = { 0x020C, 0x0 },
+ [DBM_GEN_CFG] = { 0x0210, 0x0 },
+ [DBM_GEVNTADR_LSB] = { 0x0260, 0x0 },
+ [DBM_GEVNTADR_MSB] = { 0x0264, 0x0 },
+ [DBM_DATA_FIFO_LSB] = { 0x0100, 0x8 },
+ [DBM_DATA_FIFO_MSB] = { 0x0104, 0x8 },
+ [DBM_DATA_FIFO_ADDR_EN] = { 0x0200, 0x0 },
+ [DBM_DATA_FIFO_SIZE_EN] = { 0x0204, 0x0 },
+};
+
+static LIST_HEAD(dbm_list);
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void msm_dbm_write_ep_reg_field(struct dbm *dbm,
+						enum dbm_reg reg, int ep,
+						const u32 mask, u32 val)
+{
+	/*
+	 * Field position = lowest set bit of @mask. NOTE(review): assumes
+	 * @mask is non-zero; a zero mask makes find_first_bit() return 32
+	 * and the left-shift below undefined — confirm all callers pass a
+	 * non-zero mask.
+	 */
+	u32 shift = find_first_bit((void *)&mask, 32);
+	/* per-EP registers are spaced ep_mult bytes apart (0 for global regs) */
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	u32 tmp = ioread32(dbm->base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg_field(d, r, m, v) \
+ msm_dbm_write_ep_reg_field(d, r, 0, m, v)
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ *
+ * @return u32
+ */
+static inline u32 msm_dbm_read_ep_reg(struct dbm *dbm, enum dbm_reg reg, int ep)
+{
+	/* per-EP registers are spaced ep_mult bytes apart (0 for global regs) */
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	return ioread32(dbm->base + offset);
+}
+
+#define msm_dbm_read_reg(d, r) msm_dbm_read_ep_reg(d, r, 0)
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ *
+ */
+static inline void msm_dbm_write_ep_reg(struct dbm *dbm, enum dbm_reg reg,
+					int ep, u32 val)
+{
+	/* per-EP registers are spaced ep_mult bytes apart (0 for global regs) */
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg(d, r, v) msm_dbm_write_ep_reg(d, r, 0, v)
+
+/**
+ * Return DBM EP number according to usb endpoint number.
+ *
+ */
+static int find_matching_dbm_ep(struct dbm *dbm, u8 usb_ep)
+{
+	int dbm_ep;
+
+	/* scan the mapping table for the DBM EP bound to this USB EP */
+	for (dbm_ep = 0; dbm_ep < dbm->dbm_num_eps; dbm_ep++) {
+		if (dbm->ep_num_mapping[dbm_ep] == usb_ep)
+			return dbm_ep;
+	}
+
+	pr_debug("%s: No DBM EP matches USB EP %d", __func__, usb_ep);
+	return -ENODEV; /* Not found */
+}
+
+
+/**
+ * Reset the DBM registers upon initialization.
+ *
+ */
+int dbm_soft_reset(struct dbm *dbm, bool reset)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("%s DBM reset\n", (reset ? "Enter" : "Exit"));
+
+	/* DBM_SFT_RST_MASK (bit 31): 1 = hold block in reset, 0 = release */
+	msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET, DBM_SFT_RST_MASK, reset);
+
+	return 0;
+}
+
+/**
+ * Soft reset specific DBM ep.
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * @dbm_ep - DBM ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+static int ep_soft_reset(struct dbm *dbm, u8 dbm_ep, bool enter_reset)
+{
+	pr_debug("Setting DBM ep %d reset to %d\n", dbm_ep, enter_reset);
+
+	if (dbm_ep >= dbm->dbm_num_eps) {
+		pr_err("Invalid DBM ep index %d\n", dbm_ep);
+		return -ENODEV;
+	}
+
+	/*
+	 * Both directions write the same per-EP reset bit; only the written
+	 * value differs, so a single call replaces the duplicated branches.
+	 */
+	msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET,
+				DBM_SFT_RST_EPS_MASK & 1 << dbm_ep,
+				enter_reset ? 1 : 0);
+
+	return 0;
+}
+
+
+/**
+ * Soft reset specific DBM ep (by USB EP number).
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * The function relies on ep_soft_reset() for checking
+ * the legality of the resulting DBM ep number.
+ *
+ * @usb_ep - USB ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset)
+{
+	int dbm_ep;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	/*
+	 * Validate the lookup here: find_matching_dbm_ep() may return
+	 * -ENODEV, and passing that through ep_soft_reset()'s u8 parameter
+	 * would rely on the value wrapping into the out-of-range check.
+	 */
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return dbm_ep;
+	}
+
+	pr_debug("Setting USB ep %d reset to %d\n", usb_ep, enter_reset);
+	return ep_soft_reset(dbm, dbm_ep, enter_reset);
+}
+
+/**
+ * Configure a USB DBM ep to work in BAM mode.
+ *
+ *
+ * @usb_ep - USB physical EP number.
+ * @producer - producer/consumer.
+ * @disable_wb - disable write back to system memory.
+ * @internal_mem - use internal USB memory for data fifo.
+ * @ioc - enable interrupt on completion.
+ *
+ * @return int - DBM ep number.
+ */
+int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+		bool disable_wb, bool internal_mem, bool ioc)
+{
+	int dbm_ep;
+	u32 ep_cfg;
+	u32 data;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring DBM ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	/* Due to HW issue, EP 7 can be set as IN EP only */
+	if (!dbm->is_1p4 && dbm_ep == 7 && producer) {
+		pr_err("last DBM EP can't be OUT EP\n");
+		return -ENODEV;
+	}
+
+	/* Set ioc bit for dbm_ep if needed */
+	msm_dbm_write_reg_field(dbm, DBM_DBG_CNFG,
+		DBM_ENABLE_IOC_MASK & 1 << dbm_ep, ioc ? 1 : 0);
+
+	/* producer/consumer, write-back and internal-RAM bits (bits 8..10) */
+	ep_cfg = (producer ? DBM_PRODUCER : 0) |
+		(disable_wb ? DBM_DISABLE_WB : 0) |
+		(internal_mem ? DBM_INT_RAM_ACC : 0);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+		DBM_PRODUCER | DBM_DISABLE_WB | DBM_INT_RAM_ACC, ep_cfg >> 8);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, USB3_EPNUM,
+		usb_ep);
+
+	if (dbm->is_1p4) {
+		msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+			DBM_BAM_PIPE_NUM, bam_pipe);
+		msm_dbm_write_reg_field(dbm, DBM_PIPE_CFG, 0x000000ff, 0xe4);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, DBM_EN_EP, 1);
+
+	/*
+	 * Re-enable transfer updates for this EP. DBM_DISABLE_UPDXFER only
+	 * exists on DBM 1.5: the 1.4 register table has no entry for it, so
+	 * its zero-initialized offset would alias DBM_EP_CFG at offset 0 and
+	 * corrupt EP 0's configuration. Skip it on 1.4 hardware.
+	 */
+	if (!dbm->is_1p4) {
+		data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+		data &= ~(0x1 << dbm_ep);
+		msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+	}
+
+	return dbm_ep;
+}
+
+/**
+ * Return number of configured DBM endpoints.
+ */
+int dbm_get_num_of_eps_configured(struct dbm *dbm)
+{
+	int count = 0;
+	int i;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	/* a non-zero mapping entry means the DBM EP is in use */
+	for (i = 0; i < dbm->dbm_num_eps; i++)
+		count += (dbm->ep_num_mapping[i] != 0);
+
+	return count;
+}
+
+/**
+ * Configure a USB DBM ep to work in normal mode.
+ *
+ * @usb_ep - USB ep number.
+ *
+ */
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep)
+{
+	int dbm_ep;
+	u32 data;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	/* fixed log typos: "DB ep" -> "DBM ep", "corespondng" */
+	pr_debug("Unconfiguring DBM ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+	if (dbm_ep < 0) {
+		pr_debug("usb ep index %d has no corresponding dbm ep\n",
+			 usb_ep);
+		return -ENODEV;
+	}
+
+	/* release the mapping slot for this DBM EP */
+	dbm->ep_num_mapping[dbm_ep] = 0;
+
+	/* clear DBM_EN_EP (bit 0) so the EP returns to normal mode */
+	data = msm_dbm_read_ep_reg(dbm, DBM_EP_CFG, dbm_ep);
+	data &= (~0x1);
+	msm_dbm_write_ep_reg(dbm, DBM_EP_CFG, dbm_ep, data);
+
+	/*
+	 * ep_soft_reset is not required during disconnect as pipe reset on
+	 * next connect will take care of the same.
+	 */
+	return 0;
+}
+
+/**
+ * Configure the DBM with the USB3 core event buffer.
+ * This function is called by the SNPS UDC upon initialization.
+ *
+ * @addr - address of the event buffer.
+ * @size - size of the event buffer.
+ *
+ */
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi, int size)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring event buffer\n");
+
+	if (size < 0) {
+		pr_err("Invalid size. size = %d", size);
+		return -EINVAL;
+	}
+
+	/* In case event buffer is already configured, Do nothing. */
+	if (msm_dbm_read_reg(dbm, DBM_GEVNTSIZ))
+		return 0;
+
+	/* DBM 1.5 (or 64-bit phys addrs) uses split LSB/MSB address regs */
+	if (!dbm->is_1p4 || sizeof(phys_addr_t) > sizeof(u32)) {
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_LSB, addr_lo);
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_MSB, addr_hi);
+	} else {
+		/* 1.4 with 32-bit phys: single combined address register */
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR, addr_lo);
+	}
+
+	msm_dbm_write_reg_field(dbm, DBM_GEVNTSIZ, DBM_GEVNTSIZ_MASK, size);
+
+	return 0;
+}
+
+/**
+ * Disable update xfer before queueing stop xfer command to USB3 core.
+ *
+ * @usb_ep - USB physical EP number.
+ *
+ */
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep)
+{
+	u32 data;
+	/* int, not u8: find_matching_dbm_ep() returns -ENODEV on failure,
+	 * and an unsigned dbm_ep made the error check below dead code.
+	 */
+	int dbm_ep;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	/* set the disable-update-transfer bit for this EP */
+	data = msm_dbm_read_reg(dbm, DBM_DISABLE_UPDXFER);
+	data |= (0x1 << dbm_ep);
+	msm_dbm_write_reg(dbm, DBM_DISABLE_UPDXFER, data);
+
+	return 0;
+}
+
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+	u32 size, u8 dst_pipe_idx)
+{
+	u8 dbm_ep = dst_pipe_idx;
+	u32 lo = lower_32_bits(addr);
+	u32 hi = upper_32_bits(addr);
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	/* guard ep_num_mapping[] (and the per-EP FIFO regs) against OOB index */
+	if (dbm_ep >= dbm->dbm_num_eps) {
+		pr_err("%s: invalid DBM ep index %d\n", __func__, dbm_ep);
+		return -ENODEV;
+	}
+
+	dbm->ep_num_mapping[dbm_ep] = dep_num;
+
+	/* DBM 1.5 (or >32-bit phys addrs) uses split LSB/MSB FIFO registers */
+	if (!dbm->is_1p4 || sizeof(addr) > sizeof(u32)) {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_LSB, dbm_ep, lo);
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_MSB, dbm_ep, hi);
+	} else {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO, dbm_ep, addr);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_DATA_FIFO_SIZE, dbm_ep,
+		DBM_DATA_FIFO_SIZE_MASK, size);
+
+	return 0;
+}
+
+/*
+ * Program DBM_GEN_CFG. NOTE(review): @speed presumably selects SS vs HS
+ * pipe configuration — confirm against the DBM hardware documentation.
+ */
+void dbm_set_speed(struct dbm *dbm, bool speed)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	msm_dbm_write_reg(dbm, DBM_GEN_CFG, speed);
+}
+
+/* Enable the data-FIFO address/size registers; DBM 1.5 only (no-op on 1.4). */
+void dbm_enable(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	if (dbm->is_1p4) /* no-op */
+		return;
+
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_ADDR_EN, 0x000000FF);
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_SIZE_EN, 0x000000FF);
+}
+
+/* True if DT requested an EP soft reset after resuming from LPM. */
+bool dbm_reset_ep_after_lpm(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return dbm->dbm_reset_ep_after_lpm;
+}
+
+/* L1 LPM interrupt support: reported only for DBM 1.5 hardware. */
+bool dbm_l1_lpm_interrupt(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return !dbm->is_1p4;
+}
+
+static const struct of_device_id msm_dbm_id_table[] = {
+ { .compatible = "qcom,usb-dbm-1p4", .data = &dbm_1_4_regtable },
+ { .compatible = "qcom,usb-dbm-1p5", .data = &dbm_1_5_regtable },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm_dbm_id_table);
+
+/* Probe a DBM instance: map its registers and register it on dbm_list. */
+static int msm_dbm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
+	struct dbm *dbm;
+	struct resource *res;
+
+	dbm = devm_kzalloc(&pdev->dev, sizeof(*dbm), GFP_KERNEL);
+	if (!dbm)
+		return -ENOMEM;
+
+	/* pick the register layout table for this DBM revision from DT */
+	match = of_match_node(msm_dbm_id_table, node);
+	if (!match) {
+		dev_err(&pdev->dev, "Unsupported DBM module\n");
+		return -ENODEV;
+	}
+	dbm->reg_table = match->data;
+
+	/* DBM 1.4 exposes 4 endpoints; 1.5 exposes 8 */
+	if (!strcmp(match->compatible, "qcom,usb-dbm-1p4")) {
+		dbm->dbm_num_eps = DBM_1_4_NUM_EP;
+		dbm->is_1p4 = true;
+	} else {
+		dbm->dbm_num_eps = DBM_1_5_NUM_EP;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory base resource\n");
+		return -ENODEV;
+	}
+
+	dbm->base = devm_ioremap_nocache(&pdev->dev, res->start,
+		resource_size(res));
+	if (!dbm->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	dbm->dbm_reset_ep_after_lpm = of_property_read_bool(node,
+		"qcom,reset-ep-after-lpm-resume");
+
+	dbm->dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, dbm);
+
+	/*
+	 * NOTE(review): entries are never removed from dbm_list (the driver
+	 * has no .remove callback), so an unbind would leave a dangling
+	 * node — confirm unbind is not supported for this driver.
+	 */
+	list_add_tail(&dbm->head, &dbm_list);
+
+	return 0;
+}
+
+static struct platform_driver msm_dbm_driver = {
+ .probe = msm_dbm_probe,
+ .driver = {
+ .name = "msm-usb-dbm",
+ .of_match_table = of_match_ptr(msm_dbm_id_table),
+ },
+};
+
+module_platform_driver(msm_dbm_driver);
+
+static struct dbm *of_usb_find_dbm(struct device_node *node)
+{
+	struct dbm *dbm;
+
+	/* walk the list of probed DBM instances for a matching OF node */
+	list_for_each_entry(dbm, &dbm_list, head)
+		if (dbm->dev->of_node == node)
+			return dbm;
+
+	return ERR_PTR(-ENODEV);
+}
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle)
+{
+	struct device_node *node;
+	struct dbm *dbm;
+
+	if (!dev->of_node) {
+		dev_dbg(dev, "device does not have a device node entry\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	node = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!node) {
+		dev_dbg(dev, "failed to get %s phandle in %s node\n", phandle,
+			dev->of_node->full_name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	/* of_parse_phandle() takes a reference on @node; drop it once done */
+	dbm = of_usb_find_dbm(node);
+	of_node_put(node);
+
+	return dbm;
+}
+
+MODULE_DESCRIPTION("MSM USB DBM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/dwc3/dbm.h b/drivers/usb/dwc3/dbm.h
new file mode 100644
index 000000000000..bf20d7cbd454
--- /dev/null
+++ b/drivers/usb/dwc3/dbm.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DBM_H
+#define __DBM_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * USB DBM Hardware registers bitmask.
+ *
+ */
+/* DBM_EP_CFG */
+#define DBM_EN_EP 0x00000001
+#define USB3_EPNUM 0x0000003E
+#define DBM_BAM_PIPE_NUM 0x000000C0
+#define DBM_PRODUCER 0x00000100
+#define DBM_DISABLE_WB 0x00000200
+#define DBM_INT_RAM_ACC 0x00000400
+
+/* DBM_DATA_FIFO_SIZE */
+#define DBM_DATA_FIFO_SIZE_MASK 0x0000ffff
+
+/* DBM_GEVNTSIZ */
+#define DBM_GEVNTSIZ_MASK 0x0000ffff
+
+/* DBM_DBG_CNFG */
+#define DBM_ENABLE_IOC_MASK 0x0000000f
+
+/* DBM_SOFT_RESET */
+#define DBM_SFT_RST_EP0 0x00000001
+#define DBM_SFT_RST_EP1 0x00000002
+#define DBM_SFT_RST_EP2 0x00000004
+#define DBM_SFT_RST_EP3 0x00000008
+#define DBM_SFT_RST_EPS_MASK 0x0000000F
+#define DBM_SFT_RST_MASK 0x80000000
+#define DBM_EN_MASK 0x00000002
+
+/* DBM TRB configurations */
+#define DBM_TRB_BIT		0x80000000
+#define DBM_TRB_DATA_SRC	0x40000000
+#define DBM_TRB_DMA		0x20000000
+/* parenthesize the argument so expressions like (a | b) expand safely */
+#define DBM_TRB_EP_NUM(ep)	((ep) << 24)
+
+struct dbm;
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle);
+
+int dbm_soft_reset(struct dbm *dbm, bool enter_reset);
+int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+ bool disable_wb, bool internal_mem, bool ioc);
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep);
+int dbm_get_num_of_eps_configured(struct dbm *dbm);
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
+ int size);
+int dwc3_dbm_disable_update_xfer(struct dbm *dbm, u8 usb_ep);
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+ u32 size, u8 dst_pipe_idx);
+void dbm_set_speed(struct dbm *dbm, bool speed);
+void dbm_enable(struct dbm *dbm);
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset);
+bool dbm_reset_ep_after_lpm(struct dbm *dbm);
+bool dbm_l1_lpm_interrupt(struct dbm *dbm);
+
+#endif /* __DBM_H */
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 07fbc2d94fd4..2cafa949bb12 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -217,9 +217,29 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
+extern void dbg_event(u8, const char*, int);
+extern void dbg_print(u8, const char*, int, const char*);
+extern void dbg_done(u8, const u32, int);
+extern void dbg_queue(u8, const struct usb_request*, int);
+extern void dbg_setup(u8, const struct usb_ctrlrequest*);
extern int dwc3_debugfs_init(struct dwc3 *);
extern void dwc3_debugfs_exit(struct dwc3 *);
+extern void dbg_print_reg(const char *name, int reg);
#else
+static inline void dbg_event(u8 ep_num, const char *name, int status)
+{ }
+static inline void dbg_print(u8 ep_num, const char *name, int status,
+ const char *extra)
+{ }
+static inline void dbg_done(u8 ep_num, const u32 count, int status)
+{ }
+static inline void dbg_queue(u8 ep_num, const struct usb_request *req,
+ int status)
+{ }
+static inline void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req)
+{ }
+static inline void dbg_print_reg(const char *name, int reg)
+{ }
static inline int dwc3_debugfs_init(struct dwc3 *d)
{ return 0; }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..2c00b3596055 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
@@ -39,6 +40,9 @@
.offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
}
+#define ep_event_rate(ev, c, p, dt) \
+ ((dt) ? ((c.ev - p.ev) * (MSEC_PER_SEC)) / (dt) : 0)
+
static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
@@ -210,6 +214,7 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GEVNTCOUNT(0)),
dump_register(GHWPARAMS8),
+ dump_register(GFLADJ),
dump_register(DCFG),
dump_register(DCTL),
dump_register(DEVTEN),
@@ -363,6 +368,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 reg;
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return 0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -396,7 +406,12 @@ static ssize_t dwc3_mode_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 mode = 0;
- char buf[32];
+ char buf[32] = {};
+
+ if (atomic_read(&dwc->in_lpm)) {
+ dev_err(dwc->dev, "USB device is powered off\n");
+ return count;
+ }
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -432,6 +447,12 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 reg;
+
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return 0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= DWC3_DCTL_TSTCTRL_MASK;
@@ -476,7 +497,12 @@ static ssize_t dwc3_testmode_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 testmode = 0;
- char buf[32];
+ char buf[32] = {};
+
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return count;
+ }
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -516,6 +542,11 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
enum dwc3_link_state state;
u32 reg;
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return 0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
state = DWC3_DSTS_USBLNKST(reg);
@@ -583,7 +614,12 @@ static ssize_t dwc3_link_state_write(struct file *file,
struct dwc3 *dwc = s->private;
unsigned long flags;
enum dwc3_link_state state = 0;
- char buf[32];
+ char buf[32] = {};
+
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return count;
+ }
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -618,6 +654,600 @@ static const struct file_operations dwc3_link_state_fops = {
.release = single_release,
};
+static int ep_num;
+/* Select which endpoint the ep_req_list/req_queued/trbs files display.
+ * Input format: "<ep number> <direction>", direction 0 = OUT, 1 = IN.
+ */
+static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct dwc3 *dwc = s->private;
+	char kbuf[10] = {};
+	unsigned int num, dir, temp;
+	unsigned long flags;
+
+	if (copy_from_user(kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+		return -EFAULT;
+
+	if (sscanf(kbuf, "%u %u", &num, &dir) != 2)
+		return -EINVAL;
+
+	if (dir != 0 && dir != 1)
+		return -EINVAL;
+
+	/* physical EP index is (num << 1) | dir; bound it by HW capability */
+	temp = (num << 1) + dir;
+	if (temp >= (dwc->num_in_eps + dwc->num_out_eps) ||
+			temp >= DWC3_ENDPOINTS_NUM)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	ep_num = temp;
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return count;
+}
+
+/* Dump the pending (not yet started) requests of the selected endpoint. */
+static int dwc3_ep_req_list_show(struct seq_file *s, void *unused)
+{
+	struct dwc3 *dwc = s->private;
+	struct dwc3_ep *dep;
+	struct dwc3_request *req = NULL;
+	struct list_head *ptr = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	/* ep_num was chosen via a write to this file (dwc3_store_ep_num) */
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s request list: flags: 0x%x\n", dep->name, dep->flags);
+	list_for_each(ptr, &dep->request_list) {
+		req = list_entry(ptr, struct dwc3_request, list);
+
+		seq_printf(s,
+			"req:0x%pK len: %d sts: %d dma:0x%pa num_sgs: %d\n",
+			req, req->request.length, req->request.status,
+			&req->request.dma, req->request.num_sgs);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_req_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_req_list_show, inode->i_private);
+}
+
+/* read: show pending requests; write: select the EP (dwc3_store_ep_num) */
+static const struct file_operations dwc3_ep_req_list_fops = {
+	.open = dwc3_ep_req_list_open,
+	.write = dwc3_store_ep_num,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Dump the requests already queued to hardware for the selected endpoint. */
+static int dwc3_ep_queued_req_show(struct seq_file *s, void *unused)
+{
+	struct dwc3 *dwc = s->private;
+	struct dwc3_ep *dep;
+	struct dwc3_request *req = NULL;
+	struct list_head *ptr = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	/* ep_num was chosen via a write to this file (dwc3_store_ep_num) */
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s queued reqs to HW: flags:0x%x\n", dep->name,
+		dep->flags);
+	list_for_each(ptr, &dep->req_queued) {
+		req = list_entry(ptr, struct dwc3_request, list);
+
+		seq_printf(s,
+			"req:0x%pK len:%d sts:%d dma:%pa nsg:%d trb:0x%pK\n",
+			req, req->request.length, req->request.status,
+			&req->request.dma, req->request.num_sgs, req->trb);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_queued_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_queued_req_show, inode->i_private);
+}
+
+/* NOTE(review): not declared static — confirm no other TU needs it */
+const struct file_operations dwc3_ep_req_queued_fops = {
+	.open = dwc3_ep_queued_req_open,
+	.write = dwc3_store_ep_num,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Dump the whole TRB pool of the selected endpoint. */
+static int dwc3_ep_trbs_show(struct seq_file *s, void *unused)
+{
+	struct dwc3 *dwc = s->private;
+	struct dwc3_ep *dep;
+	struct dwc3_trb *trb;
+	unsigned long flags;
+	int j;
+
+	/* ep0 (control) has no TRB pool worth dumping here; bail out */
+	if (!ep_num)
+		return 0;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dep = dwc->eps[ep_num];
+
+	seq_printf(s, "%s trb pool: flags:0x%x freeslot:%d busyslot:%d\n",
+		dep->name, dep->flags, dep->free_slot, dep->busy_slot);
+	for (j = 0; j < DWC3_TRB_NUM; j++) {
+		trb = &dep->trb_pool[j];
+		seq_printf(s, "trb:0x%pK bph:0x%x bpl:0x%x size:0x%x ctrl: %x\n",
+			trb, trb->bph, trb->bpl, trb->size, trb->ctrl);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_ep_trbs_list_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_ep_trbs_show, inode->i_private);
+}
+
+/* NOTE(review): not declared static — confirm no other TU needs it */
+const struct file_operations dwc3_ep_trb_list_fops = {
+	.open = dwc3_ep_trbs_list_open,
+	.write = dwc3_store_ep_num,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static unsigned int ep_addr_rxdbg_mask = 1;
+module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
+static unsigned int ep_addr_txdbg_mask = 1;
+module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);
+
+/* Maximum debug message length */
+#define DBG_DATA_MSG 64UL
+
+/* Maximum number of messages */
+#define DBG_DATA_MAX 2048UL
+
+static struct {
+ char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */
+ unsigned idx; /* index */
+ unsigned tty; /* print to console? */
+ rwlock_t lck; /* lock */
+} dbg_dwc3_data = {
+ .idx = 0,
+ .tty = 0,
+ .lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ */
+static inline void __maybe_unused dbg_dec(unsigned *idx)
+{
+	/*
+	 * DBG_DATA_MAX is a power of two, so the unsigned wraparound at
+	 * *idx == 0 still lands on DBG_DATA_MAX - 1 after the modulo.
+	 */
+	*idx = (*idx - 1) % DBG_DATA_MAX;
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index (wraps at DBG_DATA_MAX)
+ */
+static inline void dbg_inc(unsigned *idx)
+{
+	*idx = (*idx + 1) % DBG_DATA_MAX;
+}
+
+#define TIME_BUF_LEN 20
+/* get_timestamp - formats the scheduler clock as "[seconds.microseconds] " */
+static char *get_timestamp(char *tbuf)
+{
+	unsigned long long t;
+	unsigned long nanosec_rem;
+
+	t = cpu_clock(smp_processor_id());
+	/* do_div leaves whole seconds in t; remainder ns -> microseconds */
+	nanosec_rem = do_div(t, 1000000000)/1000;
+	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
+		nanosec_rem);
+	return tbuf;
+}
+
+/* Decide whether events for this encoded EP pass the debug mask filters. */
+static int allow_dbg_print(u8 ep_num)
+{
+	unsigned int bit;
+
+	/* 0xff is the wildcard used for bus-wide (non-endpoint) events */
+	if (ep_num == 0xff)
+		return 1;
+
+	/* low bit is direction (1 = TX/IN); upper bits the endpoint number */
+	bit = 1 << (ep_num >> 1);
+
+	if (ep_num & 0x1)
+		return !!(bit & ep_addr_txdbg_mask);
+
+	return !!(bit & ep_addr_rxdbg_mask);
+}
+
+/**
+ * dbg_print: prints the common part of the event
+ * @addr: endpoint address
+ * @name: event name
+ * @status: status
+ * @extra: extra information
+ */
+void dbg_print(u8 ep_num, const char *name, int status, const char *extra)
+{
+	unsigned long flags;
+	char tbuf[TIME_BUF_LEN];
+
+	/* drop events filtered out by the ep_addr_*dbg_mask module params */
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	write_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+	/* append to the circular buffer, overwriting the oldest entry */
+	scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG,
+		"%s\t? %02X %-12.12s %4i ?\t%s\n",
+		get_timestamp(tbuf), ep_num, name, status, extra);
+
+	dbg_inc(&dbg_dwc3_data.idx);
+
+	write_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+	/* optionally mirror to the console (toggled via the events file) */
+	if (dbg_dwc3_data.tty != 0)
+		pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n",
+			get_timestamp(tbuf), ep_num, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @ep_num: encoded endpoint number ((num << 1) | dir)
+ * @count: completed transfer byte count
+ * @status: completion status
+ */
+void dbg_done(u8 ep_num, const u32 count, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	scnprintf(msg, sizeof(msg), "%d", count);
+	dbg_print(ep_num, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @ep_num: encoded endpoint number (0xff for bus-wide events)
+ * @name: event name
+ * @status: status
+ */
+void dbg_event(u8 ep_num, const char *name, int status)
+{
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (name != NULL)
+		dbg_print(ep_num, name, status, "");
+}
+
+/*
+ * dbg_queue: prints a QUEUE event
+ * @ep_num: encoded endpoint number
+ * @req: USB request
+ * @status: status
+ */
+void dbg_queue(u8 ep_num, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			"%d %d", !req->no_interrupt, req->length);
+		dbg_print(ep_num, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @ep_num: encoded endpoint number
+ * @req: setup request (wValue/wIndex/wLength logged CPU-endian)
+ */
+void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (!allow_dbg_print(ep_num))
+		return;
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			"%02X %02X %04X %04X %d", req->bRequestType,
+			req->bRequest, le16_to_cpu(req->wValue),
+			le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(ep_num, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * dbg_print_reg: prints a reg value
+ * @name: reg name
+ * @reg: reg value to be printed
+ *
+ * Unlike dbg_print(), this is not filtered by the ep debug masks.
+ */
+void dbg_print_reg(const char *name, int reg)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+	scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG,
+		"%s = 0x%08x\n", name, reg);
+
+	dbg_inc(&dbg_dwc3_data.idx);
+
+	write_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+	if (dbg_dwc3_data.tty != 0)
+		pr_notice("%s = 0x%08x\n", name, reg);
+}
+
+/**
+ * store_events: configure if events are going to be also printed to console
+ *
+ */
+static ssize_t dwc3_store_events(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ int ret;
+ u8 tty;
+
+ if (buf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = kstrtou8_from_user(buf, count, 0, &tty);
+ if (ret < 0) {
+ pr_err("can't get enter value.\n");
+ return ret;
+ }
+
+ if (tty > 1) {
+ pr_err("<1|0>: enable|disable console log\n");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ dbg_dwc3_data.tty = tty;
+ pr_info("tty = %u", dbg_dwc3_data.tty);
+
+ return count;
+}
+
+static int dwc3_gadget_data_events_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ unsigned i;
+
+ read_lock_irqsave(&dbg_dwc3_data.lck, flags);
+
+ i = dbg_dwc3_data.idx;
+ if (strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG))
+ seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]);
+ for (dbg_inc(&i); i != dbg_dwc3_data.idx; dbg_inc(&i)) {
+ if (!strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG))
+ continue;
+ seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]);
+ }
+
+ read_unlock_irqrestore(&dbg_dwc3_data.lck, flags);
+
+ return 0;
+}
+
+static int dwc3_gadget_data_events_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, dwc3_gadget_data_events_show, inode->i_private);
+}
+
+const struct file_operations dwc3_gadget_dbg_data_fops = {
+ .open = dwc3_gadget_data_events_open,
+ .read = seq_read,
+ .write = dwc3_store_events,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t dwc3_store_int_events(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int i, ret;
+ unsigned long flags;
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ struct dwc3_ep *dep;
+ struct timespec ts;
+ u8 clear_stats;
+
+ if (ubuf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = kstrtou8_from_user(ubuf, count, 0, &clear_stats);
+ if (ret < 0) {
+ pr_err("can't get enter value.\n");
+ return ret;
+ }
+
+ if (clear_stats != 0) {
+ pr_err("Wrong value. To clear stats, enter value as 0.\n");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ pr_debug("%s(): clearing debug interrupt buffers\n", __func__);
+ ts = current_kernel_time();
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ dep = dwc->eps[i];
+ memset(&dep->dbg_ep_events, 0, sizeof(dep->dbg_ep_events));
+ memset(&dep->dbg_ep_events_diff, 0, sizeof(dep->dbg_ep_events));
+ dep->dbg_ep_events_ts = ts;
+ }
+ memset(&dwc->dbg_gadget_events, 0, sizeof(dwc->dbg_gadget_events));
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return count;
+}
+
+static int dwc3_gadget_int_events_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct dwc3 *dwc = s->private;
+ struct dwc3_gadget_events *dbg_gadget_events;
+ struct dwc3_ep *dep;
+ int i;
+ struct timespec ts_delta;
+ struct timespec ts_current;
+ u32 ts_delta_ms;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dbg_gadget_events = &dwc->dbg_gadget_events;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ dep = dwc->eps[i];
+
+ if (dep == NULL || !(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ts_current = current_kernel_time();
+ ts_delta = timespec_sub(ts_current, dep->dbg_ep_events_ts);
+ ts_delta_ms = ts_delta.tv_nsec / NSEC_PER_MSEC +
+ ts_delta.tv_sec * MSEC_PER_SEC;
+
+ seq_printf(s, "\n\n===== dbg_ep_events for EP(%d) %s =====\n",
+ i, dep->name);
+ seq_printf(s, "xfercomplete:%u @ %luHz\n",
+ dep->dbg_ep_events.xfercomplete,
+ ep_event_rate(xfercomplete, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "xfernotready:%u @ %luHz\n",
+ dep->dbg_ep_events.xfernotready,
+ ep_event_rate(xfernotready, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "control_data:%u @ %luHz\n",
+ dep->dbg_ep_events.control_data,
+ ep_event_rate(control_data, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "control_status:%u @ %luHz\n",
+ dep->dbg_ep_events.control_status,
+ ep_event_rate(control_status, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "xferinprogress:%u @ %luHz\n",
+ dep->dbg_ep_events.xferinprogress,
+ ep_event_rate(xferinprogress, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "rxtxfifoevent:%u @ %luHz\n",
+ dep->dbg_ep_events.rxtxfifoevent,
+ ep_event_rate(rxtxfifoevent, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "streamevent:%u @ %luHz\n",
+ dep->dbg_ep_events.streamevent,
+ ep_event_rate(streamevent, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "epcmdcomplt:%u @ %luHz\n",
+ dep->dbg_ep_events.epcmdcomplete,
+ ep_event_rate(epcmdcomplete, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "cmdcmplt:%u @ %luHz\n",
+ dep->dbg_ep_events.cmdcmplt,
+ ep_event_rate(cmdcmplt, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "unknown:%u @ %luHz\n",
+ dep->dbg_ep_events.unknown_event,
+ ep_event_rate(unknown_event, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+ seq_printf(s, "total:%u @ %luHz\n",
+ dep->dbg_ep_events.total,
+ ep_event_rate(total, dep->dbg_ep_events,
+ dep->dbg_ep_events_diff, ts_delta_ms));
+
+ dep->dbg_ep_events_ts = ts_current;
+ dep->dbg_ep_events_diff = dep->dbg_ep_events;
+ }
+
+ seq_puts(s, "\n=== dbg_gadget events ==\n");
+ seq_printf(s, "disconnect:%u\n reset:%u\n",
+ dbg_gadget_events->disconnect, dbg_gadget_events->reset);
+ seq_printf(s, "connect:%u\n wakeup:%u\n",
+ dbg_gadget_events->connect, dbg_gadget_events->wakeup);
+ seq_printf(s, "link_status_change:%u\n eopf:%u\n",
+ dbg_gadget_events->link_status_change, dbg_gadget_events->eopf);
+ seq_printf(s, "sof:%u\n suspend:%u\n",
+ dbg_gadget_events->sof, dbg_gadget_events->suspend);
+ seq_printf(s, "erratic_error:%u\n overflow:%u\n",
+ dbg_gadget_events->erratic_error,
+ dbg_gadget_events->overflow);
+ seq_printf(s, "vendor_dev_test_lmp:%u\n cmdcmplt:%u\n",
+ dbg_gadget_events->vendor_dev_test_lmp,
+ dbg_gadget_events->cmdcmplt);
+ seq_printf(s, "unknown_event:%u\n", dbg_gadget_events->unknown_event);
+
+ seq_printf(s, "\n\t== Last %d interrupts stats ==\t\n", MAX_INTR_STATS);
+ seq_puts(s, "@ time (us):\t");
+ for (i = 0; i < MAX_INTR_STATS; i++)
+ seq_printf(s, "%lld\t", ktime_to_us(dwc->irq_start_time[i]));
+ seq_puts(s, "\nhard irq time (us):\t");
+ for (i = 0; i < MAX_INTR_STATS; i++)
+ seq_printf(s, "%d\t", dwc->irq_completion_time[i]);
+ seq_puts(s, "\nevents count:\t\t");
+ for (i = 0; i < MAX_INTR_STATS; i++)
+ seq_printf(s, "%d\t", dwc->irq_event_count[i]);
+ seq_puts(s, "\nbh handled count:\t");
+ for (i = 0; i < MAX_INTR_STATS; i++)
+ seq_printf(s, "%d\t", dwc->bh_handled_evt_cnt[i]);
+ seq_puts(s, "\nirq thread time (us):\t");
+ for (i = 0; i < MAX_INTR_STATS; i++)
+ seq_printf(s, "%d\t", dwc->bh_completion_time[i]);
+ seq_putc(s, '\n');
+
+ seq_printf(s, "t_pwr evt irq : %lld\n",
+ ktime_to_us(dwc->t_pwr_evt_irq));
+
+ seq_printf(s, "l1_remote_wakeup_cnt : %lu\n",
+ dwc->l1_remote_wakeup_cnt);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+}
+
+static int dwc3_gadget_events_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, dwc3_gadget_int_events_show, inode->i_private);
+}
+
+const struct file_operations dwc3_gadget_dbg_events_fops = {
+ .open = dwc3_gadget_events_open,
+ .read = seq_read,
+ .write = dwc3_store_int_events,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -642,10 +1272,14 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs;
- file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
- if (!file) {
- ret = -ENOMEM;
- goto err1;
+ if (dwc->create_reg_debugfs) {
+ file = debugfs_create_regset32("regdump", 0444,
+ root, dwc->regset);
+ if (!file) {
+ dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
}
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -674,6 +1308,41 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
}
}
+ file = debugfs_create_file("trbs", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_ep_trb_list_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("requests", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_ep_req_list_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("queued_reqs", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_ep_req_queued_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("events", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_gadget_dbg_data_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("int_events", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_gadget_dbg_events_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
return 0;
err1:
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
new file mode 100644
index 000000000000..b6b25c75b80c
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -0,0 +1,4356 @@
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_wakeup.h>
+#include <linux/power_supply.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/msm-bus.h>
+#include <linux/irq.h>
+#include <linux/extcon.h>
+#include <linux/reset.h>
+#include <soc/qcom/boot_stats.h>
+
+#include "power.h"
+#include "core.h"
+#include "gadget.h"
+#include "dbm.h"
+#include "debug.h"
+#include "xhci.h"
+
+#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
+
+/* timeout to wait for USB cable status notification (in ms) */
+#define SM_INIT_TIMEOUT 30000
+#define DWC3_WAKEUP_SRC_TIMEOUT 5000
+/* AHB2PHY register offsets */
+#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
+
+/* AHB2PHY read/write wait value */
+#define ONE_READ_WRITE_WAIT 0x11
+
+/* DP_DM linestate float */
+#define DP_DM_STATE_FLOAT 0x02
+
+/* cpu to fix usb interrupt */
+static int cpu_to_affin;
+module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
+
+/* XHCI registers */
+#define USB3_HCSPARAMS1 (0x4)
+#define USB3_HCCPARAMS2 (0x1c)
+#define HCC_CTC(p) ((p) & (1 << 3))
+#define USB3_PORTSC (0x420)
+
+/**
+ * USB QSCRATCH Hardware registers
+ *
+ */
+#define QSCRATCH_REG_OFFSET (0x000F8800)
+#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
+#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
+#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
+#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
+
+#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
+#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
+#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
+#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
+#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
+
+/* QSCRATCH_GENERAL_CFG register bit offset */
+#define PIPE_UTMI_CLK_SEL BIT(0)
+#define PIPE3_PHYSTATUS_SW BIT(3)
+#define PIPE_UTMI_CLK_DIS BIT(8)
+
+#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
+#define UTMI_OTG_VBUS_VALID BIT(20)
+#define SW_SESSVLD_SEL BIT(28)
+
+#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
+#define LANE0_PWR_PRESENT BIT(24)
+
+/* GSI related registers */
+#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
+#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
+
+#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
+#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
+#define GSI_CLK_EN_MASK BIT(12)
+#define BLOCK_GSI_WR_GO_MASK BIT(1)
+#define GSI_EN_MASK BIT(0)
+
+#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
+#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
+#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
+#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
+
+#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
+#define GSI_WR_CTRL_STATE_MASK BIT(15)
+
+struct dwc3_msm_req_complete {
+ struct list_head list_item;
+ struct usb_request *req;
+ void (*orig_complete)(struct usb_ep *ep,
+ struct usb_request *req);
+};
+
+enum dwc3_drd_state {
+ DRD_STATE_UNDEFINED = 0,
+
+ DRD_STATE_IDLE,
+ DRD_STATE_PERIPHERAL,
+ DRD_STATE_PERIPHERAL_SUSPEND,
+
+ DRD_STATE_HOST_IDLE,
+ DRD_STATE_HOST,
+};
+
+static const char *const state_names[] = {
+ [DRD_STATE_UNDEFINED] = "undefined",
+ [DRD_STATE_IDLE] = "idle",
+ [DRD_STATE_PERIPHERAL] = "peripheral",
+ [DRD_STATE_PERIPHERAL_SUSPEND] = "peripheral_suspend",
+ [DRD_STATE_HOST_IDLE] = "host_idle",
+ [DRD_STATE_HOST] = "host",
+};
+
+static const char *dwc3_drd_state_string(enum dwc3_drd_state state)
+{
+ if (state < 0 || state >= ARRAY_SIZE(state_names))
+ return "UNKNOWN";
+
+ return state_names[state];
+}
+
+enum dwc3_id_state {
+ DWC3_ID_GROUND = 0,
+ DWC3_ID_FLOAT,
+};
+
+/* for type c cable */
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
+/* Input bits to state machine (mdwc->inputs) */
+
+#define ID 0
+#define B_SESS_VLD 1
+#define B_SUSPEND 2
+#define WAIT_FOR_LPM 3
+
+#define PM_QOS_SAMPLE_SEC 2
+#define PM_QOS_THRESHOLD 400
+
+struct dwc3_msm {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *ahb2phy_base;
+ struct platform_device *dwc3;
+ const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
+ struct list_head req_complete_list;
+ struct clk *xo_clk;
+ struct clk *core_clk;
+ long core_clk_rate;
+ long core_clk_rate_hs;
+ struct clk *iface_clk;
+ struct clk *sleep_clk;
+ struct clk *utmi_clk;
+ unsigned int utmi_clk_rate;
+ struct clk *utmi_clk_src;
+ struct clk *bus_aggr_clk;
+ struct clk *noc_aggr_clk;
+ struct clk *cfg_ahb_clk;
+ struct reset_control *core_reset;
+ struct regulator *dwc3_gdsc;
+
+ struct usb_phy *hs_phy, *ss_phy;
+
+ struct dbm *dbm;
+
+ /* VBUS regulator for host mode */
+ struct regulator *vbus_reg;
+ int vbus_retry_count;
+ bool resume_pending;
+ atomic_t pm_suspended;
+ int hs_phy_irq;
+ int ss_phy_irq;
+ struct work_struct resume_work;
+ struct work_struct restart_usb_work;
+ bool in_restart;
+ struct workqueue_struct *dwc3_wq;
+ struct workqueue_struct *sm_usb_wq;
+ struct delayed_work sm_work;
+ unsigned long inputs;
+ unsigned max_power;
+ bool charging_disabled;
+ enum dwc3_drd_state drd_state;
+ enum usb_chg_state chg_state;
+ struct work_struct bus_vote_w;
+ unsigned int bus_vote;
+ u32 bus_perf_client;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ struct power_supply *usb_psy;
+ struct work_struct vbus_draw_work;
+ bool in_host_mode;
+ bool in_device_mode;
+ enum usb_device_speed max_rh_port_speed;
+ unsigned int tx_fifo_size;
+ bool vbus_active;
+ bool suspend;
+ bool disable_host_mode_pm;
+ enum dwc3_id_state id_state;
+ unsigned long lpm_flags;
+#define MDWC3_SS_PHY_SUSPEND BIT(0)
+#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
+#define MDWC3_POWER_COLLAPSE BIT(2)
+
+ unsigned int irq_to_affin;
+ struct notifier_block dwc3_cpu_notifier;
+ struct notifier_block usbdev_nb;
+ bool hc_died;
+ bool xhci_ss_compliance_enable;
+ bool no_wakeup_src_in_hostmode;
+ bool check_for_float;
+ bool float_detected;
+
+ struct extcon_dev *extcon_vbus;
+ struct extcon_dev *extcon_id;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+
+ struct notifier_block host_nb;
+ bool host_only_mode;
+
+ int pwr_event_irq;
+ atomic_t in_p3;
+ unsigned int lpm_to_suspend_delay;
+ bool init;
+ enum plug_orientation typec_orientation;
+ int pm_qos_latency;
+ struct pm_qos_request pm_qos_req_dma;
+ struct delayed_work perf_vote_work;
+ struct delayed_work sdp_check;
+ bool usb_compliance_mode;
+ struct mutex suspend_resume_mutex;
+
+ enum usb_device_speed override_usb_speed;
+
+ bool core_init_failed;
+};
+
+#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
+#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
+#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
+
+#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
+
+#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
+
+#define DSTS_CONNECTSPD_SS 0x4
+
+
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA);
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
+{
+ u32 val = ioread32(base + offset);
+ return val;
+}
+
+/**
+ * Read register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg_field(void *base,
+ u32 offset,
+ const u32 mask)
+{
+ u32 shift = find_first_bit((void *)&mask, 32);
+ u32 val = ioread32(base + offset);
+ val &= mask; /* clear other bits */
+ val >>= shift;
+ return val;
+}
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 shift = find_first_bit((void *)&mask, 32);
+ u32 tmp = ioread32(base + offset);
+
+ tmp &= ~mask; /* clear written bits */
+ val = tmp | (val << shift);
+ iowrite32(val, base + offset);
+}
+
+/**
+ * Write register and read back masked value to confirm it is written
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask specifying what should be updated
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_readback(void *base, u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 write_val, tmp = ioread32(base + offset);
+
+ tmp &= ~mask; /* retain other bits */
+ write_val = tmp | val;
+
+ iowrite32(write_val, base + offset);
+
+ /* Read back to see if val was written */
+ tmp = ioread32(base + offset);
+ tmp &= mask; /* clear other bits */
+
+ if (tmp != val)
+ pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
+ __func__, val, offset);
+}
+
+static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
+{
+ int i, num_ports;
+ u32 reg;
+
+ reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+ num_ports = HCS_MAX_PORTS(reg);
+
+ for (i = 0; i < num_ports; i++) {
+ reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
+ if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
+ return true;
+ }
+
+ return false;
+}
+
+static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
+{
+ int i, num_ports;
+ u32 reg;
+
+ reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+ num_ports = HCS_MAX_PORTS(reg);
+
+ for (i = 0; i < num_ports; i++) {
+ reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
+ if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
+{
+ u8 speed;
+
+ speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
+ return !!(speed & DSTS_CONNECTSPD_SS);
+}
+
+static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
+{
+ if (mdwc->in_host_mode)
+ return dwc3_msm_is_host_superspeed(mdwc);
+
+ return dwc3_msm_is_dev_superspeed(mdwc);
+}
+
+int dwc3_msm_dbm_disable_updxfer(struct dwc3 *dwc, u8 usb_ep)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ dev_dbg(mdwc->dev, "%s\n", __func__);
+ dwc3_dbm_disable_update_xfer(mdwc->dbm, usb_ep);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+/**
+ * Configure the DBM with the BAM's data fifo.
+ * This function is called by the USB BAM Driver
+ * upon initialization.
+ *
+ * @ep - pointer to usb endpoint.
+ * @addr - address of data fifo.
+ * @size - size of data fifo.
+ *
+ */
+int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+ u32 size, u8 dst_pipe_idx)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ dev_dbg(mdwc->dev, "%s\n", __func__);
+
+ return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
+ dst_pipe_idx);
+}
+
+
+/**
+* Cleanups for msm endpoint on request complete.
+*
+* Also call original request complete.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to usb_request instance.
+*
+* Returns nothing (the completion handler is void); errors are logged.
+*/
+static void dwc3_msm_req_complete_func(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct dwc3_msm_req_complete *req_complete = NULL;
+
+ /* Find original request complete function and remove it from list */
+ list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
+ if (req_complete->req == request)
+ break;
+ }
+ if (!req_complete || req_complete->req != request) {
+ dev_err(dep->dwc->dev, "%s: could not find the request\n",
+ __func__);
+ return;
+ }
+ list_del(&req_complete->list_item);
+
+ /*
+ * Release another one TRB to the pool since DBM queue took 2 TRBs
+ * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
+ * released only one.
+ */
+ dep->busy_slot++;
+
+ /* Unconfigure dbm ep */
+ dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+ /*
+ * If this is the last endpoint we unconfigured, then reset also
+ * the event buffers; unless unconfiguring the ep due to lpm,
+ * in which case the event buffer only gets reset during the
+ * block reset.
+ */
+ if (0 == dbm_get_num_of_eps_configured(mdwc->dbm) &&
+ !dbm_reset_ep_after_lpm(mdwc->dbm))
+ dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+
+ /*
+ * Call original complete function, notice that dwc->lock is already
+ * taken by the caller of this function (dwc3_gadget_giveback()).
+ */
+ request->complete = req_complete->orig_complete;
+ if (request->complete)
+ request->complete(ep, request);
+
+ kfree(req_complete);
+}
+
+
+/**
+* Helper function
+*
+* Reset DBM endpoint.
+*
+* @mdwc - pointer to dwc3_msm instance.
+* @dep - pointer to dwc3_ep instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
+{
+ int ret;
+
+ dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
+
+ /* Reset the dbm endpoint */
+ ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
+ if (ret) {
+ dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * The necessary delay between asserting and deasserting the dbm ep
+ * reset is based on the number of active endpoints. If there is more
+ * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
+ * delay will suffice.
+ */
+ if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
+ usleep_range(1000, 1200);
+ else
+ udelay(10);
+ ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
+ if (ret) {
+ dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+* Reset the DBM endpoint which is linked to the given USB endpoint.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+
+int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ return __dwc3_msm_dbm_ep_reset(mdwc, dep);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
+
+
+/**
+* Helper function.
+* See the header of the dwc3_msm_ep_queue function.
+*
+* @dwc3_ep - pointer to dwc3_ep instance.
+* @req - pointer to dwc3_request instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ struct dwc3_trb *trb;
+ struct dwc3_trb *trb_link;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ int ret = 0;
+
+ /* We push the request to the dep->req_queued list to indicate that
+ * this request is issued with start transfer. The request will be out
+ * from this list in 2 cases. The first is that the transfer will be
+ * completed (not if the transfer is endless using circular TRBs
+ * with a link TRB). The second case is an option to do stop transfer,
+ * this can be initiated by the function driver when calling dequeue.
+ */
+ req->queued = true;
+ list_add_tail(&req->list, &dep->req_queued);
+
+ /* First, prepare a normal TRB, point to the fake buffer */
+ trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ dep->free_slot++;
+ memset(trb, 0, sizeof(*trb));
+
+ req->trb = trb;
+ trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+ trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+ trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
+ DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ /* Second, prepare a Link TRB that points to the first TRB*/
+ trb_link = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ dep->free_slot++;
+ memset(trb_link, 0, sizeof *trb_link);
+
+ trb_link->bpl = lower_32_bits(req->trb_dma);
+ trb_link->bph = DBM_TRB_BIT |
+ DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+ trb_link->size = 0;
+ trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
+
+ /*
+ * Now start the transfer
+ */
+ memset(&params, 0, sizeof(params));
+ params.param0 = 0; /* TDAddr High */
+ params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
+
+ /* DBM requires IOC to be set */
+ cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
+ ret = dwc3_send_gadget_ep_cmd(dep->dwc, dep->number, cmd, &params);
+ if (ret < 0) {
+ dev_dbg(dep->dwc->dev,
+ "%s: failed to send STARTTRANSFER command\n",
+ __func__);
+
+ list_del(&req->list);
+ return ret;
+ }
+ dep->flags |= DWC3_EP_BUSY;
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep->dwc,
+ dep->number);
+
+ return ret;
+}
+
+/**
+* Queue a usb request to the DBM endpoint.
+* This function should be called after the endpoint
+* was enabled by the ep_enable.
+*
+* This function prepares special structure of TRBs which
+* is familiar with the DBM HW, so it will possible to use
+* this endpoint in DBM mode.
+*
+* The TRBs prepared by this function, is one normal TRB
+* which point to a fake buffer, followed by a link TRB
+* that points to the first TRB.
+*
+* The API of this function follow the regular API of
+* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to usb_request instance.
+* @gfp_flags - possible flags.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int dwc3_msm_ep_queue(struct usb_ep *ep,
+ struct usb_request *request, gfp_t gfp_flags)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct dwc3_msm_req_complete *req_complete;
+ unsigned long flags;
+ int ret = 0, size;
+ bool superspeed;
+
+ /*
+ * We must obtain the lock of the dwc3 core driver,
+ * including disabling interrupts, so we will be sure
+ * that we are the only ones that configure the HW device
+ * core and ensure that we queuing the request will finish
+ * as soon as possible so we will release back the lock.
+ */
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!dep->endpoint.desc) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue request %p to disabled ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
+ if (!mdwc->original_ep_ops[dep->number]) {
+ dev_err(mdwc->dev,
+ "ep [%s,%d] was unconfigured as msm endpoint\n",
+ ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!request) {
+ dev_err(mdwc->dev, "%s: request is NULL\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!(request->udc_priv & MSM_SPS_MODE)) {
+ dev_err(mdwc->dev, "%s: sps mode is not set\n",
+ __func__);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
+ /* HW restriction regarding TRB size (8KB) */
+ if (req->request.length < 0x2000) {
+ dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+
+ if (dep->number == 0 || dep->number == 1) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue dbm request %p to control ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
+ if (dep->busy_slot != dep->free_slot || !list_empty(&dep->request_list)
+ || !list_empty(&dep->req_queued)) {
+ dev_err(mdwc->dev,
+ "%s: trying to queue dbm request %p tp ep %s\n",
+ __func__, request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+ dep->busy_slot = 0;
+ dep->free_slot = 0;
+
+ /*
+ * Override req->complete function, but before doing that,
+ * store it's original pointer in the req_complete_list.
+ */
+ req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
+ if (!req_complete) {
+ dev_err(mdwc->dev, "%s: not enough memory\n", __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -ENOMEM;
+ }
+ req_complete->req = request;
+ req_complete->orig_complete = request->complete;
+ list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
+ request->complete = dwc3_msm_req_complete_func;
+
+ dev_vdbg(dwc->dev, "%s: queing request %pK to ep %s length %d\n",
+ __func__, request, ep->name, request->length);
+ size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
+ dbm_event_buffer_config(mdwc->dbm,
+ dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
+ dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
+ DWC3_GEVNTSIZ_SIZE(size));
+
+ ret = __dwc3_msm_ep_queue(dep, req);
+ if (ret < 0) {
+ dev_err(mdwc->dev,
+ "error %d after calling __dwc3_msm_ep_queue\n", ret);
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ superspeed = dwc3_msm_is_dev_superspeed(mdwc);
+ dbm_set_speed(mdwc->dbm, (u8)superspeed);
+
+ return 0;
+
+err:
+ list_del(&req_complete->list_item);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ kfree(req_complete);
+ return ret;
+}
+
+/*
+* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - XferRscIndex
+*/
+static inline int gsi_get_xfer_index(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+ return dep->resource_index;
+}
+
+/*
+* Fills up the GSI channel information needed in call to IPA driver
+* for GSI channel creation.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @ch_info - output parameter with requested channel info
+*/
+static void gsi_get_channel_info(struct usb_ep *ep,
+ struct gsi_channel_info *ch_info)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ int last_trb_index = 0;
+ struct dwc3 *dwc = dep->dwc;
+ struct usb_gsi_request *request = ch_info->ch_req;
+
+ /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
+ ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
+ DWC3_DEPCMD(dep->number));
+ ch_info->depcmd_hi_addr = 0;
+
+ ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[0]);
+ /* Convert to multipled of 1KB */
+ ch_info->const_buffer_size = request->buf_len/1024;
+
+ /* IN direction */
+ if (dep->direction) {
+ /*
+ * Multiply by size of each TRB for xfer_ring_len in bytes.
+ * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+ * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
+ */
+ ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
+ last_trb_index = 2 * request->num_bufs + 2;
+ } else { /* OUT direction */
+ /*
+ * Multiply by size of each TRB for xfer_ring_len in bytes.
+ * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+ * LINK TRB.
+ */
+ ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
+ last_trb_index = request->num_bufs + 1;
+ }
+
+ /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
+ ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
+ ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
+ DWC3_GEVNTCOUNT(ep->ep_intr_num));
+ ch_info->gevntcount_hi_addr = 0;
+
+ dev_dbg(dwc->dev,
+ "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
+ ch_info->depcmd_low_addr, ch_info->last_trb_addr,
+ ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
+}
+
+/*
+* Perform StartXfer on GSI EP. Stores XferRscIndex.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - 0 on success
+*/
+static int gsi_startxfer_for_ep(struct usb_ep *ep)
+{
+ int ret;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
+ params.param0 |= (ep->ep_intr_num << 16);
+ params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[0]));
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+ cmd |= DWC3_DEPCMD_PARAM(0);
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+
+ if (ret < 0)
+ dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
+ dep->number);
+ dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
+ return ret;
+}
+
+/*
+* Store Ring Base and Doorbell Address for GSI EP
+* for GSI channel creation.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @dbl_addr - Doorbell address obtained from IPA driver
+*/
+static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ int n = ep->ep_intr_num - 1;
+
+ dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
+ dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
+ dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
+
+ dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
+ dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
+ dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
+ dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
+}
+
+/*
+* Rings Doorbell for IN GSI Channel
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request. This is used to pass in the
+* address of the GSI doorbell obtained from IPA driver
+*/
+static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+ void __iomem *gsi_dbl_address_lsb;
+ void __iomem *gsi_dbl_address_msb;
+ dma_addr_t offset;
+ u64 dbl_addr = *((u64 *)request->buf_base_addr);
+ u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
+ u32 dbl_hi_addr = (dbl_addr >> 32);
+ u32 num_trbs = (request->num_bufs * 2 + 2);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
+ dbl_lo_addr, sizeof(u32));
+ if (!gsi_dbl_address_lsb)
+ dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
+
+ gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
+ dbl_hi_addr, sizeof(u32));
+ if (!gsi_dbl_address_msb)
+ dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
+
+ offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
+ dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %pK (%x)\n",
+ &offset, gsi_dbl_address_lsb, dbl_lo_addr);
+
+ writel_relaxed(offset, gsi_dbl_address_lsb);
+ writel_relaxed(0, gsi_dbl_address_msb);
+}
+
+/*
+* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
+*
+* @return int - 0 on success
+*/
+static int gsi_updatexfer_for_ep(struct usb_ep *ep,
+ struct usb_gsi_request *request)
+{
+ int i;
+ int ret;
+ u32 cmd;
+ int num_trbs = request->num_bufs + 1;
+ struct dwc3_trb *trb;
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ for (i = 0; i < num_trbs - 1; i++) {
+ trb = &dep->trb_pool[i];
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+ }
+
+ memset(&params, 0, sizeof(params));
+ cmd = DWC3_DEPCMD_UPDATETRANSFER;
+ cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ dep->flags |= DWC3_EP_BUSY;
+ if (ret < 0)
+ dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
+ return ret;
+}
+
+/*
+* Perform EndXfer on particular GSI EP.
+*
+* @usb_ep - pointer to usb_ep instance.
+*/
+static void gsi_endxfer_for_ep(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+}
+
+/*
+* Allocates and configures TRBs for GSI EPs.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request.
+*
+* @return int - 0 on success
+*/
+static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
+{
+ int i = 0;
+ dma_addr_t buffer_addr = req->dma;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+ int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
+ : (req->num_bufs + 1);
+
+ dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
+ num_trbs * sizeof(struct dwc3_trb),
+ num_trbs * sizeof(struct dwc3_trb), 0);
+ if (!dep->trb_dma_pool) {
+ dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
+ dep->name);
+ return -ENOMEM;
+ }
+
+ dep->num_trbs = num_trbs;
+
+ dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
+ GFP_KERNEL, &dep->trb_pool_dma);
+ if (!dep->trb_pool) {
+ dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
+ dep->name);
+ return -ENOMEM;
+ }
+
+ /* IN direction */
+ if (dep->direction) {
+ for (i = 0; i < num_trbs ; i++) {
+ trb = &dep->trb_pool[i];
+ memset(trb, 0, sizeof(*trb));
+ /* Set up first n+1 TRBs for ZLPs */
+ if (i < (req->num_bufs + 1)) {
+ trb->bpl = 0;
+ trb->bph = 0;
+ trb->size = 0;
+ trb->ctrl = DWC3_TRBCTL_NORMAL
+ | DWC3_TRB_CTRL_IOC;
+ continue;
+ }
+
+ /* Setup n TRBs pointing to valid buffers */
+ trb->bpl = lower_32_bits(buffer_addr);
+ trb->bph = 0;
+ trb->size = 0;
+ trb->ctrl = DWC3_TRBCTL_NORMAL
+ | DWC3_TRB_CTRL_IOC;
+ buffer_addr += req->buf_len;
+
+ /* Set up the Link TRB at the end */
+ if (i == (num_trbs - 1)) {
+ trb->bpl = dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[0]);
+ trb->bph = (1 << 23) | (1 << 21)
+ | (ep->ep_intr_num << 16);
+ trb->size = 0;
+ trb->ctrl = DWC3_TRBCTL_LINK_TRB
+ | DWC3_TRB_CTRL_HWO;
+ }
+ }
+ } else { /* OUT direction */
+
+ for (i = 0; i < num_trbs ; i++) {
+
+ trb = &dep->trb_pool[i];
+ memset(trb, 0, sizeof(*trb));
+ trb->bpl = lower_32_bits(buffer_addr);
+ trb->bph = 0;
+ trb->size = req->buf_len;
+ trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
+ | DWC3_TRB_CTRL_CSP
+ | DWC3_TRB_CTRL_ISP_IMI;
+ buffer_addr += req->buf_len;
+
+ /* Set up the Link TRB at the end */
+ if (i == (num_trbs - 1)) {
+ trb->bpl = dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[0]);
+ trb->bph = (1 << 23) | (1 << 21)
+ | (ep->ep_intr_num << 16);
+ trb->size = 0;
+ trb->ctrl = DWC3_TRBCTL_LINK_TRB
+ | DWC3_TRB_CTRL_HWO;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+* Frees TRBs for GSI EPs.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+*/
+static void gsi_free_trbs(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+ if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
+ return;
+
+ /* Free TRBs and TRB pool for EP */
+ if (dep->trb_dma_pool) {
+ dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
+ dep->trb_pool_dma);
+ dma_pool_destroy(dep->trb_dma_pool);
+ dep->trb_pool = NULL;
+ dep->trb_pool_dma = 0;
+ dep->trb_dma_pool = NULL;
+ }
+}
+/*
+* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request.
+*/
+static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct dwc3_gadget_ep_cmd_params params;
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ u32 reg;
+ int ret;
+
+ memset(&params, 0x00, sizeof(params));
+
+ /* Configure GSI EP */
+ params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+ | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+
+ /* Burst size is only needed in SuperSpeed mode */
+ if (dwc->gadget.speed == USB_SPEED_SUPER) {
+ u32 burst = dep->endpoint.maxburst - 1;
+
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+ }
+
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_STREAM_EVENT_EN;
+ dep->stream_capable = true;
+ }
+
+ /* Set EP number */
+ params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+ /* Set interrupter number for GSI endpoints */
+ params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
+
+ /* Enable XferInProgress and XferComplete Interrupts */
+ params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
+ params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+ params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
+ /*
+ * We must use the lower 16 TX FIFOs even though
+ * HW might have more
+ */
+ /* Remove FIFO Number for GSI EP*/
+ if (dep->direction)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ params.param0 |= DWC3_DEPCFG_ACTION_INIT;
+
+ dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
+ params.param0, params.param1, params.param2, dep->name);
+
+ dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETEPCONFIG, &params);
+
+ /* Set XferRsc Index for GSI EP */
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
+ if (ret)
+ return;
+
+ memset(&params, 0x00, sizeof(params));
+ params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
+ dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+
+ dep->endpoint.desc = desc;
+ dep->comp_desc = comp_desc;
+ dep->type = usb_endpoint_type(desc);
+ dep->flags |= DWC3_EP_ENABLED;
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg |= DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ }
+
+}
+
+/*
+* Enables USB wrapper for GSI
+*
+* @usb_ep - pointer to usb_ep instance.
+*/
+static void gsi_enable(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ dwc3_msm_write_reg_field(mdwc->base,
+ GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
+ dwc3_msm_write_reg_field(mdwc->base,
+ GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
+ dwc3_msm_write_reg_field(mdwc->base,
+ GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
+ dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
+ dwc3_msm_write_reg_field(mdwc->base,
+ GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
+}
+
+/*
+* Block or allow doorbell towards GSI
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request. In this case num_bufs is used as a bool
+* to set or clear the doorbell bit
+*/
+static void gsi_set_clear_dbell(struct usb_ep *ep,
+ bool block_db)
+{
+
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ dwc3_msm_write_reg_field(mdwc->base,
+ GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
+}
+
+/*
+* Performs necessary checks before stopping GSI channels
+*
+* @usb_ep - pointer to usb_ep instance to access DWC3 regs
+*/
+static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
+{
+ u32 timeout = 500;
+ u32 reg = 0;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ while (dwc3_msm_read_reg_field(mdwc->base,
+ GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
+ if (!timeout--) {
+ dev_err(mdwc->dev,
+ "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
+ return false;
+ }
+ usleep_range(20, 22);
+ }
+ /* Check for U3 only if we are not handling Function Suspend */
+ if (!f_suspend) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
+ dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+/**
+* Performs GSI operations or GSI EP related operations.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @op_data - pointer to opcode related data.
+* @op - GSI related or GSI EP related op code.
+*
+* @return int - 0 on success, negative on error.
+* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
+*/
+static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
+ void *op_data, enum gsi_ep_op op)
+{
+ u32 ret = 0;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct usb_gsi_request *request;
+ struct gsi_channel_info *ch_info;
+ bool block_db, f_suspend;
+ unsigned long flags;
+
+ switch (op) {
+ case GSI_EP_OP_PREPARE_TRBS:
+ request = (struct usb_gsi_request *)op_data;
+ dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
+ ret = gsi_prepare_trbs(ep, request);
+ break;
+ case GSI_EP_OP_FREE_TRBS:
+ dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
+ gsi_free_trbs(ep);
+ break;
+ case GSI_EP_OP_CONFIG:
+ request = (struct usb_gsi_request *)op_data;
+ dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
+ gsi_configure_ep(ep, request);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case GSI_EP_OP_STARTXFER:
+ dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = gsi_startxfer_for_ep(ep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case GSI_EP_OP_GET_XFER_IDX:
+ dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
+ ret = gsi_get_xfer_index(ep);
+ break;
+ case GSI_EP_OP_STORE_DBL_INFO:
+ dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
+ gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
+ break;
+ case GSI_EP_OP_ENABLE_GSI:
+ dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
+ gsi_enable(ep);
+ break;
+ case GSI_EP_OP_GET_CH_INFO:
+ ch_info = (struct gsi_channel_info *)op_data;
+ gsi_get_channel_info(ep, ch_info);
+ break;
+ case GSI_EP_OP_RING_IN_DB:
+ request = (struct usb_gsi_request *)op_data;
+ dev_dbg(mdwc->dev, "RING IN EP DB\n");
+ gsi_ring_in_db(ep, request);
+ break;
+ case GSI_EP_OP_UPDATEXFER:
+ request = (struct usb_gsi_request *)op_data;
+ dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = gsi_updatexfer_for_ep(ep, request);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case GSI_EP_OP_ENDXFER:
+ request = (struct usb_gsi_request *)op_data;
+ dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
+ spin_lock_irqsave(&dwc->lock, flags);
+ gsi_endxfer_for_ep(ep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case GSI_EP_OP_SET_CLR_BLOCK_DBL:
+ block_db = *((bool *)op_data);
+ dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
+ block_db);
+ gsi_set_clear_dbell(ep, block_db);
+ break;
+ case GSI_EP_OP_CHECK_FOR_SUSPEND:
+ dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
+ f_suspend = *((bool *)op_data);
+ ret = gsi_check_ready_to_suspend(ep, f_suspend);
+ break;
+ case GSI_EP_OP_DISABLE:
+ dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
+ ret = ep->ops->disable(ep);
+ break;
+ default:
+ dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * Configure MSM endpoint.
+ * This function do specific configurations
+ * to an endpoint which need specific implementaion
+ * in the MSM architecture.
+ *
+ * This function should be called by usb function/class
+ * layer which need a support from the specific MSM HW
+ * which wrap the USB3 core. (like GSI or DBM specific endpoints)
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negetive on error.
+ */
+int msm_ep_config(struct usb_ep *ep, struct usb_request *request)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct usb_ep_ops *new_ep_ops;
+ int ret = 0;
+ u8 bam_pipe;
+ bool producer;
+ bool disable_wb;
+ bool internal_mem;
+ bool ioc;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ /* Save original ep ops for future restore*/
+ if (mdwc->original_ep_ops[dep->number]) {
+ dev_err(mdwc->dev,
+ "ep [%s,%d] already configured as msm endpoint\n",
+ ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+ mdwc->original_ep_ops[dep->number] = ep->ops;
+
+ /* Set new usb ops as we like */
+ new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
+ if (!new_ep_ops) {
+ dev_err(mdwc->dev,
+ "%s: unable to allocate mem for new usb ep ops\n",
+ __func__);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -ENOMEM;
+ }
+ (*new_ep_ops) = (*ep->ops);
+ new_ep_ops->queue = dwc3_msm_ep_queue;
+ new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
+ ep->ops = new_ep_ops;
+
+ if (!mdwc->dbm || !request || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
+ /*
+ * Configure the DBM endpoint if required.
+ */
+ bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
+ producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
+ disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
+ internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
+ ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
+
+ ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
+ disable_wb, internal_mem, ioc);
+ if (ret < 0) {
+ dev_err(mdwc->dev,
+ "error %d after calling dbm_ep_config\n", ret);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return ret;
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_ep_config);
+
+/**
+ * Un-configure MSM endpoint.
+ * Tear down configurations done in the
+ * dwc3_msm_ep_config function.
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_ep_unconfig(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ struct usb_ep_ops *old_ep_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ /* Restore original ep ops */
+ if (!mdwc->original_ep_ops[dep->number]) {
+ dev_err(mdwc->dev,
+ "ep [%s,%d] was not configured as msm endpoint\n",
+ ep->name, dep->number);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EINVAL;
+ }
+ old_ep_ops = (struct usb_ep_ops *)ep->ops;
+ ep->ops = mdwc->original_ep_ops[dep->number];
+ mdwc->original_ep_ops[dep->number] = NULL;
+ kfree(old_ep_ops);
+
+ /*
+ * Do HERE more usb endpoint un-configurations
+ * which are specific to MSM.
+ */
+ if (!mdwc->dbm || (dep->endpoint.ep_type == EP_TYPE_GSI)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
+ if (dep->busy_slot == dep->free_slot && list_empty(&dep->request_list)
+ && list_empty(&dep->req_queued)) {
+ dev_dbg(mdwc->dev,
+ "%s: request is not queued, disable DBM ep for ep %s\n",
+ __func__, ep->name);
+ /* Unconfigure dbm ep */
+ dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+ /*
+ * If this is the last endpoint we unconfigured, than reset also
+ * the event buffers; unless unconfiguring the ep due to lpm,
+ * in which case the event buffer only gets reset during the
+ * block reset.
+ */
+ if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
+ !dbm_reset_ep_after_lpm(mdwc->dbm))
+ dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_ep_unconfig);
+#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
+
+static void dwc3_resume_work(struct work_struct *w);
+
+/*
+ * dwc3_restart_usb_work - reset an active USB connection after a
+ * controller error: simulate a disconnect, wait for the controller to
+ * runtime-suspend (forcing it if needed), then reconnect if VBUS is
+ * still present.
+ *
+ * @w - work_struct embedded in dwc3_msm (restart_usb_work).
+ */
+static void dwc3_restart_usb_work(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+ restart_usb_work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ /* up to 50 * 20ms = 1s waiting for runtime suspend */
+ unsigned timeout = 50;
+
+ dev_dbg(mdwc->dev, "%s\n", __func__);
+
+ if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
+ dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
+ return;
+ }
+
+ /* guard against concurrent VBUS handling */
+ mdwc->in_restart = true;
+
+ if (!mdwc->vbus_active) {
+ dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
+ dwc->err_evt_seen = false;
+ mdwc->in_restart = false;
+ return;
+ }
+
+ dbg_event(0xFF, "RestartUSB", 0);
+
+ /* Reset active USB connection */
+ dwc3_resume_work(&mdwc->resume_work);
+
+ /* Make sure disconnect is processed before sending connect */
+ while (--timeout && !pm_runtime_suspended(mdwc->dev))
+ msleep(20);
+
+ if (!timeout) {
+ dev_dbg(mdwc->dev,
+ "Not in LPM after disconnect, forcing suspend...\n");
+ dbg_event(0xFF, "ReStart:RT SUSP",
+ atomic_read(&mdwc->dev->power.usage_count));
+ pm_runtime_suspend(mdwc->dev);
+ }
+
+ mdwc->in_restart = false;
+ /* Force reconnect only if cable is still connected */
+ if (mdwc->vbus_active) {
+ /* Apply any pending one-shot speed override before reconnect */
+ if (mdwc->override_usb_speed) {
+ dwc->maximum_speed = mdwc->override_usb_speed;
+ dwc->gadget.max_speed = dwc->maximum_speed;
+ dbg_event(0xFF, "override_usb_speed",
+ mdwc->override_usb_speed);
+ mdwc->override_usb_speed = 0;
+ }
+
+ dwc3_resume_work(&mdwc->resume_work);
+ }
+
+ dwc->err_evt_seen = false;
+ flush_delayed_work(&mdwc->sm_work);
+}
+
+/*
+ * msm_dwc3_usbdev_notify - USB bus notifier callback; kicks the state
+ * machine to recover the host controller when the HC dies.
+ *
+ * @self - notifier_block embedded in dwc3_msm (usbdev_nb).
+ * @action - USB notifier action; only USB_BUS_DIED is handled.
+ * @priv - the usb_bus that generated the event.
+ *
+ * @return int - always 0 (event is never consumed).
+ */
+static int msm_dwc3_usbdev_notify(struct notifier_block *self,
+ unsigned long action, void *priv)
+{
+ struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_bus *bus = priv;
+
+ /* Interested only in recovery when HC dies */
+ if (action != USB_BUS_DIED)
+ return 0;
+
+ dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
+ /* Recovery already under process */
+ if (mdwc->hc_died)
+ return 0;
+
+ /* Ignore events for HCDs other than our own xHCI */
+ if (bus->controller != &dwc->xhci->dev) {
+ dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
+ return 0;
+ }
+
+ mdwc->hc_died = true;
+ queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0);
+ return 0;
+}
+
+
+/*
+ * Check whether the DWC3 requires resetting the ep
+ * after going to Low Power Mode (lpm)
+ */
+bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
+{
+ struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+ return dbm_reset_ep_after_lpm(mdwc->dbm);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
+
+/*
+ * Config Global Distributed Switch Controller (GDSC)
+ * to support controller power collapse
+ */
+static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
+ return -EPERM;
+
+ if (on) {
+ ret = regulator_enable(mdwc->dwc3_gdsc);
+ if (ret) {
+ dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
+ return ret;
+ }
+ } else {
+ ret = regulator_disable(mdwc->dwc3_gdsc);
+ if (ret) {
+ dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * dwc3_msm_link_clk_reset - assert or de-assert the asynchronous block
+ * reset of the DWC3 core, gating/ungating its clocks around the reset.
+ *
+ * @mdwc - MSM glue context.
+ * @assert - true to assert the reset, false to de-assert and re-enable.
+ *
+ * @return int - 0 on success, reset controller error otherwise.
+ */
+static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
+{
+ int ret = 0;
+
+ if (assert) {
+ disable_irq(mdwc->pwr_event_irq);
+ /* Using asynchronous block reset to the hardware */
+ dev_dbg(mdwc->dev, "block_reset ASSERT\n");
+ clk_disable_unprepare(mdwc->utmi_clk);
+ clk_disable_unprepare(mdwc->sleep_clk);
+ clk_disable_unprepare(mdwc->core_clk);
+ clk_disable_unprepare(mdwc->iface_clk);
+ ret = reset_control_assert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
+ } else {
+ dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
+ ret = reset_control_deassert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
+ /* Let the de-assert settle before clocking the core again */
+ ndelay(200);
+ clk_prepare_enable(mdwc->iface_clk);
+ clk_prepare_enable(mdwc->core_clk);
+ clk_prepare_enable(mdwc->sleep_clk);
+ clk_prepare_enable(mdwc->utmi_clk);
+ enable_irq(mdwc->pwr_event_irq);
+ }
+
+ return ret;
+}
+
+/*
+ * dwc3_msm_update_ref_clk - program GUCTL.REFCLKPER (and GFLADJ on cores
+ * >= 2.50a) according to the UTMI clock rate in use.
+ *
+ * @mdwc - MSM glue context; mdwc->utmi_clk_rate selects the values.
+ */
+static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
+{
+ u32 guctl, gfladj = 0;
+
+ guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
+ guctl &= ~DWC3_GUCTL_REFCLKPER;
+
+ /* GFLADJ register is used starting with revision 2.50a */
+ if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
+ gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
+ gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+ gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
+ gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
+ gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+ }
+
+ /* Refer to SNPS Databook Table 6-55 for calculations used */
+ switch (mdwc->utmi_clk_rate) {
+ case 19200000:
+ guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
+ gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+ gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+ gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+ gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+ break;
+ case 24000000:
+ guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
+ gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+ gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+ gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+ break;
+ default:
+ dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
+ mdwc->utmi_clk_rate);
+ break;
+ }
+
+ dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
+ /* gfladj stays 0 on cores older than 2.50a; skip the write then */
+ if (gfladj)
+ dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
+}
+
+/*
+ * dwc3_msm_qscratch_reg_init - initialize QSCRATCH registers for HSPHY
+ * and SSPHY operation after a (soft) reset.
+ *
+ * @mdwc - MSM glue context.
+ */
+static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
+{
+ if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
+ /* On older cores set XHCI_REV bit to specify revision 1.0 */
+ dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+ BIT(2), 1);
+
+ /*
+ * Enable master clock for RAMs to allow BAM to access RAMs when
+ * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
+ * are seen where RAM clocks get turned OFF in SS mode
+ */
+ dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
+ dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
+
+}
+
+/*
+ * dwc3_msm_vbus_draw_work - deferred handler that applies the VBUS
+ * current draw requested by the core (dwc->vbus_draw).
+ *
+ * @w - work_struct embedded in dwc3_msm (vbus_draw_work).
+ */
+static void dwc3_msm_vbus_draw_work(struct work_struct *w)
+{
+ struct dwc3_msm *msm = container_of(w, struct dwc3_msm,
+ vbus_draw_work);
+ struct dwc3 *dwc3 = platform_get_drvdata(msm->dwc3);
+
+ dwc3_msm_gadget_vbus_draw(msm, dwc3->vbus_draw);
+}
+
+/*
+ * dwc3_msm_notify_event - callback invoked by the dwc3 core to notify the
+ * MSM glue of controller events (error, reset, connect-done, OTG, etc.).
+ *
+ * @dwc - the dwc3 core instance.
+ * @event - DWC3_CONTROLLER_* event code.
+ * @value - event-specific payload (e.g. EP number for DISABLE_UPDXFER).
+ */
+static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event,
+ unsigned value)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ u32 reg;
+
+ /* Events below rely on behavior of cores >= 2.30a */
+ if (dwc->revision < DWC3_REVISION_230A)
+ return;
+
+ switch (event) {
+ case DWC3_CONTROLLER_ERROR_EVENT:
+ dev_info(mdwc->dev,
+ "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
+ dwc->irq_cnt);
+
+ dwc3_gadget_disable_irq(dwc);
+
+ /* prevent core from generating interrupts until recovery */
+ reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
+ reg |= DWC3_GCTL_CORESOFTRESET;
+ dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
+
+ /* restart USB which performs full reset and reconnect */
+ schedule_work(&mdwc->restart_usb_work);
+ break;
+ case DWC3_CONTROLLER_RESET_EVENT:
+ dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
+ /* HS & SSPHYs get reset as part of core soft reset */
+ dwc3_msm_qscratch_reg_init(mdwc);
+ break;
+ case DWC3_CONTROLLER_POST_RESET_EVENT:
+ dev_dbg(mdwc->dev,
+ "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
+
+ /*
+ * Below sequence is used when controller is working without
+ * having ssphy and only USB high/full speed is supported.
+ */
+ if (dwc->maximum_speed == USB_SPEED_HIGH ||
+ dwc->maximum_speed == USB_SPEED_FULL) {
+ dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+ dwc3_msm_read_reg(mdwc->base,
+ QSCRATCH_GENERAL_CFG)
+ | PIPE_UTMI_CLK_DIS);
+
+ usleep_range(2, 5);
+
+
+ dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+ dwc3_msm_read_reg(mdwc->base,
+ QSCRATCH_GENERAL_CFG)
+ | PIPE_UTMI_CLK_SEL
+ | PIPE3_PHYSTATUS_SW);
+
+ usleep_range(2, 5);
+
+ dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+ dwc3_msm_read_reg(mdwc->base,
+ QSCRATCH_GENERAL_CFG)
+ & ~PIPE_UTMI_CLK_DIS);
+ }
+
+ dwc3_msm_update_ref_clk(mdwc);
+ dwc->tx_fifo_size = mdwc->tx_fifo_size;
+ break;
+ case DWC3_CONTROLLER_CONNDONE_EVENT:
+ dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
+ /*
+ * Add power event if the dbm indicates coming out of L1 by
+ * interrupt
+ */
+ if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
+ dwc3_msm_write_reg_field(mdwc->base,
+ PWR_EVNT_IRQ_MASK_REG,
+ PWR_EVNT_LPM_OUT_L1_MASK, 1);
+
+ atomic_set(&dwc->in_lpm, 0);
+ break;
+ case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
+ dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
+ if (dwc->enable_bus_suspend) {
+ mdwc->suspend = dwc->b_suspend;
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+ break;
+ case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
+ dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
+ schedule_work(&mdwc->vbus_draw_work);
+ break;
+ case DWC3_CONTROLLER_RESTART_USB_SESSION:
+ dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
+ schedule_work(&mdwc->restart_usb_work);
+ break;
+ case DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER:
+ dwc3_msm_dbm_disable_updxfer(dwc, value);
+ break;
+ default:
+ dev_dbg(mdwc->dev, "unknown dwc3 event\n");
+ break;
+ }
+}
+
+/*
+ * dwc3_msm_block_reset - reset the MSM USB block: optionally pulse the
+ * core link clock reset, then soft-reset and re-enable the DBM.
+ *
+ * @mdwc - MSM glue context.
+ * @core_reset - true to also assert/de-assert the core block reset.
+ */
+static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
+{
+ int ret = 0;
+
+ if (core_reset) {
+ ret = dwc3_msm_link_clk_reset(mdwc, 1);
+ if (ret)
+ return;
+
+ usleep_range(1000, 1200);
+ ret = dwc3_msm_link_clk_reset(mdwc, 0);
+ if (ret)
+ return;
+
+ /* Give the core time to settle after the reset pulse */
+ usleep_range(10000, 12000);
+ }
+
+ if (mdwc->dbm) {
+ /* Reset the DBM */
+ dbm_soft_reset(mdwc->dbm, 1);
+ usleep_range(1000, 1200);
+ dbm_soft_reset(mdwc->dbm, 0);
+
+ /*enable DBM*/
+ dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+ DBM_EN_MASK, 0x1);
+ dbm_enable(mdwc->dbm);
+ }
+}
+
+/*
+ * dwc3_msm_power_collapse_por - re-initialize the controller after a power
+ * collapse (power-on-reset state).
+ *
+ * Performs one-time pre-init on the first call, then re-runs dwc3 core init,
+ * restores event buffers, re-arms the IN_P3 power event and, when the DRD
+ * state machine is in host mode, puts the core back into host mode.
+ * Failures are recorded in mdwc->core_init_failed rather than returned.
+ */
+static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
+{
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ u32 val;
+ int ret;
+
+ /* Configure AHB2PHY for one wait state read/write */
+ if (mdwc->ahb2phy_base) {
+ clk_prepare_enable(mdwc->cfg_ahb_clk);
+ val = readl_relaxed(mdwc->ahb2phy_base +
+ PERIPH_SS_AHB2PHY_TOP_CFG);
+ if (val != ONE_READ_WRITE_WAIT) {
+ writel_relaxed(ONE_READ_WRITE_WAIT,
+ mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
+ /* complete above write before configuring USB PHY. */
+ mb();
+ }
+ clk_disable_unprepare(mdwc->cfg_ahb_clk);
+ }
+
+ /* one-time core pre-init; guarded so repeated POR exits skip it */
+ if (!mdwc->init) {
+ dbg_event(0xFF, "dwc3 init",
+ atomic_read(&mdwc->dev->power.usage_count));
+ ret = dwc3_core_pre_init(dwc);
+ if (ret) {
+ dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
+ mdwc->core_init_failed = true;
+ return;
+ }
+ mdwc->init = true;
+ }
+
+ ret = dwc3_core_init(dwc);
+ if (ret) {
+ dev_err(mdwc->dev, "dwc3_core_init failed\n");
+ mdwc->core_init_failed = true;
+ return;
+ }
+
+ mdwc->core_init_failed = false;
+ /* Re-configure event buffers */
+ dwc3_event_buffers_setup(dwc);
+
+ /* Get initial P3 status and enable IN_P3 event */
+ val = dwc3_msm_read_reg_field(mdwc->base,
+ DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+ atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
+ dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+ PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+ if (mdwc->drd_state == DRD_STATE_HOST) {
+ dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
+ __func__);
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ }
+}
+
+/*
+ * dwc3_msm_prepare_suspend - gate the HS PHY into L2 ahead of LPM entry.
+ *
+ * Aborts with -EBUSY if a superspeed session is active but the link is not
+ * in P3 (suspend would glitch an active SS link). Otherwise enables PHY
+ * auto-suspend bits and polls (up to ~5 ms) for the L2 power event.
+ * Failure to observe L2 is only logged; the function still returns 0.
+ */
+static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
+{
+ unsigned long timeout;
+ u32 reg = 0;
+
+ if ((mdwc->in_host_mode || mdwc->in_device_mode)
+ && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
+ if (!atomic_read(&mdwc->in_p3)) {
+ dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Clear previous L2 events */
+ dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+ PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
+
+ /* Prepare HSPHY for suspend */
+ reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
+ dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+ reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
+
+ /* Wait for PHY to go into L2 */
+ timeout = jiffies + msecs_to_jiffies(5);
+ while (!time_after(jiffies, timeout)) {
+ reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+ if (reg & PWR_EVNT_LPM_IN_L2_MASK)
+ break;
+ usleep_range(20, 30);
+ }
+ if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
+ dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
+
+ /* Clear L2 event bit */
+ dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+ PWR_EVNT_LPM_IN_L2_MASK);
+
+ return 0;
+}
+
+/* Worker: apply the latest bus bandwidth vote from process context. */
+static void dwc3_msm_bus_vote_w(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
+ int rc;
+
+ rc = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
+ mdwc->bus_vote);
+ if (rc)
+ dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", rc);
+}
+
+/*
+ * Record the negotiated link speed in the HS PHY flags (PHY_HSFS_MODE /
+ * PHY_LS_MODE). In host mode every enabled root-hub port is inspected;
+ * in device mode the gadget's current speed is used.
+ */
+static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
+{
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ u32 portsc;
+ int port, nports;
+
+ mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
+
+ if (!mdwc->in_host_mode) {
+ if (dwc->gadget.speed == USB_SPEED_HIGH ||
+ dwc->gadget.speed == USB_SPEED_FULL)
+ mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+ else if (dwc->gadget.speed == USB_SPEED_LOW)
+ mdwc->hs_phy->flags |= PHY_LS_MODE;
+ return;
+ }
+
+ nports = HCS_MAX_PORTS(dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1));
+ for (port = 0; port < nports; port++) {
+ portsc = dwc3_msm_read_reg(mdwc->base,
+ USB3_PORTSC + port * 0x10);
+ if (!(portsc & PORT_PE))
+ continue;
+ if (DEV_HIGHSPEED(portsc) || DEV_FULLSPEED(portsc))
+ mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+ else if (DEV_LOWSPEED(portsc))
+ mdwc->hs_phy->flags |= PHY_LS_MODE;
+ }
+}
+
+static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
+ bool perf_mode);
+
+/*
+ * dwc3_msm_suspend - drive the controller, PHYs and clocks into low power
+ * mode (and into full power collapse when no session is active or when
+ * @hibernation is set).
+ *
+ * Serialized against dwc3_msm_resume() by suspend_resume_mutex. Returns 0
+ * on success, -EBUSY when the sequence must be aborted (pending device
+ * events, racing cable disconnect, or bus suspend while not CONFIGURED).
+ */
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
+{
+ int ret, i;
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ mutex_lock(&mdwc->suspend_resume_mutex);
+ if (atomic_read(&dwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return 0;
+ }
+
+ /* stop perf voting before clocks go away */
+ cancel_delayed_work_sync(&mdwc->perf_vote_work);
+ msm_dwc3_perf_vote_update(mdwc, false);
+
+ if (!mdwc->in_host_mode) {
+ /* pending device events unprocessed */
+ for (i = 0; i < dwc->num_event_buffers; i++) {
+ struct dwc3_event_buffer *evt = dwc->ev_buffs[i];
+ if ((evt->flags & DWC3_EVENT_PENDING)) {
+ dev_dbg(mdwc->dev,
+ "%s: %d device events pending, abort suspend\n",
+ __func__, evt->count / 4);
+ dbg_print_reg("PENDING DEVICE EVENT",
+ *(u32 *)(evt->buf + evt->lpos));
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return -EBUSY;
+ }
+ }
+ }
+
+ if (!mdwc->vbus_active && dwc->is_drd &&
+ mdwc->drd_state == DRD_STATE_PERIPHERAL) {
+ /*
+ * In some cases, the pm_runtime_suspend may be called by
+ * usb_bam when there is pending lpm flag. However, if this is
+ * done when cable was disconnected and otg state has not
+ * yet changed to IDLE, then it means OTG state machine
+ * is running and we race against it. So cancel LPM for now,
+ * and OTG state machine will go for LPM later, after completing
+ * transition to IDLE state.
+ */
+ dev_dbg(mdwc->dev,
+ "%s: cable disconnected while not in idle otg state\n",
+ __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return -EBUSY;
+ }
+
+ /*
+ * Check if device is not in CONFIGURED state
+ * then check controller state of L2 and break
+ * LPM sequence. Check this for device bus suspend case.
+ */
+ if ((dwc->is_drd && mdwc->drd_state == DRD_STATE_PERIPHERAL_SUSPEND) &&
+ (dwc->gadget.state != USB_STATE_CONFIGURED)) {
+ pr_err("%s(): Trying to go in LPM with state:%d\n",
+ __func__, dwc->gadget.state);
+ pr_err("%s(): LPM is not performed.\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return -EBUSY;
+ }
+
+ /* park the HS PHY in L2 before touching clocks */
+ ret = dwc3_msm_prepare_suspend(mdwc);
+ if (ret) {
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return ret;
+ }
+
+ /* Disable core irq */
+ if (dwc->irq)
+ disable_irq(dwc->irq);
+
+ if (work_busy(&dwc->bh_work))
+ dbg_event(0xFF, "pend evt", 0);
+
+ /* disable power event irq, hs and ss phy irq is used as wake up src */
+ disable_irq(mdwc->pwr_event_irq);
+
+ dwc3_set_phy_speed_flags(mdwc);
+ /* Suspend HS PHY */
+ usb_phy_set_suspend(mdwc->hs_phy, 1);
+
+ /* Suspend SS PHY */
+ if (dwc->maximum_speed == USB_SPEED_SUPER) {
+ /* indicate phy about SS mode */
+ if (dwc3_msm_is_superspeed(mdwc))
+ mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
+ usb_phy_set_suspend(mdwc->ss_phy, 1);
+ mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
+ }
+
+ /* make sure above writes are completed before turning off clocks */
+ wmb();
+
+ /* Disable clocks */
+ if (mdwc->bus_aggr_clk)
+ clk_disable_unprepare(mdwc->bus_aggr_clk);
+ clk_disable_unprepare(mdwc->utmi_clk);
+
+ /* Memory core: OFF, Memory periphery: OFF */
+ if (!mdwc->in_host_mode && !mdwc->vbus_active) {
+ clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
+ clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
+ }
+
+ /* drop core clock to XO rate while idle */
+ clk_set_rate(mdwc->core_clk, 19200000);
+ clk_disable_unprepare(mdwc->core_clk);
+ if (mdwc->noc_aggr_clk)
+ clk_disable_unprepare(mdwc->noc_aggr_clk);
+ /*
+ * Disable iface_clk only after core_clk as core_clk has FSM
+ * depedency on iface_clk. Hence iface_clk should be turned off
+ * after core_clk is turned off.
+ */
+ clk_disable_unprepare(mdwc->iface_clk);
+ /* USB PHY no more requires TCXO */
+ clk_disable_unprepare(mdwc->xo_clk);
+
+ /* Perform controller power collapse */
+ if ((!mdwc->in_host_mode && (!mdwc->in_device_mode || mdwc->in_restart))
+ || hibernation) {
+ mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
+ dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
+ dwc3_msm_config_gdsc(mdwc, 0);
+ clk_disable_unprepare(mdwc->sleep_clk);
+ }
+
+ /* Remove bus voting */
+ if (mdwc->bus_perf_client) {
+ mdwc->bus_vote = 0;
+ schedule_work(&mdwc->bus_vote_w);
+ }
+
+ /*
+ * release wakeup source with timeout to defer system suspend to
+ * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
+ * event is received.
+ */
+ if (mdwc->lpm_to_suspend_delay) {
+ dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
+ mdwc->lpm_to_suspend_delay);
+ pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
+ } else {
+ pm_relax(mdwc->dev);
+ }
+
+ atomic_set(&dwc->in_lpm, 1);
+
+ /*
+ * with DCP or during cable disconnect, we dont require wakeup
+ * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
+ * case of host bus suspend and device bus suspend.
+ */
+ if (mdwc->in_device_mode || mdwc->in_host_mode) {
+ if (!mdwc->no_wakeup_src_in_hostmode)
+ enable_irq_wake(mdwc->hs_phy_irq)
+ enable_irq(mdwc->hs_phy_irq);
+ if (mdwc->ss_phy_irq) {
+ if (!mdwc->no_wakeup_src_in_hostmode)
+ enable_irq_wake(mdwc->ss_phy_irq);
+ enable_irq(mdwc->ss_phy_irq);
+ }
+ mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+ }
+
+ dev_info(mdwc->dev, "DWC3 in low power mode\n");
+ dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
+
+ /* kick_sm if it is waiting for lpm sequence to finish */
+ if (test_and_clear_bit(WAIT_FOR_LPM, &mdwc->inputs))
+ queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0);
+
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+
+ return 0;
+}
+
+/*
+ * dwc3_msm_resume - bring the controller out of low power mode: restore
+ * GDSC/clocks, wake the PHYs, undo power collapse (re-running POR init if
+ * needed) and re-enable interrupts. Mirror image of dwc3_msm_suspend();
+ * serialized by suspend_resume_mutex. Always returns 0.
+ */
+static int dwc3_msm_resume(struct dwc3_msm *mdwc)
+{
+ int ret;
+ long core_clk_rate;
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+
+ mutex_lock(&mdwc->suspend_resume_mutex);
+ if (!atomic_read(&dwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+ return 0;
+ }
+
+ pm_stay_awake(mdwc->dev);
+
+ /* Enable bus voting */
+ if (mdwc->bus_perf_client) {
+ mdwc->bus_vote = 1;
+ schedule_work(&mdwc->bus_vote_w);
+ }
+
+ /* Vote for TCXO while waking up USB HSPHY */
+ ret = clk_prepare_enable(mdwc->xo_clk);
+ if (ret)
+ dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
+ __func__, ret);
+
+ /* Restore controller power collapse */
+ if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+ dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+ dwc3_msm_config_gdsc(mdwc, 1);
+ ret = reset_control_assert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "%s:core_reset assert failed\n",
+ __func__);
+ /* HW requires a short delay for reset to take place properly */
+ usleep_range(1000, 1200);
+ ret = reset_control_deassert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
+ __func__);
+ clk_prepare_enable(mdwc->sleep_clk);
+ }
+
+ /*
+ * Enable clocks
+ * Turned ON iface_clk before core_clk due to FSM depedency.
+ */
+ clk_prepare_enable(mdwc->iface_clk);
+ if (mdwc->noc_aggr_clk)
+ clk_prepare_enable(mdwc->noc_aggr_clk);
+
+ /* HS-only root hub sessions can run the core at the lower HS rate */
+ core_clk_rate = mdwc->core_clk_rate;
+ if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
+ core_clk_rate = mdwc->core_clk_rate_hs;
+ dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
+ core_clk_rate);
+ }
+
+ clk_set_rate(mdwc->core_clk, core_clk_rate);
+ clk_prepare_enable(mdwc->core_clk);
+
+ /* set Memory core: ON, Memory periphery: ON */
+ clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
+ clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
+
+ clk_prepare_enable(mdwc->utmi_clk);
+ if (mdwc->bus_aggr_clk)
+ clk_prepare_enable(mdwc->bus_aggr_clk);
+
+ /* Resume SS PHY */
+ if (dwc->maximum_speed == USB_SPEED_SUPER &&
+ mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
+ /* tell the PHY which Type-C lane carries SS signaling */
+ mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
+ if (mdwc->typec_orientation == ORIENTATION_CC1)
+ mdwc->ss_phy->flags |= PHY_LANE_A;
+ if (mdwc->typec_orientation == ORIENTATION_CC2)
+ mdwc->ss_phy->flags |= PHY_LANE_B;
+ usb_phy_set_suspend(mdwc->ss_phy, 0);
+ mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
+ mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
+ }
+
+ mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
+ /* Resume HS PHY */
+ usb_phy_set_suspend(mdwc->hs_phy, 0);
+
+ /* Disable HSPHY auto suspend */
+ dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+ dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
+ ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
+ DWC3_GUSB2PHYCFG_SUSPHY));
+
+ /* Recover from controller power collapse */
+ if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+ dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+
+ dwc3_msm_power_collapse_por(mdwc);
+
+ mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
+ }
+
+ atomic_set(&dwc->in_lpm, 0);
+
+ /* enable power evt irq for IN P3 detection */
+ enable_irq(mdwc->pwr_event_irq);
+
+ /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
+ if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
+ if (!mdwc->no_wakeup_src_in_hostmode)
+ disable_irq_wake(mdwc->hs_phy_irq);
+ disable_irq_nosync(mdwc->hs_phy_irq);
+ if (mdwc->ss_phy_irq) {
+ if (!mdwc->no_wakeup_src_in_hostmode)
+ disable_irq_wake(mdwc->ss_phy_irq);
+ disable_irq_nosync(mdwc->ss_phy_irq);
+ }
+ mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+ }
+
+ dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
+
+ /* Enable core irq */
+ if (dwc->irq)
+ enable_irq(dwc->irq);
+
+ /*
+ * Handle other power events that could not have been handled during
+ * Low Power Mode
+ */
+ dwc3_pwr_event_handler(mdwc);
+
+ if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
+ schedule_delayed_work(&mdwc->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+
+ dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+
+ return 0;
+}
+
+/**
+ * dwc3_ext_event_notify - callback to handle events from external transceiver
+ *
+ * Translates the cached ID/VBUS/suspend state into the OTG state machine's
+ * input bits and queues the state machine work. Holds a wakeup source until
+ * the state machine has run.
+ */
+static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
+{
+ /* Flush processing any pending events before handling new ones */
+ flush_delayed_work(&mdwc->sm_work);
+
+ if (mdwc->id_state == DWC3_ID_FLOAT) {
+ dev_dbg(mdwc->dev, "XCVR: ID set\n");
+ set_bit(ID, &mdwc->inputs);
+ } else {
+ dev_dbg(mdwc->dev, "XCVR: ID clear\n");
+ clear_bit(ID, &mdwc->inputs);
+ }
+
+ /* B-session valid only when VBUS is up and no restart is in flight */
+ if (mdwc->vbus_active && !mdwc->in_restart) {
+ dev_dbg(mdwc->dev, "XCVR: BSV set\n");
+ set_bit(B_SESS_VLD, &mdwc->inputs);
+ } else {
+ dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
+ clear_bit(B_SESS_VLD, &mdwc->inputs);
+ }
+
+ if (mdwc->suspend) {
+ dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
+ set_bit(B_SUSPEND, &mdwc->inputs);
+ } else {
+ dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
+ clear_bit(B_SUSPEND, &mdwc->inputs);
+ }
+
+ pm_stay_awake(mdwc->dev);
+ queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0);
+}
+
+/*
+ * dwc3_resume_work - worker that exits LPM (when the power event IRQ armed
+ * resume_pending) and then forwards the cable/ID state to the OTG state
+ * machine, unless a system PM suspend is in progress.
+ */
+static void dwc3_resume_work(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+
+ /*
+ * exit LPM first to meet resume timeline from device side.
+ * resume_pending flag would prevent calling
+ * dwc3_msm_resume() in case we are here due to system
+ * wide resume without usb cable connected. This flag is set
+ * only in case of power event irq in lpm.
+ */
+ if (mdwc->resume_pending) {
+ dwc3_msm_resume(mdwc);
+ mdwc->resume_pending = false;
+ }
+
+ if (atomic_read(&mdwc->pm_suspended)) {
+ dbg_event(0xFF, "RWrk PMSus", 0);
+ /* let pm resume kick in resume work later */
+ return;
+ }
+
+ dbg_event(0xFF, "RWrk", dwc->is_drd);
+ dwc3_ext_event_notify(mdwc);
+}
+
+/*
+ * dwc3_pwr_event_handler - service latched power events (P3 entry/exit,
+ * L1/L2 exit) and acknowledge them in PWR_EVNT_IRQ_STAT_REG.
+ *
+ * Must run with controller clocks enabled (it reads QSCRATCH registers);
+ * callers guarantee this by resuming from LPM first. Bits are moved from
+ * irq_stat into irq_clear as they are handled; whatever remains in
+ * irq_stat at the end is logged as unexpected.
+ */
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
+{
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ u32 irq_stat, irq_clear = 0;
+
+ irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+ dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
+
+ /* Check for P3 events */
+ if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
+ (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
+ /* Can't tell if entered or exit P3, so check LINKSTATE */
+ u32 ls = dwc3_msm_read_reg_field(mdwc->base,
+ DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+ dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
+ atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
+
+ irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+ PWR_EVNT_POWERDOWN_IN_P3_MASK);
+ irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+ PWR_EVNT_POWERDOWN_IN_P3_MASK);
+ } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
+ atomic_set(&mdwc->in_p3, 0);
+ irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+ irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+ } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
+ atomic_set(&mdwc->in_p3, 1);
+ irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
+ irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
+ }
+
+ /* Clear L2 exit */
+ if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
+ irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
+ /*
+ * Fix: ack the bit via irq_clear. Previously the mask was
+ * OR'ed back into irq_stat (a no-op), so the L2-exit event
+ * was never cleared in hardware and was reported as an
+ * unexpected event on every subsequent invocation.
+ */
+ irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
+ }
+
+ /* Handle exit from L1 events */
+ if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
+ dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
+ __func__);
+ if (usb_gadget_wakeup(&dwc->gadget))
+ dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
+ __func__);
+ irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
+ irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
+ }
+
+ /* Unhandled events */
+ if (irq_stat)
+ dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
+ __func__, irq_stat);
+
+ /* write-1-to-clear the handled bits in the status register */
+ dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
+}
+
+/*
+ * Threaded half of the power event IRQ: if the controller is still in LPM,
+ * run the resume path (which ends by calling dwc3_pwr_event_handler());
+ * otherwise handle the latched power events directly.
+ */
+static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
+{
+ struct dwc3_msm *mdwc = _mdwc;
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(mdwc->dev, "%s\n", __func__);
+
+ if (!atomic_read(&dwc->in_lpm))
+ dwc3_pwr_event_handler(mdwc);
+ else
+ dwc3_resume_work(&mdwc->resume_work);
+
+ dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_dwc3_pwr_irq - hard-IRQ half of the power event interrupt. Either
+ * handles the event inline (clocks on) or defers to the threaded handler
+ * after flagging that a full LPM exit is required.
+ */
+static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
+{
+ struct dwc3_msm *mdwc = data;
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ /* timestamp for latency debugging */
+ dwc->t_pwr_evt_irq = ktime_get();
+ dev_dbg(mdwc->dev, "%s received\n", __func__);
+ /*
+ * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
+ * which interrupts have been triggered, as the clocks are disabled.
+ * Resume controller by waking up pwr event irq thread.After re-enabling
+ * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
+ * all other power events.
+ */
+ if (atomic_read(&dwc->in_lpm)) {
+ /* set this to call dwc3_msm_resume() */
+ mdwc->resume_pending = true;
+ return IRQ_WAKE_THREAD;
+ }
+
+ dwc3_pwr_event_handler(mdwc);
+ return IRQ_HANDLED;
+}
+
+/* Re-pin the controller IRQ to its preferred CPU when that CPU comes online. */
+static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct dwc3_msm *mdwc =
+ container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
+ uint32_t cpu = (uintptr_t)hcpu;
+
+ if (action == CPU_ONLINE && cpu == cpu_to_affin) {
+ pr_debug("%s: cpu online:%u irq:%d\n", __func__,
+ cpu_to_affin, mdwc->irq_to_affin);
+ irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
+ }
+
+ return NOTIFY_OK;
+}
+
+static void dwc3_otg_sm_work(struct work_struct *w);
+
+/*
+ * dwc3_msm_get_clk_gdsc - acquire all clock and GDSC handles at probe time.
+ *
+ * Mandatory resources (xo, iface, core, sleep, utmi clocks, core reset and
+ * the qcom,core-clk-rate DT property) fail the probe; bus_aggr, noc_aggr
+ * and the GDSC are optional and simply left NULL when absent. cfg_ahb_clk
+ * is required only if named in the DT clock-names list.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
+{
+ int ret;
+
+ /* GDSC is optional */
+ mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
+ if (IS_ERR(mdwc->dwc3_gdsc))
+ mdwc->dwc3_gdsc = NULL;
+
+ mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
+ if (IS_ERR(mdwc->xo_clk)) {
+ dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
+ __func__);
+ ret = PTR_ERR(mdwc->xo_clk);
+ return ret;
+ }
+ clk_set_rate(mdwc->xo_clk, 19200000);
+
+ mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
+ if (IS_ERR(mdwc->iface_clk)) {
+ dev_err(mdwc->dev, "failed to get iface_clk\n");
+ ret = PTR_ERR(mdwc->iface_clk);
+ return ret;
+ }
+
+ /*
+ * DWC3 Core requires its CORE CLK (aka master / bus clk) to
+ * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
+ * On newer platform it can run at 150MHz as well.
+ */
+ mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
+ if (IS_ERR(mdwc->core_clk)) {
+ dev_err(mdwc->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdwc->core_clk);
+ return ret;
+ }
+
+ if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
+ (u32 *)&mdwc->core_clk_rate)) {
+ dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
+ return -EINVAL;
+ }
+
+ /* snap the DT rate to the nearest rate the clock tree supports */
+ mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
+ mdwc->core_clk_rate);
+
+ dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
+ mdwc->core_clk_rate);
+ ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+ if (ret)
+ dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
+
+ /* optional lower rate used for HS-only host sessions */
+ if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
+ (u32 *)&mdwc->core_clk_rate_hs)) {
+ dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
+ mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
+ }
+
+ mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
+ if (IS_ERR(mdwc->core_reset)) {
+ dev_err(mdwc->dev, "failed to get core_reset\n");
+ return PTR_ERR(mdwc->core_reset);
+ }
+
+ mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
+ if (IS_ERR(mdwc->sleep_clk)) {
+ dev_err(mdwc->dev, "failed to get sleep_clk\n");
+ ret = PTR_ERR(mdwc->sleep_clk);
+ return ret;
+ }
+
+ clk_set_rate(mdwc->sleep_clk, 32000);
+ mdwc->utmi_clk_rate = 19200000;
+ mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
+ if (IS_ERR(mdwc->utmi_clk)) {
+ dev_err(mdwc->dev, "failed to get utmi_clk\n");
+ ret = PTR_ERR(mdwc->utmi_clk);
+ return ret;
+ }
+
+ clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
+ /* optional aggregation clocks */
+ mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
+ if (IS_ERR(mdwc->bus_aggr_clk))
+ mdwc->bus_aggr_clk = NULL;
+
+ mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
+ if (IS_ERR(mdwc->noc_aggr_clk))
+ mdwc->noc_aggr_clk = NULL;
+
+ if (of_property_match_string(mdwc->dev->of_node,
+ "clock-names", "cfg_ahb_clk") >= 0) {
+ mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
+ if (IS_ERR(mdwc->cfg_ahb_clk)) {
+ ret = PTR_ERR(mdwc->cfg_ahb_clk);
+ mdwc->cfg_ahb_clk = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(mdwc->dev,
+ "failed to get cfg_ahb_clk ret %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * dwc3_msm_id_notifier - extcon EXTCON_USB_HOST callback.
+ *
+ * Caches Type-C orientation and the advertised cable speed, applies any
+ * one-shot user speed override (host attach only), and queues resume work
+ * when the ID state actually changes.
+ */
+static int dwc3_msm_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct extcon_dev *edev = ptr;
+ enum dwc3_id_state id;
+ int cc_state;
+ int speed;
+
+ if (!edev) {
+ dev_err(mdwc->dev, "%s: edev null\n", __func__);
+ goto done;
+ }
+
+ /* event != 0 means a host cable (ID grounded) */
+ id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
+
+ dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
+
+ cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+ if (cc_state < 0)
+ mdwc->typec_orientation = ORIENTATION_NONE;
+ else
+ mdwc->typec_orientation =
+ cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
+ /* cap the negotiated speed to what the HW supports */
+ speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+ dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+ if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+ /* one-shot sysfs override, consumed on host attach (id == 0) */
+ if (!id && mdwc->override_usb_speed) {
+ dwc->maximum_speed = mdwc->override_usb_speed;
+ dbg_event(0xFF, "override_usb_speed",
+ mdwc->override_usb_speed);
+ mdwc->override_usb_speed = 0;
+ }
+
+ if (mdwc->id_state != id) {
+ mdwc->id_state = id;
+ dbg_event(0xFF, "id_state", mdwc->id_state);
+ pm_stay_awake(mdwc->dev);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+
+/*
+ * Delayed worker: if VBUS is present but enumeration never progressed
+ * (floating D+/D- lines), drop vbus_active and queue a teardown — unless
+ * compliance mode is set, since USB 3.1 compliance equipment is usually
+ * reported as a floating charger because the HS dp/dm lines are never
+ * connected.
+ */
+static void check_for_sdp_connection(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc =
+ container_of(w, struct dwc3_msm, sdp_check.work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ if (!mdwc->vbus_active || mdwc->usb_compliance_mode)
+ return;
+
+ /* floating D+/D- lines detected */
+ if (dwc->gadget.state < USB_STATE_DEFAULT &&
+ dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
+ mdwc->vbus_active = 0;
+ dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+}
+
+/*
+ * dwc3_msm_vbus_notifier - extcon EXTCON_USB (VBUS) callback.
+ *
+ * Caches Type-C orientation, cable speed and self-powered state, then
+ * records the new VBUS state and queues resume work for a DRD controller
+ * (unless a restart is already in flight). Duplicate events are ignored.
+ */
+static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct extcon_dev *edev = ptr;
+ int cc_state;
+ int speed;
+ int self_powered;
+
+ if (!edev) {
+ dev_err(mdwc->dev, "%s: edev null\n", __func__);
+ goto done;
+ }
+
+ dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
+
+ /* no state change — nothing to do */
+ if (mdwc->vbus_active == event)
+ return NOTIFY_DONE;
+
+ mdwc->float_detected = false;
+ cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+ if (cc_state < 0)
+ mdwc->typec_orientation = ORIENTATION_NONE;
+ else
+ mdwc->typec_orientation =
+ cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
+ /* cap the negotiated speed to what the HW supports */
+ speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
+ dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+ if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+ self_powered = extcon_get_cable_state_(edev,
+ EXTCON_USB_TYPEC_MED_HIGH_CURRENT);
+ if (self_powered < 0)
+ dwc->gadget.is_selfpowered = 0;
+ else
+ dwc->gadget.is_selfpowered = self_powered;
+
+ mdwc->vbus_active = event;
+ if (dwc->is_drd && !mdwc->in_restart) {
+ dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
+ pm_stay_awake(mdwc->dev);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+/*
+ * dwc3_msm_extcon_register - hook up extcon notifiers for VBUS and ID.
+ *
+ * With no "extcon" DT property the driver proceeds only for host-only or
+ * DRD configurations. The first phandle provides the VBUS (EXTCON_USB)
+ * edev; an optional second phandle provides a separate ID
+ * (EXTCON_USB_HOST) edev, otherwise the first edev is reused for ID.
+ * -ENODEV from a phandle lookup is tolerated (that edev is skipped).
+ *
+ * Returns 0 on success or a negative errno; on failure any VBUS notifier
+ * already registered is unregistered again.
+ */
+static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
+{
+ struct device_node *node = mdwc->dev->of_node;
+ struct extcon_dev *edev;
+ struct dwc3 *dwc;
+ int ret = 0;
+
+ dwc = platform_get_drvdata(mdwc->dwc3);
+ if (!of_property_read_bool(node, "extcon")) {
+ dev_dbg(mdwc->dev, "extcon property doesn't exist\n");
+ if (usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST
+ || dwc->is_drd)
+ return 0;
+ dev_err(mdwc->dev, "Neither host nor DRD, fail probe\n");
+ return -EINVAL;
+ }
+
+ edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
+ if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
+ return PTR_ERR(edev);
+
+ if (!IS_ERR(edev)) {
+ mdwc->extcon_vbus = edev;
+ mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB,
+ &mdwc->vbus_nb);
+ if (ret < 0) {
+ dev_err(mdwc->dev, "failed to register notifier for USB\n");
+ return ret;
+ }
+ }
+
+ /* if a second phandle was provided, use it to get a separate edev */
+ if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
+ edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
+ if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
+ ret = PTR_ERR(edev);
+ goto err;
+ }
+ }
+
+ if (!IS_ERR(edev)) {
+ mdwc->extcon_id = edev;
+ mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+ &mdwc->id_nb);
+ if (ret < 0) {
+ dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ if (mdwc->extcon_vbus)
+ extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
+ &mdwc->vbus_nb);
+ return ret;
+}
+
+/* sysfs "mode" read: report the role as seen by the glue driver. */
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ const char *role = "none";
+
+ if (mdwc->vbus_active)
+ role = "peripheral";
+ else if (mdwc->id_state == DWC3_ID_GROUND)
+ role = "host";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", role);
+}
+
+/*
+ * sysfs "mode" write: force peripheral, host or no role, then kick the
+ * OTG state machine. Any unrecognized string maps to "none".
+ */
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ bool vbus = false;
+ enum dwc3_id_state id = DWC3_ID_FLOAT;
+
+ if (sysfs_streq(buf, "peripheral"))
+ vbus = true;
+ else if (sysfs_streq(buf, "host"))
+ id = DWC3_ID_GROUND;
+
+ mdwc->vbus_active = vbus;
+ mdwc->id_state = id;
+
+ dwc3_ext_event_notify(mdwc);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mode);
+
+/* This node only shows max speed supported dwc3 and it should be
+ * same as what is reported in udc/core.c max_speed node. For current
+ * operating gadget speed, query current_speed node which is implemented
+ * by udc/core.c
+ */
+/* sysfs "speed" read: report the controller's current maximum speed. */
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ const char *spd = usb_speed_string(dwc->maximum_speed);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", spd);
+}
+
+/*
+ * sysfs "speed" write: request a new maximum speed and restart the USB
+ * session so it takes effect. Only "full", "high" and "super" are valid —
+ * DEVSPD can only have values SS(0x4), HS(0x0) and FS(0x1) per the 3.20a
+ * data book, and xhci does not support a full-speed-only mode.
+ */
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ enum usb_device_speed new_speed;
+
+ if (sysfs_streq(buf, "full"))
+ new_speed = USB_SPEED_FULL;
+ else if (sysfs_streq(buf, "high"))
+ new_speed = USB_SPEED_HIGH;
+ else if (sysfs_streq(buf, "super"))
+ new_speed = USB_SPEED_SUPER;
+ else
+ return -EINVAL;
+
+ /*
+ * restart usb only works for device mode. Perform manual cable
+ * plug in/out for host mode restart.
+ */
+ if (new_speed != dwc->maximum_speed &&
+ new_speed <= dwc->max_hw_supp_speed) {
+ mdwc->override_usb_speed = new_speed;
+ schedule_work(&mdwc->restart_usb_work);
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(speed);
+
+static void msm_dwc3_perf_vote_work(struct work_struct *w);
+static void msm_dwc3_perf_vote_work(struct work_struct *w);
+
+/* sysfs read: report whether xHCI SS link-compliance mode is enabled. */
+static ssize_t xhci_link_compliance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s",
+ mdwc->xhci_ss_compliance_enable ? "y\n" : "n\n");
+}
+
+/* sysfs write: enable/disable xHCI SS link-compliance mode ("y"/"n"/...). */
+static ssize_t xhci_link_compliance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ bool enable;
+ int rc;
+
+ rc = strtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ mdwc->xhci_ss_compliance_enable = enable;
+ return count;
+}
+
+static DEVICE_ATTR_RW(xhci_link_compliance);
+
+/* sysfs read: report compliance-mode flag as 'Y' or 'N'. */
+static ssize_t usb_compliance_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ char flag = mdwc->usb_compliance_mode ? 'Y' : 'N';
+
+ return snprintf(buf, PAGE_SIZE, "%c\n", flag);
+}
+
+/* sysfs write: parse a boolean string directly into the compliance flag. */
+static ssize_t usb_compliance_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ int rc;
+
+ rc = strtobool(buf, &mdwc->usb_compliance_mode);
+ if (rc)
+ return rc;
+
+ return count;
+}
+static DEVICE_ATTR_RW(usb_compliance_mode);
+
+static int dwc3_msm_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node, *dwc3_node;
+ struct device *dev = &pdev->dev;
+ union power_supply_propval pval = {0};
+ struct dwc3_msm *mdwc;
+ struct dwc3 *dwc;
+ struct resource *res;
+ void __iomem *tcsr;
+ bool host_mode;
+ int ret = 0;
+ int ext_hub_reset_gpio;
+ u32 val;
+ char boot_marker[40];
+
+ mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
+ if (!mdwc)
+ return -ENOMEM;
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ platform_set_drvdata(pdev, mdwc);
+ mdwc->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&mdwc->req_complete_list);
+ INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
+ INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
+ INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
+ INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
+ INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
+ INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
+ INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
+
+ mdwc->sm_usb_wq = create_freezable_workqueue("k_sm_usb");
+ if (!mdwc->sm_usb_wq) {
+ pr_err("%s: Failed to create workqueue for sm_usb\n", __func__);
+ return -ENOMEM;
+ }
+
+ mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
+ if (!mdwc->dwc3_wq) {
+ pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Get all clks and gdsc reference */
+ ret = dwc3_msm_get_clk_gdsc(mdwc);
+ if (ret) {
+ dev_err(&pdev->dev, "error getting clock or gdsc.\n");
+ goto err;
+ }
+
+ mdwc->id_state = DWC3_ID_FLOAT;
+ set_bit(ID, &mdwc->inputs);
+
+ mdwc->charging_disabled = of_property_read_bool(node,
+ "qcom,charging-disabled");
+
+ ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
+ &mdwc->lpm_to_suspend_delay);
+ if (ret) {
+ dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
+ mdwc->lpm_to_suspend_delay = 0;
+ }
+
+ /*
+ * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
+ * DP and DM linestate transitions during low power mode.
+ */
+ mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
+ if (mdwc->hs_phy_irq < 0) {
+ dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
+ ret = -EINVAL;
+ goto err;
+ } else {
+ irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
+ msm_dwc3_pwr_irq,
+ msm_dwc3_pwr_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
+ | IRQF_ONESHOT, "hs_phy_irq", mdwc);
+ if (ret) {
+ dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
+ if (mdwc->ss_phy_irq < 0) {
+ dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
+ } else {
+ irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
+ msm_dwc3_pwr_irq,
+ msm_dwc3_pwr_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH
+ | IRQF_EARLY_RESUME | IRQF_ONESHOT,
+ "ss_phy_irq", mdwc);
+ if (ret) {
+ dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
+ if (mdwc->pwr_event_irq < 0) {
+ dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
+ ret = -EINVAL;
+ goto err;
+ } else {
+ /* will be enabled in dwc3_msm_resume() */
+ irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
+ msm_dwc3_pwr_irq,
+ msm_dwc3_pwr_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
+ "msm_dwc3", mdwc);
+ if (ret) {
+ dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
+ if (!res) {
+ dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
+ } else {
+ tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (IS_ERR_OR_NULL(tcsr)) {
+ dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
+ } else {
+ /* Enable USB3 on the primary USB port. */
+ writel_relaxed(0x1, tcsr);
+ /*
+ * Ensure that TCSR write is completed before
+ * USB registers initialization.
+ */
+ mb();
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
+ if (!res) {
+ dev_err(&pdev->dev, "missing memory base resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!mdwc->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ahb2phy_base");
+ if (res) {
+ mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
+ res->start, resource_size(res));
+ if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
+ dev_err(dev, "couldn't find ahb2phy_base addr.\n");
+ mdwc->ahb2phy_base = NULL;
+ } else {
+ /*
+ * On some targets cfg_ahb_clk depends upon usb gdsc
+ * regulator. If cfg_ahb_clk is enabled without
+ * turning on usb gdsc regulator clk is stuck off.
+ */
+ dwc3_msm_config_gdsc(mdwc, 1);
+ clk_prepare_enable(mdwc->cfg_ahb_clk);
+ /* Configure AHB2PHY for one wait state read/write*/
+ val = readl_relaxed(mdwc->ahb2phy_base +
+ PERIPH_SS_AHB2PHY_TOP_CFG);
+ if (val != ONE_READ_WRITE_WAIT) {
+ writel_relaxed(ONE_READ_WRITE_WAIT,
+ mdwc->ahb2phy_base +
+ PERIPH_SS_AHB2PHY_TOP_CFG);
+ /* complete above write before using USB PHY */
+ mb();
+ }
+ clk_disable_unprepare(mdwc->cfg_ahb_clk);
+ dwc3_msm_config_gdsc(mdwc, 0);
+ }
+ }
+
+ if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
+ mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
+ if (IS_ERR(mdwc->dbm)) {
+ dev_err(&pdev->dev, "unable to get dbm device\n");
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+ /*
+ * Add power event if the dbm indicates coming out of L1
+ * by interrupt
+ */
+ if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
+ if (!mdwc->pwr_event_irq) {
+ dev_err(&pdev->dev,
+ "need pwr_event_irq exiting L1\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ ext_hub_reset_gpio = of_get_named_gpio(node,
+ "qcom,ext-hub-reset-gpio", 0);
+
+ if (gpio_is_valid(ext_hub_reset_gpio)
+ && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
+ "qcom,ext-hub-reset-gpio"))) {
+ /* reset external hub */
+ gpio_direction_output(ext_hub_reset_gpio, 1);
+ /*
+ * Hub reset should be asserted for minimum 5microsec
+ * before deasserting.
+ */
+ usleep_range(5, 1000);
+ gpio_direction_output(ext_hub_reset_gpio, 0);
+ }
+
+ if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
+ &mdwc->tx_fifo_size))
+ dev_err(&pdev->dev,
+ "unable to read platform data tx fifo size\n");
+
+ mdwc->disable_host_mode_pm = of_property_read_bool(node,
+ "qcom,disable-host-mode-pm");
+
+ mdwc->no_wakeup_src_in_hostmode = of_property_read_bool(node,
+ "qcom,no-wakeup-src-in-hostmode");
+ if (mdwc->no_wakeup_src_in_hostmode)
+ dev_dbg(&pdev->dev, "dwc3 host not using wakeup source\n");
+
+ dwc3_set_notifier(&dwc3_msm_notify_event);
+
+ /* Assumes dwc3 is the first DT child of dwc3-msm */
+ dwc3_node = of_get_next_available_child(node, NULL);
+ if (!dwc3_node) {
+ dev_err(&pdev->dev, "failed to find dwc3 child\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add create dwc3 core\n");
+ of_node_put(dwc3_node);
+ goto err;
+ }
+
+ mdwc->dwc3 = of_find_device_by_node(dwc3_node);
+ of_node_put(dwc3_node);
+ if (!mdwc->dwc3) {
+ dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+ goto put_dwc3;
+ }
+
+ mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+ "usb-phy", 0);
+ if (IS_ERR(mdwc->hs_phy)) {
+ dev_err(&pdev->dev, "unable to get hsphy device\n");
+ ret = PTR_ERR(mdwc->hs_phy);
+ goto put_dwc3;
+ }
+ mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+ "usb-phy", 1);
+ if (IS_ERR(mdwc->ss_phy)) {
+ dev_err(&pdev->dev, "unable to get ssphy device\n");
+ ret = PTR_ERR(mdwc->ss_phy);
+ goto put_dwc3;
+ }
+
+ mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (mdwc->bus_scale_table) {
+ mdwc->bus_perf_client =
+ msm_bus_scale_register_client(mdwc->bus_scale_table);
+ }
+
+ dwc = platform_get_drvdata(mdwc->dwc3);
+ if (!dwc) {
+ dev_err(&pdev->dev, "Failed to get dwc3 device\n");
+ goto put_dwc3;
+ }
+
+ mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
+ mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
+
+ if (cpu_to_affin)
+ register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+ /*
+ * Clocks and regulators will not be turned on until the first time
+ * runtime PM resume is called. This is to allow for booting up with
+ * charger already connected so as not to disturb PHY line states.
+ */
+ mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
+ atomic_set(&dwc->in_lpm, 1);
+ pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
+ pm_runtime_use_autosuspend(mdwc->dev);
+ device_init_wakeup(mdwc->dev, 1);
+
+ if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
+ pm_runtime_get_noresume(mdwc->dev);
+
+ mdwc->check_for_float = of_property_read_bool(node,
+ "qcom,check-for-float");
+ ret = dwc3_msm_extcon_register(mdwc);
+ if (ret)
+ goto put_dwc3;
+
+ ret = of_property_read_u32(node, "qcom,pm-qos-latency",
+ &mdwc->pm_qos_latency);
+ if (ret) {
+ dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
+ mdwc->pm_qos_latency = 0;
+ }
+
+ mdwc->usb_psy = power_supply_get_by_name("usb");
+ if (!mdwc->usb_psy) {
+ dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+ pval.intval = -EINVAL;
+ } else {
+ power_supply_get_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ }
+
+ mutex_init(&mdwc->suspend_resume_mutex);
+ /* Update initial VBUS/ID state from extcon */
+ if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
+ EXTCON_USB))
+ dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
+ else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
+ EXTCON_USB_HOST))
+ dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
+ else if (!pval.intval) {
+ /* USB cable is not connected */
+ queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0);
+ } else {
+ if (pval.intval > 0)
+ dev_info(mdwc->dev, "charger detection in progress\n");
+ }
+
+ device_create_file(&pdev->dev, &dev_attr_mode);
+ device_create_file(&pdev->dev, &dev_attr_speed);
+ device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
+ device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+
+ host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
+ if (host_mode ||
+ (dwc->is_drd && !of_property_read_bool(node, "extcon"))) {
+ dev_dbg(&pdev->dev, "DWC3 in default host mode\n");
+ mdwc->host_only_mode = true;
+ mdwc->id_state = DWC3_ID_GROUND;
+ dwc3_ext_event_notify(mdwc);
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER %s Host Ready", dev_name(&pdev->dev));
+ } else {
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER %s Device Ready", dev_name(&pdev->dev));
+ }
+
+ place_marker(boot_marker);
+
+ return 0;
+
+put_dwc3:
+ if (mdwc->bus_perf_client)
+ msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+ of_platform_depopulate(&pdev->dev);
+err:
+ destroy_workqueue(mdwc->dwc3_wq);
+ return ret;
+}
+
+static int dwc3_msm_remove(struct platform_device *pdev)
+{
+ struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
+ int ret_pm;
+
+ device_remove_file(&pdev->dev, &dev_attr_mode);
+ device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
+
+ if (cpu_to_affin)
+ unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+ /*
+ * In case of system suspend, pm_runtime_get_sync fails.
+ * Hence turn ON the clocks manually.
+ */
+ ret_pm = pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "Remov gsyn", ret_pm);
+ if (ret_pm < 0) {
+ dev_err(mdwc->dev,
+ "pm_runtime_get_sync failed with %d\n", ret_pm);
+ if (mdwc->noc_aggr_clk)
+ clk_prepare_enable(mdwc->noc_aggr_clk);
+ clk_prepare_enable(mdwc->utmi_clk);
+ clk_prepare_enable(mdwc->core_clk);
+ clk_prepare_enable(mdwc->iface_clk);
+ clk_prepare_enable(mdwc->sleep_clk);
+ if (mdwc->bus_aggr_clk)
+ clk_prepare_enable(mdwc->bus_aggr_clk);
+ clk_prepare_enable(mdwc->xo_clk);
+ }
+
+ cancel_delayed_work_sync(&mdwc->perf_vote_work);
+ cancel_delayed_work_sync(&mdwc->sm_work);
+
+ if (mdwc->hs_phy)
+ mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+ of_platform_depopulate(&pdev->dev);
+
+ dbg_event(0xFF, "Remov put", 0);
+ pm_runtime_disable(mdwc->dev);
+ pm_runtime_barrier(mdwc->dev);
+ pm_runtime_put_sync(mdwc->dev);
+ pm_runtime_set_suspended(mdwc->dev);
+ device_wakeup_disable(mdwc->dev);
+
+ if (mdwc->bus_perf_client)
+ msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+
+ if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
+ regulator_disable(mdwc->vbus_reg);
+
+ disable_irq(mdwc->hs_phy_irq);
+ if (mdwc->ss_phy_irq)
+ disable_irq(mdwc->ss_phy_irq);
+ disable_irq(mdwc->pwr_event_irq);
+
+ clk_disable_unprepare(mdwc->utmi_clk);
+ clk_set_rate(mdwc->core_clk, 19200000);
+ clk_disable_unprepare(mdwc->core_clk);
+ clk_disable_unprepare(mdwc->iface_clk);
+ clk_disable_unprepare(mdwc->sleep_clk);
+ clk_disable_unprepare(mdwc->xo_clk);
+ clk_put(mdwc->xo_clk);
+
+ dwc3_msm_config_gdsc(mdwc, 0);
+
+ return 0;
+}
+
/*
 * dwc3_msm_host_notifier - USB device add/remove notifier in host mode.
 *
 * For devices attached directly to the root hub, scales the core clock
 * down to the HS rate when the SS root-hub port is idle and informs the
 * PMIC (via the "usb" power supply) of the device's configured max power
 * so it can optimize its boost regulator. Always returns NOTIFY_DONE.
 */
static int dwc3_msm_host_notifier(struct notifier_block *nb,
	unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct usb_device *udev = ptr;
	union power_supply_propval pval;
	unsigned max_power;

	if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
		return NOTIFY_DONE;

	/* Lazily resolve the "usb" power supply; bail out if unavailable */
	if (!mdwc->usb_psy) {
		mdwc->usb_psy = power_supply_get_by_name("usb");
		if (!mdwc->usb_psy)
			return NOTIFY_DONE;
	}

	/*
	 * For direct-attach devices, new udev is direct child of root hub
	 * i.e. dwc -> xhci -> root_hub -> udev
	 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
	 */
	if (udev->parent && !udev->parent->parent &&
			udev->dev.parent->parent == &dwc->xhci->dev) {
		if (event == USB_DEVICE_ADD && udev->actconfig) {
			if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
				/*
				 * Core clock rate can be reduced only if root
				 * hub SS port is not enabled/connected.
				 */
				clk_set_rate(mdwc->core_clk,
				mdwc->core_clk_rate_hs);
				dev_dbg(mdwc->dev,
					"set hs core clk rate %ld\n",
					mdwc->core_clk_rate_hs);
				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
			} else {
				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
			}

			/*
			 * bMaxPower units: 8 mA for SuperSpeed and above,
			 * 2 mA otherwise (per the USB specifications).
			 */
			if (udev->speed >= USB_SPEED_SUPER)
				max_power = udev->actconfig->desc.bMaxPower * 8;
			else
				max_power = udev->actconfig->desc.bMaxPower * 2;

			dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
					dev_name(&udev->dev), max_power);

			/* inform PMIC of max power so it can optimize boost */
			pval.intval = max_power * 1000;	/* mA -> uA */
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
		} else {
			/* device removed (or added unconfigured): zero boost */
			pval.intval = 0;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);

			/* set rate back to default core clk rate */
			clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
				mdwc->core_clk_rate);
			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
		}
	}

	return NOTIFY_DONE;
}
+
+static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
+{
+ static bool curr_perf_mode;
+ int latency = mdwc->pm_qos_latency;
+
+ if ((curr_perf_mode == perf_mode) || !latency)
+ return;
+
+ if (perf_mode)
+ pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
+ else
+ pm_qos_update_request(&mdwc->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+
+ curr_perf_mode = perf_mode;
+ pr_debug("%s: latency updated to: %d\n", __func__,
+ perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
+}
+
+static void msm_dwc3_perf_vote_work(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+ perf_vote_work.work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ bool in_perf_mode = false;
+ int latency = mdwc->pm_qos_latency;
+
+ if (!latency)
+ return;
+
+ if (dwc->irq_cnt - dwc->last_irq_cnt >= PM_QOS_THRESHOLD)
+ in_perf_mode = true;
+
+ pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
+ __func__, in_perf_mode, (dwc->irq_cnt - dwc->last_irq_cnt));
+
+ dwc->last_irq_cnt = dwc->irq_cnt;
+ msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
+ schedule_delayed_work(&mdwc->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+}
+
/* Retry interval used by the OTG state machine when VBUS regulator defers */
#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))

/**
 * dwc3_otg_start_host - helper function for starting/stopping the host controller driver.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: start / stop the host controller driver.
 *
 * On start: enables VBUS, switches the core to host mode, registers USB
 * notifiers, adds the xHCI platform device and arms the PM QoS perf vote.
 * On stop: undoes all of the above and block-resets the hardware so
 * peripheral mode works afterwards.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
				"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		/* Bring the controller out of LPM before touching it */
		pm_runtime_get_sync(mdwc->dev);
		if (mdwc->core_init_failed) {
			dev_err(mdwc->dev, "%s: Core init failed\n", __func__);
			pm_runtime_put_sync_suspend(mdwc->dev);
			/* -EAGAIN tells the state machine to retry */
			return -EAGAIN;
		}

		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		if (dwc->maximum_speed == USB_SPEED_SUPER) {
			mdwc->ss_phy->flags |= PHY_HOST_MODE;
			usb_phy_notify_connect(mdwc->ss_phy,
						USB_SPEED_SUPER);
		}

		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		dbg_event(0xFF, "StrtHost gync",
			atomic_read(&mdwc->dev->power.usage_count));
		/*
		 * NOTE(review): if vbus_reg is an error pointer, ret stays 0
		 * and host start proceeds without driving VBUS (e.g. when an
		 * external supply drives it) — confirm this is intentional.
		 */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "vregerr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/* watch device add/remove to scale clocks and PMIC boost */
		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
		usb_register_notify(&mdwc->host_nb);

		mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
		usb_register_atomic_notify(&mdwc->usbdev_nb);
		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* unwind VBUS, PHY flags and PM vote taken above */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "pdeverr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			usb_unregister_notify(&mdwc->host_nb);
			return ret;
		}

		/*
		 * If the Compliance Transition Capability(CTC) flag of
		 * HCCPARAMS2 register is set and xhci_link_compliance sysfs
		 * param has been enabled by the user for the SuperSpeed host
		 * controller, then write 10 (Link in Compliance Mode State)
		 * onto the Port Link State(PLS) field of the PORTSC register
		 * for 3.0 host controller which is at an offset of USB3_PORTSC
		 * + 0x10 from the DWC3 base address. Also, disable the runtime
		 * PM of 3.0 root hub (root hub of shared_hcd of xhci device)
		 */
		if (HCC_CTC(dwc3_msm_read_reg(mdwc->base, USB3_HCCPARAMS2))
				&& mdwc->xhci_ss_compliance_enable
				&& dwc->maximum_speed == USB_SPEED_SUPER) {
			dwc3_msm_write_reg(mdwc->base, USB3_PORTSC + 0x10,
					0x10340);
			pm_runtime_disable(&hcd_to_xhci(platform_get_drvdata(
				dwc->xhci))->shared_hcd->self.root_hub->dev);
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		dbg_event(0xFF, "StrtHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
#ifdef CONFIG_SMP
		/* pin the QoS request to the CPU servicing the dwc3 IRQ */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		usb_unregister_atomic_notify(&mdwc->usbdev_nb);
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StopHost gsync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
			usb_phy_notify_disconnect(mdwc->ss_phy,
					USB_SPEED_SUPER);
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		}

		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);
		usb_unregister_notify(&mdwc->host_nb);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		if (!mdwc->host_only_mode)
			dwc3_post_host_reset_core_init(dwc);

		/* wait for LPM, to ensure h/w is reset after stop_host */
		set_bit(WAIT_FOR_LPM, &mdwc->inputs);

		pm_runtime_put_sync_suspend(mdwc->dev);
		dbg_event(0xFF, "StopHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
	}

	return 0;
}
+
+static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
+{
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ /* Update OTG VBUS Valid from HSPHY to controller */
+ dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
+ vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
+ UTMI_OTG_VBUS_VALID,
+ vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
+
+ /* Update only if Super Speed is supported */
+ if (dwc->maximum_speed == USB_SPEED_SUPER) {
+ /* Update VBUS Valid from SSPHY to controller */
+ dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
+ LANE0_PWR_PRESENT,
+ vbus_present ? LANE0_PWR_PRESENT : 0);
+ }
+}
+
/**
 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: Turn ON/OFF the gadget.
 *
 * On start: raises the VBUS override, notifies the PHYs, DBM-resets the
 * hardware, switches the core to device mode, connects the gadget and
 * arms the PM QoS perf vote. On stop: undoes all of the above.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* Keep the controller out of LPM while (re)configuring it */
	pm_runtime_get_sync(mdwc->dev);
	dbg_event(0xFF, "StrtGdgt gsync",
		atomic_read(&mdwc->dev->power.usage_count));

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		mdwc->in_device_mode = true;
		usb_gadget_vbus_connect(&dwc->gadget);
#ifdef CONFIG_SMP
		/* pin the QoS request to the CPU servicing the dwc3 IRQ */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		mdwc->in_device_mode = false;
		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);

		/* wait for LPM, to ensure h/w is reset after stop_peripheral */
		set_bit(WAIT_FOR_LPM, &mdwc->inputs);
	}

	pm_runtime_put_sync(mdwc->dev);
	dbg_event(0xFF, "StopGdgt psync",
		atomic_read(&mdwc->dev->power.usage_count));

	return 0;
}
+
+int get_psy_type(struct dwc3_msm *mdwc)
+{
+ union power_supply_propval pval = {0};
+
+ if (mdwc->charging_disabled)
+ return -EINVAL;
+
+ if (!mdwc->usb_psy) {
+ mdwc->usb_psy = power_supply_get_by_name("usb");
+ if (!mdwc->usb_psy) {
+ dev_err(mdwc->dev, "Could not get usb psy\n");
+ return -ENODEV;
+ }
+ }
+
+ power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
+ &pval);
+
+ return pval.intval;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+{
+ union power_supply_propval pval = {0};
+ int ret, psy_type;
+
+ psy_type = get_psy_type(mdwc);
+ if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT
+ || (mdwc->check_for_float && mdwc->float_detected)) {
+ if (!mA)
+ pval.intval = -ETIMEDOUT;
+ else
+ pval.intval = 1000 * mA;
+ goto set_prop;
+ }
+
+ if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+ return 0;
+
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+
+set_prop:
+ ret = power_supply_set_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
+ if (ret) {
+ dev_dbg(mdwc->dev, "power supply error when setting property\n");
+ return ret;
+ }
+
+ mdwc->max_power = mA;
+ return 0;
+}
+
+
/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * Dual-role state machine driven by the ID / B_SESS_VLD / B_SUSPEND /
 * WAIT_FOR_LPM input bits. Transitions between idle, peripheral,
 * peripheral-suspend and host states, balancing a runtime-PM usage
 * count across cable connect/disconnect and bus suspend/resume.
 *
 * NOTE: After any change in drd_state, we must reschedule the state machine.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
	struct dwc3 *dwc = NULL;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;
	const char *state;

	if (mdwc->dwc3)
		dwc = platform_get_drvdata(mdwc->dwc3);

	if (!dwc) {
		dev_err(mdwc->dev, "dwc is NULL.\n");
		return;
	}

	state = dwc3_drd_state_string(mdwc->drd_state);
	dev_dbg(mdwc->dev, "%s state\n", state);
	dbg_event(0xFF, state, 0);

	/* Check OTG state */
	switch (mdwc->drd_state) {
	case DRD_STATE_UNDEFINED:
		/* put controller and phy in suspend if no cable connected */
		if (test_bit(ID, &mdwc->inputs) &&
				!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dbg_event(0xFF, "undef_id_!bsv", 0);
			pm_runtime_set_active(mdwc->dev);
			pm_runtime_enable(mdwc->dev);
			pm_runtime_get_noresume(mdwc->dev);
			dwc3_msm_resume(mdwc);
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "Undef NoUSB",
				atomic_read(&mdwc->dev->power.usage_count));
			mdwc->drd_state = DRD_STATE_IDLE;
			break;
		}

		/* cable present at boot: enter IDLE and re-evaluate inputs */
		dbg_event(0xFF, "Exit UNDEF", 0);
		mdwc->drd_state = DRD_STATE_IDLE;
		pm_runtime_set_suspended(mdwc->dev);
		pm_runtime_enable(mdwc->dev);
		/* fall-through */
	case DRD_STATE_IDLE:
		/* don't change roles until hardware has settled into LPM */
		if (test_bit(WAIT_FOR_LPM, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "still not in lpm, wait.\n");
			break;
		}

		if (!test_bit(ID, &mdwc->inputs)) {
			/* ID grounded: move towards host mode */
			dev_dbg(mdwc->dev, "!id\n");
			mdwc->drd_state = DRD_STATE_HOST_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "b_sess_vld\n");
			mdwc->float_detected = false;
			/* re-check floating chargers reported as SDP later */
			if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
				queue_delayed_work(mdwc->dwc3_wq,
						&mdwc->sdp_check,
				msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
			/*
			 * Increment pm usage count upon cable connect. Count
			 * is decremented in DRD_STATE_PERIPHERAL state on
			 * cable disconnect or in bus suspend.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "BIDLE gsync",
				atomic_read(&mdwc->dev->power.usage_count));
			if (mdwc->check_for_float) {
				/*
				 * If DP_DM are found to be floating, do not
				 * start the peripheral mode.
				 */
				if (usb_phy_dpdm_with_idp_src(mdwc->hs_phy) ==
						DP_DM_STATE_FLOAT) {
					mdwc->float_detected = true;
					dwc3_msm_gadget_vbus_draw(mdwc, 0);
					pm_runtime_put_sync(mdwc->dev);
					dbg_event(0xFF, "FLT sync", atomic_read(
						&mdwc->dev->power.usage_count));
					break;
				}
			}
			dwc3_otg_start_peripheral(mdwc, 1);
			mdwc->drd_state = DRD_STATE_PERIPHERAL;
			work = 1;
			break;
		} else {
			/* no cable at all: drop charger vote, allow sleep */
			dwc3_msm_gadget_vbus_draw(mdwc, 0);
			pm_relax(mdwc->dev);
			dev_dbg(mdwc->dev, "Cable disconnected\n");
		}
		break;

	case DRD_STATE_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
				!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id || !bsv\n");
			mdwc->drd_state = DRD_STATE_IDLE;
			cancel_delayed_work_sync(&mdwc->sdp_check);
			dwc3_otg_start_peripheral(mdwc, 0);
			/*
			 * Decrement pm usage count upon cable disconnect
			 * which was incremented upon cable connect in
			 * DRD_STATE_IDLE state
			 */
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "!BSV psync",
				atomic_read(&mdwc->dev->power.usage_count));
			work = 1;
		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
			test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
			mdwc->drd_state = DRD_STATE_PERIPHERAL_SUSPEND;
			/*
			 * Decrement pm usage count upon bus suspend.
			 * Count was incremented either upon cable
			 * connect in DRD_STATE_IDLE or host
			 * initiated resume after bus suspend in
			 * DRD_STATE_PERIPHERAL_SUSPEND state
			 */
			pm_runtime_mark_last_busy(mdwc->dev);
			pm_runtime_put_autosuspend(mdwc->dev);
			dbg_event(0xFF, "SUSP put",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case DRD_STATE_PERIPHERAL_SUSPEND:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
			mdwc->drd_state = DRD_STATE_IDLE;
			cancel_delayed_work_sync(&mdwc->sdp_check);
			dwc3_otg_start_peripheral(mdwc, 0);
		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP !susp\n");
			mdwc->drd_state = DRD_STATE_PERIPHERAL;
			/*
			 * Increment pm usage count upon host
			 * initiated resume. Count was decremented
			 * upon bus suspend in
			 * DRD_STATE_PERIPHERAL state.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "!SUSP gsync",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case DRD_STATE_HOST_IDLE:
		/* Switch to A-Device */
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			mdwc->drd_state = DRD_STATE_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			mdwc->drd_state = DRD_STATE_HOST;
			ret = dwc3_otg_start_host(mdwc, 1);
			if ((ret == -EPROBE_DEFER) &&
						mdwc->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				mdwc->drd_state = DRD_STATE_HOST_IDLE;
				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				mdwc->vbus_retry_count++;
			} else if (ret == -EAGAIN) {
				/* core init failed; retry immediately */
				mdwc->drd_state = DRD_STATE_HOST_IDLE;
				dev_dbg(mdwc->dev, "Core init failed. Retrying...\n");
				work = 1;
			} else if (ret) {
				dev_err(mdwc->dev, "unable to start host\n");
				mdwc->drd_state = DRD_STATE_HOST_IDLE;
				goto ret;
			}
			if (mdwc->no_wakeup_src_in_hostmode) {
				pm_wakeup_event(mdwc->dev,
						DWC3_WAKEUP_SRC_TIMEOUT);
			}
		}
		break;

	case DRD_STATE_HOST:
		if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
			dbg_event(0xFF, "id || hc_died", 0);
			dev_dbg(mdwc->dev, "%s state id || hc_died\n", state);
			dwc3_otg_start_host(mdwc, 0);
			mdwc->drd_state = DRD_STATE_IDLE;
			mdwc->vbus_retry_count = 0;
			mdwc->hc_died = false;
			work = 1;
		} else {
			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
			dbg_event(0xFF, "XHCIResume", 0);
			/* dwc was verified non-NULL above; check is redundant */
			if (dwc)
				pm_runtime_resume(&dwc->xhci->dev);
			if (mdwc->no_wakeup_src_in_hostmode) {
				pm_wakeup_event(mdwc->dev,
						DWC3_WAKEUP_SRC_TIMEOUT);
			}
		}
		break;

	default:
		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);

	}

	if (work)
		queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, delay);

ret:
	return;
}
+
+static int dwc3_msm_pm_prepare(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ dev_dbg(dev, "dwc3-msm PM prepare,lpm:%u\n", atomic_read(&dwc->in_lpm));
+ dbg_event(0xFF, "PM Prep", 0);
+ if (!mdwc->in_host_mode || !mdwc->no_wakeup_src_in_hostmode)
+ return 0;
+
+ hcd = dev_get_drvdata(&dwc->xhci->dev);
+ xhci = hcd_to_xhci(hcd);
+ flush_delayed_work(&mdwc->sm_work);
+
+ /* If in lpm then prevent usb core to runtime_resume from pm_suspend */
+ if (atomic_read(&dwc->in_lpm)) {
+ hcd_to_bus(hcd)->skip_resume = true;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+ } else {
+ hcd_to_bus(hcd)->skip_resume = false;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = false;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_msm_pm_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(dev, "dwc3-msm PM suspend\n");
+ dbg_event(0xFF, "PM Sus", 0);
+
+ flush_workqueue(mdwc->dwc3_wq);
+ if (!atomic_read(&dwc->in_lpm) && !mdwc->no_wakeup_src_in_hostmode) {
+ dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
+ return -EBUSY;
+ }
+
+ ret = dwc3_msm_suspend(mdwc, false);
+ if (ret)
+ return ret;
+
+ flush_work(&mdwc->bus_vote_w);
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return 0;
+}
+
+static int dwc3_msm_pm_freeze(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM freeze\n");
+ dbg_event(0xFF, "PM Freeze", 0);
+
+ flush_workqueue(mdwc->dwc3_wq);
+
+ /* Resume the core to make sure we can power collapse it */
+ ret = dwc3_msm_resume(mdwc);
+
+ /*
+ * PHYs also need to be power collapsed, so call the notify_disconnect
+ * before suspend to ensure it.
+ */
+ usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+ if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+ usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+ mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+ }
+
+ mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+
+ ret = dwc3_msm_suspend(mdwc, true);
+ if (ret)
+ return ret;
+
+ flush_work(&mdwc->bus_vote_w);
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return 0;
+}
+
+static int dwc3_msm_pm_resume(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM resume\n");
+
+ dbg_event(0xFF, "PM Res", 0);
+
+ /* flush to avoid race in read/write of pm_suspended */
+ flush_workqueue(mdwc->dwc3_wq);
+ atomic_set(&mdwc->pm_suspended, 0);
+
+ /* Resume h/w in host mode as it may not be runtime suspended */
+ if (mdwc->no_wakeup_src_in_hostmode && !test_bit(ID, &mdwc->inputs))
+ dwc3_msm_resume(mdwc);
+
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+
+ return 0;
+}
+
+static int dwc3_msm_pm_restore(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM restore\n");
+ dbg_event(0xFF, "PM Restore", 0);
+
+ atomic_set(&mdwc->pm_suspended, 0);
+
+ dwc3_msm_resume(mdwc);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /* Restore PHY flags if hibernated in host mode */
+ if (mdwc->drd_state == DRD_STATE_HOST) {
+ mdwc->hs_phy->flags |= PHY_HOST_MODE;
+ if (mdwc->ss_phy) {
+ mdwc->ss_phy->flags |= PHY_HOST_MODE;
+ usb_phy_notify_connect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ }
+
+ usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int dwc3_msm_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "DWC3-msm runtime idle\n");
+ dbg_event(0xFF, "RT Idle", 0);
+
+ return 0;
+}
+
+static int dwc3_msm_runtime_suspend(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime suspend\n");
+ dbg_event(0xFF, "RT Sus", 0);
+
+ return dwc3_msm_suspend(mdwc, false);
+}
+
+static int dwc3_msm_runtime_resume(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime resume\n");
+ dbg_event(0xFF, "RT Res", 0);
+
+ return dwc3_msm_resume(mdwc);
+}
+#endif
+
+static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+ .prepare = dwc3_msm_pm_prepare,
+ .suspend = dwc3_msm_pm_suspend,
+ .resume = dwc3_msm_pm_resume,
+ .freeze = dwc3_msm_pm_freeze,
+ .thaw = dwc3_msm_pm_restore,
+ .poweroff = dwc3_msm_pm_suspend,
+ .restore = dwc3_msm_pm_restore,
+ SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
+ dwc3_msm_runtime_idle)
+};
+
+static const struct of_device_id of_dwc3_match[] = {
+	{
+		.compatible = "qcom,dwc-usb3-msm",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+
+static struct platform_driver dwc3_msm_driver = {
+	.probe		= dwc3_msm_probe,
+	.remove		= dwc3_msm_remove,
+	.driver		= {
+		.name	= "msm-dwc3",
+		.pm	= &dwc3_msm_dev_pm_ops,
+		.of_match_table	= of_dwc3_match,
+	},
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
+
+/* Annotated __init so the registration code is discarded after boot,
+ * matching the __exit annotation on dwc3_msm_exit().
+ */
+static int __init dwc3_msm_init(void)
+{
+	return platform_driver_register(&dwc3_msm_driver);
+}
+module_init(dwc3_msm_init);
+
+static void __exit dwc3_msm_exit(void)
+{
+ platform_driver_unregister(&dwc3_msm_driver);
+}
+module_exit(dwc3_msm_exit);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index ca631fea59e0..d631a1fead5c 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -34,6 +35,11 @@
#include "debug.h"
#include "gadget.h"
#include "io.h"
+
+
+static bool enable_dwc3_u1u2;
+module_param(enable_dwc3_u1u2, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_dwc3_u1u2, "Enable support for U1U2 low power modes");
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
@@ -232,11 +239,13 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
unsigned long flags;
int ret;
+ enum dwc3_link_state link_state;
+ u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
dwc3_trace(trace_dwc3_ep0,
- "trying to queue request %p to disabled %s",
+ "trying to queue request %pK to disabled %s",
request, dep->name);
ret = -ESHUTDOWN;
goto out;
@@ -248,8 +257,20 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
+	/* if link state is in L1, initiate remote wakeup before queuing req */
+ if (dwc->speed != DWC3_DSTS_SUPERSPEED) {
+ link_state = dwc3_get_link_state(dwc);
+ /* in HS this link state is same as L1 */
+ if (link_state == DWC3_LINK_STATE_U2) {
+ dwc->l1_remote_wakeup_cnt++;
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+ }
+
dwc3_trace(trace_dwc3_ep0,
- "queueing request %p to %s length %d state '%s'",
+ "queueing request %pK to %s length %d state '%s'",
request, dep->name, request->length,
dwc3_ep0_state_string(dwc->ep0state));
@@ -261,7 +282,7 @@ out:
return ret;
}
-static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -291,6 +312,7 @@ int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
+ dbg_event(dep->number, "EP0STAL", value);
dwc3_ep0_stall_and_restart(dwc);
return 0;
@@ -317,7 +339,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
ret = dwc3_ep0_start_trans(dwc, 0);
- WARN_ON(ret < 0);
+ if (WARN_ON_ONCE(ret < 0))
+ dbg_event(dwc->eps[0]->number, "EOUTSTART", ret);
}
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
@@ -343,12 +366,24 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
+
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&dwc->lock);
+ ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
+ spin_lock(&dwc->lock);
+ return ret;
+}
+
/*
* ch 9.4.5
*/
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl)
{
+ int ret;
struct dwc3_ep *dep;
u32 recip;
u32 reg;
@@ -369,6 +404,9 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
if (reg & DWC3_DCTL_INITU2ENA)
usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
+ } else {
+ usb_status |= dwc->gadget.remote_wakeup <<
+ USB_DEVICE_REMOTE_WAKEUP;
}
break;
@@ -378,7 +416,9 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
* Function Remote Wake Capable D0
* Function Remote Wakeup D1
*/
- break;
+
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ return ret;
case USB_RECIP_ENDPOINT:
dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
@@ -400,6 +440,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
dwc->ep0_usb_req.request.buf = dwc->setup_buf;
dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+ dwc->ep0_usb_req.request.dma = DMA_ERROR_CODE;
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
@@ -425,6 +466,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
+ pr_debug("%s(): remote wakeup :%s\n", __func__,
+ (set ? "enabled" : "disabled"));
+ dwc->gadget.remote_wakeup = set;
break;
/*
* 9.4.1 says only only for SS, in AddressState only for
@@ -436,6 +480,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
+ if (dwc->usb3_u1u2_disable && !enable_dwc3_u1u2)
+ return -EINVAL;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (set)
reg |= DWC3_DCTL_INITU1ENA;
@@ -450,6 +497,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
if (dwc->speed != DWC3_DSTS_SUPERSPEED)
return -EINVAL;
+ if (dwc->usb3_u1u2_disable && !enable_dwc3_u1u2)
+ return -EINVAL;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (set)
reg |= DWC3_DCTL_INITU2ENA;
@@ -484,6 +534,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
/* XXX enable remote wakeup */
;
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
@@ -545,22 +598,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return 0;
}
-static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
-{
- int ret;
-
- spin_unlock(&dwc->lock);
- ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
- spin_lock(&dwc->lock);
- return ret;
-}
-
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
enum usb_device_state state = dwc->gadget.state;
u32 cfg;
- int ret;
+ int ret, num;
u32 reg;
+ struct dwc3_ep *dep;
cfg = le16_to_cpu(ctrl->wValue);
@@ -569,6 +613,32 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
+ /*
+ * If tx-fifo-resize flag is not set for the controller, then
+ * do not clear existing allocated TXFIFO since we do not
+ * allocate it again in dwc3_gadget_resize_tx_fifos
+ */
+ if (dwc->needs_fifo_resize) {
+ /* Read ep0IN related TXFIFO size */
+ dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
+ DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
+ /* Clear existing TXFIFO for all IN eps except ep0 */
+ for (num = 0; num < dwc->num_in_eps; num++) {
+ dep = dwc->eps[(num << 1) | 1];
+ if (num) {
+ dwc3_writel(dwc->regs,
+ DWC3_GTXFIFOSIZ(num), 0);
+ dep->fifo_depth = 0;
+ } else {
+ dep->fifo_depth = dwc->last_fifo_depth;
+ }
+
+ dev_dbg(dwc->dev, "%s(): %s fifo_depth:%x\n",
+ __func__, dep->name, dep->fifo_depth);
+ dbg_event(0xFF, "fifo_reset", dep->number);
+ }
+ }
+
ret = dwc3_ep0_delegate_req(dwc, ctrl);
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
@@ -583,16 +653,16 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
usb_gadget_set_state(&dwc->gadget,
USB_STATE_CONFIGURED);
- /*
- * Enable transition to U1/U2 state when
- * nothing is pending from application.
- */
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
- dwc->resize_fifos = true;
- dwc3_trace(trace_dwc3_ep0, "resize FIFOs flag SET");
+ if (!dwc->usb3_u1u2_disable || enable_dwc3_u1u2) {
+ /*
+ * Enable transition to U1/U2 state when
+ * nothing is pending from application.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= (DWC3_DCTL_ACCEPTU1ENA |
+ DWC3_DCTL_ACCEPTU2ENA);
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
}
break;
@@ -649,7 +719,8 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
/* now that we have the time, issue DGCMD Set Sel */
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_PERIODIC_PAR, param);
- WARN_ON(ret < 0);
+ if (WARN_ON_ONCE(ret < 0))
+ dbg_event(dep->number, "ESET_SELCMPL", ret);
}
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -684,6 +755,7 @@ static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
dwc->ep0_usb_req.request.buf = dwc->setup_buf;
dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;
+ dwc->ep0_usb_req.request.dma = DMA_ERROR_CODE;
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
@@ -775,6 +847,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
}
+ dbg_setup(0x00, ctrl);
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
ret = dwc3_ep0_std_request(dwc, ctrl);
else
@@ -784,8 +857,10 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
dwc->delayed_status = true;
out:
- if (ret < 0)
+ if (ret < 0) {
+ dbg_event(0x0, "ERRSTAL", ret);
dwc3_ep0_stall_and_restart(dwc);
+ }
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
@@ -867,7 +942,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
if ((epnum & 1) && ur->actual < ur->length) {
/* for some reason we did not get everything out */
-
+ dbg_event(epnum, "INDATSTAL", 0);
dwc3_ep0_stall_and_restart(dwc);
} else {
dwc3_gadget_giveback(ep0, r, 0);
@@ -881,7 +956,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
0, DWC3_TRBCTL_CONTROL_DATA, false);
ret = dwc3_ep0_start_trans(dwc, epnum);
- WARN_ON(ret < 0);
+ if (WARN_ON_ONCE(ret < 0))
+ dbg_event(epnum, "ECTRL_DATA", ret);
}
}
}
@@ -912,6 +988,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
if (ret < 0) {
dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
dwc->test_mode_nr);
+ dbg_event(0x00, "INVALTEST", ret);
dwc3_ep0_stall_and_restart(dwc);
return;
}
@@ -921,6 +998,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
if (status == DWC3_TRBSTS_SETUP_PENDING)
dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
+ dbg_print(dep->number, "DONE", status, "STATUS");
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -1013,7 +1091,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = dwc3_ep0_start_trans(dwc, dep->number);
}
- WARN_ON(ret < 0);
+ dbg_queue(dep->number, &req->request, ret);
}
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
@@ -1031,13 +1109,11 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
- if (dwc->resize_fifos) {
- dwc3_trace(trace_dwc3_ep0, "Resizing FIFOs");
- dwc3_gadget_resize_tx_fifos(dwc);
- dwc->resize_fifos = 0;
- }
+ int ret;
- WARN_ON(dwc3_ep0_start_control_status(dep));
+ ret = dwc3_ep0_start_control_status(dep);
+ if (WARN_ON_ONCE(ret))
+ dbg_event(dep->number, "ECTRLSTATUS", ret);
}
static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
@@ -1048,13 +1124,18 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
__dwc3_ep0_do_control_status(dwc, dep);
}
-static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!dep->resource_index)
+ /*
+ * For status/DATA OUT stage, TRB will be queued on ep0 out
+ * endpoint for which resource index is zero. Hence allow
+ * queuing ENDXFER command for ep0 out endpoint.
+ */
+ if (!dep->resource_index && dep->number)
return;
cmd = DWC3_DEPCMD_ENDTRANSFER;
@@ -1062,18 +1143,28 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
- WARN_ON_ONCE(ret);
+ if (ret) {
+ dev_dbg(dwc->dev, "%s: send ep cmd ENDTRANSFER failed",
+ dep->name);
+ dbg_event(dep->number, "EENDXFER", ret);
+ }
dep->resource_index = 0;
}
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
+ u8 epnum;
+ struct dwc3_ep *dep;
+
dwc->setup_packet_pending = true;
+ epnum = event->endpoint_number;
+ dep = dwc->eps[epnum];
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
dwc3_trace(trace_dwc3_ep0, "Control Data");
+ dep->dbg_ep_events.control_data++;
/*
* We already have a DATA transfer in the controller's cache,
@@ -1090,6 +1181,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
dwc3_trace(trace_dwc3_ep0,
"Wrong direction for Data phase");
dwc3_ep0_end_control_data(dwc, dep);
+ dbg_event(epnum, "WRONGDR", 0);
dwc3_ep0_stall_and_restart(dwc);
return;
}
@@ -1097,6 +1189,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
break;
case DEPEVT_STATUS_CONTROL_STATUS:
+ dep->dbg_ep_events.control_status++;
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
return;
@@ -1105,7 +1198,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
- WARN_ON_ONCE(event->endpoint_number != 1);
+ if (event->endpoint_number != 1)
+ dbg_event(epnum, "EEPNUM", event->status);
dwc3_trace(trace_dwc3_ep0, "Delayed Status");
return;
}
@@ -1118,25 +1212,36 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
u8 epnum = event->endpoint_number;
+ struct dwc3_ep *dep;
dwc3_trace(trace_dwc3_ep0, "%s while ep%d%s in state '%s'",
dwc3_ep_event_string(event->endpoint_event),
epnum >> 1, (epnum & 1) ? "in" : "out",
dwc3_ep0_state_string(dwc->ep0state));
+ dep = dwc->eps[epnum];
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_ep0_xfer_complete(dwc, event);
+ dep->dbg_ep_events.xfercomplete++;
break;
case DWC3_DEPEVT_XFERNOTREADY:
dwc3_ep0_xfernotready(dwc, event);
+ dep->dbg_ep_events.xfernotready++;
break;
case DWC3_DEPEVT_XFERINPROGRESS:
+ dep->dbg_ep_events.xferinprogress++;
+ break;
case DWC3_DEPEVT_RXTXFIFOEVT:
+ dep->dbg_ep_events.rxtxfifoevent++;
+ break;
case DWC3_DEPEVT_STREAMEVT:
+ dep->dbg_ep_events.streamevent++;
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
+ dep->dbg_ep_events.epcmdcomplete++;
break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4b33aac86310..7e59f3708fa9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -22,19 +22,24 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup);
+static int dwc3_gadget_wakeup_int(struct dwc3 *dwc);
+
/**
* dwc3_gadget_set_test_mode - Enables USB2 Test Modes
* @dwc: pointer to our context structure
@@ -166,68 +172,65 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
*
* Unfortunately, due to many variables that's not always the case.
*/
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
+int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
{
- int last_fifo_depth = 0;
- int ram1_depth;
- int fifo_size;
- int mdwidth;
- int num;
+ int fifo_size, mdwidth, max_packet = 1024;
+ int tmp, mult = 1;
if (!dwc->needs_fifo_resize)
return 0;
- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
- mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ /* resize IN endpoints excepts ep0 */
+ if (!usb_endpoint_dir_in(dep->endpoint.desc) ||
+ dep->endpoint.ep_num == 0)
+ return 0;
+ /* Don't resize already resized IN endpoint */
+ if (dep->fifo_depth) {
+ dev_dbg(dwc->dev, "%s fifo_depth:%d is already set\n",
+ dep->endpoint.name, dep->fifo_depth);
+ return 0;
+ }
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth >>= 3;
- /*
- * FIXME For now we will only allocate 1 wMaxPacketSize space
- * for each enabled endpoint, later patches will come to
- * improve this algorithm so that we better use the internal
- * FIFO space
- */
- for (num = 0; num < dwc->num_in_eps; num++) {
- /* bit0 indicates direction; 1 means IN ep */
- struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
- int mult = 1;
- int tmp;
-
- if (!(dep->flags & DWC3_EP_ENABLED))
- continue;
-
- if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
- || usb_endpoint_xfer_isoc(dep->endpoint.desc))
- mult = 3;
-
- /*
- * REVISIT: the following assumes we will always have enough
- * space available on the FIFO RAM for all possible use cases.
- * Make sure that's true somehow and change FIFO allocation
- * accordingly.
- *
- * If we have Bulk or Isochronous endpoints, we want
- * them to be able to be very, very fast. So we're giving
- * those endpoints a fifo_size which is enough for 3 full
- * packets
- */
- tmp = mult * (dep->endpoint.maxpacket + mdwidth);
- tmp += mdwidth;
-
- fifo_size = DIV_ROUND_UP(tmp, mdwidth);
-
- fifo_size |= (last_fifo_depth << 16);
-
- dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
- dep->name, last_fifo_depth, fifo_size & 0xffff);
-
- dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
-
- last_fifo_depth += (fifo_size & 0xffff);
+ if (dep->endpoint.ep_type == EP_TYPE_GSI || dep->endpoint.endless)
+ mult = 3;
+
+ if (((dep->endpoint.maxburst > 1) &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc))
+ || usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ mult = 3;
+
+ tmp = ((max_packet + mdwidth) * mult) + mdwidth;
+ fifo_size = DIV_ROUND_UP(tmp, mdwidth);
+ dep->fifo_depth = fifo_size;
+ fifo_size |= (dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0)) & 0xffff0000)
+ + (dwc->last_fifo_depth << 16);
+ dwc->last_fifo_depth += (fifo_size & 0xffff);
+
+ dev_dbg(dwc->dev, "%s ep_num:%d last_fifo_depth:%04x fifo_depth:%d\n",
+ dep->endpoint.name, dep->endpoint.ep_num, dwc->last_fifo_depth,
+ dep->fifo_depth);
+
+ dbg_event(0xFF, "resize_fifo", dep->number);
+ dbg_event(0xFF, "fifo_depth", dep->fifo_depth);
+ /* Check fifo size allocation doesn't exceed available RAM size. */
+ if (dwc->tx_fifo_size &&
+ ((dwc->last_fifo_depth * mdwidth) >= dwc->tx_fifo_size)) {
+ dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+ (dwc->last_fifo_depth * mdwidth), dwc->tx_fifo_size,
+ dep->endpoint.name, fifo_size);
+ dwc->last_fifo_depth -= (fifo_size & 0xffff);
+ dep->fifo_depth = 0;
+ WARN_ON(1);
+ return -ENOMEM;
}
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->endpoint.ep_num),
+ fifo_size);
return 0;
}
@@ -274,11 +277,12 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
&req->request, req->direction);
}
- dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+ dev_dbg(dwc->dev, "request %pK from %s completed %d/%d ===> %d\n",
req, dep->name, req->request.actual,
req->request.length, status);
trace_dwc3_gadget_giveback(req);
+ dbg_done(dep->number, req->request.actual, req->request.status);
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
@@ -327,7 +331,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
struct dwc3_ep *dep = dwc->eps[ep];
- u32 timeout = 500;
+ u32 timeout = 3000;
u32 reg;
trace_dwc3_gadget_ep_cmd(dep, cmd, params);
@@ -343,7 +347,16 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
dwc3_trace(trace_dwc3_gadget,
"Command Complete --> %d",
DWC3_DEPCMD_STATUS(reg));
- if (DWC3_DEPCMD_STATUS(reg))
+
+			/* SW issues the START TRANSFER command to an isochronous
+			 * ep with a future frame interval. If that interval has
+			 * already passed when the core receives the command, it
+			 * responds with an error (bit 13 in the Command Complete
+			 * event). Hence return an error in this case.
+			 */
+ if (reg & 0x2000)
+ return -EAGAIN;
+ else if (DWC3_DEPCMD_STATUS(reg))
return -EINVAL;
return 0;
}
@@ -356,36 +369,39 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
if (!timeout) {
dwc3_trace(trace_dwc3_gadget,
"Command Timed Out");
+ dev_err(dwc->dev, "%s command timeout for %s\n",
+ dwc3_gadget_ep_cmd_string(cmd), dep->name);
+ if (!(cmd & DWC3_DEPCMD_ENDTRANSFER)) {
+ dwc->ep_cmd_timeout_cnt++;
+ dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
+ }
return -ETIMEDOUT;
}
-
- udelay(1);
+ if ((cmd & DWC3_DEPCMD_SETTRANSFRESOURCE))
+ udelay(20);
+ else
+ udelay(1);
} while (1);
}
-static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
- struct dwc3_trb *trb)
-{
- u32 offset = (char *) trb - (char *) dep->trb_pool;
-
- return dep->trb_pool_dma + offset;
-}
-
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
+ u32 num_trbs = DWC3_TRB_NUM;
if (dep->trb_pool)
return 0;
- dep->trb_pool = dma_alloc_coherent(dwc->dev,
- sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dep->trb_pool = dma_zalloc_coherent(dwc->dev,
+ sizeof(struct dwc3_trb) * num_trbs,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
dep->name);
return -ENOMEM;
}
+ dep->num_trbs = num_trbs;
return 0;
}
@@ -394,11 +410,27 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
- dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
- dep->trb_pool, dep->trb_pool_dma);
+ /* Freeing of GSI EP TRBs are handled by GSI EP ops. */
+ if (dep->endpoint.ep_type == EP_TYPE_GSI)
+ return;
- dep->trb_pool = NULL;
- dep->trb_pool_dma = 0;
+ /*
+ * Clean up ep ring to avoid getting xferInProgress due to stale trbs
+ * with HWO bit set from previous composition when update transfer cmd
+ * is issued.
+ */
+ if (dep->number > 1 && dep->trb_pool && dep->trb_pool_dma) {
+ memset(&dep->trb_pool[0], 0,
+ sizeof(struct dwc3_trb) * dep->num_trbs);
+ dbg_event(dep->number, "Clr_TRB", 0);
+
+ dma_free_coherent(dwc->dev,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool,
+ dep->trb_pool_dma);
+
+ dep->trb_pool = NULL;
+ dep->trb_pool_dma = 0;
+ }
}
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
@@ -493,8 +525,15 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
params.param2 |= dep->saved_state;
}
- params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
- | DWC3_DEPCFG_XFER_NOT_READY_EN;
+ if (!dep->endpoint.endless) {
+ pr_debug("%s(): enable xfer_complete_int for %s\n",
+ __func__, dep->endpoint.name);
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
+ | DWC3_DEPCFG_XFER_NOT_READY_EN;
+ } else {
+ pr_debug("%s(): disable xfer_complete_int for %s\n",
+ __func__, dep->endpoint.name);
+ }
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
@@ -502,7 +541,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
dep->stream_capable = true;
}
- if (!usb_endpoint_xfer_control(desc))
+ if (usb_endpoint_xfer_isoc(desc))
params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
/*
@@ -575,23 +614,36 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
if (!(dep->flags & DWC3_EP_ENABLED)) {
+ dep->endpoint.desc = desc;
+ dep->comp_desc = comp_desc;
+ dep->type = usb_endpoint_type(desc);
+ ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
+ if (ret) {
+ dep->endpoint.desc = NULL;
+ dep->comp_desc = NULL;
+ dep->type = 0;
+ return ret;
+ }
+
ret = dwc3_gadget_start_config(dwc, dep);
- if (ret)
+ if (ret) {
+ dev_err(dwc->dev, "start_config() failed for %s\n",
+ dep->name);
return ret;
+ }
}
ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
restore);
- if (ret)
+ if (ret) {
+ dev_err(dwc->dev, "set_ep_config() failed for %s\n", dep->name);
return ret;
+ }
if (!(dep->flags & DWC3_EP_ENABLED)) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
- dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
@@ -633,7 +685,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -671,7 +722,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
- dwc3_remove_requests(dwc, dep);
+ if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
+ dwc3_remove_requests(dwc, dep);
+ else if (dep->endpoint.ep_type == EP_TYPE_GSI)
+ dwc3_stop_active_transfer(dwc, dep->number, true);
/* make sure HW endpoint isn't stalled */
if (dep->flags & DWC3_EP_STALL)
@@ -687,9 +741,12 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->type = 0;
dep->flags = 0;
- snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+ /* Keep GSI ep names with "-gsi" suffix */
+ if (!strnstr(dep->name, "gsi", 10)) {
+ snprintf(dep->name, sizeof(dep->name), "ep%d%s",
dep->number >> 1,
(dep->number & 1) ? "in" : "out");
+ }
return 0;
}
@@ -718,7 +775,8 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
int ret;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
- pr_debug("dwc3: invalid parameters\n");
+ pr_debug("dwc3: invalid parameters. ep=%pK, desc=%pK, DT=%d\n",
+ ep, desc, desc ? desc->bDescriptorType : 0);
return -EINVAL;
}
@@ -738,6 +796,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
+ dbg_event(dep->number, "ENABLE", ret);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -759,13 +818,14 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep)
dwc = dep->dwc;
if (!(dep->flags & DWC3_EP_ENABLED)) {
- dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
- dep->name);
+ dev_dbg(dwc->dev, "%s is already disabled\n", dep->name);
+ dbg_event(dep->number, "ALRDY DISABLED", dep->flags);
return 0;
}
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_disable(dep);
+ dbg_event(dep->number, "DISABLE", ret);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -783,6 +843,7 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
req->epnum = dep->number;
req->dep = dep;
+ req->request.dma = DMA_ERROR_CODE;
trace_dwc3_alloc_request(req);
@@ -809,7 +870,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
{
struct dwc3_trb *trb;
- dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
+ dwc3_trace(trace_dwc3_gadget, "%s: req %pK dma %08llx length %d%s%s",
dep->name, req, (unsigned long long) dma,
length, last ? " last" : "",
chain ? " chain" : "");
@@ -844,11 +905,19 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
else
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+
+ if (!req->request.no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
break;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
trb->ctrl = DWC3_TRBCTL_NORMAL;
+ if (req->request.num_mapped_sgs > 0) {
+ if (!last && !chain &&
+ !req->request.no_interrupt)
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+ }
break;
default:
/*
@@ -858,9 +927,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
BUG();
}
- if (!req->request.no_interrupt && !chain)
- trb->ctrl |= DWC3_TRB_CTRL_IOC;
-
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
trb->ctrl |= DWC3_TRB_CTRL_CSP;
@@ -977,6 +1043,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (last_one)
break;
}
+ dbg_queue(dep->number, &req->request, trbs_left);
if (last_one)
break;
@@ -995,6 +1062,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
dwc3_prepare_one_trb(dep, req, dma, length,
last_one, false, 0);
+ dbg_queue(dep->number, &req->request, 0);
if (last_one)
break;
}
@@ -1005,7 +1073,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
int start_new)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3_request *req;
+ struct dwc3_request *req, *req1, *n;
struct dwc3 *dwc = dep->dwc;
int ret;
u32 cmd;
@@ -1035,6 +1103,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
}
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
+ dbg_event(dep->number, "NO REQ", 0);
return 0;
}
@@ -1053,6 +1122,35 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
if (ret < 0) {
dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
+ if ((ret == -EAGAIN) && start_new &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ /* If bit13 in Command complete event is set, software
+ * must issue ENDTRANSFER command and wait for
+ * Xfernotready event to queue the requests again.
+ */
+ if (!dep->resource_index) {
+ dep->resource_index =
+ dwc3_gadget_ep_get_transfer_index(dwc,
+ dep->number);
+ WARN_ON_ONCE(!dep->resource_index);
+ }
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ list_for_each_entry_safe_reverse(req1, n,
+ &dep->req_queued, list) {
+ req1->trb = NULL;
+ dwc3_gadget_move_request_list_front(req1);
+ if (req->request.num_mapped_sgs)
+ dep->busy_slot +=
+ req->request.num_mapped_sgs;
+ else
+ dep->busy_slot++;
+ if ((dep->busy_slot & DWC3_TRB_MASK) ==
+ DWC3_TRB_NUM - 1)
+ dep->busy_slot++;
+ }
+ return ret;
+ }
+
/*
* FIXME we need to iterate over the list of requests
* here and stop, unmap, free and del each of the linked
@@ -1079,6 +1177,9 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
u32 uf;
+ int ret;
+
+ dep->current_uf = cur_uf;
if (list_empty(&dep->request_list)) {
dwc3_trace(trace_dwc3_gadget,
@@ -1091,7 +1192,9 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
/* 4 micro frames in the future */
uf = cur_uf + dep->interval * 4;
- __dwc3_gadget_kick_transfer(dep, uf, 1);
+ ret = __dwc3_gadget_kick_transfer(dep, uf, 1);
+ if (ret < 0)
+ dbg_event(dep->number, "ISOC QUEUE", ret);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1110,6 +1213,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
struct dwc3 *dwc = dep->dwc;
int ret;
+ if (req->request.status == -EINPROGRESS) {
+ ret = -EBUSY;
+ dev_err(dwc->dev, "%s: %pK request already in queue",
+ dep->name, req);
+ return ret;
+ }
+
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
@@ -1137,20 +1247,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->request_list);
/*
- * If there are no pending requests and the endpoint isn't already
- * busy, we will just start the request straight away.
- *
- * This will save one IRQ (XFER_NOT_READY) and possibly make it a
- * little bit faster.
- */
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- !usb_endpoint_xfer_int(dep->endpoint.desc) &&
- !(dep->flags & DWC3_EP_BUSY)) {
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
- goto out;
- }
-
- /*
* There are a few special cases:
*
* 1. XferNotReady with empty list of requests. We need to kick the
@@ -1169,16 +1265,25 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* notion of current microframe.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if (list_empty(&dep->req_queued)) {
+ /* If xfernotready event is recieved before issuing
+ * START TRANSFER command, don't issue END TRANSFER.
+ * Rather start queueing the requests by issuing START
+ * TRANSFER command.
+ */
+ if (list_empty(&dep->req_queued) && dep->resource_index)
dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
- }
+ else
+ __dwc3_gadget_start_isoc(dwc, dep,
+ dep->current_uf);
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
return 0;
}
ret = __dwc3_gadget_kick_transfer(dep, 0, true);
if (!ret)
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ else if (ret != -EBUSY)
+ dbg_event(dep->number, "XfNR QUEUE", ret);
goto out;
}
@@ -1194,6 +1299,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
WARN_ON_ONCE(!dep->resource_index);
ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
false);
+ if (ret && ret != -EBUSY)
+ dbg_event(dep->number, "XfIP QUEUE", ret);
goto out;
}
@@ -1206,15 +1313,33 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
ret = __dwc3_gadget_kick_transfer(dep, 0, true);
out:
- if (ret && ret != -EBUSY)
+ if (ret && ret != -EBUSY) {
+ dbg_event(dep->number, "QUEUE err", ret);
dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
dep->name);
+ }
if (ret == -EBUSY)
ret = 0;
return ret;
}
+static int dwc3_gadget_wakeup(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ schedule_work(&dwc->wakeup_work);
+ return 0;
+}
+
+static bool dwc3_gadget_is_suspended(struct dwc3 *dwc)
+{
+ if (atomic_read(&dwc->in_lpm) ||
+ dwc->link_state == DWC3_LINK_STATE_U3)
+ return true;
+ return false;
+}
+
static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
struct usb_request *request)
{
@@ -1249,12 +1374,11 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
-
int ret;
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
- dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
+ dev_dbg(dwc->dev, "trying to queue request %pK to disabled %s\n",
request, ep->name);
ret = -ESHUTDOWN;
goto out;
@@ -1266,6 +1390,27 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
+ /*
+ * Queuing endless request to USB endpoint through generic ep queue
+ * API should not be allowed.
+ */
+ if (dep->endpoint.endless) {
+ dev_dbg(dwc->dev, "trying to queue endless request %p to %s\n",
+ request, ep->name);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return -EPERM;
+ }
+
+ if (dwc3_gadget_is_suspended(dwc)) {
+ if (dwc->gadget.remote_wakeup)
+ dwc3_gadget_wakeup(&dwc->gadget);
+ ret = dwc->gadget.remote_wakeup ? -EAGAIN : -ENOTSUPP;
+ goto out;
+ }
+
+ WARN(!dep->direction && (request->length % ep->desc->wMaxPacketSize),
+ "trying to queue unaligned request (%d)\n", request->length);
+
ret = __dwc3_gadget_ep_queue(dep, req);
/*
@@ -1296,6 +1441,11 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
unsigned long flags;
int ret = 0;
+ if (atomic_read(&dwc->in_lpm)) {
+ dev_err(dwc->dev, "Unable to dequeue while in LPM\n");
+ return -EAGAIN;
+ }
+
trace_dwc3_ep_dequeue(req);
spin_lock_irqsave(&dwc->lock, flags);
@@ -1322,6 +1472,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
out1:
+ dbg_event(dep->number, "DEQUEUE", 0);
/* giveback the request */
dwc3_gadget_giveback(dep, req, -ECONNRESET);
@@ -1337,11 +1488,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
struct dwc3 *dwc = dep->dwc;
int ret;
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
- return -EINVAL;
- }
-
memset(&params, 0x00, sizeof(params));
if (value) {
@@ -1382,8 +1528,21 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
int ret;
+ if (!ep->desc) {
+ dev_err(dwc->dev, "(%s)'s desc is NULL.\n", dep->name);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
+ dbg_event(dep->number, "HALT", value);
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+out:
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1397,6 +1556,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
int ret;
spin_lock_irqsave(&dwc->lock, flags);
+ dbg_event(dep->number, "WEDGE", 0);
dep->flags |= DWC3_EP_WEDGE;
if (dep->number == 0 || dep->number == 1)
@@ -1449,43 +1609,83 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
return DWC3_DSTS_SOFFN(reg);
}
-static int dwc3_gadget_wakeup(struct usb_gadget *g)
+#define DWC3_PM_RESUME_RETRIES 20 /* Max Number of retries */
+#define DWC3_PM_RESUME_DELAY 100 /* 100 msec */
+
+static void dwc3_gadget_wakeup_work(struct work_struct *w)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
+ struct dwc3 *dwc;
+ int ret;
+ static int retry_count;
- unsigned long timeout;
- unsigned long flags;
+ dwc = container_of(w, struct dwc3, wakeup_work);
- u32 reg;
+ ret = pm_runtime_get_sync(dwc->dev);
+ if (ret) {
+ /* pm_runtime_get_sync returns -EACCES error between
+ * late_suspend and early_resume, wait for system resume to
+ * finish and queue work again
+ */
+ pr_debug("PM runtime get sync failed, ret %d\n", ret);
+ if (ret == -EACCES) {
+ pm_runtime_put_noidle(dwc->dev);
+ if (retry_count == DWC3_PM_RESUME_RETRIES) {
+ retry_count = 0;
+ pr_err("pm_runtime_get_sync timed out\n");
+ return;
+ }
+ msleep(DWC3_PM_RESUME_DELAY);
+ retry_count++;
+ schedule_work(&dwc->wakeup_work);
+ return;
+ }
+ }
+ retry_count = 0;
+ dbg_event(0xFF, "Gdgwake gsyn",
+ atomic_read(&dwc->dev->power.usage_count));
- int ret = 0;
+ ret = dwc3_gadget_wakeup_int(dwc);
+
+ if (ret)
+ pr_err("Remote wakeup failed. ret = %d.\n", ret);
+ else
+ pr_debug("Remote wakeup succeeded.\n");
+ pm_runtime_put_noidle(dwc->dev);
+ dbg_event(0xFF, "Gdgwake put",
+ atomic_read(&dwc->dev->power.usage_count));
+}
+
+static int dwc3_gadget_wakeup_int(struct dwc3 *dwc)
+{
+ bool link_recover_only = false;
+
+ u32 reg;
+ int ret = 0;
u8 link_state;
- u8 speed;
+ unsigned long flags;
+ pr_debug("%s(): Entry\n", __func__);
+ disable_irq(dwc->irq);
spin_lock_irqsave(&dwc->lock, flags);
-
/*
* According to the Databook Remote wakeup request should
* be issued only when the device is in early suspend state.
*
* We can check that via USB Link State bits in DSTS register.
*/
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-
- speed = reg & DWC3_DSTS_CONNECTSPD;
- if (speed == DWC3_DSTS_SUPERSPEED) {
- dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
- ret = -EINVAL;
- goto out;
- }
-
- link_state = DWC3_DSTS_USBLNKST(reg);
+ link_state = dwc3_get_link_state(dwc);
switch (link_state) {
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
+ case DWC3_LINK_STATE_U1:
+ if (dwc->gadget.speed != USB_SPEED_SUPER) {
+ link_recover_only = true;
+ break;
+ }
+ /* Intentional fallthrough */
default:
dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
link_state);
@@ -1493,9 +1693,25 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
goto out;
}
+ /* Enable LINK STATUS change event */
+ reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ /*
+ * memory barrier is required to make sure that required events
+ * with core is enabled before performing RECOVERY mechnism.
+ */
+ mb();
+
ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
if (ret < 0) {
dev_err(dwc->dev, "failed to put link in Recovery\n");
+ /* Disable LINK STATUS change */
+ reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+ reg &= ~DWC3_DEVTEN_ULSTCNGEN;
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ /* Required to complete this operation before returning */
+ mb();
goto out;
}
@@ -1507,24 +1723,94 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
- /* poll until Link State changes to ON */
- timeout = jiffies + msecs_to_jiffies(100);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq);
- while (!time_after(jiffies, timeout)) {
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ /*
+ * Have bigger value (16 sec) for timeout since some host PCs driving
+ * resume for very long time (e.g. 8 sec)
+ */
+ ret = wait_event_interruptible_timeout(dwc->wait_linkstate,
+ (dwc->link_state < DWC3_LINK_STATE_U3) ||
+ (dwc->link_state == DWC3_LINK_STATE_SS_DIS),
+ msecs_to_jiffies(16000));
- /* in HS, means ON */
- if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
- break;
- }
+ spin_lock_irqsave(&dwc->lock, flags);
+ /* Disable link status change event */
+ reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+ reg &= ~DWC3_DEVTEN_ULSTCNGEN;
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ /*
+ * Complete this write before we go ahead and perform resume
+ * as we don't need link status change notificaiton anymore.
+ */
+ mb();
- if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
- dev_err(dwc->dev, "failed to send remote wakeup\n");
+ if (!ret) {
+ dev_dbg(dwc->dev, "Timeout moving into state(%d)\n",
+ dwc->link_state);
ret = -EINVAL;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ goto out1;
+ } else {
+ ret = 0;
+ /*
+ * If USB is disconnected OR received RESET from host,
+ * don't perform resume
+ */
+ if (dwc->link_state == DWC3_LINK_STATE_SS_DIS ||
+ dwc->gadget.state == USB_STATE_DEFAULT)
+ link_recover_only = true;
}
+ /*
+ * According to DWC3 databook, the controller does not
+ * trigger a wakeup event when remote-wakeup is used.
+ * Hence, after remote-wakeup sequence is complete, and
+ * the device is back at U0 state, it is required that
+ * the resume sequence is initiated by SW.
+ */
+ if (!link_recover_only)
+ dwc3_gadget_wakeup_interrupt(dwc, true);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ pr_debug("%s: Exit\n", __func__);
+ return ret;
+
out:
spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq);
+
+out1:
+ return ret;
+}
+
+static int dwc_gadget_func_wakeup(struct usb_gadget *g, int interface_id)
+{
+ int ret = 0;
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ if (!g || (g->speed != USB_SPEED_SUPER))
+ return -ENOTSUPP;
+
+ if (dwc3_gadget_is_suspended(dwc)) {
+ pr_debug("USB bus is suspended. Scheduling wakeup and returning -EAGAIN.\n");
+ dwc3_gadget_wakeup(&dwc->gadget);
+ return -EAGAIN;
+ }
+
+ if (dwc->revision < DWC3_REVISION_220A) {
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_XMIT_FUNCTION, interface_id);
+ } else {
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_XMIT_DEV, 0x1 | (interface_id << 4));
+ }
+
+ if (ret)
+ pr_err("Function wakeup HW command failed.\n");
+ else
+ pr_debug("Function wakeup HW command succeeded.\n");
return ret;
}
@@ -1542,6 +1828,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
+#define DWC3_SOFT_RESET_TIMEOUT 10 /* 10 msec */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
@@ -1549,6 +1836,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
+ dbg_event(0xFF, "Pullup_enable", is_on);
if (dwc->revision <= DWC3_REVISION_187A) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
@@ -1556,6 +1844,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (dwc->revision >= DWC3_REVISION_194A)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
+
+
+ dwc3_event_buffers_setup(dwc);
+ dwc3_gadget_restart(dwc);
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_RUN_STOP;
if (dwc->has_hibernation)
@@ -1563,12 +1856,18 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = true;
} else {
+ dbg_event(0xFF, "Pullup_disable", is_on);
+ dwc3_gadget_disable_irq(dwc);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+
reg &= ~DWC3_DCTL_RUN_STOP;
if (dwc->has_hibernation && !suspend)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
dwc->pullups_connected = false;
+ usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -1583,8 +1882,15 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
break;
}
timeout--;
- if (!timeout)
+ if (!timeout) {
+ dev_err(dwc->dev, "failed to %s controller\n",
+ is_on ? "start" : "stop");
+ if (is_on)
+ dbg_event(0xFF, "STARTTOUT", reg);
+ else
+ dbg_event(0xFF, "STOPTOUT", reg);
return -ETIMEDOUT;
+ }
udelay(1);
} while (1);
@@ -1596,6 +1902,16 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
return 0;
}
+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned mA)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ dwc->vbus_draw = mA;
+ dev_dbg(dwc->dev, "Notify controller from %s. mA = %d\n", __func__, mA);
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT, 0);
+ return 0;
+}
+
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
@@ -1604,14 +1920,43 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
+ dwc->softconnect = is_on;
+
+ if ((dwc->is_drd && !dwc->vbus_active) || !dwc->gadget_driver) {
+ /*
+ * Need to wait for vbus_session(on) from otg driver or to
+ * the udc_start.
+ */
+ return 0;
+ }
+
+ pm_runtime_get_sync(dwc->dev);
+ dbg_event(0xFF, "Pullup gsync",
+ atomic_read(&dwc->dev->power.usage_count));
+
spin_lock_irqsave(&dwc->lock, flags);
+
+ /*
+ * If we are here after bus suspend notify otg state machine to
+ * increment pm usage count of dwc to prevent pm_runtime_suspend
+ * during enumeration.
+ */
+ dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+ dwc->b_suspend = false;
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
+
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
+ dbg_event(0xFF, "Pullup put",
+ atomic_read(&dwc->dev->power.usage_count));
+
return ret;
}
-static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
u32 reg;
@@ -1621,53 +1966,91 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
- DWC3_DEVTEN_ULSTCNGEN |
DWC3_DEVTEN_CONNECTDONEEN |
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ /*
+ * Enable SUSPENDEVENT(BIT:6) for version 230A and above
+ * else enable USB Link change event (BIT:3) for older version
+ */
+ if (dwc->revision < DWC3_REVISION_230A)
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+ else
+ reg |= DWC3_DEVTEN_SUSPEND;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
-static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
/* mask all interrupts */
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}
-static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
+static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc);
-static int dwc3_gadget_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
- struct dwc3_ep *dep;
- unsigned long flags;
- int ret = 0;
- int irq;
- u32 reg;
+ struct dwc3 *dwc = gadget_to_dwc(_gadget);
+ unsigned long flags;
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED, "dwc3", dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
- irq, ret);
- goto err0;
- }
+ if (!dwc->is_drd)
+ return -EPERM;
+
+ is_active = !!is_active;
spin_lock_irqsave(&dwc->lock, flags);
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget.name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
+ /* Mark that the vbus was powered */
+ dwc->vbus_active = is_active;
+
+ /*
+ * Check if upper level usb_gadget_driver was already registerd with
+ * this udc controller driver (if dwc3_gadget_start was called)
+ */
+ if (dwc->gadget_driver && dwc->softconnect) {
+ if (dwc->vbus_active) {
+ /*
+ * Both vbus was activated by otg and pullup was
+ * signaled by the gadget driver.
+ */
+ dwc3_gadget_run_stop(dwc, 1, false);
+ } else {
+ dwc3_gadget_run_stop(dwc, 0, false);
+ }
}
- dwc->gadget_driver = driver;
+ /*
+ * Clearing run/stop bit might occur before disconnect event is seen.
+ * Make sure to let gadget driver know in that case.
+ */
+ if (!dwc->vbus_active) {
+ dev_dbg(dwc->dev, "calling disconnect from %s\n", __func__);
+ dwc3_gadget_disconnect_interrupt(dwc);
+ }
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+}
+
+static int __dwc3_gadget_start(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret = 0;
+ u32 reg;
+
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1706,23 +2089,40 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ /* Programs the number of outstanding pipelined transfer requests
+ * the AXI master pushes to the AXI slave.
+ */
+ if (dwc->revision >= DWC3_REVISION_270A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG1);
+ reg &= ~DWC3_GSBUSCFG1_PIPETRANSLIMIT_MASK;
+ reg |= DWC3_GSBUSCFG1_PIPETRANSLIMIT(0xe);
+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, reg);
+ }
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ dwc->delayed_status = false;
+ /* reinitialize physical ep0-1 */
dep = dwc->eps[0];
+ dep->flags = 0;
+ dep->endpoint.maxburst = 1;
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err2;
+ return ret;
}
dep = dwc->eps[1];
+ dep->flags = 0;
+ dep->endpoint.maxburst = 1;
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err3;
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ return ret;
}
/* begin to receive SETUP packets */
@@ -1732,22 +2132,45 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_enable_irq(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
+ return ret;
+}
- return 0;
+/* Required gadget re-initialization before switching to gadget in OTG mode */
+void dwc3_gadget_restart(struct dwc3 *dwc)
+{
+ __dwc3_gadget_start(dwc);
+}
-err3:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret = 0;
-err2:
- dwc->gadget_driver = NULL;
+ spin_lock_irqsave(&dwc->lock, flags);
-err1:
- spin_unlock_irqrestore(&dwc->lock, flags);
+ if (dwc->gadget_driver) {
+ dev_err(dwc->dev, "%s is already bound to %s\n",
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ dwc->gadget_driver = driver;
- free_irq(irq, dwc);
+ /*
+ * For DRD, this might get called by gadget driver during bootup
+ * even though host mode might be active. Don't actually perform
+ * device-specific initialization until device mode is activated.
+ * In that case dwc3_gadget_restart() will handle it.
+ */
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
err0:
+ spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
@@ -1755,40 +2178,58 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
- int irq;
-
- spin_lock_irqsave(&dwc->lock, flags);
- dwc3_gadget_disable_irq(dwc);
- __dwc3_gadget_ep_disable(dwc->eps[0]);
- __dwc3_gadget_ep_disable(dwc->eps[1]);
+ spin_lock_irqsave(&dwc->lock, flags);
dwc->gadget_driver = NULL;
-
spin_unlock_irqrestore(&dwc->lock, flags);
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- free_irq(irq, dwc);
-
return 0;
}
+static int dwc3_gadget_restart_usb_session(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ return dwc3_notify_event(dwc, DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
+}
+
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
+ .func_wakeup = dwc_gadget_func_wakeup,
.set_selfpowered = dwc3_gadget_set_selfpowered,
+ .vbus_session = dwc3_gadget_vbus_session,
+ .vbus_draw = dwc3_gadget_vbus_draw,
.pullup = dwc3_gadget_pullup,
.udc_start = dwc3_gadget_start,
.udc_stop = dwc3_gadget_stop,
+ .restart = dwc3_gadget_restart_usb_session,
};
/* -------------------------------------------------------------------------- */
+#define NUM_GSI_OUT_EPS 1
+#define NUM_GSI_IN_EPS 2
+
static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
u8 num, u32 direction)
{
struct dwc3_ep *dep;
- u8 i;
+ u8 i, gsi_ep_count, gsi_ep_index = 0;
+
+ /* Read number of event buffers to check if we need
+ * to update gsi_ep_count. For non GSI targets this
+ * will be 0 and we will skip reservation of GSI eps.
+ * There is one event buffer for each GSI EP.
+ */
+ gsi_ep_count = dwc->num_gsi_event_buffers;
+ /* OUT GSI EPs based on direction field */
+ if (gsi_ep_count && !direction)
+ gsi_ep_count = NUM_GSI_OUT_EPS;
+ /* IN GSI EPs */
+ else if (gsi_ep_count && direction)
+ gsi_ep_count = NUM_GSI_IN_EPS;
for (i = 0; i < num; i++) {
u8 epnum = (i << 1) | (!!direction);
@@ -1802,9 +2243,21 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->direction = !!direction;
dwc->eps[epnum] = dep;
- snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
- (epnum & 1) ? "in" : "out");
+ /* Reserve EPs at the end for GSI based on gsi_ep_count */
+ if ((gsi_ep_index < gsi_ep_count) &&
+ (i > (num - 1 - gsi_ep_count))) {
+ gsi_ep_index++;
+ /* For GSI EPs, name eps as "gsi-epin" or "gsi-epout" */
+ snprintf(dep->name, sizeof(dep->name), "%s",
+ (epnum & 1) ? "gsi-epin" : "gsi-epout");
+ /* Set ep type as GSI */
+ dep->endpoint.ep_type = EP_TYPE_GSI;
+ } else {
+ snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+ epnum >> 1, (epnum & 1) ? "in" : "out");
+ }
+ dep->endpoint.ep_num = epnum >> 1;
dep->endpoint.name = dep->name;
dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
@@ -1900,7 +2353,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
/* -------------------------------------------------------------------------- */
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
- struct dwc3_request *req, struct dwc3_trb *trb,
+ struct dwc3_request *req, struct dwc3_trb *trb, unsigned length,
const struct dwc3_event_depevt *event, int status)
{
unsigned int count;
@@ -1944,6 +2397,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
* request in the request_list.
*/
dep->flags |= DWC3_EP_MISSED_ISOC;
+ dbg_event(dep->number, "MISSED ISOC", status);
} else {
dev_err(dwc->dev, "incomplete IN transfer %s\n",
dep->name);
@@ -1957,6 +2411,14 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
s_pkt = 1;
}
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += length - count;
if (s_pkt)
return 1;
if ((event->status & DEPEVT_STATUS_LST) &&
@@ -1976,15 +2438,21 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_trb *trb;
unsigned int slot;
unsigned int i;
- int count = 0;
+ unsigned int trb_len;
int ret;
do {
req = next_request(&dep->req_queued);
if (!req) {
- WARN_ON_ONCE(1);
+ dev_err(dwc->dev, "%s: evt sts %x for no req queued",
+ dep->name, event->status);
return 1;
}
+
+ /* Make sure that not to queue any TRB if HWO bit is set. */
+ if (req->trb->ctrl & DWC3_TRB_CTRL_HWO)
+ return 0;
+
i = 0;
do {
slot = req->start_slot + i;
@@ -1993,50 +2461,50 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
slot++;
slot %= DWC3_TRB_NUM;
trb = &dep->trb_pool[slot];
- count += trb->size & DWC3_TRB_SIZE_MASK;
+ if (req->request.num_mapped_sgs)
+ trb_len = sg_dma_len(&req->request.sg[i]);
+ else
+ trb_len = req->request.length;
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
- event, status);
+ trb_len, event, status);
if (ret)
break;
} while (++i < req->request.num_mapped_sgs);
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- req->request.actual += req->request.length - count;
dwc3_gadget_giveback(dep, req, status);
+ /* EP possibly disabled during giveback? */
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ dev_dbg(dwc->dev, "%s disabled while handling ep event\n",
+ dep->name);
+ return 0;
+ }
+
if (ret)
break;
} while (1);
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->req_queued)) {
- if (list_empty(&dep->request_list)) {
+ if (list_empty(&dep->request_list))
/*
* If there is no entry in request list then do
* not issue END TRANSFER now. Just set PENDING
* flag, so that END TRANSFER is issued when an
* entry is added into request list.
*/
- dep->flags = DWC3_EP_PENDING_REQUEST;
- } else {
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ else
dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
- }
+ dep->flags &= ~DWC3_EP_MISSED_ISOC;
return 1;
}
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
- if ((event->status & DEPEVT_STATUS_IOC) &&
- (trb->ctrl & DWC3_TRB_CTRL_IOC))
- return 0;
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 0;
return 1;
}
@@ -2081,14 +2549,6 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dwc->u1u2 = 0;
}
-
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- int ret;
-
- ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
- if (!ret || ret == -EBUSY)
- return;
- }
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
@@ -2107,9 +2567,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
return;
}
+ dep->dbg_ep_events.total++;
+
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dep->resource_index = 0;
+ dep->dbg_ep_events.xfercomplete++;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
@@ -2120,22 +2583,37 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERINPROGRESS:
+ dep->dbg_ep_events.xferinprogress++;
+ if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+ dep->name);
+ return;
+ }
+
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
+ dep->dbg_ep_events.xfernotready++;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_gadget_start_isoc(dwc, dep, event);
} else {
- int active;
int ret;
- active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
-
dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
- dep->name, active ? "Transfer Active"
+ dep->name, event->status &
+ DEPEVT_STATUS_TRANSFER_ACTIVE
+ ? "Transfer Active"
: "Transfer Not Active");
- ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
+ /*
+ * If XFERNOTREADY interrupt is received with event
+ * status as TRANSFER ACTIVE, don't kick the next transfer;
+ * otherwise a data stall is seen on that endpoint.
+ */
+ if (event->status & DEPEVT_STATUS_TRANSFER_ACTIVE)
+ return;
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
if (!ret || ret == -EBUSY)
return;
@@ -2145,6 +2623,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEPEVT_STREAMEVT:
+ dep->dbg_ep_events.streamevent++;
if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
dep->name);
@@ -2166,53 +2645,71 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
+ dep->dbg_ep_events.rxtxfifoevent++;
break;
case DWC3_DEPEVT_EPCMDCMPLT:
dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
+ dep->dbg_ep_events.epcmdcomplete++;
break;
}
}
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
- dwc->gadget_driver->disconnect(&dwc->gadget);
+ dbg_event(0xFF, "DISCONNECT", 0);
+ gadget_driver->disconnect(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
- dwc->gadget_driver->suspend(&dwc->gadget);
+ dbg_event(0xFF, "SUSPEND", 0);
+ gadget_driver->suspend(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
- dwc->gadget_driver->resume(&dwc->gadget);
+ dbg_event(0xFF, "RESUME", 0);
+ gadget_driver->resume(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (!dwc->gadget_driver)
return;
if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
- usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
+ dbg_event(0xFF, "UDC RESET", 0);
+ usb_gadget_udc_reset(&dwc->gadget, gadget_driver);
spin_lock(&dwc->lock);
}
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
+void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
@@ -2224,6 +2721,10 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
if (!dep->resource_index)
return;
+ if (dep->endpoint.endless)
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER,
+ dep->number);
+
/*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
@@ -2294,7 +2795,11 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_CLEARSTALL, &params);
- WARN_ON_ONCE(ret);
+ if (ret) {
+ dev_dbg(dwc->dev, "%s; send ep cmd CLEARSTALL failed",
+ dep->name);
+ dbg_event(dep->number, "ECLRSTALL", ret);
+ }
}
}
@@ -2302,6 +2807,10 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
+ dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+ dwc->b_suspend = false;
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -2309,11 +2818,14 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_INITU2ENA;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dbg_event(0xFF, "DISCONNECT", 0);
dwc3_disconnect_gadget(dwc);
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
+ wake_up_interruptible(&dwc->wait_linkstate);
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -2351,13 +2863,39 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_gadget_disconnect_interrupt(dwc);
}
+ dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+ dwc->b_suspend = false;
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
+
+ dwc3_usb3_phy_suspend(dwc, false);
+ usb_gadget_vbus_draw(&dwc->gadget, 100);
+
dwc3_reset_gadget(dwc);
+ dbg_event(0xFF, "BUS RST", 0);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->test_mode = false;
+ /*
+ * Per SNPS databook section 8.1.2, EP0 should be in the
+ * setup phase after a bus reset. Ensure this by issuing a
+ * stall-and-restart if EP0 is not currently in the setup
+ * phase.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ unsigned int dir;
+
+ dbg_event(0xFF, "CONTRPEND", dwc->ep0state);
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+ dwc3_ep0_stall_and_restart(dwc);
+ }
+
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
@@ -2365,6 +2903,9 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->link_state = DWC3_LINK_STATE_U0;
+ wake_up_interruptible(&dwc->wait_linkstate);
}
static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
@@ -2480,6 +3021,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
+ /*
+ * In HS mode this allows SS phy suspend. In SS mode this allows ss phy
+ * suspend in P3 state and generates IN_P3 power event irq.
+ */
+ dwc3_usb3_phy_suspend(dwc, true);
+
dep = dwc->eps[0];
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
false);
@@ -2496,6 +3043,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
return;
}
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_CONNDONE_EVENT, 0);
+
/*
* Configure PHY via GUSB3PIPECTLn if required.
*
@@ -2505,14 +3054,45 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
*/
}
-static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup)
{
+ bool perform_resume = true;
+
+ dev_dbg(dwc->dev, "%s\n", __func__);
+
/*
- * TODO take core out of low power mode when that's
- * implemented.
+ * Identify whether this is called from wakeup_interrupt() for a
+ * bus resume or as part of a remote wakeup, and check for the U3
+ * state accordingly. This handles the L1 resume case, in which
+ * we do not want to perform a resume.
*/
+ if (!remote_wakeup && dwc->link_state != DWC3_LINK_STATE_U3)
+ perform_resume = false;
+
+ /* Only perform resume from L2 or Early Suspend states */
+ if (perform_resume) {
+ dbg_event(0xFF, "WAKEUP", 0);
+
+ /*
+ * In case of remote wake up dwc3_gadget_wakeup_work()
+ * is doing pm_runtime_get_sync().
+ */
+ dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+ dwc->b_suspend = false;
+ dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
+
+ /*
+ * Set the link state to U0, as a function-level resume may try
+ * to queue a notification on the USB interrupt endpoint, which
+ * would fail if the state were not updated.
+ */
+ dwc->link_state = DWC3_LINK_STATE_U0;
+ dwc3_resume_gadget(dwc);
+ return;
+ }
- dwc->gadget_driver->resume(&dwc->gadget);
+ dwc->link_state = DWC3_LINK_STATE_U0;
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2612,7 +3192,9 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
break;
}
+ dev_dbg(dwc->dev, "Going from (%d)--->(%d)\n", dwc->link_state, next);
dwc->link_state = next;
+ wake_up_interruptible(&dwc->wait_linkstate);
}
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
@@ -2639,21 +3221,82 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
/* enter hibernation here */
}
+static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ dev_dbg(dwc->dev, "%s Entry to %d\n", __func__, next);
+
+ if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) {
+ /*
+ * When first connecting the cable, even before the initial
+ * DWC3_DEVICE_EVENT_RESET or DWC3_DEVICE_EVENT_CONNECT_DONE
+ * events, the controller sees a DWC3_DEVICE_EVENT_SUSPEND
+ * event. In such a case, ignore.
+ * Ignore the suspend event until the device-side USB has
+ * reached the CONFIGURED state.
+ */
+ if (dwc->gadget.state != USB_STATE_CONFIGURED) {
+ pr_err("%s(): state:%d. Ignore SUSPEND.\n",
+ __func__, dwc->gadget.state);
+ return;
+ }
+
+ dwc3_suspend_gadget(dwc);
+
+ dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+ dwc->b_suspend = true;
+ dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT, 0);
+ }
+
+ dwc->link_state = next;
+ dwc3_trace(trace_dwc3_gadget, "link state %d", dwc->link_state);
+}
+
+static void dwc3_dump_reg_info(struct dwc3 *dwc)
+{
+ dbg_event(0xFF, "REGDUMP", 0);
+
+ dbg_print_reg("GUSB3PIPCTL", dwc3_readl(dwc->regs,
+ DWC3_GUSB3PIPECTL(0)));
+ dbg_print_reg("GUSB2PHYCONFIG", dwc3_readl(dwc->regs,
+ DWC3_GUSB2PHYCFG(0)));
+ dbg_print_reg("GCTL", dwc3_readl(dwc->regs, DWC3_GCTL));
+ dbg_print_reg("GUCTL", dwc3_readl(dwc->regs, DWC3_GUCTL));
+ dbg_print_reg("GDBGLTSSM", dwc3_readl(dwc->regs, DWC3_GDBGLTSSM));
+ dbg_print_reg("DCFG", dwc3_readl(dwc->regs, DWC3_DCFG));
+ dbg_print_reg("DCTL", dwc3_readl(dwc->regs, DWC3_DCTL));
+ dbg_print_reg("DEVTEN", dwc3_readl(dwc->regs, DWC3_DEVTEN));
+ dbg_print_reg("DSTS", dwc3_readl(dwc->regs, DWC3_DSTS));
+ dbg_print_reg("DALPENA", dwc3_readl(dwc->regs, DWC3_DALEPENA));
+ dbg_print_reg("DGCMD", dwc3_readl(dwc->regs, DWC3_DGCMD));
+
+ dbg_print_reg("OCFG", dwc3_readl(dwc->regs, DWC3_OCFG));
+ dbg_print_reg("OCTL", dwc3_readl(dwc->regs, DWC3_OCTL));
+ dbg_print_reg("OEVT", dwc3_readl(dwc->regs, DWC3_OEVT));
+ dbg_print_reg("OSTS", dwc3_readl(dwc->regs, DWC3_OSTS));
+}
+
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
const struct dwc3_event_devt *event)
{
switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
dwc3_gadget_disconnect_interrupt(dwc);
+ dwc->dbg_gadget_events.disconnect++;
break;
case DWC3_DEVICE_EVENT_RESET:
dwc3_gadget_reset_interrupt(dwc);
+ dwc->dbg_gadget_events.reset++;
break;
case DWC3_DEVICE_EVENT_CONNECT_DONE:
dwc3_gadget_conndone_interrupt(dwc);
+ dwc->dbg_gadget_events.connect++;
break;
case DWC3_DEVICE_EVENT_WAKEUP:
- dwc3_gadget_wakeup_interrupt(dwc);
+ dwc3_gadget_wakeup_interrupt(dwc, false);
+ dwc->dbg_gadget_events.wakeup++;
break;
case DWC3_DEVICE_EVENT_HIBER_REQ:
if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
@@ -2664,25 +3307,54 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+ dwc->dbg_gadget_events.link_status_change++;
break;
- case DWC3_DEVICE_EVENT_EOPF:
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ if (dwc->revision < DWC3_REVISION_230A) {
+ dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ dwc->dbg_gadget_events.eopf++;
+ } else {
+ dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
+ dbg_event(0xFF, "GAD SUS", 0);
+ dwc->dbg_gadget_events.suspend++;
+
+ /*
+ * Ignore suspend event if usb cable is not connected
+ * and speed is not being detected.
+ */
+ if (dwc->gadget.speed != USB_SPEED_UNKNOWN &&
+ dwc->vbus_active)
+ dwc3_gadget_suspend_interrupt(dwc,
+ event->event_info);
+ }
break;
case DWC3_DEVICE_EVENT_SOF:
dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
+ dwc->dbg_gadget_events.sof++;
break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
dwc3_trace(trace_dwc3_gadget, "Erratic Error");
+ if (!dwc->err_evt_seen) {
+ dbg_event(0xFF, "ERROR", 0);
+ dwc3_dump_reg_info(dwc);
+ }
+ dwc->dbg_gadget_events.erratic_error++;
break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
dwc3_trace(trace_dwc3_gadget, "Command Complete");
+ dwc->dbg_gadget_events.cmdcmplt++;
break;
case DWC3_DEVICE_EVENT_OVERFLOW:
dwc3_trace(trace_dwc3_gadget, "Overflow");
+ dbg_event(0xFF, "OVERFL", 0);
+ dwc->dbg_gadget_events.overflow++;
break;
default:
dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+ dwc->dbg_gadget_events.unknown_event++;
}
+
+ dwc->err_evt_seen = (event->type == DWC3_DEVICE_EVENT_ERRATIC_ERROR);
}
static void dwc3_process_event_entry(struct dwc3 *dwc,
@@ -2690,6 +3362,18 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
{
trace_dwc3_event(event->raw);
+ /* skip event processing in absence of vbus */
+ if (!dwc->vbus_active) {
+ dbg_print_reg("SKIP EVT", event->raw);
+ return;
+ }
+
+ /* If run/stop is cleared don't process any more events */
+ if (!dwc->pullups_connected) {
+ dbg_print_reg("SKIP_EVT_PULLUP", event->raw);
+ return;
+ }
+
/* Endpoint IRQ, handle it and return early */
if (event->type.is_devspec == 0) {
/* depevt */
@@ -2726,6 +3410,20 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
dwc3_process_event_entry(dwc, &event);
+ if (dwc->err_evt_seen) {
+ /*
+ * if erratic error, skip remaining events
+ * while controller undergoes reset
+ */
+ evt->lpos = (evt->lpos + left) %
+ DWC3_EVENT_BUFFERS_SIZE;
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), left);
+ if (dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_ERROR_EVENT, 0))
+ dwc->err_evt_seen = 0;
+ break;
+ }
+
/*
* FIXME we wrap around correctly to the next entry as
* almost all entries are 4 bytes in size. There is one
@@ -2737,10 +3435,10 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
*/
evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
left -= 4;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
}
+ dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] += (evt->count / 4);
+
evt->count = 0;
evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
@@ -2750,23 +3448,45 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ if (dwc->imod_interval)
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf),
+ DWC3_GEVNTCOUNT_EHB);
+
return ret;
}
+void dwc3_bh_work(struct work_struct *w)
+{
+ struct dwc3 *dwc = container_of(w, struct dwc3, bh_work);
+
+ pm_runtime_get_sync(dwc->dev);
+ dwc3_thread_interrupt(dwc->irq, dwc);
+ pm_runtime_put(dwc->dev);
+}
+
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
{
struct dwc3 *dwc = _dwc;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
int i;
+ unsigned temp_time;
+ ktime_t start_time;
+
+ start_time = ktime_get();
spin_lock_irqsave(&dwc->lock, flags);
+ dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] = 0;
- for (i = 0; i < dwc->num_event_buffers; i++)
+ for (i = 0; i < dwc->num_normal_event_buffers; i++)
ret |= dwc3_process_event_buf(dwc, i);
spin_unlock_irqrestore(&dwc->lock, flags);
+ temp_time = ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dwc->bh_completion_time[dwc->bh_dbg_index] = temp_time;
+ dwc->bh_dbg_index = (dwc->bh_dbg_index + 1) % 10;
+
return ret;
}
@@ -2783,6 +3503,13 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
if (!count)
return IRQ_NONE;
+ if (count > evt->length) {
+ dbg_event(0xFF, "HUGE_EVCNT", count);
+ evt->lpos = (evt->lpos + count) % DWC3_EVENT_BUFFERS_SIZE;
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
+ return IRQ_HANDLED;
+ }
+
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
@@ -2791,24 +3518,46 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
+
return IRQ_WAKE_THREAD;
}
-static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
+irqreturn_t dwc3_interrupt(int irq, void *_dwc)
{
struct dwc3 *dwc = _dwc;
int i;
irqreturn_t ret = IRQ_NONE;
+ unsigned temp_cnt = 0;
+ ktime_t start_time;
+
+ start_time = ktime_get();
+ dwc->irq_cnt++;
- for (i = 0; i < dwc->num_event_buffers; i++) {
+ /* controller reset is still pending */
+ if (dwc->err_evt_seen)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < dwc->num_normal_event_buffers; i++) {
irqreturn_t status;
status = dwc3_check_event_buf(dwc, i);
if (status == IRQ_WAKE_THREAD)
ret = status;
+
+ temp_cnt += dwc->ev_buffs[i]->count;
}
- return ret;
+ dwc->irq_start_time[dwc->irq_dbg_index] = start_time;
+ dwc->irq_completion_time[dwc->irq_dbg_index] =
+ ktime_us_delta(ktime_get(), start_time);
+ dwc->irq_event_count[dwc->irq_dbg_index] = temp_cnt / 4;
+ dwc->irq_dbg_index = (dwc->irq_dbg_index + 1) % MAX_INTR_STATS;
+
+ if (ret == IRQ_WAKE_THREAD)
+ queue_work(dwc->dwc_wq, &dwc->bh_work);
+
+ return IRQ_HANDLED;
}
/**
@@ -2821,6 +3570,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
{
int ret;
+ INIT_WORK(&dwc->wakeup_work, dwc3_gadget_wakeup_work);
+
dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
if (!dwc->ctrl_req) {
@@ -2907,6 +3658,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err5;
}
+ if (!dwc->is_drd) {
+ pm_runtime_no_callbacks(&dwc->gadget.dev);
+ pm_runtime_set_active(&dwc->gadget.dev);
+ pm_runtime_enable(&dwc->gadget.dev);
+ pm_runtime_get(&dwc->gadget.dev);
+ }
+
return 0;
err5:
@@ -2936,6 +3694,11 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
+ if (dwc->is_drd) {
+ pm_runtime_put(&dwc->gadget.dev);
+ pm_runtime_disable(&dwc->gadget.dev);
+ }
+
usb_del_gadget_udc(&dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index ccd9694f8e36..bbf9b5096846 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -68,6 +68,14 @@ static inline struct dwc3_request *next_request(struct list_head *list)
return list_first_entry(list, struct dwc3_request, list);
}
+static inline void dwc3_gadget_move_request_list_front(struct dwc3_request *req)
+{
+ struct dwc3_ep *dep = req->dep;
+
+ req->queued = false;
+ list_move(&req->list, &dep->request_list);
+}
+
static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
{
struct dwc3_ep *dep = req->dep;
@@ -76,17 +84,38 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
list_move_tail(&req->list, &dep->req_queued);
}
+static inline enum dwc3_link_state dwc3_get_link_state(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_USBLNKST(reg);
+}
+
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
+irqreturn_t dwc3_interrupt(int irq, void *_dwc);
+void dwc3_bh_work(struct work_struct *w);
+
+static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+ struct dwc3_trb *trb)
+{
+ u32 offset = (char *) trb - (char *) dep->trb_pool;
+
+ return dep->trb_pool_dma + offset;
+}
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index c679f63783ae..7f1ae5cf9909 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -25,6 +25,7 @@ int dwc3_host_init(struct dwc3 *dwc)
struct platform_device *xhci;
struct usb_xhci_pdata pdata;
int ret;
+ struct device_node *node = dwc->dev->of_node;
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -32,6 +33,7 @@ int dwc3_host_init(struct dwc3 *dwc)
return -ENOMEM;
}
+ arch_setup_dma_ops(&xhci->dev, 0, 0, NULL, 0);
dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
xhci->dev.parent = dwc->dev;
@@ -51,6 +53,11 @@ int dwc3_host_init(struct dwc3 *dwc)
pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
+ ret = of_property_read_u32(node, "xhci-imod-value",
+ &pdata.imod_interval);
+ if (ret)
+ pdata.imod_interval = 0; /* use default xhci.c value */
+
ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
if (ret) {
dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
@@ -62,18 +69,9 @@ int dwc3_host_init(struct dwc3 *dwc)
phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
dev_name(&xhci->dev));
- ret = platform_device_add(xhci);
- if (ret) {
- dev_err(dwc->dev, "failed to register xHCI device\n");
- goto err2;
- }
-
+ /* Platform device gets added as part of state machine */
return 0;
-err2:
- phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
- phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+
err1:
platform_device_put(xhci);
return ret;
@@ -85,5 +83,6 @@ void dwc3_host_exit(struct dwc3 *dwc)
dev_name(&dwc->xhci->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
dev_name(&dwc->xhci->dev));
- platform_device_unregister(dwc->xhci);
+ if (!dwc->is_drd)
+ platform_device_unregister(dwc->xhci);
}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index 6a79c8e66bbc..d797eb8728de 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -41,7 +41,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
+ dwc3_trace(trace_dwc3_readl, "addr %pK value %08x",
base - DWC3_GLOBALS_REGS_START + offset, value);
return value;
@@ -63,7 +63,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
+ dwc3_trace(trace_dwc3_writel, "addr %pK value %08x",
base - DWC3_GLOBALS_REGS_START + offset, value);
}
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 9c10669ab91f..225b2d4f9ecd 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(dwc3_log_request,
__entry->length = req->request.length;
__entry->status = req->request.status;
),
- TP_printk("%s: req %p length %u/%u ==> %d",
+ TP_printk("%s: req %pK length %u/%u ==> %d",
__get_str(name), __entry->req, __entry->actual, __entry->length,
__entry->status
)
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
),
- TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
+ TP_printk("%s: trb %pK bph %08x bpl %08x size %08x ctrl %08x",
__get_str(name), __entry->trb, __entry->bph, __entry->bpl,
__entry->size, __entry->ctrl
)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c79d70ea3402..330cecbde51d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -151,6 +151,9 @@ config USB_U_SERIAL
config USB_U_ETHER
tristate
+config USB_U_AUDIO
+ tristate
+
config USB_F_SERIAL
tristate
@@ -175,6 +178,12 @@ config USB_F_SUBSET
config USB_F_RNDIS
tristate
+config USB_F_QCRNDIS
+ tristate
+
+config USB_F_RMNET_BAM
+ tristate
+
config USB_F_MASS_STORAGE
tristate
@@ -184,6 +193,9 @@ config USB_F_FS
config USB_F_UAC1
tristate
+config USB_F_UAC1_LEGACY
+ tristate
+
config USB_F_UAC2
tristate
@@ -211,6 +223,21 @@ config USB_F_AUDIO_SRC
config USB_F_ACC
tristate
+config USB_F_DIAG
+ tristate
+
+config USB_F_GSI
+ tristate
+
+config USB_F_CDEV
+ tristate
+
+config USB_F_QDSS
+ tristate
+
+config USB_F_CCID
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -307,6 +334,14 @@ config USB_CONFIGFS_ECM_SUBSET
On hardware that can't implement the full protocol,
a simple CDC subset is used, placing fewer demands on USB.
+config USB_CONFIGFS_QCRNDIS
+ bool "QCRNDIS"
+ depends on USB_CONFIGFS
+ depends on RNDIS_IPA
+ depends on NET
+ select USB_U_ETHER
+ select USB_F_QCRNDIS
+
config USB_CONFIGFS_RNDIS
bool "RNDIS"
depends on USB_CONFIGFS
@@ -323,6 +358,12 @@ config USB_CONFIGFS_RNDIS
XP, you'll need to download drivers from Microsoft's website; a URL
is given in comments found in that info file.
+config USB_CONFIGFS_RMNET_BAM
+ bool "RMNET"
+ depends on USB_CONFIGFS
+ depends on IPA
+ select USB_F_RMNET_BAM
+
config USB_CONFIGFS_EEM
bool "Ethernet Emulation Model (EEM)"
depends on USB_CONFIGFS
@@ -430,12 +471,30 @@ config USB_CONFIGFS_F_UAC1
depends on SND
select USB_LIBCOMPOSITE
select SND_PCM
+ select USB_U_AUDIO
select USB_F_UAC1
help
This Audio function implements 1 AudioControl interface,
1 AudioStreaming Interface each for USB-OUT and USB-IN.
- This driver requires a real Audio codec to be present
- on the device.
+ This driver doesn't expect any real Audio codec to be present
+ on the device - the audio streams are simply sunk to and
+ sourced from a virtual ALSA sound card created. The user-space
+ application may choose to do whatever it wants with the data
+ received from the USB Host and choose to provide whatever it
+ wants as audio data to the USB Host.
+
+config USB_CONFIGFS_F_UAC1_LEGACY
+ bool "Audio Class 1.0 (legacy implementation)"
+ depends on USB_CONFIGFS
+ depends on SND
+ select USB_LIBCOMPOSITE
+ select SND_PCM
+ select USB_F_UAC1_LEGACY
+ help
+ This Audio function implements 1 AudioControl interface,
+ 1 AudioStreaming Interface each for USB-OUT and USB-IN.
+ This is a legacy driver and requires a real Audio codec
+ to be present on the device.
config USB_CONFIGFS_F_UAC2
bool "Audio Class 2.0"
@@ -443,6 +502,7 @@ config USB_CONFIGFS_F_UAC2
depends on SND
select USB_LIBCOMPOSITE
select SND_PCM
+ select USB_U_AUDIO
select USB_F_UAC2
help
This Audio function is compatible with USB Audio Class
@@ -504,6 +564,43 @@ config USB_CONFIGFS_F_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+config USB_CONFIGFS_F_DIAG
+ bool "USB Diag function"
+ select USB_F_DIAG
+ depends on USB_CONFIGFS
+ help
+ Diag function driver enables support for Qualcomm diagnostics
+ port over USB.
+
+config USB_CONFIGFS_F_GSI
+ bool "USB GSI function"
+ select USB_F_GSI
+ depends on USB_CONFIGFS
+ help
+ Generic function driver to support h/w acceleration to IPA over GSI.
+
+config USB_CONFIGFS_F_CDEV
+ bool "USB Serial Character function"
+ select USB_F_CDEV
+ depends on USB_CONFIGFS
+ help
+ Generic USB serial character function driver to support DUN/NMEA.
+
+config USB_CONFIGFS_F_QDSS
+ bool "USB QDSS function"
+ select USB_F_QDSS
+ depends on USB_CONFIGFS
+ help
+ USB QDSS function driver to get hwtracing related data over USB.
+
+config USB_CONFIGFS_F_CCID
+ bool "USB CCID function"
+ select USB_F_CCID
+ depends on USB_CONFIGFS
+ help
+ USB CCID function driver creates a transport layer between
+ the userspace CCID component and the Windows Host.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 598a67d6ba05..32962896cf68 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,3 +10,4 @@ libcomposite-y := usbstring.o config.o epautoconf.o
libcomposite-y += composite.o functions.o configfs.o u_f.o
obj-$(CONFIG_USB_GADGET) += udc/ function/ legacy/
+obj-y += debug.o
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3950586a1ed2..54be4126f367 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -23,6 +23,23 @@
#include <asm/unaligned.h>
#include "u_os_desc.h"
+#define SSUSB_GADGET_VBUS_DRAW 900 /* in mA */
+#define SSUSB_GADGET_VBUS_DRAW_UNITS 8
+#define HSUSB_GADGET_VBUS_DRAW_UNITS 2
+
+/*
+ * Based on enumerated USB speed, draw power with set_config and resume
+ * HSUSB: 500mA, SSUSB: 900mA
+ */
+#define USB_VBUS_DRAW(speed)\
+ (speed == USB_SPEED_SUPER ?\
+ SSUSB_GADGET_VBUS_DRAW : CONFIG_USB_GADGET_VBUS_DRAW)
+
+/* disable LPM by default */
+static bool disable_l1_for_hs = true;
+module_param(disable_l1_for_hs, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_l1_for_hs,
+ "Disable support for L1 LPM for HS devices");
/**
* struct usb_os_string - represents OS String to be reported by a gadget
@@ -208,7 +225,7 @@ int usb_add_function(struct usb_configuration *config,
{
int value = -EINVAL;
- DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
+ DBG(config->cdev, "adding '%s'/%pK to config '%s'/%pK\n",
function->name, function,
config->label, config);
@@ -216,6 +233,7 @@ int usb_add_function(struct usb_configuration *config,
goto done;
function->config = config;
+ function->intf_id = -EINVAL;
list_add_tail(&function->list, &config->functions);
if (function->bind_deactivated) {
@@ -248,7 +266,7 @@ int usb_add_function(struct usb_configuration *config,
done:
if (value)
- DBG(config->cdev, "adding '%s'/%p --> %d\n",
+ DBG(config->cdev, "adding '%s'/%pK --> %d\n",
function->name, function, value);
return value;
}
@@ -370,6 +388,8 @@ int usb_interface_id(struct usb_configuration *config,
if (id < MAX_CONFIG_INTERFACES) {
config->interface[id] = function;
+ if (function->intf_id < 0)
+ function->intf_id = id;
config->next_interface_id = id + 1;
return id;
}
@@ -377,22 +397,115 @@ int usb_interface_id(struct usb_configuration *config,
}
EXPORT_SYMBOL_GPL(usb_interface_id);
+static int usb_func_wakeup_int(struct usb_function *func)
+{
+ int ret;
+ struct usb_gadget *gadget;
+
+ pr_debug("%s - %s function wakeup\n",
+ __func__, func->name ? func->name : "");
+
+ if (!func || !func->config || !func->config->cdev ||
+ !func->config->cdev->gadget)
+ return -EINVAL;
+
+ gadget = func->config->cdev->gadget;
+ if ((gadget->speed != USB_SPEED_SUPER) || !func->func_wakeup_allowed) {
+ DBG(func->config->cdev,
+ "Function Wakeup is not possible. speed=%u, func_wakeup_allowed=%u\n",
+ gadget->speed,
+ func->func_wakeup_allowed);
+
+ return -ENOTSUPP;
+ }
+
+ ret = usb_gadget_func_wakeup(gadget, func->intf_id);
+
+ return ret;
+}
+
+int usb_func_wakeup(struct usb_function *func)
+{
+ int ret;
+ unsigned long flags;
+
+ pr_debug("%s function wakeup\n",
+ func->name ? func->name : "");
+
+ spin_lock_irqsave(&func->config->cdev->lock, flags);
+ ret = usb_func_wakeup_int(func);
+ if (ret == -EAGAIN) {
+ DBG(func->config->cdev,
+ "Function wakeup for %s could not complete due to suspend state. Delayed until after bus resume.\n",
+ func->name ? func->name : "");
+ ret = 0;
+ } else if (ret < 0 && ret != -ENOTSUPP) {
+ ERROR(func->config->cdev,
+ "Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n",
+ func->name ? func->name : "", ret);
+ }
+
+ spin_unlock_irqrestore(&func->config->cdev->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_func_wakeup);
+
+int usb_func_ep_queue(struct usb_function *func, struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ int ret;
+ struct usb_gadget *gadget;
+
+ if (!func || !func->config || !func->config->cdev ||
+ !func->config->cdev->gadget || !ep || !req) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("Function %s queueing new data into ep %u\n",
+ func->name ? func->name : "", ep->address);
+
+ gadget = func->config->cdev->gadget;
+
+ if (func->func_is_suspended && func->func_wakeup_allowed) {
+ ret = usb_gadget_func_wakeup(gadget, func->intf_id);
+ if (ret == -EAGAIN) {
+ pr_debug("bus suspended func wakeup for %s delayed until bus resume.\n",
+ func->name ? func->name : "");
+ } else if (ret < 0 && ret != -ENOTSUPP) {
+ pr_err("Failed to wake function %s from suspend state. ret=%d.\n",
+ func->name ? func->name : "", ret);
+ }
+ goto done;
+ }
+
+ if (func->func_is_suspended && !func->func_wakeup_allowed) {
+ ret = -ENOTSUPP;
+ goto done;
+ }
+
+ ret = usb_ep_queue(ep, req, gfp_flags);
+done:
+ return ret;
+}
+
static u8 encode_bMaxPower(enum usb_device_speed speed,
struct usb_configuration *c)
{
- unsigned val;
+ unsigned val = c->MaxPower;
- if (c->MaxPower)
- val = c->MaxPower;
- else
- val = CONFIG_USB_GADGET_VBUS_DRAW;
- if (!val)
- return 0;
switch (speed) {
case USB_SPEED_SUPER:
- return DIV_ROUND_UP(val, 8);
+ /* with super-speed report 900mA if user hasn't specified */
+ if (!val)
+ val = SSUSB_GADGET_VBUS_DRAW;
+
+ return (u8)(val / SSUSB_GADGET_VBUS_DRAW_UNITS);
default:
- return DIV_ROUND_UP(val, 2);
+ if (!val)
+ val = CONFIG_USB_GADGET_VBUS_DRAW;
+
+ return DIV_ROUND_UP(val, HSUSB_GADGET_VBUS_DRAW_UNITS);
}
}
@@ -416,6 +529,10 @@ static int config_buf(struct usb_configuration *config,
c->iConfiguration = config->iConfiguration;
c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
c->bMaxPower = encode_bMaxPower(speed, config);
+ if (config->cdev->gadget->is_selfpowered) {
+ c->bmAttributes |= USB_CONFIG_ATT_SELFPOWER;
+ c->bMaxPower = 0;
+ }
/* There may be e.g. OTG descriptors */
if (config->descriptors) {
@@ -573,7 +690,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
/*
* A SuperSpeed device shall include the USB2.0 extension descriptor
- * and shall support LPM when operating in USB2.0 HS mode.
+ * and shall support LPM when operating in USB2.0 HS mode, as well as
+ * a HS device when operating in USB2.1 HS mode.
*/
usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
bos->bNumDeviceCaps++;
@@ -583,33 +701,37 @@ static int bos_desc(struct usb_composite_dev *cdev)
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
- /*
- * The Superspeed USB Capability descriptor shall be implemented by all
- * SuperSpeed devices.
- */
- ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
- bos->bNumDeviceCaps++;
- le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
- ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
- ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
- ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
- ss_cap->bmAttributes = 0; /* LTM is not supported yet */
- ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
- USB_FULL_SPEED_OPERATION |
- USB_HIGH_SPEED_OPERATION |
- USB_5GBPS_OPERATION);
- ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
-
- /* Get Controller configuration */
- if (cdev->gadget->ops->get_config_params)
- cdev->gadget->ops->get_config_params(&dcd_config_params);
- else {
- dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
- dcd_config_params.bU2DevExitLat =
- cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+ if (gadget_is_superspeed(cdev->gadget)) {
+ /*
+ * The Superspeed USB Capability descriptor shall be
+ * implemented by all SuperSpeed devices.
+ */
+ ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
+ ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
+ ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
+ ss_cap->bmAttributes = 0; /* LTM is not supported yet */
+ ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
+ USB_FULL_SPEED_OPERATION |
+ USB_HIGH_SPEED_OPERATION |
+ USB_5GBPS_OPERATION);
+ ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+ /* Get Controller configuration */
+ if (cdev->gadget->ops->get_config_params)
+ cdev->gadget->ops->get_config_params
+ (&dcd_config_params);
+ else {
+ dcd_config_params.bU1devExitLat =
+ USB_DEFAULT_U1_DEV_EXIT_LAT;
+ dcd_config_params.bU2DevExitLat =
+ cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+ }
+ ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
+ ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
}
- ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
- ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
return le16_to_cpu(bos->wTotalLength);
}
@@ -643,6 +765,11 @@ static void reset_config(struct usb_composite_dev *cdev)
if (f->disable)
f->disable(f);
+ /* USB 3.0 addition */
+ f->func_is_suspended = false;
+ f->func_wakeup_allowed = false;
+ f->func_wakeup_pending = false;
+
bitmap_zero(f->endpoints, 32);
}
cdev->config = NULL;
@@ -655,9 +782,18 @@ static int set_config(struct usb_composite_dev *cdev,
struct usb_gadget *gadget = cdev->gadget;
struct usb_configuration *c = NULL;
int result = -EINVAL;
- unsigned power = gadget_is_otg(gadget) ? 8 : 100;
int tmp;
+ /*
+ * ignore 2nd time SET_CONFIGURATION
+ * only for same config value twice.
+ */
+ if (cdev->config && (cdev->config->bConfigurationValue == number)) {
+ DBG(cdev, "already in the same config with value %d\n",
+ number);
+ return 0;
+ }
+
if (number) {
list_for_each_entry(c, &cdev->configs, list) {
if (c->bConfigurationValue == number) {
@@ -689,6 +825,8 @@ static int set_config(struct usb_composite_dev *cdev,
usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
cdev->config = c;
+ c->num_ineps_used = 0;
+ c->num_outeps_used = 0;
/* Initialize all interfaces by setting them to altsetting zero. */
for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
@@ -706,6 +844,12 @@ static int set_config(struct usb_composite_dev *cdev,
*/
switch (gadget->speed) {
case USB_SPEED_SUPER:
+ if (!f->ss_descriptors) {
+ pr_err("%s(): No SS desc for function:%s\n",
+ __func__, f->name);
+ usb_gadget_set_state(gadget, USB_STATE_ADDRESS);
+ return -EINVAL;
+ }
descriptors = f->ss_descriptors;
break;
case USB_SPEED_HIGH:
@@ -726,11 +870,15 @@ static int set_config(struct usb_composite_dev *cdev,
addr = ((ep->bEndpointAddress & 0x80) >> 3)
| (ep->bEndpointAddress & 0x0f);
set_bit(addr, f->endpoints);
+ if (usb_endpoint_dir_in(ep))
+ c->num_ineps_used++;
+ else
+ c->num_outeps_used++;
}
result = f->set_alt(f, tmp, 0);
if (result < 0) {
- DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
+ DBG(cdev, "interface %d (%s/%pK) alt 0 --> %d\n",
tmp, f->name, f, result);
reset_config(cdev);
@@ -747,15 +895,13 @@ static int set_config(struct usb_composite_dev *cdev,
}
}
- /* when we return, be sure our power usage is valid */
- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
done:
- if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (USB_VBUS_DRAW(gadget->speed) <= USB_SELF_POWER_VBUS_MAX_DRAW)
usb_gadget_set_selfpowered(gadget);
else
usb_gadget_clear_selfpowered(gadget);
- usb_gadget_vbus_draw(gadget, power);
+ usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed));
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
return result;
@@ -810,7 +956,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
if (!bind)
goto done;
- DBG(cdev, "adding config #%u '%s'/%p\n",
+ DBG(cdev, "adding config #%u '%s'/%pK\n",
config->bConfigurationValue,
config->label, config);
@@ -827,7 +973,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
struct usb_function, list);
list_del(&f->list);
if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n",
+ DBG(cdev, "unbind function '%s'/%pK\n",
f->name, f);
f->unbind(config, f);
/* may free memory for "f" */
@@ -838,7 +984,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
} else {
unsigned i;
- DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
+ DBG(cdev, "cfg %d/%pK speeds:%s%s%s\n",
config->bConfigurationValue, config,
config->superspeed ? " super" : "",
config->highspeed ? " high" : "",
@@ -853,7 +999,7 @@ int usb_add_config(struct usb_composite_dev *cdev,
if (!f)
continue;
- DBG(cdev, " interface %d = %s/%p\n",
+ DBG(cdev, " interface %d = %s/%pK\n",
i, f->name, f);
}
}
@@ -879,14 +1025,14 @@ static void remove_config(struct usb_composite_dev *cdev,
struct usb_function, list);
list_del(&f->list);
if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+ DBG(cdev, "unbind function '%s'/%pK\n", f->name, f);
f->unbind(config, f);
/* may free memory for "f" */
}
}
list_del(&config->list);
if (config->unbind) {
- DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+ DBG(cdev, "unbind config '%s'/%pK\n", config->label, config);
config->unbind(config);
/* may free memory for "c" */
}
@@ -1294,7 +1440,7 @@ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
else if (cdev->os_desc_req == req)
cdev->os_desc_pending = false;
else
- WARN(1, "unknown request %p\n", req);
+ WARN(1, "unknown request %pK\n", req);
}
static int composite_ep0_queue(struct usb_composite_dev *cdev,
@@ -1309,7 +1455,7 @@ static int composite_ep0_queue(struct usb_composite_dev *cdev,
else if (cdev->os_desc_req == req)
cdev->os_desc_pending = true;
else
- WARN(1, "unknown request %p\n", req);
+ WARN(1, "unknown request %pK\n", req);
}
return ret;
@@ -1512,17 +1658,24 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
+ if (cdev->desc.bNumConfigurations == 0) {
+ pr_err("%s:config is not active. send stall\n",
+ __func__);
+ break;
+ }
+
cdev->desc.bMaxPacketSize0 =
cdev->gadget->ep0->maxpacket;
+ cdev->desc.bcdUSB = cpu_to_le16(0x0200);
if (gadget_is_superspeed(gadget)) {
if (gadget->speed >= USB_SPEED_SUPER) {
- cdev->desc.bcdUSB = cpu_to_le16(0x0300);
+ cdev->desc.bcdUSB = cpu_to_le16(0x0310);
cdev->desc.bMaxPacketSize0 = 9;
- } else {
+ } else if (!disable_l1_for_hs) {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+ DBG(cdev,
+ "Config HS device with LPM(L1)\n");
}
- } else {
- cdev->desc.bcdUSB = cpu_to_le16(0x0200);
}
value = min(w_length, (u16) sizeof cdev->desc);
@@ -1532,7 +1685,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
+ spin_lock(&cdev->lock);
device_qual(cdev);
+ spin_unlock(&cdev->lock);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
break;
@@ -1542,18 +1697,24 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
/* FALLTHROUGH */
case USB_DT_CONFIG:
+ spin_lock(&cdev->lock);
value = config_desc(cdev, w_value);
+ spin_unlock(&cdev->lock);
if (value >= 0)
value = min(w_length, (u16) value);
break;
case USB_DT_STRING:
+ spin_lock(&cdev->lock);
value = get_string(cdev, req->buf,
w_index, w_value & 0xff);
+ spin_unlock(&cdev->lock);
if (value >= 0)
value = min(w_length, (u16) value);
break;
case USB_DT_BOS:
- if (gadget_is_superspeed(gadget)) {
+ if ((gadget_is_superspeed(gadget) &&
+ (gadget->speed >= USB_SPEED_SUPER))
+ || !disable_l1_for_hs) {
value = bos_desc(cdev);
value = min(w_length, (u16) value);
}
@@ -1702,8 +1863,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (!f)
break;
value = 0;
- if (f->func_suspend)
- value = f->func_suspend(f, w_index >> 8);
+ if (f->func_suspend) {
+ const u8 suspend_opt = w_index >> 8;
+
+ value = f->func_suspend(f, suspend_opt);
+ DBG(cdev, "%s function: FUNCTION_SUSPEND(%u)",
+ f->name ? f->name : "", suspend_opt);
+ }
if (value < 0) {
ERROR(cdev,
"func_suspend() returned error %d\n",
@@ -1790,6 +1956,16 @@ unknown:
}
break;
}
+
+ if (value < 0) {
+ DBG(cdev, "%s: unhandled os desc request\n",
+ __func__);
+ DBG(cdev, "req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ return value;
+ }
+
req->length = value;
req->context = cdev;
req->zero = value < w_length;
@@ -1797,7 +1973,9 @@ unknown:
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
- composite_setup_complete(gadget->ep0, req);
+ if (value != -ESHUTDOWN)
+ composite_setup_complete(gadget->ep0,
+ req);
}
return value;
}
@@ -1865,6 +2043,14 @@ try_fun_setup:
if (f->setup)
value = f->setup(f, ctrl);
}
+ if (value == USB_GADGET_DELAYED_STATUS) {
+ DBG(cdev,
+ "%s: interface %d (%s) requested delayed status\n",
+ __func__, intf, f->name);
+ cdev->delayed_status++;
+ DBG(cdev, "delayed_status count %d\n",
+ cdev->delayed_status);
+ }
goto done;
}
@@ -1878,7 +2064,8 @@ try_fun_setup:
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
- composite_setup_complete(gadget->ep0, req);
+ if (value != -ESHUTDOWN)
+ composite_setup_complete(gadget->ep0, req);
}
} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
WARN(cdev,
@@ -1911,6 +2098,10 @@ void composite_disconnect(struct usb_gadget *gadget)
reset_config(cdev);
if (cdev->driver->disconnect)
cdev->driver->disconnect(cdev);
+ if (cdev->delayed_status != 0) {
+ INFO(cdev, "delayed status mismatch..resetting\n");
+ cdev->delayed_status = 0;
+ }
spin_unlock_irqrestore(&cdev->lock, flags);
}
@@ -1922,7 +2113,7 @@ static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
struct usb_gadget *gadget = dev_to_usb_gadget(dev);
struct usb_composite_dev *cdev = get_gadget_data(gadget);
- return sprintf(buf, "%d\n", cdev->suspended);
+ return snprintf(buf, PAGE_SIZE, "%d\n", cdev->suspended);
}
static DEVICE_ATTR_RO(suspended);
@@ -2157,11 +2348,13 @@ void composite_suspend(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
+ unsigned long flags;
/* REVISIT: should we have config level
* suspend/resume callbacks?
*/
DBG(cdev, "suspend\n");
+ spin_lock_irqsave(&cdev->lock, flags);
if (cdev->config) {
list_for_each_entry(f, &cdev->config->functions, list) {
if (f->suspend)
@@ -2172,6 +2365,7 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->driver->suspend(cdev);
cdev->suspended = 1;
+ spin_unlock_irqrestore(&cdev->lock, flags);
usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, 2);
@@ -2181,7 +2375,8 @@ void composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
- u16 maxpower;
+ int ret;
+ unsigned long flags;
/* REVISIT: should we have config level
* suspend/resume callbacks?
@@ -2189,21 +2384,36 @@ void composite_resume(struct usb_gadget *gadget)
DBG(cdev, "resume\n");
if (cdev->driver->resume)
cdev->driver->resume(cdev);
+
+ spin_lock_irqsave(&cdev->lock, flags);
if (cdev->config) {
list_for_each_entry(f, &cdev->config->functions, list) {
+ ret = usb_func_wakeup_int(f);
+ if (ret) {
+ if (ret == -EAGAIN) {
+ ERROR(f->config->cdev,
+ "Function wakeup for %s could not complete due to suspend state.\n",
+ f->name ? f->name : "");
+ break;
+ } else if (ret != -ENOTSUPP) {
+ ERROR(f->config->cdev,
+ "Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n",
+ f->name ? f->name : "",
+ ret);
+ }
+ }
+
if (f->resume)
f->resume(f);
}
- maxpower = cdev->config->MaxPower;
-
- if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (USB_VBUS_DRAW(gadget->speed) > USB_SELF_POWER_VBUS_MAX_DRAW)
usb_gadget_clear_selfpowered(gadget);
- usb_gadget_vbus_draw(gadget, maxpower ?
- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
+ usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed));
}
+ spin_unlock_irqrestore(&cdev->lock, flags);
cdev->suspended = 0;
}
@@ -2295,7 +2505,13 @@ void usb_composite_setup_continue(struct usb_composite_dev *cdev)
spin_lock_irqsave(&cdev->lock, flags);
if (cdev->delayed_status == 0) {
+ if (!cdev->config) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&cdev->lock, flags);
WARN(cdev, "%s: Unexpected call\n", __func__);
+ return;
} else if (--cdev->delayed_status == 0) {
DBG(cdev, "%s: Completing delayed status\n", __func__);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 7e63dd7b3834..9ba61939eed6 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -8,6 +8,7 @@
#include "configfs.h"
#include "u_f.h"
#include "u_os_desc.h"
+#include "debug.h"
#ifdef CONFIG_USB_CONFIGFS_UEVENT
#include <linux/platform_device.h>
@@ -22,6 +23,7 @@ void acc_disconnect(void);
static struct class *android_class;
static struct device *android_device;
static int index;
+static int gadget_index;
struct device *create_function_device(char *name)
{
@@ -85,6 +87,7 @@ struct gadget_info {
struct usb_composite_driver composite;
struct usb_composite_dev cdev;
bool use_os_desc;
+ bool unbinding;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
spinlock_t spinlock;
@@ -282,9 +285,11 @@ static int unregister_gadget(struct gadget_info *gi)
if (!gi->udc_name)
return -ENODEV;
+ gi->unbinding = true;
ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
if (ret)
return ret;
+ gi->unbinding = false;
kfree(gi->udc_name);
gi->udc_name = NULL;
return 0;
@@ -1263,7 +1268,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_err(&gi->cdev.gadget->dev, "unbind function"
- " '%s'/%p\n", f->name, f);
+ " '%s'/%pK\n", f->name, f);
f->unbind(c, f);
}
}
@@ -1444,28 +1449,28 @@ static void android_work(struct work_struct *data)
spin_unlock_irqrestore(&cdev->lock, flags);
if (status[0]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, connected);
pr_info("%s: sent uevent %s\n", __func__, connected[0]);
uevent_sent = true;
}
if (status[1]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, configured);
pr_info("%s: sent uevent %s\n", __func__, configured[0]);
uevent_sent = true;
}
if (status[2]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, disconnected);
pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
uevent_sent = true;
}
if (!uevent_sent) {
- pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+ pr_info("%s: did not send uevent (%d %d %pK)\n", __func__,
gi->connected, gi->sw_connected, cdev->config);
}
}
@@ -1496,6 +1501,7 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
spin_unlock_irqrestore(&gi->spinlock, flags);
}
+#if !IS_ENABLED(CONFIG_USB_CONFIGFS_UEVENT)
static int configfs_composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl)
{
@@ -1542,6 +1548,7 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget)
composite_disconnect(gadget);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
+#endif
static void configfs_composite_suspend(struct usb_gadget *gadget)
{
@@ -1632,7 +1639,14 @@ static int android_setup(struct usb_gadget *gadget,
static void android_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
- struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+ struct gadget_info *gi;
+
+ if (!cdev) {
+ pr_err("%s: gadget is not connected\n", __func__);
+ return;
+ }
+
+ gi = container_of(cdev, struct gadget_info, cdev);
/* FIXME: There's a race between usb_gadget_udc_stop() which is likely
* to set the gadget driver to NULL in the udc driver and this drivers
@@ -1655,7 +1669,8 @@ static void android_disconnect(struct usb_gadget *gadget)
acc_disconnect();
#endif
gi->connected = 0;
- schedule_work(&gi->work);
+ if (!gi->unbinding)
+ schedule_work(&gi->work);
composite_disconnect(gadget);
}
#endif
@@ -1720,23 +1735,28 @@ static int android_device_create(struct gadget_info *gi)
{
struct device_attribute **attrs;
struct device_attribute *attr;
+ char str[10];
INIT_WORK(&gi->work, android_work);
- android_device = device_create(android_class, NULL,
- MKDEV(0, 0), NULL, "android0");
- if (IS_ERR(android_device))
- return PTR_ERR(android_device);
+ snprintf(str, sizeof(str), "android%d", gadget_index - 1);
+ pr_debug("Creating android device %s\n", str);
+ gi->dev = device_create(android_class, NULL,
+ MKDEV(0, 0), NULL, str);
+ if (IS_ERR(gi->dev))
+ return PTR_ERR(gi->dev);
- dev_set_drvdata(android_device, gi);
+ dev_set_drvdata(gi->dev, gi);
+ if (gadget_index == 1)
+ android_device = gi->dev;
attrs = android_usb_attributes;
while ((attr = *attrs++)) {
int err;
- err = device_create_file(android_device, attr);
+ err = device_create_file(gi->dev, attr);
if (err) {
- device_destroy(android_device->class,
- android_device->devt);
+ device_destroy(gi->dev->class,
+ gi->dev->devt);
return err;
}
}
@@ -1744,15 +1764,15 @@ static int android_device_create(struct gadget_info *gi)
return 0;
}
-static void android_device_destroy(void)
+static void android_device_destroy(struct device *dev)
{
struct device_attribute **attrs;
struct device_attribute *attr;
attrs = android_usb_attributes;
while ((attr = *attrs++))
- device_remove_file(android_device, attr);
- device_destroy(android_device->class, android_device->devt);
+ device_remove_file(dev, attr);
+ device_destroy(dev->class, dev->devt);
}
#else
static inline int android_device_create(struct gadget_info *gi)
@@ -1760,7 +1780,7 @@ static inline int android_device_create(struct gadget_info *gi)
return 0;
}
-static inline void android_device_destroy(void)
+static inline void android_device_destroy(struct device *dev)
{
}
#endif
@@ -1799,6 +1819,7 @@ static struct config_group *gadgets_make(
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
+ spin_lock_init(&gi->spinlock);
composite_init_dev(&gi->cdev);
gi->cdev.desc.bLength = USB_DT_DEVICE_SIZE;
@@ -1813,6 +1834,8 @@ static struct config_group *gadgets_make(
if (!gi->composite.gadget_driver.function)
goto err;
+ gadget_index++;
+ pr_debug("Creating gadget index %d\n", gadget_index);
if (android_device_create(gi) < 0)
goto err;
@@ -1827,8 +1850,14 @@ err:
static void gadgets_drop(struct config_group *group, struct config_item *item)
{
+ struct gadget_info *gi;
+
+ gi = container_of(to_config_group(item), struct gadget_info, group);
config_item_put(item);
- android_device_destroy();
+ if (gi->dev) {
+ android_device_destroy(gi->dev);
+ gi->dev = NULL;
+ }
}
static struct configfs_group_operations gadgets_ops = {
@@ -1855,6 +1884,7 @@ void unregister_gadget_item(struct config_item *item)
{
struct gadget_info *gi = to_gadget_info(item);
+ /* to protect race with gadget_dev_desc_UDC_store*/
mutex_lock(&gi->lock);
unregister_gadget(gi);
mutex_unlock(&gi->lock);
@@ -1867,6 +1897,8 @@ static int __init gadget_cfs_init(void)
config_group_init(&gadget_subsys.su_group);
+ debug_debugfs_init();
+
ret = configfs_register_subsystem(&gadget_subsys);
#ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -1881,6 +1913,7 @@ module_init(gadget_cfs_init);
static void __exit gadget_cfs_exit(void)
{
+ debug_debugfs_exit();
configfs_unregister_subsystem(&gadget_subsys);
#ifdef CONFIG_USB_CONFIGFS_UEVENT
if (!IS_ERR(android_class))
diff --git a/drivers/usb/gadget/debug.c b/drivers/usb/gadget/debug.c
new file mode 100644
index 000000000000..32a53299446c
--- /dev/null
+++ b/drivers/usb/gadget/debug.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/rwlock.h>
+#include <linux/debugfs.h>
+
+#include "debug.h"
+
+#define dbg_inc(i) ((i+1) % DBG_MAX_MSG)
+
+#define ENABLE_EVENT_LOG 1
+unsigned int enable_event_log = ENABLE_EVENT_LOG;
+module_param(enable_event_log, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_event_log, "enable event logging in debug buffer");
+
+static struct {
+ char buf[DBG_MAX_MSG][DBG_MSG_LEN]; /* buffer */
+ unsigned idx; /* index */
+ rwlock_t lck; /* lock */
+ struct dentry *root;
+} __maybe_unused dbg_buffer = {
+ .idx = 0,
+ .lck = __RW_LOCK_UNLOCKED(lck),
+ .root = NULL
+};
+
+void __maybe_unused put_timestamp(char *tbuf)
+{
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ unsigned long flags;
+
+ write_lock_irqsave(&dbg_buffer.lck, flags);
+ t = cpu_clock(smp_processor_id());
+ write_unlock_irqrestore(&dbg_buffer.lck, flags);
+ nanosec_rem = do_div(t, 1000000000)/1000;
+ snprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu]: ", (unsigned long)t,
+ nanosec_rem);
+}
+
+void __maybe_unused add_event_to_buf(char *tbuf)
+{
+ unsigned long flags;
+ char *buf;
+ write_lock_irqsave(&dbg_buffer.lck, flags);
+ buf = dbg_buffer.buf[dbg_buffer.idx];
+ memcpy(buf, tbuf, DBG_MSG_LEN);
+ dbg_buffer.idx = (dbg_buffer.idx + 1) % DBG_MAX_MSG;
+ write_unlock_irqrestore(&dbg_buffer.lck, flags);
+}
+
+static int dbg_read_buf_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ unsigned i;
+
+ read_lock_irqsave(&dbg_buffer.lck, flags);
+
+ i = dbg_buffer.idx;
+ if (strnlen(dbg_buffer.buf[i], DBG_MSG_LEN))
+ seq_printf(s, "%s\n", dbg_buffer.buf[i]);
+ for (i = dbg_inc(i); i != dbg_buffer.idx; i = dbg_inc(i)) {
+ if (!strnlen(dbg_buffer.buf[i], DBG_MSG_LEN))
+ continue;
+ seq_printf(s, "%s\n", dbg_buffer.buf[i]);
+ }
+
+ read_unlock_irqrestore(&dbg_buffer.lck, flags);
+
+ return 0;
+}
+
+static int dbg_read_buf_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_read_buf_show, inode->i_private);
+}
+
+const struct file_operations dbg_read_buf_fops = {
+ .open = dbg_read_buf_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int debug_debugfs_init(void)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir("debug", NULL);
+ if (!root) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dbg_buffer.root = root;
+
+ file = debugfs_create_file("read_buf", S_IRUGO, root,
+ NULL, &dbg_read_buf_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+
+void debug_debugfs_exit(void)
+{
+ debugfs_remove_recursive(dbg_buffer.root);
+ dbg_buffer.root = NULL;
+}
diff --git a/drivers/usb/gadget/debug.h b/drivers/usb/gadget/debug.h
new file mode 100644
index 000000000000..8729aca0a69e
--- /dev/null
+++ b/drivers/usb/gadget/debug.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DEBUG_H_
+#define __DEBUG_H_
+
+#define DBG_MAX_MSG 1024UL
+#define DBG_MSG_LEN 80UL
+#define TIME_BUF_LEN 17
+#define DBG_EVENT_LEN (DBG_MSG_LEN - TIME_BUF_LEN)
+
+extern unsigned int enable_event_log;
+extern void put_timestamp(char *tbuf);
+extern void add_event_to_buf(char *tbuf);
+extern int debug_debugfs_init(void);
+extern void debug_debugfs_exit(void);
+
+#define LOGLEVEL_NONE 8
+#define LOGLEVEL_DEBUG 7
+#define LOGLEVEL_INFO 6
+#define LOGLEVEL_ERR 3
+
+#define log_event(log_level, x...) \
+do { \
+ char buf[DBG_MSG_LEN]; \
+ if (log_level == LOGLEVEL_DEBUG) \
+ pr_debug(x); \
+ else if (log_level == LOGLEVEL_ERR) \
+ pr_err(x); \
+ else if (log_level == LOGLEVEL_INFO) \
+ pr_info(x); \
+ if (enable_event_log) { \
+ put_timestamp(buf); \
+ snprintf(&buf[TIME_BUF_LEN - 1], DBG_EVENT_LEN, x); \
+ add_event_to_buf(buf); \
+ } \
+} while (0)
+
+#define log_event_none(x, ...) log_event(LOGLEVEL_NONE, x, ##__VA_ARGS__)
+#define log_event_dbg(x, ...) log_event(LOGLEVEL_DEBUG, x, ##__VA_ARGS__)
+#define log_event_err(x, ...) log_event(LOGLEVEL_ERR, x, ##__VA_ARGS__)
+#define log_event_info(x, ...) log_event(LOGLEVEL_INFO, x, ##__VA_ARGS__)
+
+#endif /* __DEBUG_H_ */
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 30fdab0ae383..16ee4714d38c 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -209,3 +209,42 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
gadget->out_epnum = 0;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig_reset);
+
+/**
+ * usb_ep_autoconfig_by_name - Used to pick the endpoint by name. eg ep1in-gsi
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ * initialized.
+ * @ep_name: EP name that is to be searched.
+ *
+ */
+struct usb_ep *usb_ep_autoconfig_by_name(
+ struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ const char *ep_name
+)
+{
+ struct usb_ep *ep;
+ bool ep_found = false;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list)
+ if (0 == strcmp(ep->name, ep_name) &&
+ !ep->driver_data) {
+ ep_found = true;
+ break;
+ }
+
+ if (ep_found) {
+ desc->bEndpointAddress &= USB_DIR_IN;
+ desc->bEndpointAddress |= ep->ep_num;
+ ep->address = desc->bEndpointAddress;
+ pr_debug("Allocating ep address:%x\n", ep->address);
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+ return ep;
+ }
+
+ pr_err("%s:error finding ep %s\n", __func__, ep_name);
+ return NULL;
+}
+EXPORT_SYMBOL(usb_ep_autoconfig_by_name);
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 1cd544beef63..e04ca4b97c8e 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -32,8 +32,11 @@ usb_f_mass_storage-y := f_mass_storage.o storage_common.o
obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
usb_f_fs-y := f_fs.o
obj-$(CONFIG_USB_F_FS) += usb_f_fs.o
-usb_f_uac1-y := f_uac1.o u_uac1.o
+obj-$(CONFIG_USB_U_AUDIO) += u_audio.o
+usb_f_uac1-y := f_uac1.o
obj-$(CONFIG_USB_F_UAC1) += usb_f_uac1.o
+usb_f_uac1_legacy-y := f_uac1_legacy.o u_uac1_legacy.o
+obj-$(CONFIG_USB_F_UAC1_LEGACY) += usb_f_uac1_legacy.o
usb_f_uac2-y := f_uac2.o
obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o
usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o
@@ -52,3 +55,17 @@ usb_f_audio_source-y := f_audio_source.o
obj-$(CONFIG_USB_F_AUDIO_SRC) += usb_f_audio_source.o
usb_f_accessory-y := f_accessory.o
obj-$(CONFIG_USB_F_ACC) += usb_f_accessory.o
+usb_f_diag-y := f_diag.o
+obj-$(CONFIG_USB_F_DIAG) += usb_f_diag.o
+usb_f_gsi-y := f_gsi.o rndis.o
+obj-$(CONFIG_USB_F_GSI) += usb_f_gsi.o
+usb_f_cdev-y := f_cdev.o
+obj-$(CONFIG_USB_F_CDEV) += usb_f_cdev.o
+usb_f_qdss-y := f_qdss.o u_qdss.o
+obj-$(CONFIG_USB_F_QDSS) += usb_f_qdss.o
+usb_f_qcrndis-y := f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
+usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o
+obj-$(CONFIG_USB_F_RMNET_BAM) += usb_f_rmnet_bam.o
+usb_f_ccid-y := f_ccid.o
+obj-$(CONFIG_USB_F_CCID) += usb_f_ccid.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 925688505967..b5c1ad06f8be 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -45,6 +45,7 @@
#define MAX_INST_NAME_LEN 40
#define BULK_BUFFER_SIZE 16384
+#define BULK_BUFFER_INIT_SIZE 131072
#define ACC_STRING_SIZE 256
#define PROTOCOL_VERSION 2
@@ -56,6 +57,9 @@
#define TX_REQ_MAX 4
#define RX_REQ_MAX 2
+unsigned int acc_rx_req_len = BULK_BUFFER_INIT_SIZE;
+unsigned int acc_tx_req_len = BULK_BUFFER_INIT_SIZE;
+
struct acc_hid_dev {
struct list_head list;
struct hid_device *hid;
@@ -142,12 +146,47 @@ static struct usb_interface_descriptor acc_interface_desc = {
.bInterfaceProtocol = 0,
};
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_in_comp_desc = {
+ .bLength = sizeof(acc_superspeed_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 8,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_out_comp_desc = {
+ .bLength = sizeof(acc_superspeed_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 8,
+ /* .bmAttributes = 0, */
+};
+
+
static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
@@ -155,7 +194,7 @@ static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(512),
+ .wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
@@ -186,6 +225,15 @@ static struct usb_descriptor_header *hs_acc_descs[] = {
NULL,
};
+static struct usb_descriptor_header *ss_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_in_comp_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_out_desc,
+ (struct usb_descriptor_header *) &acc_superspeed_out_comp_desc,
+ NULL,
+};
+
static struct usb_string acc_string_defs[] = {
[INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
{ }, /* end of list */
@@ -346,6 +394,7 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
struct acc_dev *dev = ep->driver_data;
char *string_dest = NULL;
int length = req->actual;
+ unsigned long flags;
if (req->status != 0) {
pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +420,26 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
case ACCESSORY_STRING_SERIAL:
string_dest = dev->serial;
break;
+ default:
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ return;
}
- if (string_dest) {
- unsigned long flags;
- if (length >= ACC_STRING_SIZE)
- length = ACC_STRING_SIZE - 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- memcpy(string_dest, req->buf, length);
- /* ensure zero termination */
- string_dest[length] = 0;
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
- pr_err("unknown accessory string index %d\n",
- dev->string_index);
+ if (!length) {
+ pr_debug("zero length for accessory string index %d\n",
+ dev->string_index);
+ return;
}
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
@@ -559,7 +612,7 @@ static int create_bulk_endpoints(struct acc_dev *dev,
struct usb_ep *ep;
int i;
- DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+ DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
ep = usb_ep_autoconfig(cdev->gadget, in_desc);
if (!ep) {
@@ -579,18 +632,36 @@ static int create_bulk_endpoints(struct acc_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_out = ep;
+retry_tx_alloc:
/* now allocate requests for our endpoints */
for (i = 0; i < TX_REQ_MAX; i++) {
- req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = acc_request_new(dev->ep_in, acc_tx_req_len);
+ if (!req) {
+ if (acc_tx_req_len <= BULK_BUFFER_SIZE)
+ goto fail;
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ acc_tx_req_len /= 2;
+ goto retry_tx_alloc;
+ }
req->complete = acc_complete_in;
req_put(dev, &dev->tx_idle, req);
}
+
+retry_rx_alloc:
for (i = 0; i < RX_REQ_MAX; i++) {
- req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = acc_request_new(dev->ep_out, acc_rx_req_len);
+ if (!req) {
+ if (acc_rx_req_len <= BULK_BUFFER_SIZE)
+ goto fail;
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ acc_request_free(dev->rx_req[i],
+ dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
+ acc_rx_req_len /= 2;
+ goto retry_rx_alloc;
+ }
req->complete = acc_complete_out;
dev->rx_req[i] = req;
}
@@ -601,8 +672,10 @@ fail:
pr_err("acc_bind() could not allocate requests\n");
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
- for (i = 0; i < RX_REQ_MAX; i++)
+ for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
return -1;
}
@@ -611,9 +684,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
{
struct acc_dev *dev = fp->private_data;
struct usb_request *req;
- ssize_t r = count;
- ssize_t data_length;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
pr_debug("acc_read(%zu)\n", count);
@@ -623,8 +694,8 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
return -ENODEV;
}
- if (count > BULK_BUFFER_SIZE)
- count = BULK_BUFFER_SIZE;
+ if (count > acc_rx_req_len)
+ count = acc_rx_req_len;
/* we will block until we're online */
pr_debug("acc_read: waiting for online\n");
@@ -634,14 +705,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
goto done;
}
- /*
- * Calculate the data length by considering termination character.
- * Then compansite the difference of rounding up to
- * integer multiple of maxpacket size.
- */
- data_length = count;
- data_length += dev->ep_out->maxpacket - 1;
- data_length -= data_length % dev->ep_out->maxpacket;
+ len = ALIGN(count, dev->ep_out->maxpacket);
if (dev->rx_done) {
// last req cancelled. try to get it.
@@ -652,14 +716,14 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
requeue_req:
/* queue a request */
req = dev->rx_req[0];
- req->length = data_length;
+ req->length = len;
dev->rx_done = 0;
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
goto done;
} else {
- pr_debug("rx %p queue\n", req);
+ pr_debug("rx %pK queue\n", req);
}
/* wait for a request to complete */
@@ -682,7 +746,7 @@ copy_data:
if (req->actual == 0)
goto requeue_req;
- pr_debug("rx %p %u\n", req, req->actual);
+ pr_debug("rx %pK %u\n", req, req->actual);
xfer = (req->actual < count) ? req->actual : count;
r = xfer;
if (copy_to_user(buf, req->buf, xfer))
@@ -727,8 +791,8 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
break;
}
- if (count > BULK_BUFFER_SIZE) {
- xfer = BULK_BUFFER_SIZE;
+ if (count > acc_tx_req_len) {
+ xfer = acc_tx_req_len;
/* ZLP, They will be more TX requests so not yet. */
req->zero = 0;
} else {
@@ -846,6 +910,9 @@ static const struct file_operations acc_fops = {
.read = acc_read,
.write = acc_write,
.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = acc_ioctl,
+#endif
.open = acc_open,
.release = acc_release,
};
@@ -973,6 +1040,8 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
dev->audio_mode = 0;
+ strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+ strlcpy(dev->model, "Android", ACC_STRING_SIZE);
}
}
@@ -1006,7 +1075,7 @@ __acc_function_bind(struct usb_configuration *c,
int id;
int ret;
- DBG(cdev, "acc_function_bind dev: %p\n", dev);
+ DBG(cdev, "acc_function_bind dev: %pK\n", dev);
if (configfs) {
if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
@@ -1044,6 +1113,14 @@ __acc_function_bind(struct usb_configuration *c,
acc_fullspeed_out_desc.bEndpointAddress;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ acc_superspeed_in_desc.bEndpointAddress =
+ acc_fullspeed_in_desc.bEndpointAddress;
+ acc_superspeed_out_desc.bEndpointAddress =
+ acc_fullspeed_out_desc.bEndpointAddress;
+ }
+
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1099,8 +1176,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
- for (i = 0; i < RX_REQ_MAX; i++)
+ for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
acc_hid_unbind(dev);
}
@@ -1184,7 +1263,7 @@ static void acc_hid_work(struct work_struct *data)
list_for_each_safe(entry, temp, &new_list) {
hid = list_entry(entry, struct acc_hid_dev, list);
if (acc_hid_init(hid)) {
- pr_err("can't add HID device %p\n", hid);
+ pr_err("can't add HID device %pK\n", hid);
acc_hid_delete(hid);
} else {
spin_lock_irqsave(&dev->lock, flags);
@@ -1423,6 +1502,7 @@ static struct usb_function *acc_alloc(struct usb_function_instance *fi)
dev->function.strings = acc_strings,
dev->function.fs_descriptors = fs_acc_descs;
dev->function.hs_descriptors = hs_acc_descs;
+ dev->function.ss_descriptors = ss_acc_descs;
dev->function.bind = acc_function_bind_configfs;
dev->function.unbind = acc_function_unbind;
dev->function.set_alt = acc_function_set_alt;
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 670a89f197cd..5819f6503f75 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -704,7 +704,7 @@ fail:
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+ ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
return status;
}
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 8124af33b738..7d8bfe62b148 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -369,15 +369,22 @@ static void audio_send(struct audio_dev *audio)
s64 msecs;
s64 frames;
ktime_t now;
+ unsigned long flags;
+ spin_lock_irqsave(&audio->lock, flags);
/* audio->substream will be null if we have been closed */
- if (!audio->substream)
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
/* audio->buffer_pos will be null if we have been stopped */
- if (!audio->buffer_pos)
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
return;
+ }
runtime = audio->substream->runtime;
+ spin_unlock_irqrestore(&audio->lock, flags);
/* compute number of frames to send */
now = ktime_get();
@@ -400,8 +407,21 @@ static void audio_send(struct audio_dev *audio)
while (frames > 0) {
req = audio_req_get(audio);
- if (!req)
+ spin_lock_irqsave(&audio->lock, flags);
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos) {
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&audio->lock, flags);
break;
+ }
length = frames_to_bytes(runtime, frames);
if (length > IN_EP_MAX_PACKET_SIZE)
@@ -427,6 +447,7 @@ static void audio_send(struct audio_dev *audio)
}
req->length = length;
+ spin_unlock_irqrestore(&audio->lock, flags);
ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
if (ret < 0) {
pr_err("usb_ep_queue failed ret: %d\n", ret);
@@ -570,14 +591,38 @@ static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+ if (!alt) {
+ usb_ep_disable(audio->in_ep);
+ return 0;
+ }
+
ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
- if (ret)
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("config_ep fail for audio ep ret %d\n", ret);
return ret;
+ }
+ ret = usb_ep_enable(audio->in_ep);
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("failed to enable audio ret %d\n", ret);
+ return ret;
+ }
- usb_ep_enable(audio->in_ep);
return 0;
}
+/*
+ * Because the data interface supports multiple altsettings,
+ * this audio_source function *MUST* implement a get_alt() method.
+ */
+static int audio_get_alt(struct usb_function *f, unsigned int intf)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ return audio->in_ep->enabled ? 1 : 0;
+}
+
static void audio_disable(struct usb_function *f)
{
struct audio_dev *audio = func_to_audio(f);
@@ -755,11 +800,11 @@ static int audio_pcm_close(struct snd_pcm_substream *substream)
struct audio_dev *audio = substream->private_data;
unsigned long flags;
- spin_lock_irqsave(&audio->lock, flags);
-
/* Remove the QoS request */
pm_qos_remove_request(&audio->pm_qos);
+ spin_lock_irqsave(&audio->lock, flags);
+
audio->substream = NULL;
spin_unlock_irqrestore(&audio->lock, flags);
@@ -841,6 +886,7 @@ static struct audio_dev _audio_dev = {
.bind = audio_bind,
.unbind = audio_unbind,
.set_alt = audio_set_alt,
+ .get_alt = audio_get_alt,
.setup = audio_setup,
.disable = audio_disable,
.free_func = audio_free_func,
@@ -1000,6 +1046,7 @@ static ssize_t audio_source_pcm_show(struct device *dev,
struct device *create_function_device(char *name);
+#define AUDIO_SOURCE_DEV_NAME_LENGTH 20
static struct usb_function_instance *audio_source_alloc_inst(void)
{
struct audio_source_instance *fi_audio;
@@ -1008,6 +1055,8 @@ static struct usb_function_instance *audio_source_alloc_inst(void)
struct device *dev;
void *err_ptr;
int err = 0;
+ char device_name[AUDIO_SOURCE_DEV_NAME_LENGTH];
+ static u8 count;
fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
if (!fi_audio)
@@ -1025,7 +1074,17 @@ static struct usb_function_instance *audio_source_alloc_inst(void)
config_group_init_type_name(&fi_audio->func_inst.group, "",
&audio_source_func_type);
- dev = create_function_device("f_audio_source");
+
+ if (!count) {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source");
+ count++;
+ } else {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source%d", count++);
+ }
+
+ dev = create_function_device(device_name);
if (IS_ERR(dev)) {
err_ptr = dev;
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
new file mode 100644
index 000000000000..0b335575f245
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -0,0 +1,1176 @@
+/*
+ * f_ccid.c -- CCID function Driver
+ *
+ * Copyright (c) 2011, 2013, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/usb/ccid_desc.h>
+#include <linux/usb/composite.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include "f_ccid.h"
+
+#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
+#define BULK_OUT_BUFFER_SIZE 1024
+#define CTRL_BUF_SIZE 4
+#define FUNCTION_NAME "ccid"
+#define MAX_INST_NAME_LEN 40
+#define CCID_CTRL_DEV_NAME "ccid_ctrl"
+#define CCID_BULK_DEV_NAME "ccid_bulk"
+#define CCID_NOTIFY_INTERVAL 5
+#define CCID_NOTIFY_MAXPACKET 4
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+struct ccid_ctrl_dev {
+ atomic_t opened;
+ struct list_head tx_q;
+ wait_queue_head_t tx_wait_q;
+ unsigned char buf[CTRL_BUF_SIZE];
+ int tx_ctrl_done;
+ struct miscdevice ccid_ctrl_device;
+};
+
+struct ccid_bulk_dev {
+ atomic_t error;
+ atomic_t opened;
+ atomic_t rx_req_busy;
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req;
+ int rx_done;
+ struct list_head tx_idle;
+ struct miscdevice ccid_bulk_device;
+};
+
+struct ccid_opts {
+ struct usb_function_instance func_inst;
+ struct f_ccid *ccid;
+};
+
+struct f_ccid {
+ struct usb_function function;
+ int ifc_id;
+ spinlock_t lock;
+ atomic_t online;
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ struct usb_request *notify_req;
+ struct ccid_ctrl_dev ctrl_dev;
+ struct ccid_bulk_dev bulk_dev;
+ int dtr_state;
+};
+
+static inline struct f_ccid *ctrl_dev_to_ccid(struct ccid_ctrl_dev *d)
+{
+ return container_of(d, struct f_ccid, ctrl_dev);
+}
+
+static inline struct f_ccid *bulk_dev_to_ccid(struct ccid_bulk_dev *d)
+{
+ return container_of(d, struct f_ccid, bulk_dev);
+}
+
+/* Interface Descriptor: */
+static struct usb_interface_descriptor ccid_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_CSCID,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+};
+/* CCID Class Descriptor */
+static struct usb_ccid_class_descriptor ccid_class_desc = {
+ .bLength = sizeof(ccid_class_desc),
+ .bDescriptorType = CCID_DECRIPTOR_TYPE,
+ .bcdCCID = CCID1_10,
+ .bMaxSlotIndex = 0,
+ /* This value indicates what voltages the CCID can supply to slots */
+ .bVoltageSupport = VOLTS_3_0,
+ .dwProtocols = PROTOCOL_TO,
+ /* Default ICC clock frequency in KHz */
+ .dwDefaultClock = 3580,
+ /* Maximum supported ICC clock frequency in KHz */
+ .dwMaximumClock = 3580,
+ .bNumClockSupported = 0,
+ /* Default ICC I/O data rate in bps */
+ .dwDataRate = 9600,
+ /* Maximum supported ICC I/O data rate in bps */
+ .dwMaxDataRate = 9600,
+ .bNumDataRatesSupported = 0,
+ .dwMaxIFSD = 0,
+ .dwSynchProtocols = 0,
+ .dwMechanical = 0,
+ /* This value indicates what intelligent features the CCID has */
+ .dwFeatures = CCID_FEATURES_EXC_TPDU |
+ CCID_FEATURES_AUTO_PNEGO |
+ CCID_FEATURES_AUTO_BAUD |
+ CCID_FEATURES_AUTO_CLOCK |
+ CCID_FEATURES_AUTO_VOLT |
+ CCID_FEATURES_AUTO_ACTIV |
+ CCID_FEATURES_AUTO_PCONF,
+ /* extended APDU level Message Length */
+ .dwMaxCCIDMessageLength = 0x200,
+ .bClassGetResponse = 0x0,
+ .bClassEnvelope = 0x0,
+ .wLcdLayout = 0,
+ .bPINSupport = 0,
+ .bMaxCCIDBusySlots = 1
+};
+/* Full speed support: */
+static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = 1 << CCID_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor ccid_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor ccid_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *ccid_fs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_fs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_fs_in_desc,
+ (struct usb_descriptor_header *) &ccid_fs_out_desc,
+ NULL,
+};
+
+/* High speed support: */
+static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor ccid_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ccid_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ccid_hs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_hs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_hs_in_desc,
+ (struct usb_descriptor_header *) &ccid_hs_out_desc,
+ NULL,
+};
+
+/* Super speed support: */
+static struct usb_endpoint_descriptor ccid_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_notify_comp_desc = {
+ .bLength = sizeof(ccid_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_in_comp_desc = {
+ .bLength = sizeof(ccid_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_out_comp_desc = {
+ .bLength = sizeof(ccid_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ccid_ss_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_comp_desc,
+ NULL,
+};
+
+static inline struct f_ccid *func_to_ccid(struct usb_function *f)
+{
+ return container_of(f, struct f_ccid, function);
+}
+
+static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+}
+
+static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
+ struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!list_empty(head)) {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ return req;
+}
+
+static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ switch (req->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case 0:
+ break;
+ default:
+ pr_err("CCID notify ep error %d\n", req->status);
+ }
+}
+
+static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ wake_up(&bulk_dev->write_wq);
+}
+
+static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ bulk_dev->rx_done = 1;
+ wake_up(&bulk_dev->read_wq);
+}
+
+static struct usb_request *
+ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int
+ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ if (!atomic_read(&ccid_dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_ABORT:
+ if (w_length != 0)
+ goto invalid;
+ ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
+ ctrl_dev->buf[1] = w_value & 0xFF;
+ ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
+ ctrl_dev->buf[3] = 0x00;
+ ctrl_dev->tx_ctrl_done = 1;
+ wake_up(&ctrl_dev->tx_wait_q);
+ ret = 0;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
+ *(u32 *) req->buf =
+ cpu_to_le32(ccid_class_desc.dwDefaultClock);
+ ret = min_t(u32, w_length,
+ sizeof(ccid_class_desc.dwDefaultClock));
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_DATA_RATES:
+ *(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
+ ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
+ break;
+
+ default:
+invalid:
+ pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ pr_err("ccid ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+static void ccid_function_disable(struct usb_function *f)
+{
+ struct f_ccid *ccid_dev = func_to_ccid(f);
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ struct usb_request *req;
+
+ /* Disable endpoints */
+ usb_ep_disable(ccid_dev->notify);
+ usb_ep_disable(ccid_dev->in);
+ usb_ep_disable(ccid_dev->out);
+ /* Free endpoint related requests */
+ ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+ if (!atomic_read(&bulk_dev->rx_req_busy))
+ ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+ while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+ ccid_request_free(req, ccid_dev->in);
+
+ ccid_dev->dtr_state = 0;
+ atomic_set(&ccid_dev->online, 0);
+ /* Wake up threads */
+ wake_up(&bulk_dev->write_wq);
+ wake_up(&bulk_dev->read_wq);
+ wake_up(&ctrl_dev->tx_wait_q);
+
+}
+
+/*
+ * ccid_function_set_alt() - host selected our interface / alt setting.
+ *
+ * Allocates the notify request and the bulk IN/OUT requests, programs the
+ * endpoint descriptors for the active connection speed and enables all
+ * three endpoints.  Returns 0 or a negative errno; on any failure every
+ * request allocated so far is freed and any enabled endpoint is disabled.
+ */
+static int
+ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
+	int ret = 0;
+	int i;
+
+	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
+			sizeof(struct usb_ccid_notification), GFP_ATOMIC);
+	if (IS_ERR(ccid_dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+		return PTR_ERR(ccid_dev->notify_req);
+	}
+	ccid_dev->notify_req->complete = ccid_notify_complete;
+	ccid_dev->notify_req->context = ccid_dev;
+
+	/* now allocate requests for our endpoints */
+	req = ccid_request_alloc(ccid_dev->out, (unsigned)BULK_OUT_BUFFER_SIZE,
+			GFP_ATOMIC);
+	if (IS_ERR(req)) {
+		pr_err("%s: unable to allocate memory for out req\n",
+				__func__);
+		ret = PTR_ERR(req);
+		goto free_notify;
+	}
+	req->complete = ccid_bulk_complete_out;
+	req->context = ccid_dev;
+	bulk_dev->rx_req = req;
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = ccid_request_alloc(ccid_dev->in,
+				(unsigned)BULK_IN_BUFFER_SIZE,
+				GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			pr_err("%s: unable to allocate memory for in req\n",
+					__func__);
+			ret = PTR_ERR(req);
+			/*
+			 * Bug fix: jump to free_bulk_in so the IN requests
+			 * already parked on tx_idle in earlier iterations
+			 * are released too; the old "goto free_bulk_out"
+			 * leaked them.
+			 */
+			goto free_bulk_in;
+		}
+		req->complete = ccid_bulk_complete_in;
+		req->context = ccid_dev;
+		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+	}
+
+	/* choose the descriptors and enable endpoints */
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
+	if (ret) {
+		ccid_dev->notify->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+				__func__, ccid_dev->notify->name, ret);
+		goto free_bulk_in;
+	}
+	ret = usb_ep_enable(ccid_dev->notify);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->notify->name, ret);
+		goto free_bulk_in;
+	}
+	ccid_dev->notify->driver_data = ccid_dev;
+
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
+	if (ret) {
+		ccid_dev->in->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+				__func__, ccid_dev->in->name, ret);
+		goto disable_ep_notify;
+	}
+	ret = usb_ep_enable(ccid_dev->in);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->in->name, ret);
+		goto disable_ep_notify;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
+	if (ret) {
+		ccid_dev->out->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+				__func__, ccid_dev->out->name, ret);
+		goto disable_ep_in;
+	}
+	ret = usb_ep_enable(ccid_dev->out);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->out->name, ret);
+		goto disable_ep_in;
+	}
+	ccid_dev->dtr_state = 1;
+	atomic_set(&ccid_dev->online, 1);
+	return ret;
+
+disable_ep_in:
+	usb_ep_disable(ccid_dev->in);
+disable_ep_notify:
+	usb_ep_disable(ccid_dev->notify);
+	ccid_dev->notify->driver_data = NULL;
+free_bulk_in:
+	/* drain tx_idle, then fall through to free rx_req and notify_req */
+	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+		ccid_request_free(req, ccid_dev->in);
+	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+free_notify:
+	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+	return ret;
+}
+
+/* Undo ccid_function_bind(): free the descriptor sets allocated for @f. */
+static void ccid_function_unbind(struct usb_configuration *c,
+					struct usb_function *f)
+{
+	usb_free_all_descriptors(f);
+}
+
+/*
+ * ccid_function_bind() - claim an interface number and the notify/IN/OUT
+ * endpoints, then build the fs/hs/ss descriptor sets.
+ *
+ * Returns 0 or a negative errno.  Bug fix: the old error labels wrote
+ * through endpoints that were never assigned (e.g. ccid_dev->out on the
+ * very path where OUT autoconfig failed), a NULL-pointer dereference on
+ * first bind.  Each label now releases only what was actually claimed.
+ */
+static int ccid_function_bind(struct usb_configuration *c,
+					struct usb_function *f)
+{
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct usb_ep *ep;
+	struct usb_composite_dev *cdev = c->cdev;
+	int ret = -ENODEV;
+
+	ccid_dev->ifc_id = usb_interface_id(c, f);
+	if (ccid_dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d",
+				__func__, ccid_dev->ifc_id);
+		return ccid_dev->ifc_id;
+	}
+	ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
+	if (!ep) {
+		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+		return -ENODEV;
+	}
+	ccid_dev->notify = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
+	if (!ep) {
+		pr_err("%s: usb epin autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_in_fail;
+	}
+	ccid_dev->in = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
+	if (!ep) {
+		pr_err("%s: usb epout autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_out_fail;
+	}
+	ccid_dev->out = ep;
+	ep->driver_data = cdev;
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	ccid_hs_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+	ccid_hs_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+	ccid_hs_notify_desc.bEndpointAddress =
+			ccid_fs_notify_desc.bEndpointAddress;
+
+	ccid_ss_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+	ccid_ss_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+	ccid_ss_notify_desc.bEndpointAddress =
+			ccid_fs_notify_desc.bEndpointAddress;
+
+	ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
+			ccid_ss_descs);
+	if (ret)
+		goto assign_desc_fail;
+
+	pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
+			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+			ccid_dev->in->name, ccid_dev->out->name);
+
+	return 0;
+
+assign_desc_fail:
+	/* all three endpoints were claimed; release OUT and fall through */
+	ccid_dev->out->driver_data = NULL;
+	ccid_dev->out = NULL;
+ep_auto_out_fail:
+	ccid_dev->in->driver_data = NULL;
+	ccid_dev->in = NULL;
+ep_auto_in_fail:
+	ccid_dev->notify->driver_data = NULL;
+	ccid_dev->notify = NULL;
+
+	return ret;
+}
+
+/*
+ * ccid_bulk_open() - open the bulk misc device (single-open policy).
+ * Returns 0, -ENODEV when the cable is out, or -EBUSY if already open.
+ */
+static int ccid_bulk_open(struct inode *ip, struct file *fp)
+{
+	struct ccid_bulk_dev *bulk_dev = container_of(fp->private_data,
+						struct ccid_bulk_dev,
+						ccid_bulk_device);
+	struct f_ccid *ccid_dev = bulk_dev_to_ccid(bulk_dev);
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_open\n");
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * Bug fix: the original read-then-set pair let two racing opens
+	 * both succeed; atomic_xchg() makes claiming the device atomic.
+	 */
+	if (atomic_xchg(&bulk_dev->opened, 1)) {
+		pr_debug("%s: bulk device is already opened\n", __func__);
+		return -EBUSY;
+	}
+	/* clear the error latch */
+	atomic_set(&bulk_dev->error, 0);
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	fp->private_data = ccid_dev;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+	return 0;
+}
+
+/* Release the bulk misc device: just drop the single-open claim. */
+static int ccid_bulk_release(struct inode *ip, struct file *fp)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+	pr_debug("ccid_bulk_release\n");
+	atomic_set(&bulk_dev->opened, 0);
+	return 0;
+}
+
+/*
+ * ccid_bulk_read() - userspace read of one bulk-OUT transfer.
+ *
+ * Queues the single shared rx_req on the OUT endpoint, sleeps until the
+ * completion handler sets rx_done (or an error / disconnect is flagged),
+ * then copies up to @count bytes to userspace.  Zero-length packets are
+ * requeued transparently.  Returns bytes copied or a negative errno.
+ */
+static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
+	int r = count, xfer, len;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_read(%zu)\n", count);
+
+	if (count > BULK_OUT_BUFFER_SIZE) {
+		pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n",
+				__func__, BULK_OUT_BUFFER_SIZE, count);
+		return -ENOMEM;
+	}
+
+	if (atomic_read(&bulk_dev->error)) {
+		r = -EIO;
+		pr_err("%s bulk_dev_error\n", __func__);
+		goto done;
+	}
+
+	/* OUT transfers must be queued in maxpacket multiples */
+	len = ALIGN(count, ccid_dev->out->maxpacket);
+requeue_req:
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	if (!atomic_read(&ccid_dev->online)) {
+		/*
+		 * Bug fix: the old code returned here with ccid_dev->lock
+		 * still held (and IRQs disabled) - unlock before bailing.
+		 */
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	/* queue a request */
+	req = bulk_dev->rx_req;
+	req->length = len;
+	bulk_dev->rx_done = 0;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+	ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		pr_err("%s usb ep queue failed\n", __func__);
+		atomic_set(&bulk_dev->error, 1);
+		goto done;
+	}
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
+					atomic_read(&bulk_dev->error) ||
+					!atomic_read(&ccid_dev->online));
+	if (ret < 0) {
+		atomic_set(&bulk_dev->error, 1);
+		r = ret;
+		usb_ep_dequeue(ccid_dev->out, req);
+		goto done;
+	}
+	if (!atomic_read(&bulk_dev->error)) {
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		if (!atomic_read(&ccid_dev->online)) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n", __func__);
+			r = -ENODEV;
+			goto done;
+		}
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			goto requeue_req;
+		}
+		if (req->actual > count)
+			pr_err("%s More data received(%d) than required(%zu)\n",
+					__func__, req->actual, count);
+		xfer = (req->actual < count) ? req->actual : count;
+		/* rx_req_busy tells disable() not to free rx_req under us */
+		atomic_set(&bulk_dev->rx_req_busy, 1);
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		atomic_set(&bulk_dev->rx_req_busy, 0);
+		if (!atomic_read(&ccid_dev->online)) {
+			/* disable() skipped the free while we were busy */
+			ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n", __func__);
+			r = -ENODEV;
+			goto done;
+		} else {
+			r = xfer;
+		}
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+	} else {
+		r = -EIO;
+	}
+done:
+	pr_debug("ccid_bulk_read returning %d\n", r);
+	return r;
+}
+
+/*
+ * ccid_bulk_write() - userspace write of one bulk-IN transfer.
+ * Grabs an idle IN request, copies @count bytes from userspace and
+ * queues it.  Returns the number of bytes queued or a negative errno.
+ */
+static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req = NULL;	/* was "= 0"; use NULL for pointers */
+	int r = count;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_write(%zu)\n", count);
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!count) {
+		pr_err("%s: zero length ctrl pkt\n", __func__);
+		return -ENODEV;
+	}
+	if (count > BULK_IN_BUFFER_SIZE) {
+		pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+				__func__, BULK_IN_BUFFER_SIZE, count);
+		return -ENOMEM;
+	}
+
+	/* get an idle tx request to use */
+	ret = wait_event_interruptible(bulk_dev->write_wq,
+		((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
+		 atomic_read(&bulk_dev->error)));
+
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (atomic_read(&bulk_dev->error)) {
+		pr_err(" %s dev->error\n", __func__);
+		r = -EIO;
+		/*
+		 * Bug fix: if the wakeup raced with the error flag we may
+		 * hold a request pulled off tx_idle - return it to the
+		 * pool instead of leaking it.
+		 */
+		if (req)
+			ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+		goto done;
+	}
+	if (copy_from_user(req->buf, buf, count)) {
+		if (!atomic_read(&ccid_dev->online)) {
+			pr_debug("%s: USB cable not connected\n",
+					__func__);
+			ccid_request_free(req, ccid_dev->in);
+			r = -ENODEV;
+		} else {
+			ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+			r = -EFAULT;
+		}
+		goto done;
+	}
+	req->length = count;
+	ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
+	if (ret < 0) {
+		pr_debug("ccid_bulk_write: xfer error %d\n", ret);
+		atomic_set(&bulk_dev->error, 1);
+		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+		r = -EIO;
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		if (!atomic_read(&ccid_dev->online)) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n",
+					__func__);
+			while ((req = ccid_req_get(ccid_dev,
+						&bulk_dev->tx_idle)))
+				ccid_request_free(req, ccid_dev->in);
+			r = -ENODEV;
+			goto done;
+		}
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+	}
+done:
+	pr_debug("ccid_bulk_write returning %d\n", r);
+	return r;
+}
+
+/* File operations for the CCID bulk misc device (/dev node). */
+static const struct file_operations ccid_bulk_fops = {
+	.owner = THIS_MODULE,
+	.read = ccid_bulk_read,
+	.write = ccid_bulk_write,
+	.open = ccid_bulk_open,
+	.release = ccid_bulk_release,
+};
+
+/*
+ * ccid_bulk_device_init() - prepare the bulk-side wait queues and TX
+ * request pool, then register the bulk misc device.
+ * Returns 0 on success or the misc_register() error.
+ */
+static int ccid_bulk_device_init(struct f_ccid *dev)
+{
+	struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
+	int ret;
+
+	init_waitqueue_head(&bulk_dev->read_wq);
+	init_waitqueue_head(&bulk_dev->write_wq);
+	INIT_LIST_HEAD(&bulk_dev->tx_idle);
+
+	bulk_dev->ccid_bulk_device.name = CCID_BULK_DEV_NAME;
+	bulk_dev->ccid_bulk_device.fops = &ccid_bulk_fops;
+	bulk_dev->ccid_bulk_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&bulk_dev->ccid_bulk_device);
+	if (ret)
+		pr_err("%s: failed to register misc device\n", __func__);
+
+	return ret;
+}
+
+/*
+ * ccid_ctrl_open() - open the control misc device (single-open policy).
+ * Returns 0, -ENODEV when the cable is out, or -EBUSY if already open.
+ */
+static int ccid_ctrl_open(struct inode *inode, struct file *fp)
+{
+	struct ccid_ctrl_dev *ctrl_dev = container_of(fp->private_data,
+						struct ccid_ctrl_dev,
+						ccid_ctrl_device);
+	struct f_ccid *ccid_dev = ctrl_dev_to_ccid(ctrl_dev);
+	unsigned long flags;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	/*
+	 * Bug fix: the original read-then-set pair let two racing opens
+	 * both succeed; atomic_xchg() makes claiming the device atomic.
+	 */
+	if (atomic_xchg(&ctrl_dev->opened, 1)) {
+		pr_debug("%s: ctrl device is already opened\n", __func__);
+		return -EBUSY;
+	}
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	fp->private_data = ccid_dev;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+	return 0;
+}
+
+
+/* Release the control misc device: drop the single-open claim. */
+static int ccid_ctrl_release(struct inode *inode, struct file *fp)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+
+	atomic_set(&ctrl_dev->opened, 0);
+
+	return 0;
+}
+
+/*
+ * ccid_ctrl_read() - hand the latest control-transfer payload to user
+ * space.  Blocks until the setup handler flags tx_ctrl_done or the cable
+ * is pulled.  Returns bytes copied (at most CTRL_BUF_SIZE) or -errno.
+ */
+static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+	int rc;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	/* never hand out more than one control buffer worth of data */
+	count = min_t(size_t, count, CTRL_BUF_SIZE);
+
+	rc = wait_event_interruptible(ctrl_dev->tx_wait_q,
+					ctrl_dev->tx_ctrl_done ||
+					!atomic_read(&ccid_dev->online));
+	if (rc < 0)
+		return rc;
+	ctrl_dev->tx_ctrl_done = 0;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (copy_to_user(buf, ctrl_dev->buf, count))
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * ccid_ctrl_ioctl() - userspace control of the interrupt (notify) pipe.
+ * CCID_NOTIFY_CARD / CCID_NOTIFY_HWERROR copy a notification from user
+ * space and queue it on the notify endpoint; CCID_READ_DTR reports the
+ * cached DTR state.  Returns 0 or a negative errno.
+ */
+static long
+ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct usb_request *req = ccid_dev->notify_req;
+	struct usb_ccid_notification *ccid_notify = req->buf;
+	void __user *argp = (void __user *)arg;
+	int ret = 0;
+
+	switch (cmd) {
+	case CCID_NOTIFY_CARD:
+		if (copy_from_user(ccid_notify, argp,
+				sizeof(struct usb_ccid_notification)))
+			return -EFAULT;
+		req->length = 2;
+		break;
+	case CCID_NOTIFY_HWERROR:
+		if (copy_from_user(ccid_notify, argp,
+				sizeof(struct usb_ccid_notification)))
+			return -EFAULT;
+		req->length = 4;
+		break;
+	case CCID_READ_DTR:
+		/* use argp consistently with the copy_from_user paths */
+		if (copy_to_user(argp, &ccid_dev->dtr_state, sizeof(int)))
+			return -EFAULT;
+		return 0;
+	default:
+		/*
+		 * Bug fix: unknown commands used to fall out of the switch
+		 * and queue the notify request with stale length/contents.
+		 */
+		return -ENOTTY;
+	}
+	ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("ccid notify ep enqueue error %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+/* File operations for the CCID control misc device (/dev node). */
+static const struct file_operations ccid_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = ccid_ctrl_open,
+	.release = ccid_ctrl_release,
+	.read = ccid_ctrl_read,
+	.unlocked_ioctl = ccid_ctrl_ioctl,
+};
+
+/*
+ * ccid_ctrl_device_init() - prepare the control-side queue and wait
+ * queue, then register the control misc device.
+ * Returns 0 on success or the misc_register() error.
+ */
+static int ccid_ctrl_device_init(struct f_ccid *dev)
+{
+	struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	int ret;
+
+	INIT_LIST_HEAD(&ctrl_dev->tx_q);
+	init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+	ctrl_dev->ccid_ctrl_device.name = CCID_CTRL_DEV_NAME;
+	ctrl_dev->ccid_ctrl_device.fops = &ccid_ctrl_fops;
+	ctrl_dev->ccid_ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&ctrl_dev->ccid_ctrl_device);
+	if (ret)
+		pr_err("%s: failed to register misc device\n", __func__);
+
+	return ret;
+}
+
+/*
+ * free_func callback: nothing to do here - the f_ccid instance is owned
+ * by the function instance and released in ccid_free_inst().
+ */
+static void ccid_free_func(struct usb_function *f)
+{
+	pr_debug("%s\n", __func__);
+}
+
+/* Populate the usb_function callbacks and descriptor sets for CCID. */
+static int ccid_bind_config(struct f_ccid *ccid_dev)
+{
+	struct usb_function *func = &ccid_dev->function;
+
+	pr_debug("ccid_bind_config\n");
+
+	func->name = FUNCTION_NAME;
+	func->fs_descriptors = ccid_fs_descs;
+	func->hs_descriptors = ccid_hs_descs;
+	func->ss_descriptors = ccid_ss_descs;
+	func->bind = ccid_function_bind;
+	func->unbind = ccid_function_unbind;
+	func->set_alt = ccid_function_set_alt;
+	func->setup = ccid_function_setup;
+	func->disable = ccid_function_disable;
+	func->free_func = ccid_free_func;
+
+	return 0;
+}
+
+/*
+ * ccid_setup() - allocate an f_ccid instance and register its control
+ * and bulk misc devices.  Returns the instance or an ERR_PTR; on any
+ * failure everything registered so far is torn down.
+ */
+static struct f_ccid *ccid_setup(void)
+{
+	struct f_ccid *ccid_dev;
+	int ret = -ENOMEM;
+
+	ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
+	if (!ccid_dev)
+		goto error;
+
+	spin_lock_init(&ccid_dev->lock);
+
+	ret = ccid_ctrl_device_init(ccid_dev);
+	if (ret) {
+		pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
+				__func__, ret);
+		goto err_free;
+	}
+
+	ret = ccid_bulk_device_init(ccid_dev);
+	if (ret) {
+		pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
+				__func__, ret);
+		goto err_deregister_ctrl;
+	}
+
+	return ccid_dev;
+
+err_deregister_ctrl:
+	misc_deregister(&ccid_dev->ctrl_dev.ccid_ctrl_device);
+err_free:
+	kfree(ccid_dev);
+error:
+	pr_err("ccid gadget driver failed to initialize\n");
+	return ERR_PTR(ret);
+}
+
+/* Map a configfs item back to its enclosing ccid_opts. */
+static inline struct ccid_opts *to_ccid_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct ccid_opts,
+			    func_inst.group);
+}
+
+/* configfs release: drop the reference held on the function instance. */
+static void ccid_attr_release(struct config_item *item)
+{
+	struct ccid_opts *opts = to_ccid_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+/* configfs item operations for the ccid function group. */
+static struct configfs_item_operations ccid_item_ops = {
+	.release = ccid_attr_release,
+};
+
+/* configfs group type registered for each ccid function instance. */
+static struct config_item_type ccid_func_type = {
+	.ct_item_ops = &ccid_item_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+/*
+ * ccid_set_inst_name() - configfs instance-name hook; creating the
+ * instance also allocates and registers the backing f_ccid device.
+ * Returns 0, -ENAMETOOLONG, or the ccid_setup() error.
+ */
+static int ccid_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct ccid_opts *opts = container_of(fi, struct ccid_opts, func_inst);
+	struct f_ccid *ccid;
+
+	if (strlen(name) + 1 > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ccid = ccid_setup();
+	if (IS_ERR(ccid))
+		return PTR_ERR(ccid);
+
+	opts->ccid = ccid;
+	return 0;
+}
+
+/*
+ * ccid_free_inst() - tear down a function instance.
+ * Bug fix: the old early "return" when opts->ccid was NULL (instance
+ * name never set) skipped kfree(opts), leaking the options struct.
+ */
+static void ccid_free_inst(struct usb_function_instance *f)
+{
+	struct ccid_opts *opts = container_of(f, struct ccid_opts, func_inst);
+
+	if (opts->ccid) {
+		misc_deregister(&opts->ccid->ctrl_dev.ccid_ctrl_device);
+		misc_deregister(&opts->ccid->bulk_dev.ccid_bulk_device);
+		kfree(opts->ccid);
+	}
+	kfree(opts);
+}
+
+
+/* Allocate a configfs function instance and hook up its callbacks. */
+static struct usb_function_instance *ccid_alloc_inst(void)
+{
+	struct ccid_opts *opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = ccid_set_inst_name;
+	opts->func_inst.free_func_inst = ccid_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &ccid_func_type);
+	return &opts->func_inst;
+}
+
+/*
+ * ccid_alloc() - hand out the usb_function for this instance.
+ * Bug fix: guard against opts->ccid being NULL (set_inst_name may never
+ * have run), which the old code dereferenced unconditionally.
+ */
+static struct usb_function *ccid_alloc(struct usb_function_instance *fi)
+{
+	struct ccid_opts *opts = container_of(fi, struct ccid_opts, func_inst);
+	int ret;
+
+	if (!opts->ccid)
+		return ERR_PTR(-EINVAL);
+
+	ret = ccid_bind_config(opts->ccid);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &opts->ccid->function;
+}
+
+DECLARE_USB_FUNCTION_INIT(ccid, ccid_alloc_inst, ccid_alloc);
+MODULE_DESCRIPTION("USB CCID function Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_ccid.h b/drivers/usb/gadget/function/f_ccid.h
new file mode 100644
index 000000000000..935308cff0bc
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2011, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef __F_CCID_H
+#define __F_CCID_H
+
+/* ISO7816 protocol selectors (NOTE(review): "TO" presumably means T=0) */
+#define PROTOCOL_TO 0x01
+#define PROTOCOL_T1 0x02
+#define ABDATA_SIZE 512
+
+/* define for dwFeatures for Smart Card Device Class Descriptors */
+/* No special characteristics */
+#define CCID_FEATURES_NADA 0x00000000
+/* Automatic parameter configuration based on ATR data */
+#define CCID_FEATURES_AUTO_PCONF 0x00000002
+/* Automatic activation of ICC on inserting */
+#define CCID_FEATURES_AUTO_ACTIV 0x00000004
+/* Automatic ICC voltage selection */
+#define CCID_FEATURES_AUTO_VOLT 0x00000008
+/* Automatic ICC clock frequency change */
+#define CCID_FEATURES_AUTO_CLOCK 0x00000010
+/* Automatic baud rate change */
+#define CCID_FEATURES_AUTO_BAUD 0x00000020
+/*Automatic parameters negotiation made by the CCID */
+#define CCID_FEATURES_AUTO_PNEGO 0x00000040
+/* Automatic PPS made by the CCID according to the active parameters */
+#define CCID_FEATURES_AUTO_PPS 0x00000080
+/* CCID can set ICC in clock stop mode */
+#define CCID_FEATURES_ICCSTOP 0x00000100
+/* NAD value other than 00 accepted (T=1 protocol in use) */
+#define CCID_FEATURES_NAD 0x00000200
+/* Automatic IFSD exchange as first exchange (T=1 protocol in use) */
+#define CCID_FEATURES_AUTO_IFSD 0x00000400
+/* TPDU level exchanges with CCID */
+#define CCID_FEATURES_EXC_TPDU 0x00010000
+/* Short APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_SAPDU 0x00020000
+/* Short and Extended APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_APDU 0x00040000
+/* USB Wake up signaling supported on card insertion and removal */
+#define CCID_FEATURES_WAKEUP 0x00100000
+
+/* ioctls exchanged with the userspace CCID daemon via the ctrl device */
+#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification)
+#define CCID_NOTIFY_HWERROR _IOW('C', 2, struct usb_ccid_notification)
+#define CCID_READ_DTR _IOR('C', 3, int)
+
+/* Raw interrupt-endpoint notification payload (2 or 4 bytes used). */
+struct usb_ccid_notification {
+	__u8 buf[4];
+} __packed;
+
+/*
+ * Bulk-IN message header.  NOTE(review): wLength is 4 bytes wide, which
+ * matches the CCID spec's 32-bit dwLength field despite the "w" prefix;
+ * confirm endianness handling at the fill site.
+ */
+struct ccid_bulk_in_header {
+	__u8 bMessageType;
+	__u32 wLength;
+	__u8 bSlot;
+	__u8 bSeq;
+	__u8 bStatus;
+	__u8 bError;
+	__u8 bSpecific;
+	__u8 abData[ABDATA_SIZE];
+	__u8 bSizeToSend;
+} __packed;
+
+/* Bulk-OUT message header; APDU carries the command payload. */
+struct ccid_bulk_out_header {
+	__u8 bMessageType;
+	__u32 wLength;
+	__u8 bSlot;
+	__u8 bSeq;
+	__u8 bSpecific_0;
+	__u8 bSpecific_1;
+	__u8 bSpecific_2;
+	__u8 APDU[ABDATA_SIZE];
+} __packed;
+#endif
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
new file mode 100644
index 000000000000..233221fed424
--- /dev/null
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -0,0 +1,1847 @@
+/*
+ * Copyright (c) 2011, 2013-2018, The Linux Foundation. All rights reserved.
+ * Linux Foundation chooses to take subject only to the GPLv2 license terms,
+ * and distributes only under these terms.
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ *
+ * f_cdev_read() API implementation is using borrowed code from
+ * drivers/usb/gadget/legacy/printer.c, which is
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2006 Craig W. Nadler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <asm/ioctls.h>
+#include <asm-generic/termios.h>
+
+#define DEVICE_NAME "at_usb"
+#define MODULE_NAME "msm_usb_bridge"
+#define NUM_INSTANCE 3
+
+#define MAX_CDEV_INST_NAME 15
+#define MAX_CDEV_FUNC_NAME 5
+
+#define BRIDGE_RX_QUEUE_SIZE 8
+#define BRIDGE_RX_BUF_SIZE 2048
+#define BRIDGE_TX_QUEUE_SIZE 8
+#define BRIDGE_TX_BUF_SIZE 2048
+
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
+
+/*
+ * Per-port view of the USB side of the serial bridge: the usb_function,
+ * the three endpoints and the CDC/modem-signal callbacks.
+ */
+struct cserial {
+	struct usb_function func;
+	struct usb_ep *in;
+	struct usb_ep *out;
+	struct usb_ep *notify;
+	struct usb_request *notify_req;
+	struct usb_cdc_line_coding port_line_coding;
+	u8 pending;
+	u8 q_again;
+	u8 data_id;
+	u16 serial_state;
+	u16 port_handshake_bits;
+	/* control signal callbacks*/
+	unsigned int (*get_dtr)(struct cserial *p);
+	unsigned int (*get_rts)(struct cserial *p);
+
+	/* notification callbacks */
+	void (*connect)(struct cserial *p);
+	void (*disconnect)(struct cserial *p);
+	int (*send_break)(struct cserial *p, int duration);
+	unsigned int (*send_carrier_detect)(struct cserial *p, unsigned int);
+	unsigned int (*send_ring_indicator)(struct cserial *p, unsigned int);
+	int (*send_modem_ctrl_bits)(struct cserial *p, int ctrl_bits);
+
+	/* notification changes to modem */
+	void (*notify_modem)(void *port, int ctrl_bits);
+};
+
+/*
+ * One character-device serial port: the cdev/device node, request
+ * pools guarded by port_lock, and bookkeeping for the RX stream that
+ * f_cdev_read() consumes incrementally.
+ */
+struct f_cdev {
+	struct cdev fcdev_cdev;
+	struct device *dev;
+	unsigned port_num;
+	char name[sizeof(DEVICE_NAME) + 2];
+	int minor;
+
+	spinlock_t port_lock;
+
+	wait_queue_head_t open_wq;
+	wait_queue_head_t read_wq;
+
+	struct list_head read_pool;
+	struct list_head read_queued;
+	struct list_head write_pool;
+
+	/* current active USB RX request */
+	struct usb_request *current_rx_req;
+	/* number of pending bytes */
+	size_t pending_rx_bytes;
+	/* current USB RX buffer */
+	u8 *current_rx_buf;
+
+	struct cserial port_usb;
+
+/* CDC ACM control-line bit masks (host->device and device->host) */
+#define ACM_CTRL_DTR	0x01
+#define ACM_CTRL_RTS	0x02
+#define ACM_CTRL_DCD	0x01
+#define ACM_CTRL_DSR	0x02
+#define ACM_CTRL_BRK	0x04
+#define ACM_CTRL_RI	0x08
+
+	unsigned cbits_to_modem;
+	bool cbits_updated;
+
+	struct workqueue_struct *fcdev_wq;
+	bool is_connected;
+	bool port_open;
+
+	/* transfer statistics */
+	unsigned long nbytes_from_host;
+	unsigned long nbytes_to_host;
+	unsigned long nbytes_to_port_bridge;
+	unsigned long nbytes_from_port_bridge;
+};
+
+/* configfs options wrapper owning one f_cdev port. */
+struct f_cdev_opts {
+	struct usb_function_instance func_inst;
+	struct f_cdev *port;
+	char *func_name;
+	u8 port_num;
+};
+
+static int major, minors;
+struct class *fcdev_classp;
+static DEFINE_IDA(chardev_ida);
+static DEFINE_MUTEX(chardev_ida_lock);
+
+static int usb_cser_alloc_chardev_region(void);
+static void usb_cser_chardev_deinit(void);
+static void usb_cser_read_complete(struct usb_ep *ep, struct usb_request *req);
+static int usb_cser_connect(struct f_cdev *port);
+static void usb_cser_disconnect(struct f_cdev *port);
+static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
+static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
+
+/* Single vendor-specific interface carrying notify + bulk IN/OUT. */
+static struct usb_interface_descriptor cser_interface_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints = 3,
+	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass = 0,
+	.bInterfaceProtocol = 0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* CDC class-specific functional descriptors (ACM-style line control). */
+static struct usb_cdc_header_desc cser_header_desc = {
+	.bLength = sizeof(cser_header_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_HEADER_TYPE,
+	.bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+cser_call_mgmt_descriptor = {
+	.bLength = sizeof(cser_call_mgmt_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities = 0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor cser_descriptor = {
+	.bLength = sizeof(cser_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_ACM_TYPE,
+	.bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc cser_union_desc = {
+	.bLength = sizeof(cser_union_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 = DYNAMIC */
+	/* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor cser_fs_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor cser_fs_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor cser_fs_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *cser_fs_function[] = {
+	(struct usb_descriptor_header *) &cser_interface_desc,
+	(struct usb_descriptor_header *) &cser_header_desc,
+	(struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &cser_descriptor,
+	(struct usb_descriptor_header *) &cser_union_desc,
+	(struct usb_descriptor_header *) &cser_fs_notify_desc,
+	(struct usb_descriptor_header *) &cser_fs_in_desc,
+	(struct usb_descriptor_header *) &cser_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor cser_hs_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+/* bEndpointAddress for hs/ss bulk eps is copied from the fs descs at bind */
+static struct usb_endpoint_descriptor cser_hs_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor cser_hs_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *cser_hs_function[] = {
+	(struct usb_descriptor_header *) &cser_interface_desc,
+	(struct usb_descriptor_header *) &cser_header_desc,
+	(struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &cser_descriptor,
+	(struct usb_descriptor_header *) &cser_union_desc,
+	(struct usb_descriptor_header *) &cser_hs_notify_desc,
+	(struct usb_descriptor_header *) &cser_hs_in_desc,
+	(struct usb_descriptor_header *) &cser_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+static struct usb_endpoint_descriptor cser_ss_in_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor cser_ss_out_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor cser_ss_bulk_comp_desc = {
+	.bLength = sizeof(cser_ss_bulk_comp_desc),
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor cser_ss_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_ss_ep_comp_descriptor cser_ss_notify_comp_desc = {
+	.bLength = sizeof(cser_ss_notify_comp_desc),
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+	.wBytesPerInterval = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+
+static struct usb_descriptor_header *cser_ss_function[] = {
+	(struct usb_descriptor_header *) &cser_interface_desc,
+	(struct usb_descriptor_header *) &cser_header_desc,
+	(struct usb_descriptor_header *) &cser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &cser_descriptor,
+	(struct usb_descriptor_header *) &cser_union_desc,
+	(struct usb_descriptor_header *) &cser_ss_notify_desc,
+	(struct usb_descriptor_header *) &cser_ss_notify_comp_desc,
+	(struct usb_descriptor_header *) &cser_ss_in_desc,
+	(struct usb_descriptor_header *) &cser_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &cser_ss_out_desc,
+	(struct usb_descriptor_header *) &cser_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+static struct usb_string cser_string_defs[] = {
+	[0].s = "CDEV Serial",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings cser_string_table = {
+	.language = 0x0409,	/* en-us */
+	.strings = cser_string_defs,
+};
+
+static struct usb_gadget_strings *usb_cser_strings[] = {
+	&cser_string_table,
+	NULL,
+};
+
+/* Map a usb_function back to its enclosing f_cdev port. */
+static inline struct f_cdev *func_to_port(struct usb_function *f)
+{
+	return container_of(f, struct f_cdev, port_usb.func);
+}
+
+/* Map a cserial back to its enclosing f_cdev port. */
+static inline struct f_cdev *cser_to_port(struct cserial *cser)
+{
+	return container_of(cser, struct f_cdev, port_usb);
+}
+
+/* Translate the ACM control-line bits asserted by the host (DTR/RTS) into
+ * the equivalent TIOCM_* modem-control flags; all other bits are ignored. */
+static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig)
+{
+	unsigned int uart_sig = 0;
+
+	if (acm_sig & ACM_CTRL_DTR)
+		uart_sig |= TIOCM_DTR;
+	if (acm_sig & ACM_CTRL_RTS)
+		uart_sig |= TIOCM_RTS;
+
+	return uart_sig;
+}
+
+/*
+ * ep0 completion handler for the SET_LINE_CODING data stage: stores the
+ * line coding the host sent, or halts ep0 on a short/invalid transfer.
+ */
+static void port_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_cdev *port = ep->driver_data;
+ struct usb_composite_dev *cdev = port->port_usb.func.config->cdev;
+
+ if (req->status != 0) {
+ dev_dbg(&cdev->gadget->dev, "port(%s) completion, err %d\n",
+ port->name, req->status);
+ return;
+ }
+
+ /* normal completion */
+ if (req->actual != sizeof(port->port_usb.port_line_coding)) {
+ dev_dbg(&cdev->gadget->dev, "port(%s) short resp, len %d\n",
+ port->name, req->actual);
+ usb_ep_set_halt(ep);
+ } else {
+ struct usb_cdc_line_coding *value = req->buf;
+
+ /* save the host-supplied coding; echoed back on GET_LINE_CODING */
+ port->port_usb.port_line_coding = *value;
+ }
+}
+
+/* usb_function.free_func callback; intentionally empty because the function
+ * is embedded in struct f_cdev and freed with it (see cser_free_inst). */
+static void usb_cser_free_func(struct usb_function *f)
+{
+ /* Do nothing as cser_alloc() doesn't alloc anything. */
+}
+
+/*
+ * Handle class-specific ep0 control requests (CDC ACM subset):
+ * SET_LINE_CODING, GET_LINE_CODING and SET_CONTROL_LINE_STATE.
+ * Returns the data-stage length to transfer, or a negative value so the
+ * composite core stalls ep0 for unsupported/invalid requests.
+ */
+static int
+usb_cser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* SET_LINE_CODING ... just read and save what the host sends */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_LINE_CODING:
+ if (w_length != sizeof(struct usb_cdc_line_coding))
+ goto invalid;
+
+ /* data arrives in the OUT stage; save it from the completion */
+ value = w_length;
+ cdev->gadget->ep0->driver_data = port;
+ req->complete = port_complete_set_line_coding;
+ break;
+
+ /* GET_LINE_CODING ... return what host sent, or initial value */
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_GET_LINE_CODING:
+ value = min_t(unsigned, w_length,
+ sizeof(struct usb_cdc_line_coding));
+ memcpy(req->buf, &port->port_usb.port_line_coding, value);
+ break;
+
+ /* SET_CONTROL_LINE_STATE: host asserts/deasserts DTR and RTS */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+ value = 0;
+ port->port_usb.port_handshake_bits = w_value;
+ pr_debug("USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d RST:%d\n",
+ w_value & ACM_CTRL_DTR ? 1 : 0,
+ w_value & ACM_CTRL_RTS ? 1 : 0);
+ if (port->port_usb.notify_modem)
+ port->port_usb.notify_modem(port, w_value);
+
+ break;
+
+ default:
+invalid:
+ dev_dbg(&cdev->gadget->dev,
+ "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ dev_dbg(&cdev->gadget->dev,
+ "port(%s) req%02x.%02x v%04x i%04x l%d\n",
+ port->name, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("port response on (%s), err %d\n",
+ port->name, value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * SET_INTERFACE / SET_CONFIGURATION handler: (re)configure and enable the
+ * notify, IN and OUT endpoints for the current connection speed, then bring
+ * the port up.  An already-enabled endpoint (driver_data set) is disabled
+ * first so the function survives a host-initiated reset.
+ */
+static int usb_cser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
+
+ if (port->port_usb.notify->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset port(%s)\n", port->name);
+ usb_ep_disable(port->port_usb.notify);
+ }
+
+ /* pick the speed-specific descriptor on first activation */
+ if (!port->port_usb.notify->desc) {
+ if (config_ep_by_speed(cdev->gadget, f,
+ port->port_usb.notify)) {
+ port->port_usb.notify->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ rc = usb_ep_enable(port->port_usb.notify);
+ if (rc) {
+ dev_err(&cdev->gadget->dev, "can't enable %s, result %d\n",
+ port->port_usb.notify->name, rc);
+ return rc;
+ }
+ port->port_usb.notify->driver_data = port;
+
+ if (port->port_usb.in->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset port(%s)\n", port->name);
+ usb_cser_disconnect(port);
+ }
+ if (!port->port_usb.in->desc || !port->port_usb.out->desc) {
+ dev_dbg(&cdev->gadget->dev,
+ "activate port(%s)\n", port->name);
+ if (config_ep_by_speed(cdev->gadget, f, port->port_usb.in) ||
+ config_ep_by_speed(cdev->gadget, f,
+ port->port_usb.out)) {
+ port->port_usb.in->desc = NULL;
+ port->port_usb.out->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ /* enables IN/OUT eps, allocates I/O requests and starts RX */
+ usb_cser_connect(port);
+ return rc;
+}
+
+/* usb_function.disable callback: tear down data eps via usb_cser_disconnect()
+ * and disable the notification endpoint. */
+static void usb_cser_disable(struct usb_function *f)
+{
+ struct f_cdev *port = func_to_port(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ dev_dbg(&cdev->gadget->dev,
+ "port(%s) deactivated\n", port->name);
+
+ usb_cser_disconnect(port);
+ usb_ep_disable(port->port_usb.notify);
+ port->port_usb.notify->driver_data = NULL;
+}
+
+/*
+ * Queue a CDC notification (header + 'length' bytes of payload) on the
+ * interrupt IN endpoint.  The shared notify_req is filled under port_lock;
+ * the lock is dropped before usb_ep_queue() since queueing may call back.
+ * NOTE(review): notify_req's buffer was sized for a 2-byte payload in
+ * usb_cser_bind(); callers must not pass a larger 'length'.
+ */
+static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = port->port_usb.notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: port disconnected\n", __func__);
+ return -ENODEV;
+ }
+
+ req = port->port_usb.notify_req;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(port->port_usb.data_id);
+ notify->wLength = cpu_to_le16(length);
+ /* 2 byte data copy */
+ memcpy(buf, data, length);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ pr_err("port %s can't notify serial state, %d\n",
+ port->name, status);
+ /* clear 'pending' so port_notify_serial_state() can retry */
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb.pending = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ return status;
+}
+
+/*
+ * Send the current serial_state bitmap to the host.  Only one notification
+ * may be in flight ('pending'); a request arriving while one is outstanding
+ * sets 'q_again' and is re-sent from usb_cser_notify_complete().
+ */
+static int port_notify_serial_state(struct cserial *cser)
+{
+ struct f_cdev *port = cser_to_port(cser);
+ int status;
+ unsigned long flags;
+ struct usb_composite_dev *cdev = port->port_usb.func.config->cdev;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb.pending) {
+ port->port_usb.pending = true;
+ /* drop the lock: usb_cser_notify() re-takes it itself */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ dev_dbg(&cdev->gadget->dev, "port %d serial state %04x\n",
+ port->port_num, port->port_usb.serial_state);
+ status = usb_cser_notify(port, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &port->port_usb.serial_state,
+ sizeof(port->port_usb.serial_state));
+ spin_lock_irqsave(&port->port_lock, flags);
+ } else {
+ port->port_usb.q_again = true;
+ status = 0;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return status;
+}
+
+/*
+ * Completion handler for the notification request: clears 'pending' and,
+ * if another state change was queued meanwhile (q_again), re-sends it —
+ * unless the endpoint was shut down.
+ */
+static void usb_cser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_cdev *port = req->context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb.pending = false;
+ if (req->status != -ESHUTDOWN && port->port_usb.q_again) {
+ port->port_usb.q_again = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ port_notify_serial_state(&port->port_usb);
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/* Report DSR+DCD asserted to the host (modem-side "connected"). */
+static void dun_cser_connect(struct cserial *cser)
+{
+ cser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ port_notify_serial_state(cser);
+}
+
+/* Return 1 if the host currently asserts DTR, 0 otherwise. */
+unsigned int dun_cser_get_dtr(struct cserial *cser)
+{
+	return (cser->port_handshake_bits & ACM_CTRL_DTR) ? 1 : 0;
+}
+
+/* Return 1 if the host currently asserts RTS, 0 otherwise. */
+unsigned int dun_cser_get_rts(struct cserial *cser)
+{
+	return (cser->port_handshake_bits & ACM_CTRL_RTS) ? 1 : 0;
+}
+
+/* Set or clear the DCD (carrier-detect) bit and notify the host. */
+unsigned int dun_cser_send_carrier_detect(struct cserial *cser,
+				unsigned int yes)
+{
+	u16 state = cser->serial_state & ~ACM_CTRL_DCD;
+
+	if (yes)
+		state |= ACM_CTRL_DCD;
+	cser->serial_state = state;
+
+	return port_notify_serial_state(cser);
+}
+
+/* Set or clear the RI (ring-indicator) bit and notify the host. */
+unsigned int dun_cser_send_ring_indicator(struct cserial *cser,
+				unsigned int yes)
+{
+	u16 state = cser->serial_state & ~ACM_CTRL_RI;
+
+	if (yes)
+		state |= ACM_CTRL_RI;
+	cser->serial_state = state;
+
+	return port_notify_serial_state(cser);
+}
+
+/* Report DSR+DCD deasserted to the host (modem-side "disconnected"). */
+static void dun_cser_disconnect(struct cserial *cser)
+{
+ cser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ port_notify_serial_state(cser);
+}
+
+/* Assert BRK while 'duration' is non-zero, clear it otherwise, and notify. */
+static int dun_cser_send_break(struct cserial *cser, int duration)
+{
+	u16 state = cser->serial_state & ~ACM_CTRL_BRK;
+
+	if (duration)
+		state |= ACM_CTRL_BRK;
+	cser->serial_state = state;
+
+	return port_notify_serial_state(cser);
+}
+
+/* Replace the whole serial_state bitmap and notify the host. */
+static int dun_cser_send_ctrl_bits(struct cserial *cser, int ctrl_bits)
+{
+ cser->serial_state = ctrl_bits;
+ return port_notify_serial_state(cser);
+}
+
+/* Release a request and its backing buffer; a NULL req is a no-op.
+ * (The old 'req = NULL;' assigned to the by-value parameter and had no
+ * effect on the caller, so it has been dropped.) */
+static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* Drain a request list, freeing every request and its buffer. */
+static void usb_cser_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request *req;
+
+	while (!list_empty(head)) {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del_init(&req->list);
+		usb_cser_free_req(ep, req);
+	}
+}
+
+/*
+ * Allocate a usb_request plus a kmalloc'd transfer buffer of 'len' bytes.
+ * Returns NULL on failure (the old code returned the integer literal 0 for
+ * a pointer, which is non-idiomatic).  Caller frees via usb_cser_free_req().
+ */
+static struct usb_request *
+usb_cser_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req) {
+		pr_err("usb alloc request failed\n");
+		return NULL;
+	}
+
+	req->length = len;
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		pr_err("request buf allocation failed\n");
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+/*
+ * usb_function.bind: claim an interface number, autoconfigure the bulk
+ * IN/OUT and interrupt notify endpoints, allocate the shared notification
+ * request (header + 2-byte payload), copy the FS endpoint addresses into
+ * the HS/SS descriptors and register all descriptor sets.
+ */
+static int usb_cser_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_cdev *port = func_to_port(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate the interface string id once, on first bind */
+ if (cser_string_defs[0].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ cser_string_defs[0].id = status;
+ }
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ port->port_usb.data_id = status;
+ cser_interface_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_in_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.in = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_out_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.out = ep;
+ ep->driver_data = cdev;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &cser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ port->port_usb.notify = ep;
+ ep->driver_data = cdev;
+ /* allocate notification */
+ port->port_usb.notify_req = usb_cser_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2, GFP_KERNEL);
+ if (!port->port_usb.notify_req)
+ goto fail;
+
+ port->port_usb.notify_req->complete = usb_cser_notify_complete;
+ port->port_usb.notify_req->context = port;
+
+ /* HS/SS descriptors reuse the addresses chosen for full speed */
+ cser_hs_in_desc.bEndpointAddress = cser_fs_in_desc.bEndpointAddress;
+ cser_hs_out_desc.bEndpointAddress = cser_fs_out_desc.bEndpointAddress;
+
+ cser_ss_in_desc.bEndpointAddress = cser_fs_in_desc.bEndpointAddress;
+ cser_ss_out_desc.bEndpointAddress = cser_fs_out_desc.bEndpointAddress;
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ cser_hs_notify_desc.bEndpointAddress =
+ cser_fs_notify_desc.bEndpointAddress;
+ }
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ cser_ss_notify_desc.bEndpointAddress =
+ cser_fs_notify_desc.bEndpointAddress;
+ }
+
+ status = usb_assign_descriptors(f, cser_fs_function, cser_hs_function,
+ cser_ss_function);
+ if (status)
+ goto fail;
+
+ dev_dbg(&cdev->gadget->dev, "usb serial port(%d): %s speed IN/%s OUT/%s\n",
+ port->port_num,
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ port->port_usb.in->name, port->port_usb.out->name);
+ return 0;
+
+fail:
+ /* release whatever was claimed before the failure point */
+ if (port->port_usb.notify_req)
+ usb_cser_free_req(port->port_usb.notify,
+ port->port_usb.notify_req);
+
+ if (port->port_usb.notify)
+ port->port_usb.notify->driver_data = NULL;
+ if (port->port_usb.out)
+ port->port_usb.out->driver_data = NULL;
+ if (port->port_usb.in)
+ port->port_usb.in->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+ return status;
+}
+
+/*
+ * usb_function_instance.free_func_inst: destroy the char device node,
+ * release the chardev region/class if this was the last port, and free
+ * the port and options structures.
+ */
+static void cser_free_inst(struct usb_function_instance *fi)
+{
+ struct f_cdev_opts *opts;
+
+ opts = container_of(fi, struct f_cdev_opts, func_inst);
+
+ if (opts->port) {
+ device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
+ cdev_del(&opts->port->fcdev_cdev);
+ }
+ usb_cser_chardev_deinit();
+ kfree(opts->func_name);
+ kfree(opts->port);
+ kfree(opts);
+}
+
+/* usb_function.unbind: release descriptor copies and the shared
+ * notification request allocated in usb_cser_bind(). */
+static void usb_cser_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_cdev *port = func_to_port(f);
+
+ usb_free_all_descriptors(f);
+ usb_cser_free_req(port->port_usb.notify, port->port_usb.notify_req);
+}
+
+/*
+ * Allocate 'num' requests of 'size' bytes on 'ep', attach completion 'cb'
+ * and chain them onto 'head'.  A partial allocation is tolerated: returns 0
+ * if at least one request was obtained, -ENOMEM only if none were.
+ */
+static int usb_cser_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num, int size,
+ void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("ep:%pK head:%pK num:%d size:%d cb:%pK",
+ ep, head, num, size, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_cser_alloc_req(ep, size, GFP_ATOMIC);
+ if (!req) {
+ pr_debug("req allocated:%d\n", i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add_tail(&req->list, head);
+ }
+
+ return 0;
+}
+
+/*
+ * Queue every idle request from read_pool on the OUT endpoint so the host
+ * can send data.  Runs only while the port is both connected and open.
+ * port_lock is dropped around usb_ep_queue() because the UDC may invoke
+ * the completion handler (which takes the lock) synchronously.
+ */
+static void usb_cser_start_rx(struct f_cdev *port)
+{
+ struct list_head *pool;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("start RX(USB OUT)\n");
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!(port->is_connected && port->port_open)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("can't start rx.\n");
+ return;
+ }
+
+ pool = &port->read_pool;
+ ep = port->port_usb.out;
+
+ while (!list_empty(pool)) {
+ struct usb_request *req;
+
+ req = list_entry(pool->next, struct usb_request, list);
+ list_del_init(&req->list);
+ req->length = BRIDGE_RX_BUF_SIZE;
+ req->complete = usb_cser_read_complete;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_KERNEL);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (ret) {
+ pr_err("port(%d):%pK usb ep(%s) queue failed\n",
+ port->port_num, port, ep->name);
+ /* put it back; a later start_rx attempt can retry */
+ list_add(&req->list, pool);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * OUT-endpoint completion: hand received data to readers via read_queued
+ * and wake them; errored/empty completions (or a closed port) recycle the
+ * request straight back into read_pool.
+ */
+static void usb_cser_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_cdev *port = ep->driver_data;
+ unsigned long flags;
+
+ pr_debug("ep:(%pK)(%s) port:%pK req_status:%d req->actual:%u\n",
+ ep, ep->name, port, req->status, req->actual);
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_open || req->status || !req->actual) {
+ list_add_tail(&req->list, &port->read_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->nbytes_from_host += req->actual;
+ list_add_tail(&req->list, &port->read_queued);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* data available: wake f_cdev_read()/poll() sleepers */
+ wake_up(&port->read_wq);
+ return;
+}
+
+/*
+ * IN-endpoint completion: account the bytes sent to the host and return
+ * the request to write_pool for reuse by f_cdev_write().
+ */
+static void usb_cser_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ unsigned long flags;
+ struct f_cdev *port = ep->driver_data;
+
+ pr_debug("ep:(%pK)(%s) port:%pK req_stats:%d\n",
+ ep, ep->name, port, req->status);
+
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host += req->actual;
+ list_add_tail(&req->list, &port->write_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* status is only logged; the request was recycled above either way */
+ switch (req->status) {
+ default:
+ pr_debug("unexpected %s status %d\n", ep->name, req->status);
+ /* FALL THROUGH */
+ case 0:
+ /* normal completion */
+ break;
+
+ case -ESHUTDOWN:
+ /* disconnect */
+ pr_debug("%s shutdown\n", ep->name);
+ break;
+ }
+
+ return;
+}
+
+/*
+ * Allocate the RX and TX request pools for a freshly connected port and
+ * kick off reception.  On any allocation failure everything allocated so
+ * far is released and the port stays idle.
+ */
+static void usb_cser_start_io(struct f_cdev *port)
+{
+ int ret = -ENODEV;
+ unsigned long flags;
+
+ pr_debug("port: %pK\n", port);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected)
+ goto start_io_out;
+
+ /* reset partially-consumed-read bookkeeping from a prior session */
+ port->current_rx_req = NULL;
+ port->pending_rx_bytes = 0;
+ port->current_rx_buf = NULL;
+
+ ret = usb_cser_alloc_requests(port->port_usb.out,
+ &port->read_pool,
+ BRIDGE_RX_QUEUE_SIZE, BRIDGE_RX_BUF_SIZE,
+ usb_cser_read_complete);
+ if (ret) {
+ pr_err("unable to allocate out requests\n");
+ goto start_io_out;
+ }
+
+ ret = usb_cser_alloc_requests(port->port_usb.in,
+ &port->write_pool,
+ BRIDGE_TX_QUEUE_SIZE, BRIDGE_TX_BUF_SIZE,
+ usb_cser_write_complete);
+ if (ret) {
+ usb_cser_free_requests(port->port_usb.out, &port->read_pool);
+ pr_err("unable to allocate IN requests\n");
+ goto start_io_out;
+ }
+
+start_io_out:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (ret)
+ return;
+
+ usb_cser_start_rx(port);
+}
+
+/*
+ * Disable both data endpoints (aborting in-flight transfers) and free every
+ * queued/pooled request, including a partially consumed RX request held by
+ * f_cdev_read().
+ */
+static void usb_cser_stop_io(struct f_cdev *port)
+{
+	struct usb_ep *in;
+	struct usb_ep *out;
+	unsigned long flags;
+
+	pr_debug("port:%pK\n", port);
+
+	in = port->port_usb.in;
+	out = port->port_usb.out;
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(out);
+	out->driver_data = NULL;
+	usb_ep_disable(in);
+	in->driver_data = NULL;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->current_rx_req != NULL) {
+		kfree(port->current_rx_req->buf);
+		usb_ep_free_request(out, port->current_rx_req);
+		/* Clear the stale pointer: f_cdev_read() snapshots and
+		 * restores current_rx_req, so leaving it dangling here
+		 * risked a use-after-free/double-free on a later stop. */
+		port->current_rx_req = NULL;
+	}
+
+	port->pending_rx_bytes = 0;
+	port->current_rx_buf = NULL;
+	usb_cser_free_requests(out, &port->read_queued);
+	usb_cser_free_requests(out, &port->read_pool);
+	usb_cser_free_requests(in, &port->write_pool);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * open(2) on the /dev node: rejects a second concurrent open, then blocks
+ * (interruptibly) until the USB cable side is connected before marking the
+ * port open and starting reception.
+ */
+int f_cdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ unsigned long flags;
+ struct f_cdev *port;
+
+ /* NOTE(review): container_of() cannot yield NULL; this check is
+ * defensive only */
+ port = container_of(inode->i_cdev, struct f_cdev, fcdev_cdev);
+ if (!port) {
+ pr_err("Port is NULL.\n");
+ return -EINVAL;
+ }
+
+ if (port && port->port_open) {
+ pr_err("port is already opened.\n");
+ return -EBUSY;
+ }
+
+ file->private_data = port;
+ pr_debug("opening port(%s)(%pK)\n", port->name, port);
+ /* wait for usb_cser_connect() to signal open_wq */
+ ret = wait_event_interruptible(port->open_wq,
+ port->is_connected);
+ if (ret) {
+ pr_debug("open interrupted.\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_open = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_cser_start_rx(port);
+
+ pr_debug("port(%s)(%pK) open is success\n", port->name, port);
+
+ return 0;
+}
+
+/*
+ * release(2): mark the port closed so completions recycle RX data instead
+ * of queueing it, and drop any pending control-bits notification.
+ */
+int f_cdev_release(struct inode *inode, struct file *file)
+{
+ unsigned long flags;
+ struct f_cdev *port;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_open = false;
+ port->cbits_updated = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("port(%s)(%pK) is closed.\n", port->name, port);
+
+ return 0;
+}
+
+/*
+ * read(2): copy data received from the host to userspace.  A USB OUT
+ * request larger than 'count' is consumed across multiple read() calls via
+ * the current_rx_req/current_rx_buf/pending_rx_bytes triple, which is
+ * snapshotted under port_lock, worked on with the lock dropped (for
+ * copy_to_user), and written back at the end.  Non-blocking: returns 0
+ * when nothing is queued.
+ */
+ssize_t f_cdev_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long flags;
+ struct f_cdev *port;
+ struct usb_request *req;
+ struct list_head *pool;
+ struct usb_request *current_rx_req;
+ size_t pending_rx_bytes, bytes_copied = 0, size;
+ u8 *current_rx_buf;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("read on port(%s)(%pK) count:%zu\n", port->name, port, count);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* take ownership of any partially consumed request */
+ current_rx_req = port->current_rx_req;
+ pending_rx_bytes = port->pending_rx_bytes;
+ current_rx_buf = port->current_rx_buf;
+ port->current_rx_req = NULL;
+ port->current_rx_buf = NULL;
+ port->pending_rx_bytes = 0;
+ bytes_copied = 0;
+
+ if (list_empty(&port->read_queued) && !pending_rx_bytes) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s(): read_queued list is empty.\n", __func__);
+ goto start_rx;
+ }
+
+ /*
+ * Consider below cases:
+ * 1. If available read buffer size (i.e. count value) is greater than
+ * available data as part of one USB OUT request buffer, then consider
+ * copying multiple USB OUT request buffers until read buffer is filled.
+ * 2. If available read buffer size (i.e. count value) is smaller than
+ * available data as part of one USB OUT request buffer, then copy this
+ * buffer data across multiple read() call until whole USB OUT request
+ * buffer is copied.
+ */
+ while ((pending_rx_bytes || !list_empty(&port->read_queued)) && count) {
+ if (pending_rx_bytes == 0) {
+ pool = &port->read_queued;
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del_init(&req->list);
+ current_rx_req = req;
+ pending_rx_bytes = req->actual;
+ current_rx_buf = req->buf;
+ }
+
+ /* drop the lock: copy_to_user() may fault and sleep */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ size = count;
+ if (size > pending_rx_bytes)
+ size = pending_rx_bytes;
+
+ pr_debug("pending_rx_bytes:%zu count:%zu size:%zu\n",
+ pending_rx_bytes, count, size);
+ size -= copy_to_user(buf, current_rx_buf, size);
+ /* NOTE(review): this counter update runs without port_lock;
+ * looks racy against the stats reset paths — confirm */
+ port->nbytes_to_port_bridge += size;
+ bytes_copied += size;
+ count -= size;
+ buf += size;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ list_add_tail(&current_rx_req->list, &port->read_pool);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -EAGAIN;
+ }
+
+ /*
+ * partial data available, then update pending_rx_bytes,
+ * otherwise add USB request back to read_pool for next data.
+ */
+ if (size < pending_rx_bytes) {
+ pending_rx_bytes -= size;
+ current_rx_buf += size;
+ } else {
+ list_add_tail(&current_rx_req->list, &port->read_pool);
+ pending_rx_bytes = 0;
+ current_rx_req = NULL;
+ current_rx_buf = NULL;
+ }
+ }
+
+ /* publish leftover state for the next read() call */
+ port->pending_rx_bytes = pending_rx_bytes;
+ port->current_rx_buf = current_rx_buf;
+ port->current_rx_req = current_rx_req;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+start_rx:
+ usb_cser_start_rx(port);
+ return bytes_copied;
+}
+
+/*
+ * write(2): send up to BRIDGE_TX_BUF_SIZE bytes to the host using one
+ * request from write_pool (short write if 'count' is larger; returns 0 when
+ * the pool is exhausted).  On failure the request is returned to the pool,
+ * or freed outright if the cable was pulled meanwhile.
+ */
+ssize_t f_cdev_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned long flags;
+ struct f_cdev *port;
+ struct usb_request *req;
+ struct list_head *pool;
+ unsigned xfer_size;
+ struct usb_ep *in;
+
+ port = file->private_data;
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ pr_debug("write on port(%s)(%pK)\n", port->name, port);
+
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: cable is disconnected.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (list_empty(&port->write_pool)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: Request list is empty.\n", __func__);
+ return 0;
+ }
+
+ in = port->port_usb.in;
+ pool = &port->write_pool;
+ req = list_first_entry(pool, struct usb_request, list);
+ list_del_init(&req->list);
+ /* drop the lock: copy_from_user() may fault and sleep */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: write buf size:%zu\n", __func__, count);
+ if (count > BRIDGE_TX_BUF_SIZE)
+ xfer_size = BRIDGE_TX_BUF_SIZE;
+ else
+ xfer_size = count;
+
+ ret = copy_from_user(req->buf, buf, xfer_size);
+ if (ret) {
+ pr_err("copy_from_user failed: err %d\n", ret);
+ ret = -EFAULT;
+ } else {
+ req->length = xfer_size;
+ /* terminate a max-packet-sized transfer with a ZLP */
+ req->zero = 1;
+ ret = usb_ep_queue(in, req, GFP_KERNEL);
+ if (ret) {
+ pr_err("EP QUEUE failed:%d\n", ret);
+ ret = -EIO;
+ goto err_exit;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_from_port_bridge += req->length;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+err_exit:
+ if (ret) {
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* USB cable is connected, add it back otherwise free request */
+ if (port->is_connected)
+ list_add(&req->list, &port->write_pool);
+ else
+ usb_cser_free_req(in, req);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return ret;
+ }
+
+ return xfer_size;
+}
+
+/*
+ * poll(2): POLLIN/POLLRDNORM when host data is queued, POLLPRI when the
+ * host changed the modem-control bits, POLLERR when disconnected.
+ */
+static unsigned int f_cdev_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct f_cdev *port;
+ unsigned long flags;
+
+ port = file->private_data;
+ if (port && port->is_connected) {
+ /* read_wq is woken by RX completions and notify_modem() */
+ poll_wait(file, &port->read_wq, wait);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!list_empty(&port->read_queued)) {
+ mask |= POLLIN | POLLRDNORM;
+ pr_debug("sets POLLIN for %s\n", port->name);
+ }
+
+ if (port->cbits_updated) {
+ mask |= POLLPRI;
+ pr_debug("sets POLLPRI for %s\n", port->name);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ } else {
+ pr_err("Failed due to NULL device or disconnected.\n");
+ mask = POLLERR;
+ }
+
+ return mask;
+}
+
+/*
+ * TIOCMGET backend: build a TIOCM_* bitmap from the host-driven DTR/RTS
+ * callbacks plus the locally driven CD/RI bits.
+ * NOTE(review): serial_state otherwise holds ACM_CTRL_* bits, but it is
+ * tested here against TIOCM_CD/TIOCM_RI, matching what f_cdev_tiocmset()
+ * stores — confirm the two bit spaces never mix elsewhere.
+ */
+static int f_cdev_tiocmget(struct f_cdev *port)
+{
+ struct cserial *cser;
+ unsigned int result = 0;
+
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -ENODEV;
+ }
+
+ cser = &port->port_usb;
+ if (cser->get_dtr)
+ result |= (cser->get_dtr(cser) ? TIOCM_DTR : 0);
+
+ if (cser->get_rts)
+ result |= (cser->get_rts(cser) ? TIOCM_RTS : 0);
+
+ if (cser->serial_state & TIOCM_CD)
+ result |= TIOCM_CD;
+
+ if (cser->serial_state & TIOCM_RI)
+ result |= TIOCM_RI;
+ return result;
+}
+
+/*
+ * TIOCMSET backend: apply RI and CD changes by mirroring them into
+ * serial_state and notifying the host through the function's callbacks.
+ * Only RI and CD are supported; other bits are ignored.  Returns the
+ * status of the last notification sent.
+ */
+static int f_cdev_tiocmset(struct f_cdev *port,
+ unsigned int set, unsigned int clear)
+{
+ struct cserial *cser;
+ int status = 0;
+
+ if (!port) {
+ pr_err("port is NULL.\n");
+ return -ENODEV;
+ }
+
+ cser = &port->port_usb;
+ if (set & TIOCM_RI) {
+ if (cser->send_ring_indicator) {
+ cser->serial_state |= TIOCM_RI;
+ status = cser->send_ring_indicator(cser, 1);
+ }
+ }
+ if (clear & TIOCM_RI) {
+ if (cser->send_ring_indicator) {
+ cser->serial_state &= ~TIOCM_RI;
+ status = cser->send_ring_indicator(cser, 0);
+ }
+ }
+ if (set & TIOCM_CD) {
+ if (cser->send_carrier_detect) {
+ cser->serial_state |= TIOCM_CD;
+ status = cser->send_carrier_detect(cser, 1);
+ }
+ }
+ if (clear & TIOCM_CD) {
+ if (cser->send_carrier_detect) {
+ cser->serial_state &= ~TIOCM_CD;
+ status = cser->send_carrier_detect(cser, 0);
+ }
+ }
+
+ return status;
+}
+
+/*
+ * ioctl(2): TIOCMSET/TIOCMBIS/TIOCMBIC all set the given bits and clear
+ * their complement; TIOCMGET returns the current TIOCM_* bitmap and clears
+ * the "control bits changed" poll condition.
+ */
+static long f_cdev_ioctl(struct file *fp, unsigned cmd,
+		unsigned long arg)
+{
+	long ret = 0;
+	int i = 0;
+	uint32_t val;
+	struct f_cdev *port;
+
+	port = fp->private_data;
+	if (!port) {
+		pr_err("port is null.\n");
+		/* Return a real errno: POLLERR is a poll(2) event mask and
+		 * would be seen by userspace as a bogus positive result. */
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case TIOCMBIC:
+	case TIOCMBIS:
+	case TIOCMSET:
+		pr_debug("TIOCMSET on port(%s)%pK\n", port->name, port);
+		i = get_user(val, (uint32_t *)arg);
+		if (i) {
+			pr_err("Error getting TIOCMSET value\n");
+			return i;
+		}
+		ret = f_cdev_tiocmset(port, val, ~val);
+		break;
+	case TIOCMGET:
+		pr_debug("TIOCMGET on port(%s)%pK\n", port->name, port);
+		ret = f_cdev_tiocmget(port);
+		if (ret >= 0) {
+			ret = put_user(ret, (uint32_t *)arg);
+			port->cbits_updated = false;
+		}
+		break;
+	default:
+		pr_err("Received cmd:%d not supported\n", cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * notify_modem callback: translate host ACM control bits to TIOCM_* and,
+ * if they changed, record them and wake pollers (POLLPRI via read_wq).
+ */
+static void usb_cser_notify_modem(void *fport, int ctrl_bits)
+{
+ int temp;
+ struct f_cdev *port = fport;
+
+ if (!port) {
+ pr_err("port is null\n");
+ return;
+ }
+
+ pr_debug("port(%s): ctrl_bits:%x\n", port->name, ctrl_bits);
+
+ temp = convert_acm_sigs_to_uart(ctrl_bits);
+
+ if (temp == port->cbits_to_modem)
+ return;
+
+ port->cbits_to_modem = temp;
+ port->cbits_updated = true;
+
+ wake_up(&port->read_wq);
+}
+
+/*
+ * Bring the data path up for a (re)connected cable: enable the bulk IN and
+ * OUT endpoints, mark the port connected, allocate I/O pools and start RX,
+ * then wake any opener blocked in f_cdev_open().
+ */
+int usb_cser_connect(struct f_cdev *port)
+{
+	unsigned long flags;
+	int ret;
+	struct cserial *cser;
+
+	if (!port) {
+		pr_err("port is NULL.\n");
+		return -ENODEV;
+	}
+
+	pr_debug("port(%s) (%pK)\n", port->name, port);
+
+	cser = &port->port_usb;
+	cser->notify_modem = usb_cser_notify_modem;
+
+	ret = usb_ep_enable(cser->in);
+	if (ret) {
+		pr_err("usb_ep_enable failed eptype:IN ep:%pK, err:%d",
+				cser->in, ret);
+		return ret;
+	}
+	cser->in->driver_data = port;
+
+	ret = usb_ep_enable(cser->out);
+	if (ret) {
+		pr_err("usb_ep_enable failed eptype:OUT ep:%pK, err: %d",
+				cser->out, ret);
+		/* undo the IN enable too; the old code only cleared
+		 * driver_data and left the endpoint enabled */
+		usb_ep_disable(cser->in);
+		cser->in->driver_data = NULL;
+		return ret;
+	}
+	cser->out->driver_data = port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	cser->pending = false;
+	cser->q_again = false;
+	port->is_connected = true;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	usb_cser_start_io(port);
+	wake_up(&port->open_wq);
+	return 0;
+}
+
+/*
+ * Tear the data path down on cable removal: stop I/O, report DTR low to
+ * the modem side, then reset connection state and byte counters.
+ * NOTE(review): nbytes_from_port_bridge is not reset here while the other
+ * three counters are — looks like an oversight, confirm before changing.
+ */
+void usb_cser_disconnect(struct f_cdev *port)
+{
+ unsigned long flags;
+
+ usb_cser_stop_io(port);
+
+ /* lower DTR to modem */
+ usb_cser_notify_modem(port, 0);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->is_connected = false;
+ port->nbytes_from_host = port->nbytes_to_host = 0;
+ port->nbytes_to_port_bridge = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Character-device interface exposed per port (/dev nodes created in
+ * f_cdev_alloc()). */
+static const struct file_operations f_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = f_cdev_open,
+ .release = f_cdev_release,
+ .read = f_cdev_read,
+ .write = f_cdev_write,
+ .poll = f_cdev_poll,
+ .unlocked_ioctl = f_cdev_ioctl,
+ .compat_ioctl = f_cdev_ioctl,
+};
+
+/*
+ * Allocate a port: reserve a minor via the shared IDA (creating the chardev
+ * region/class on first use), initialise its locks/queues/workqueue, and
+ * register the char device plus its /dev node.  Returns ERR_PTR on failure
+ * with everything — including the IDA minor — released.
+ */
+static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
+{
+	int ret;
+	dev_t dev;
+	struct device *device;
+	struct f_cdev *port;
+
+	port = kzalloc(sizeof(struct f_cdev), GFP_KERNEL);
+	if (!port) {
+		ret = -ENOMEM;
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&chardev_ida_lock);
+	if (idr_is_empty(&chardev_ida.idr)) {
+		ret = usb_cser_alloc_chardev_region();
+		if (ret) {
+			mutex_unlock(&chardev_ida_lock);
+			pr_err("alloc chardev failed\n");
+			goto err_alloc_chardev;
+		}
+	}
+
+	ret = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		/* old code missed this: a negative errno would have been
+		 * used as the minor number below */
+		mutex_unlock(&chardev_ida_lock);
+		goto err_get_ida;
+	}
+	if (ret >= NUM_INSTANCE) {
+		ida_simple_remove(&chardev_ida, ret);
+		mutex_unlock(&chardev_ida_lock);
+		ret = -ENODEV;
+		goto err_get_ida;
+	}
+
+	port->port_num = portno;
+	port->minor = ret;
+	mutex_unlock(&chardev_ida_lock);
+
+	snprintf(port->name, sizeof(port->name), "%s%d", DEVICE_NAME, portno);
+	spin_lock_init(&port->port_lock);
+
+	init_waitqueue_head(&port->open_wq);
+	init_waitqueue_head(&port->read_wq);
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queued);
+	INIT_LIST_HEAD(&port->write_pool);
+
+	port->fcdev_wq = create_singlethread_workqueue(port->name);
+	if (!port->fcdev_wq) {
+		pr_err("Unable to create workqueue fcdev_wq for port:%s\n",
+				port->name);
+		ret = -ENOMEM;
+		goto err_remove_ida;
+	}
+
+	/* create char device */
+	cdev_init(&port->fcdev_cdev, &f_cdev_fops);
+	dev = MKDEV(major, port->minor);
+	ret = cdev_add(&port->fcdev_cdev, dev, 1);
+	if (ret) {
+		pr_err("Failed to add cdev for port(%s)\n", port->name);
+		goto err_cdev_add;
+	}
+
+	device = device_create(fcdev_classp, NULL, dev, NULL, port->name);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		goto err_create_dev;
+	}
+
+	pr_info("port_name:%s (%pK) portno:(%d)\n",
+			port->name, port, port->port_num);
+	return port;
+
+err_create_dev:
+	cdev_del(&port->fcdev_cdev);
+err_cdev_add:
+	destroy_workqueue(port->fcdev_wq);
+err_remove_ida:
+	/* give the minor back so failed allocations don't leak IDA slots */
+	mutex_lock(&chardev_ida_lock);
+	ida_simple_remove(&chardev_ida, port->minor);
+	mutex_unlock(&chardev_ida_lock);
+err_get_ida:
+	usb_cser_chardev_deinit();
+err_alloc_chardev:
+	kfree(port);
+
+	return ERR_PTR(ret);
+}
+
+/* Release the chardev region and class, but only once no port holds an
+ * IDA minor any more (i.e. the last instance is gone). */
+static void usb_cser_chardev_deinit(void)
+{
+
+ if (idr_is_empty(&chardev_ida.idr)) {
+
+ if (major) {
+ unregister_chrdev_region(MKDEV(major, 0), minors);
+ major = minors = 0;
+ }
+
+ if (!IS_ERR_OR_NULL(fcdev_classp))
+ class_destroy(fcdev_classp);
+ }
+}
+
+/*
+ * Reserve a dynamic major with NUM_INSTANCE minors and create the device
+ * class.  Returns 0 on success or a negative errno; the old code set
+ * ret = -ENOMEM on class_create() failure but still returned 0 and leaked
+ * the chardev region.
+ */
+static int usb_cser_alloc_chardev_region(void)
+{
+	int ret;
+	dev_t dev;
+
+	ret = alloc_chrdev_region(&dev,
+			0,
+			NUM_INSTANCE,
+			MODULE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region() failed ret:%i\n", ret);
+		return ret;
+	}
+
+	major = MAJOR(dev);
+	minors = NUM_INSTANCE;
+
+	fcdev_classp = class_create(THIS_MODULE, MODULE_NAME);
+	if (IS_ERR(fcdev_classp)) {
+		pr_err("class_create() failed ENOMEM\n");
+		/* undo the region reservation so a retry can succeed */
+		unregister_chrdev_region(dev, NUM_INSTANCE);
+		major = minors = 0;
+		return PTR_ERR(fcdev_classp);
+	}
+
+	return 0;
+}
+
+/* Map a configfs item back to its f_cdev_opts instance. */
+static inline struct f_cdev_opts *to_f_cdev_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_cdev_opts,
+ func_inst.group);
+}
+
+/* Map a usb_function_instance back to its f_cdev_opts instance. */
+static struct f_cdev_opts *to_fi_cdev_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_cdev_opts, func_inst);
+}
+
+static void cserial_attr_release(struct config_item *item)
+{
+ struct f_cdev_opts *opts = to_f_cdev_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations cserial_item_ops = {
+ .release = cserial_attr_release,
+};
+
+/*
+ * configfs "status" attribute read: dump per-port byte counters and the
+ * open state into @page (a PAGE_SIZE buffer per configfs convention).
+ *
+ * The stats are formatted into a temporary buffer under port_lock and then
+ * copied out.  Previous revision passed the assembled buffer as the *format
+ * string* of scnprintf() (a '%' in the port name would corrupt the output)
+ * and used the text length as the output size, truncating the last byte.
+ */
+static ssize_t usb_cser_status_show(struct config_item *item, char *page)
+{
+	struct f_cdev *port = to_f_cdev_opts(item)->port;
+	char *buf;
+	unsigned long flags;
+	int temp = 0;
+	int ret;
+
+	buf = kzalloc(512, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	temp += scnprintf(buf + temp, 512 - temp,
+			"###PORT:%s###\n"
+			"port_no:%d\n"
+			"func:%s\n"
+			"nbytes_to_host: %lu\n"
+			"nbytes_from_host: %lu\n"
+			"nbytes_to_port_bridge: %lu\n"
+			"nbytes_from_port_bridge: %lu\n"
+			"cbits_to_modem: %u\n"
+			"Port Opened: %s\n",
+			port->name,
+			port->port_num,
+			to_f_cdev_opts(item)->func_name,
+			port->nbytes_to_host,
+			port->nbytes_from_host,
+			port->nbytes_to_port_bridge,
+			port->nbytes_from_port_bridge,
+			port->cbits_to_modem,
+			(port->port_open ? "Opened" : "Closed"));
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* copy as data, never as a format string */
+	ret = scnprintf(page, PAGE_SIZE, "%s", buf);
+	kfree(buf);
+
+	return ret;
+}
+
+/*
+ * configfs "status" attribute write: writing "0" clears the per-port byte
+ * counters; any other input is rejected (but still consumes @len so the
+ * writer does not loop).
+ *
+ * @stats is now zero-initialized: previous revision printed it even when
+ * kstrtou8() failed, i.e. read an uninitialized variable.
+ */
+static ssize_t usb_cser_status_store(struct config_item *item,
+		const char *page, size_t len)
+{
+	struct f_cdev *port = to_f_cdev_opts(item)->port;
+	unsigned long flags;
+	u8 stats = 0;
+
+	if (page == NULL) {
+		pr_err("Invalid buffer");
+		return len;
+	}
+
+	if (kstrtou8(page, 0, &stats) != 0 || stats != 0) {
+		pr_err("(%u)Wrong value. enter 0 to clear.\n", stats);
+		return len;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_to_host = port->nbytes_from_host = 0;
+	port->nbytes_to_port_bridge = port->nbytes_from_port_bridge = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return len;
+}
+
+/* Expose the "status" attribute (read stats via _show, clear via _store). */
+CONFIGFS_ATTR(usb_cser_, status);
+static struct configfs_attribute *cserial_attrs[] = {
+	&usb_cser_attr_status,
+	NULL,
+};
+
+static struct config_item_type cserial_func_type = {
+	.ct_item_ops = &cserial_item_ops,
+	.ct_attrs = cserial_attrs,
+	.ct_owner = THIS_MODULE,
+};
+
+/*
+ * Parse the configfs instance name "<func>.<port_num>", remember the
+ * function name and port number in @opts, and create the backing char
+ * device port.  For the "dun" function the modem control-signal callbacks
+ * are wired up as well.
+ *
+ * Error handling: opts->func_name is freed (and cleared) on every failure
+ * after it is allocated, and the error code from f_cdev_alloc() is
+ * propagated — previous revision leaked func_name on those paths and
+ * collapsed all f_cdev_alloc() failures to -ENOMEM.
+ */
+static int cser_set_inst_name(struct usb_function_instance *f, const char *name)
+{
+	struct f_cdev_opts *opts =
+		container_of(f, struct f_cdev_opts, func_inst);
+	char *ptr, *str;
+	size_t name_len, str_size;
+	int ret;
+	struct f_cdev *port;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_CDEV_INST_NAME)
+		return -ENAMETOOLONG;
+
+	/* expect name as cdev.<func>.<port_num> */
+	str = strnchr(name, strlen(name), '.');
+	if (!str) {
+		pr_err("invalid input (%s)\n", name);
+		return -EINVAL;
+	}
+
+	/* get function name */
+	str_size = name_len - strlen(str);
+	if (str_size > MAX_CDEV_FUNC_NAME)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, str_size - 1, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	opts->func_name = ptr;
+
+	/* get port number */
+	str = strrchr(name, '.');
+	if (!str) {
+		pr_err("err: port number not found\n");
+		ret = -EINVAL;
+		goto err_free_func_name;
+	}
+	pr_debug("str:%s\n", str);
+
+	/*
+	 * NOTE(review): this writes into the caller-supplied name buffer
+	 * despite the const qualifier — presumably configfs hands us a
+	 * mutable copy; confirm against the set_inst_name caller.
+	 */
+	*str = '\0';
+	str++;
+
+	ret = kstrtou8(str, 0, &opts->port_num);
+	if (ret) {
+		pr_err("erro: not able to get port number\n");
+		ret = -EINVAL;
+		goto err_free_func_name;
+	}
+
+	pr_debug("gser: port_num:%d func_name:%s\n",
+			opts->port_num, opts->func_name);
+
+	port = f_cdev_alloc(opts->func_name, opts->port_num);
+	if (IS_ERR(port)) {
+		ret = PTR_ERR(port);
+		pr_err("Failed to create cdev port(%d)\n", opts->port_num);
+		goto err_free_func_name;
+	}
+
+	opts->port = port;
+
+	/* For DUN functionality only sets control signal handling */
+	if (!strcmp(opts->func_name, "dun")) {
+		port->port_usb.connect = dun_cser_connect;
+		port->port_usb.get_dtr = dun_cser_get_dtr;
+		port->port_usb.get_rts = dun_cser_get_rts;
+		port->port_usb.send_carrier_detect =
+				dun_cser_send_carrier_detect;
+		port->port_usb.send_ring_indicator =
+				dun_cser_send_ring_indicator;
+		port->port_usb.send_modem_ctrl_bits = dun_cser_send_ctrl_bits;
+		port->port_usb.disconnect = dun_cser_disconnect;
+		port->port_usb.send_break = dun_cser_send_break;
+	}
+
+	return 0;
+
+err_free_func_name:
+	kfree(opts->func_name);
+	opts->func_name = NULL;
+	return ret;
+}
+
+/* Allocate a configfs-managed instance of the cser function. */
+static struct usb_function_instance *cser_alloc_inst(void)
+{
+	struct f_cdev_opts *fopts;
+
+	fopts = kzalloc(sizeof(*fopts), GFP_KERNEL);
+	if (fopts == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/* instance-name parsing and teardown hooks */
+	fopts->func_inst.set_inst_name = cser_set_inst_name;
+	fopts->func_inst.free_func_inst = cser_free_inst;
+
+	config_group_init_type_name(&fopts->func_inst.group, "",
+			&cserial_func_type);
+
+	return &fopts->func_inst;
+}
+
+/* Bind the usb_function callbacks for a previously created cser port. */
+static struct usb_function *cser_alloc(struct usb_function_instance *fi)
+{
+	struct f_cdev *p = to_fi_cdev_opts(fi)->port;
+	struct usb_function *func = &p->port_usb.func;
+
+	func->name = "cser";
+	func->strings = usb_cser_strings;
+
+	/* lifecycle callbacks */
+	func->bind = usb_cser_bind;
+	func->unbind = usb_cser_unbind;
+	func->set_alt = usb_cser_set_alt;
+	func->disable = usb_cser_disable;
+	func->setup = usb_cser_setup;
+	func->free_func = usb_cser_free_func;
+
+	return func;
+}
+
+DECLARE_USB_FUNCTION_INIT(cser, cser_alloc_inst, cser_alloc);
+MODULE_DESCRIPTION("USB Serial Character Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
new file mode 100644
index 000000000000..72f22a469ff1
--- /dev/null
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -0,0 +1,1116 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+
+#include <linux/usb/usbdiag.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
+#include <linux/qcom/diag_dload.h>
+
+#define MAX_INST_NAME_LEN        40
+
+/* for configfs support */
+struct diag_opts {
+	struct usb_function_instance func_inst;
+	char *name;	/* channel name, set via set_inst_name */
+};
+
+/* Map a configfs item back to its enclosing diag_opts. */
+static inline struct diag_opts *to_diag_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct diag_opts,
+			    func_inst.group);
+}
+
+/* Protects usb_diag_ch_list membership and channel teardown. */
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+/* IMEM region used to hand PID/serial to the download mode, mapped at init. */
+static struct dload_struct __iomem *diag_dload;
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0x30,
+};
+
+/* High-speed bulk endpoints: 512-byte max packet. */
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength 		=	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType 	=	USB_DT_ENDPOINT,
+	.bEndpointAddress 	=	USB_DIR_IN,
+	.bmAttributes 		=	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize 	=	__constant_cpu_to_le16(512),
+	.bInterval 		=	0,
+};
+/* Full-speed bulk endpoints: 64-byte max packet. */
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	__constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength 		=	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType 	=	USB_DT_ENDPOINT,
+	.bEndpointAddress 	=	USB_DIR_OUT,
+	.bmAttributes 		=	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize 	=	__constant_cpu_to_le16(512),
+	.bInterval 		=	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	__constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+/* Super-speed bulk endpoints: 1024-byte max packet plus companions. */
+static struct usb_endpoint_descriptor ss_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	__constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
+	.bLength =		sizeof ss_bulk_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor ss_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	__constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
+	.bLength =		sizeof ss_bulk_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+/* Per-speed descriptor lists handed to usb_copy_descriptors() in bind. */
+static struct usb_descriptor_header *fs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+	};
+static struct usb_descriptor_header *hs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
+	NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @lock: Spinlock to protect read_pool, write_pool lists
+ * @configured: set by set_alt once both endpoints are enabled, cleared
+ *              by disable; gates usb_diag_read()/usb_diag_write()
+ * @cdev: USB composite device struct
+ * @ch: USB diag channel
+ * @kref: refcount; a reference is held per in-flight request and
+ *        released in the completion handlers
+ * @dpkts_tolaptop: completed IN (to-host) packets
+ * @dpkts_tomodem: completed OUT (from-host) packets
+ * @dpkts_tolaptop_pending: IN packets queued but not yet completed
+ * @list_item: membership in diag_dev_list
+ *
+ */
+struct diag_context {
+	struct usb_function function;
+	struct usb_ep *out;
+	struct usb_ep *in;
+	struct list_head read_pool;
+	struct list_head write_pool;
+	spinlock_t lock;
+	unsigned configured;
+	struct usb_composite_dev *cdev;
+	struct usb_diag_ch *ch;
+	struct kref kref;
+
+	/* pkt counters */
+	unsigned long dpkts_tolaptop;
+	unsigned long dpkts_tomodem;
+	unsigned dpkts_tolaptop_pending;
+
+	/* A list node inside the diag_dev_list */
+	struct list_head list_item;
+};
+
+/* All diag function instances; populated by diag_context_init(). */
+static struct list_head diag_dev_list;
+
+/* Map a usb_function back to its enclosing diag_context. */
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+	return container_of(f, struct diag_context, function);
+}
+
+/* Called with ctxt->lock held; i.e. only use with kref_put_spinlock_irqsave */
+static void diag_context_release(struct kref *kref)
+{
+	struct diag_context *ctxt =
+		container_of(kref, struct diag_context, kref);
+
+	/* drop the lock the kref_put variant acquired, then free */
+	spin_unlock(&ctxt->lock);
+	kfree(ctxt);
+}
+
+/*
+ * Write the current USB PID and serial-number string into the shared
+ * download-mode IMEM region (diag_dload), so the bootloader can report
+ * them after a reset.  Only done when diag is interface 0 and the IMEM
+ * region was mapped at module init.
+ */
+static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
+{
+	struct usb_composite_dev *cdev = ctxt->cdev;
+	struct usb_gadget_strings **table;
+	struct usb_string *s;
+	struct usb_gadget_string_container *uc;
+	struct dload_struct local_diag_dload = { 0 };
+
+	/*
+	 * update pid and serial number to dload only if diag
+	 * interface is zeroth interface.
+	 */
+	if (intf_desc.bInterfaceNumber)
+		return;
+
+	if (!diag_dload) {
+		pr_debug("%s: unable to update PID and serial_no\n", __func__);
+		return;
+	}
+
+	/* update pid */
+	local_diag_dload.magic_struct.pid = PID_MAGIC_ID;
+	local_diag_dload.pid = cdev->desc.idProduct;
+	local_diag_dload.magic_struct.serial_num = SERIAL_NUM_MAGIC_ID;
+
+	/*
+	 * Search the gadget's string tables for the serial-number string.
+	 * If it is not found the cookie is still written below with the
+	 * serial_num magic set but an empty serial_number field.
+	 */
+	list_for_each_entry(uc, &cdev->gstrings, list) {
+		table = (struct usb_gadget_strings **)uc->stash;
+		if (!table) {
+			pr_err("%s: can't update dload cookie\n", __func__);
+			break;
+		}
+
+		for (s = (*table)->strings; s && s->s; s++) {
+			if (s->id == cdev->desc.iSerialNumber) {
+				strlcpy(local_diag_dload.serial_number, s->s,
+						SERIAL_NUMBER_LENGTH);
+				goto update_dload;
+			}
+		}
+
+	}
+
+update_dload:
+	pr_debug("%s: dload:%pK pid:%x serial_num:%s\n",
+				__func__, diag_dload, local_diag_dload.pid,
+				local_diag_dload.serial_number);
+
+	memcpy_toio(diag_dload, &local_diag_dload, sizeof(local_diag_dload));
+}
+
+/*
+ * IN (to-host) request completion.  If the transfer was an exact multiple
+ * of the endpoint packet size, a zero-length packet is re-queued on the
+ * same request to terminate the transfer; otherwise the request is
+ * returned to the write pool and the channel owner is notified.
+ *
+ * The final kref_put pairs with the kref_get taken in usb_diag_write();
+ * kref_put_spinlock_irqsave hands ctxt->lock to diag_context_release,
+ * which unlocks before freeing.
+ *
+ * NOTE(review): dpkts_tolaptop_pending is modified outside ctxt->lock
+ * here — presumably completions for one endpoint are serialized by the
+ * UDC; confirm.
+ */
+static void diag_write_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	ctxt->dpkts_tolaptop_pending--;
+
+	if (!req->status) {
+		if ((req->length >= ep->maxpacket) &&
+				((req->length % ep->maxpacket) == 0)) {
+			ctxt->dpkts_tolaptop_pending++;
+			req->length = 0;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			if (!usb_ep_queue(ctxt->in, req, GFP_ATOMIC))
+				return;
+			ctxt->dpkts_tolaptop_pending--;
+		} else {
+			ctxt->dpkts_tolaptop++;
+		}
+	}
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->write_pool);
+	if (req->length != 0) {
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (ctxt->ch && ctxt->ch->notify)
+		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_WRITE_DONE, d_req);
+
+	kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+			&ctxt->lock);
+}
+
+/*
+ * OUT (from-host) request completion: record the result in the diag
+ * request, return the USB request to the read pool, notify the channel
+ * owner, and drop the reference taken in usb_diag_read().
+ */
+static void diag_read_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->read_pool);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	ctxt->dpkts_tomodem++;
+
+	if (ctxt->ch && ctxt->ch->notify)
+		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_READ_DONE, d_req);
+
+	/* pairs with kref_get in usb_diag_read; may free ctxt */
+	kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+			&ctxt->lock);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates overs the available channels and returns
+ * the channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ *
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned, struct diag_request *))
+{
+	struct usb_diag_ch *ch;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	/* Check if we already have a channel with this name */
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	if (!found) {
+		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+		if (!ch)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * NOTE(review): the fields below are written outside ch_lock even
+	 * for an existing channel, and the lookup/insert is not atomic —
+	 * two concurrent opens of the same new name could both allocate.
+	 * Presumably callers serialize opens; confirm.
+	 */
+	ch->name = name;
+	ch->priv = priv;
+	ch->notify = notify;
+
+	if (!found) {
+		spin_lock_irqsave(&ch_lock, flags);
+		list_add_tail(&ch->list, &usb_diag_ch_list);
+		spin_unlock_irqrestore(&ch_lock, flags);
+	}
+
+	return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel: the channel is unlinked and
+ * freed, and any diag device still pointing at it has its ->ch cleared
+ * so later callbacks see a dead channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+	struct diag_context *dev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	ch->priv = NULL;
+	ch->notify = NULL;
+	/* Free-up the resources if channel is no more active */
+	list_del(&ch->list);
+	/* NOTE(review): diag_dev_list is walked under ch_lock here but
+	 * modified without it elsewhere (diag_context_init) — confirm the
+	 * intended locking for that list.
+	 */
+	list_for_each_entry(dev, &diag_dev_list, list_item)
+		if (dev->ch == ch)
+			dev->ch = NULL;
+	kfree(ch);
+
+	spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+/*
+ * Release every request parked in the write and read pools.
+ * Caller must hold ctxt->lock.
+ */
+static void free_reqs(struct diag_context *ctxt)
+{
+	struct usb_request *req, *next;
+
+	list_for_each_entry_safe(req, next, &ctxt->write_pool, list) {
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->in, req);
+	}
+
+	list_for_each_entry_safe(req, next, &ctxt->read_pool, list) {
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->out, req);
+	}
+}
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * This function allocate read and write USB requests for the interface
+ * associated with this channel. The actual buffer is not allocated.
+ * The buffer is passed by diag char driver.
+ *
+ * Returns 0 on success, -ENODEV if no diag device owns the channel,
+ * -ENOMEM on allocation failure (all partially allocated requests are
+ * freed again).
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	int i;
+	unsigned long flags;
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* Free previous session's stale requests */
+	free_reqs(ctxt);
+	for (i = 0; i < n_write; i++) {
+		/* GFP_ATOMIC because ctxt->lock is held */
+		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		kmemleak_not_leak(req);
+		req->complete = diag_write_complete;
+		list_add_tail(&req->list, &ctxt->write_pool);
+	}
+
+	for (i = 0; i < n_read; i++) {
+		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		kmemleak_not_leak(req);
+		req->complete = diag_read_complete;
+		list_add_tail(&req->list, &ctxt->read_pool);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return 0;
+fail:
+	free_reqs(ctxt);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return -ENOMEM;
+
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
+/* Largest single transfer the DWC3 controller handles per request. */
+#define DWC3_MAX_REQUEST_SIZE (16 * 1024 * 1024)
+/**
+ * usb_diag_request_size - Max request size for controller
+ * @ch: Channel handler
+ *
+ * Infom max request size so that diag driver can split packets
+ * in chunks of max size which controller can handle.
+ */
+int usb_diag_request_size(struct usb_diag_ch *ch)
+{
+	return DWC3_MAX_REQUEST_SIZE;
+}
+EXPORT_SYMBOL(usb_diag_request_size);
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on OUT endpoint of the interface corresponding to this
+ * channel. This function returns proper error code when interface is not
+ * in configured state, no Rx requests available and ep queue is failed.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req;
+	struct usb_ep *out;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured || !ctxt->out) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	out = ctxt->out;
+
+	if (list_empty(&ctxt->read_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+	list_del(&req->list);
+	kref_get(&ctxt->kref); /* put called in complete callback */
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+
+	/* make sure context is still valid after releasing lock */
+	if (ctxt != ch->priv_usb) {
+		usb_ep_free_request(out, req);
+		kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+				&ctxt->lock);
+		return -EIO;
+	}
+
+	if (usb_ep_queue(out, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->read_pool);
+		/* 1 error message for every 10 sec */
+		if (__ratelimit(&rl))
+			ERROR(ctxt->cdev, "%s: cannot queue"
+					" read request\n", __func__);
+
+		/*
+		 * If this put releases the context, diag_context_release
+		 * dropped the raw spinlock but irqs are still disabled, so
+		 * only local_irq_restore() remains; otherwise unlock fully.
+		 */
+		if (kref_put(&ctxt->kref, diag_context_release))
+			/* diag_context_release called spin_unlock already */
+			local_irq_restore(flags);
+		else
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
+
+/**
+ * usb_diag_write() - Write data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on IN endpoint of the interface corresponding to this
+ * channel. This function returns proper error code when interface is not
+ * in configured state, no Tx requests available and ep queue is failed.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct usb_ep *in;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured || !ctxt->in) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	in = ctxt->in;
+
+	if (list_empty(&ctxt->write_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+	list_del(&req->list);
+	kref_get(&ctxt->kref); /* put called in complete callback */
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+
+	/* make sure context is still valid after releasing lock */
+	if (ctxt != ch->priv_usb) {
+		usb_ep_free_request(in, req);
+		kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+				&ctxt->lock);
+		return -EIO;
+	}
+
+	ctxt->dpkts_tolaptop_pending++;
+	if (usb_ep_queue(in, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->write_pool);
+		ctxt->dpkts_tolaptop_pending--;
+		/* 1 error message for every 10 sec */
+		if (__ratelimit(&rl))
+			/* was "read request" — this is the write path */
+			ERROR(ctxt->cdev, "%s: cannot queue"
+					" write request\n", __func__);
+
+		if (kref_put(&ctxt->kref, diag_context_release))
+			/* diag_context_release called spin_unlock already */
+			local_irq_restore(flags);
+		else
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	/*
+	 * It's possible that both write completion AND unbind could have been
+	 * completed asynchronously by this point. Since they both release the
+	 * kref, ctxt is _NOT_ guaranteed to be valid here.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
+
+/*
+ * Disable callback: mark the interface unconfigured, tell the channel
+ * owner the cable/config is gone, disable both endpoints and release the
+ * channel's claim on this device.
+ */
+static void diag_function_disable(struct usb_function *f)
+{
+	struct diag_context *dev = func_to_diag(f);
+	unsigned long flags;
+
+	DBG(dev->cdev, "diag_function_disable\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch && dev->ch->notify)
+		dev->ch->notify(dev->ch->priv, USB_DIAG_DISCONNECT, NULL);
+
+	usb_ep_disable(dev->in);
+	dev->in->driver_data = NULL;
+
+	usb_ep_disable(dev->out);
+	dev->out->driver_data = NULL;
+	if (dev->ch)
+		dev->ch->priv_usb = NULL;
+}
+
+/*
+ * free_func callback: unlink from diag_dev_list and drop the init-time
+ * kref.  If this is the last reference, diag_context_release frees the
+ * context and has already dropped the raw spinlock, leaving only irqs
+ * to restore.
+ */
+static void diag_free_func(struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_del(&ctxt->list_item);
+	if (kref_put(&ctxt->kref, diag_context_release))
+		/* diag_context_release called spin_unlock already */
+		local_irq_restore(flags);
+	else
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/*
+ * set_alt callback: pick speed-specific endpoint descriptors, claim the
+ * channel for this device, enable both endpoints, reset the packet
+ * counters and notify the channel owner of the connect.
+ */
+static int diag_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+	int rc = 0;
+
+	if (config_ep_by_speed(cdev->gadget, f, dev->in) ||
+	    config_ep_by_speed(cdev->gadget, f, dev->out)) {
+		dev->in->desc = NULL;
+		dev->out->desc = NULL;
+		return -EINVAL;
+	}
+
+	if (!dev->ch)
+		return -ENODEV;
+
+	/*
+	 * Indicate to the diag channel that the active diag device is dev.
+	 * Since a few diag devices can point to the same channel.
+	 */
+	dev->ch->priv_usb = dev;
+
+	dev->in->driver_data = dev;
+	rc = usb_ep_enable(dev->in);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->in->name, rc);
+		return rc;
+	}
+	dev->out->driver_data = dev;
+	rc = usb_ep_enable(dev->out);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->out->name, rc);
+		usb_ep_disable(dev->in);
+		return rc;
+	}
+
+	dev->dpkts_tolaptop = 0;
+	dev->dpkts_tomodem = 0;
+	dev->dpkts_tolaptop_pending = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 1;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch->notify)
+		dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);
+
+	return rc;
+}
+
+/*
+ * unbind callback: free the per-speed descriptor copies, release the
+ * channel's claim if it points at this device, and free any requests
+ * left over from the session.
+ */
+static void diag_function_unbind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+	unsigned long flags;
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->fs_descriptors);
+
+	/*
+	 * Channel priv_usb may point to other diag function.
+	 * Clear the priv_usb only if the channel is used by the
+	 * diag dev we unbind here.
+	 */
+	if (ctxt->ch && ctxt->ch->priv_usb == ctxt)
+		ctxt->ch->priv_usb = NULL;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* Free any pending USB requests from last session */
+	free_reqs(ctxt);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/*
+ * bind callback: claim an interface number, autoconfigure the bulk IN/OUT
+ * endpoints, build per-speed descriptor copies and, for the first diag
+ * instance, push PID/serial into the download-mode cookie.
+ */
+static int diag_function_bind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct diag_context *ctxt = func_to_diag(f);
+	struct usb_ep *ep;
+	int status = -ENODEV;
+
+	ctxt->cdev = c->cdev;
+
+	/* NOTE(review): usb_interface_id() can return a negative errno;
+	 * the result is stored unchecked — confirm this cannot fail here.
+	 */
+	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto fail;
+	ctxt->in = ep;
+	ep->driver_data = ctxt;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto fail;
+	ctxt->out = ep;
+	ep->driver_data = ctxt;
+
+	status = -ENOMEM;
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(fs_diag_desc);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		/* HS reuses the endpoint addresses assigned for FS */
+		hs_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		hs_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		ss_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ss_diag_desc);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	/* Allow only first diag channel to update pid and serial no */
+	if (ctxt == list_first_entry(&diag_dev_list,
+				struct diag_context, list_item))
+		diag_update_pid_and_serial_num(ctxt);
+
+	return 0;
+fail:
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (ctxt->out)
+		ctxt->out->driver_data = NULL;
+	if (ctxt->in)
+		ctxt->in->driver_data = NULL;
+	return status;
+
+}
+
+/*
+ * Allocate and initialize a diag_context bound to the named channel,
+ * creating the channel first if it does not exist yet.
+ *
+ * NOTE(review): usb_diag_ch_list is scanned and diag_dev_list appended
+ * without holding ch_lock — presumably all callers come through the
+ * serialized configfs path; confirm.
+ */
+static struct diag_context *diag_context_init(const char *name)
+{
+	struct diag_context *dev;
+	struct usb_diag_ch *_ch;
+	int found = 0;
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, _ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		pr_warn("%s: unable to get diag usb channel\n", __func__);
+
+		_ch = kzalloc(sizeof(*_ch), GFP_KERNEL);
+		if (_ch == NULL)
+			return ERR_PTR(-ENOMEM);
+
+		_ch->name = name;
+
+		spin_lock_irqsave(&ch_lock, flags);
+		list_add_tail(&_ch->list, &usb_diag_ch_list);
+		spin_unlock_irqrestore(&ch_lock, flags);
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	list_add_tail(&dev->list_item, &diag_dev_list);
+
+	/*
+	 * A few diag devices can point to the same channel, in case that
+	 * the diag devices belong to different configurations, however
+	 * only the active diag device will claim the channel by setting
+	 * the ch->priv_usb (see diag_function_set_alt).
+	 */
+	dev->ch = _ch;
+
+	dev->function.name = _ch->name;
+	dev->function.fs_descriptors = fs_diag_desc;
+	dev->function.hs_descriptors = hs_diag_desc;
+	dev->function.bind = diag_function_bind;
+	dev->function.unbind = diag_function_unbind;
+	dev->function.set_alt = diag_function_set_alt;
+	dev->function.disable = diag_function_disable;
+	dev->function.free_func = diag_free_func;
+	kref_init(&dev->kref);
+	spin_lock_init(&dev->lock);
+	INIT_LIST_HEAD(&dev->read_pool);
+	INIT_LIST_HEAD(&dev->write_pool);
+
+	return dev;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * Debugfs "usb_diag/status" support: dump/reset per-channel packet
+ * counters.  NOTE(review): debug_buffer is a single static buffer and
+ * the channel list is walked without ch_lock in both handlers —
+ * concurrent readers/channel teardown could race; confirm acceptable
+ * for a debug interface.
+ */
+static char debug_buffer[PAGE_SIZE];
+
+/* Format counters for every channel that currently has a diag device. */
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf = debug_buffer;
+	int temp = 0;
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt = ch->priv_usb;
+		unsigned long flags;
+
+		if (ctxt) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+					"---Name: %s---\n"
+					"endpoints: %s, %s\n"
+					"dpkts_tolaptop: %lu\n"
+					"dpkts_tomodem:  %lu\n"
+					"pkts_tolaptop_pending: %u\n",
+					ch->name,
+					ctxt->in->name, ctxt->out->name,
+					ctxt->dpkts_tolaptop,
+					ctxt->dpkts_tomodem,
+					ctxt->dpkts_tolaptop_pending);
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		}
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+/* Any write zeroes the counters on every active channel. */
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt = ch->priv_usb;
+		unsigned long flags;
+
+		if (ctxt) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			ctxt->dpkts_tolaptop = 0;
+			ctxt->dpkts_tomodem = 0;
+			ctxt->dpkts_tolaptop_pending = 0;
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		}
+	}
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_fdiag_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+struct dentry *dent_diag;
+/* Create /sys/kernel/debug/usb_diag/status; best-effort, never fails. */
+static void fdiag_debugfs_init(void)
+{
+	struct dentry *dent_diag_status;
+	dent_diag = debugfs_create_dir("usb_diag", 0);
+	if (!dent_diag || IS_ERR(dent_diag))
+		return;
+
+	dent_diag_status = debugfs_create_file("status", 0444, dent_diag, 0,
+			&debug_fdiag_ops);
+
+	if (!dent_diag_status || IS_ERR(dent_diag_status)) {
+		debugfs_remove(dent_diag);
+		dent_diag = NULL;
+		return;
+	}
+}
+
+static void fdiag_debugfs_remove(void)
+{
+	debugfs_remove_recursive(dent_diag);
+}
+#else
+static inline void fdiag_debugfs_init(void) {}
+static inline void fdiag_debugfs_remove(void) {}
+#endif
+
+/* configfs item release: drop the reference held on the function instance. */
+static void diag_opts_release(struct config_item *item)
+{
+	struct diag_opts *opts = to_diag_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations diag_item_ops = {
+	.release	= diag_opts_release,
+};
+
+static struct config_item_type diag_func_type = {
+	.ct_item_ops	= &diag_item_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+/*
+ * Remember the configfs instance name; it becomes the diag channel name
+ * when the function is allocated.
+ */
+static int diag_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct diag_opts *opts = container_of(fi, struct diag_opts, func_inst);
+	size_t len = strlen(name) + 1;
+	char *dup;
+
+	if (len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	dup = kstrndup(name, len, GFP_KERNEL);
+	if (!dup)
+		return -ENOMEM;
+
+	opts->name = dup;
+
+	return 0;
+}
+
+/* Release the instance name and the instance itself. */
+static void diag_free_inst(struct usb_function_instance *f)
+{
+	struct diag_opts *opts = container_of(f, struct diag_opts, func_inst);
+
+	kfree(opts->name);
+	kfree(opts);
+}
+
+/* Allocate a configfs-managed instance of the diag function. */
+static struct usb_function_instance *diag_alloc_inst(void)
+{
+	struct diag_opts *dopts;
+
+	dopts = kzalloc(sizeof(*dopts), GFP_KERNEL);
+	if (dopts == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/* instance-name handling and teardown hooks */
+	dopts->func_inst.set_inst_name = diag_set_inst_name;
+	dopts->func_inst.free_func_inst = diag_free_inst;
+
+	config_group_init_type_name(&dopts->func_inst.group, "",
+				    &diag_func_type);
+
+	return &dopts->func_inst;
+}
+
+/* Build a diag_context for the named channel and hand back its function. */
+static struct usb_function *diag_alloc(struct usb_function_instance *fi)
+{
+	struct diag_opts *dopts = container_of(fi, struct diag_opts,
+					       func_inst);
+	struct diag_context *ctxt = diag_context_init(dopts->name);
+
+	if (IS_ERR(ctxt))
+		return ERR_CAST(ctxt);
+
+	return &ctxt->function;
+}
+
+DECLARE_USB_FUNCTION(diag, diag_alloc_inst, diag_alloc);
+
+/*
+ * Module init: register the configfs function, set up debugfs stats and
+ * map the download-mode IMEM cookie if the DT node exists (optional —
+ * a missing node only disables PID/serial hand-off).
+ *
+ * Previous revision leaked the debugfs directory when
+ * usb_function_register() failed.
+ */
+static int __init diag_init(void)
+{
+	struct device_node *np;
+	int ret;
+
+	INIT_LIST_HEAD(&diag_dev_list);
+
+	fdiag_debugfs_init();
+
+	ret = usb_function_register(&diagusb_func);
+	if (ret) {
+		pr_err("%s: failed to register diag %d\n", __func__, ret);
+		fdiag_debugfs_remove();
+		return ret;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-diag-dload");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL, "qcom,android-usb");
+
+	if (!np)
+		pr_warn("diag: failed to find diag_dload imem node\n");
+
+	diag_dload = np ? of_iomap(np, 0) : NULL;
+
+	return ret;
+}
+
+/*
+ * Module exit: unmap the dload cookie, unregister the function, tear
+ * down debugfs and free every channel that the diag char driver (the
+ * ->priv owner) is no longer using.
+ */
+static void __exit diag_exit(void)
+{
+	struct list_head *act, *tmp;
+	struct usb_diag_ch *_ch;
+	unsigned long flags;
+
+	if (diag_dload)
+		iounmap(diag_dload);
+
+	usb_function_unregister(&diagusb_func);
+
+	fdiag_debugfs_remove();
+
+	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+		_ch = list_entry(act, struct usb_diag_ch, list);
+
+		spin_lock_irqsave(&ch_lock, flags);
+		/* Free if diagchar is not using the channel anymore */
+		if (!_ch->priv) {
+			list_del(&_ch->list);
+			kfree(_ch);
+		}
+		spin_unlock_irqrestore(&ch_lock, flags);
+	}
+
+}
+
+module_init(diag_init);
+module_exit(diag_exit);
+
+MODULE_DESCRIPTION("Diag function driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index c045d4176a9c..cd6441e8354c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -24,6 +24,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/uio.h>
+#include <linux/ipc_logging.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
@@ -41,6 +42,16 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define NUM_PAGES 10 /* # of pages for ipc logging */
+
+static void *ffs_ipc_log;
+#define ffs_log(fmt, ...) do { \
+ if (ffs_ipc_log) \
+ ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -57,10 +68,32 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+static LIST_HEAD(inst_list);
+
+/* ffs instance status */
+#define INST_NAME_SIZE 16
+
+struct ffs_inst_status {
+ char inst_name[INST_NAME_SIZE];
+ struct list_head list;
+ struct mutex ffs_lock;
+ bool inst_exist;
+ struct f_fs_opts *opts;
+ struct ffs_data *ffs_data;
+};
+
+/* Free instance structures */
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name);
+static void ffs_inst_clean_delay(const char *inst_name);
+static int ffs_inst_exist_check(const char *inst_name);
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst);
/* The function structure ***************************************************/
struct ffs_ep;
+static bool first_read_done;
struct ffs_function {
struct usb_configuration *conf;
@@ -118,12 +151,14 @@ struct ffs_ep {
u8 num;
int status; /* P: epfile->mutex */
+ bool is_busy;
};
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
wait_queue_head_t wait;
+ atomic_t error;
struct ffs_data *ffs;
struct ffs_ep *ep; /* P: ffs->eps_lock */
@@ -136,6 +171,7 @@ struct ffs_epfile {
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+ atomic_t opened;
};
/* ffs_io_data structure ***************************************************/
@@ -211,6 +247,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
spin_unlock_irq(&ffs->ev.waitq.lock);
+ ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
req->buf = data;
req->length = len;
@@ -235,11 +274,18 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
+
+ ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
+ ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+ ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
if (ffs->ev.can_stall) {
pr_vdebug("ep0 stall\n");
usb_ep_set_halt(ffs->gadget->ep0);
@@ -260,6 +306,13 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -388,6 +441,9 @@ done_spin:
break;
}
+ ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
return ret;
}
@@ -421,6 +477,10 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
ffs->ev.count * sizeof *ffs->ev.types);
spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+ ffs->setup_state, ffs->flags, n);
+
mutex_unlock(&ffs->mutex);
return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -436,6 +496,13 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -524,20 +591,36 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
+ ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
kfree(data);
+
return ret;
}
static int ffs_ep0_open(struct inode *inode, struct file *file)
{
struct ffs_data *ffs = inode->i_private;
+ int ret;
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (unlikely(ffs->state == FFS_CLOSING))
return -EBUSY;
+ smp_mb__before_atomic();
+ if (atomic_read(&ffs->opened))
+ return -EBUSY;
+
file->private_data = ffs;
ffs_data_opened(ffs);
@@ -550,6 +633,9 @@ static int ffs_ep0_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
ffs_data_closed(ffs);
return 0;
@@ -563,6 +649,13 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -581,6 +674,13 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
unsigned int mask = POLLWRNORM;
int ret;
+ ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ ret = ffs_inst_exist_check(ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
poll_wait(file, &ffs->ev.waitq, wait);
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -611,6 +711,8 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
break;
}
+ ffs_log("exit: mask %u", mask);
+
mutex_unlock(&ffs->mutex);
return mask;
@@ -632,10 +734,16 @@ static const struct file_operations ffs_ep0_operations = {
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
+ struct ffs_ep *ep = _ep->driver_data;
ENTER();
- if (likely(req->context)) {
+
+ /* req may be freed during unbind */
+ if (ep && ep->req && likely(req->context)) {
struct ffs_ep *ep = _ep->driver_data;
ep->status = req->status ? req->status : req->actual;
+ /* Set is_busy false to indicate completion of last request */
+ ep->is_busy = false;
+ ffs_log("ep status %d for req %pK", ep->status, req);
complete(req->context);
}
}
@@ -648,6 +756,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->req->actual;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ ffs_log("enter: ret %d", ret);
+
if (io_data->read && ret > 0) {
mm_segment_t oldfs = get_fs();
@@ -671,6 +781,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
+
+ ffs_log("exit");
}
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -680,18 +792,35 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ ffs_log("enter");
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
schedule_work(&io_data->work);
+
+ ffs_log("exit");
}
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
struct ffs_ep *ep;
+ struct ffs_data *ffs = epfile->ffs;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
int halt;
+ ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name,
+ atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE");
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
+ smp_mb__before_atomic();
+retry:
+ if (atomic_read(&epfile->error))
+ return -ENODEV;
+
/* Are we still active? */
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
ret = -ENODEV;
@@ -706,11 +835,28 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
goto error;
}
- ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
- if (ret) {
+ /* Don't wait on write if device is offline */
+ if (!io_data->read) {
ret = -EINTR;
goto error;
}
+
+ /*
+ * If ep is disabled, this fails all current IOs
+ * and wait for next epfile open to happen.
+ */
+ smp_mb__before_atomic();
+ if (!atomic_read(&epfile->error)) {
+ ret = wait_event_interruptible(epfile->wait,
+ (ep = epfile->ep));
+ if (ret < 0)
+ goto error;
+ }
+
+ if (!ep) {
+ ret = -ENODEV;
+ goto error;
+ }
}
/* Do we halt? */
@@ -820,25 +966,50 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
} else {
- DECLARE_COMPLETION_ONSTACK(done);
+ struct completion *done;
req = ep->req;
req->buf = data;
req->length = data_len;
+ ret = 0;
- req->context = &done;
req->complete = ffs_epfile_io_complete;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (io_data->read) {
+ reinit_completion(&epfile->ffs->epout_completion);
+ done = &epfile->ffs->epout_completion;
+ } else {
+ reinit_completion(&epfile->ffs->epin_completion);
+ done = &epfile->ffs->epin_completion;
+ }
+ req->context = done;
+
+ /*
+ * Don't queue another read request if previous is
+ * still busy.
+ */
+ if (!(io_data->read && ep->is_busy)) {
+ ep->is_busy = true;
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ }
spin_unlock_irq(&epfile->ffs->eps_lock);
if (unlikely(ret < 0)) {
- /* nop */
+ ep->is_busy = false;
+ ret = -EIO;
} else if (unlikely(
- wait_for_completion_interruptible(&done))) {
+ wait_for_completion_interruptible(done))) {
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /*
+ * While we were acquiring lock endpoint got
+ * disabled (disconnect) or changed
+ * (composition switch) ?
+ */
+ if (epfile->ep == ep)
+ usb_ep_dequeue(ep->ep, req);
+ spin_unlock_irq(&epfile->ffs->eps_lock);
ret = -EINTR;
- usb_ep_dequeue(ep->ep, req);
} else {
/*
* XXX We may end up silently droping data
@@ -847,11 +1018,58 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* to maxpacketsize), we may end up with more
* data then user space has space for.
*/
- ret = ep->status;
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /*
+ * While we were acquiring lock endpoint got
+ * disabled (disconnect) or changed
+ * (composition switch) ?
+ */
+ if (epfile->ep == ep) {
+ ret = ep->status;
+ if (ret >= 0)
+ first_read_done = true;
+ } else {
+ ret = -ENODEV;
+ }
+
+ /* do wait again if func eps are not enabled */
+ if (io_data->read && !first_read_done
+ && ret < 0) {
+ unsigned short count = ffs->eps_count;
+
+ pr_debug("%s: waiting for the online state\n",
+ __func__);
+ ret = 0;
+ kfree(data);
+ data = NULL;
+ data_len = -EINVAL;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
+ epfile = ffs->epfiles;
+ do {
+ atomic_set(&epfile->error, 0);
+ ++epfile;
+ } while (--count);
+ epfile = file->private_data;
+ goto retry;
+ }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
- if (!ret)
- ret = -EFAULT;
+
+ if (ret > data_len) {
+ ret = -EOVERFLOW;
+ pr_err("More data(%zd) received than intended length(%zu)\n",
+ ret, data_len);
+
+ } else {
+ ret = copy_to_iter(data, ret, &io_data->data);
+ pr_debug("copied (%zd) bytes to user space\n", ret);
+ if (!ret) {
+ pr_err("Fail to copy to user\n");
+ ret = -EFAULT;
+ }
+ }
}
}
kfree(data);
@@ -859,6 +1077,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
}
mutex_unlock(&epfile->mutex);
+
+ ffs_log("exit:ret %zu", ret);
+
return ret;
error_lock:
@@ -866,6 +1087,9 @@ error_lock:
mutex_unlock(&epfile->mutex);
error:
kfree(data);
+
+ ffs_log("exit: ret %zu", ret);
+
return ret;
}
@@ -873,15 +1097,39 @@ static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ int ret;
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
+ smp_mb__before_atomic();
+ if (atomic_read(&epfile->opened)) {
+ pr_err("%s(): ep(%s) is already opened.\n",
+ __func__, epfile->name);
+ return -EBUSY;
+ }
+
+ smp_mb__before_atomic();
+ atomic_set(&epfile->opened, 1);
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
+ smp_mb__before_atomic();
+ atomic_set(&epfile->error, 0);
+ first_read_done = false;
+
+ ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
return 0;
}
@@ -894,6 +1142,9 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
if (likely(io_data && io_data->ep && io_data->req))
@@ -903,6 +1154,8 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
+ ffs_log("exit: value %d", value);
+
return value;
}
@@ -913,6 +1166,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -940,6 +1195,9 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kfree(p);
else
*from = p->data;
+
+ ffs_log("exit");
+
return res;
}
@@ -950,6 +1208,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -989,6 +1249,9 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
} else {
*to = p->data;
}
+
+ ffs_log("exit");
+
return res;
}
@@ -999,7 +1262,16 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ smp_mb__before_atomic();
+ atomic_set(&epfile->opened, 0);
+ atomic_set(&epfile->error, 1);
ffs_data_closed(epfile->ffs);
+ file->private_data = NULL;
+
+ ffs_log("exit");
return 0;
}
@@ -1012,6 +1284,13 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1064,6 +1343,8 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit:ret %d", ret);
+
return ret;
}
@@ -1095,6 +1376,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
ENTER();
+ ffs_log("enter");
+
inode = new_inode(sb);
if (likely(inode)) {
@@ -1114,6 +1397,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_op = iops;
}
+ ffs_log("exit");
+
return inode;
}
@@ -1128,6 +1413,8 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
ENTER();
+ ffs_log("enter");
+
dentry = d_alloc_name(sb->s_root, name);
if (unlikely(!dentry))
return NULL;
@@ -1139,6 +1426,9 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
}
d_add(dentry, inode);
+
+ ffs_log("exit");
+
return dentry;
}
@@ -1164,6 +1454,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ENTER();
+ ffs_log("enter");
+
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
@@ -1188,6 +1480,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&ffs_ep0_operations)))
return -ENOMEM;
+ ffs_log("exit");
+
return 0;
}
@@ -1195,6 +1489,8 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
ENTER();
+ ffs_log("enter");
+
if (!opts || !*opts)
return 0;
@@ -1277,6 +1573,8 @@ invalid:
opts = comma + 1;
}
+ ffs_log("exit");
+
return 0;
}
@@ -1299,9 +1597,12 @@ ffs_fs_mount(struct file_system_type *t, int flags,
int ret;
void *ffs_dev;
struct ffs_data *ffs;
+ struct ffs_inst_status *inst_status;
ENTER();
+ ffs_log("enter");
+
ret = ffs_fs_parse_opts(&data, opts);
if (unlikely(ret < 0))
return ERR_PTR(ret);
@@ -1326,11 +1627,26 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs->private_data = ffs_dev;
data.ffs_data = ffs;
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find instance (%s)\n",
+ ffs->dev_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Store ffs to global status structure */
+ ffs_dev_lock();
+ inst_status->ffs_data = ffs;
+ ffs_dev_unlock();
+
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
if (IS_ERR(rv) && data.ffs_data) {
ffs_release_dev(data.ffs_data);
ffs_data_put(data.ffs_data);
}
+
+ ffs_log("exit");
+
return rv;
}
@@ -1339,11 +1655,15 @@ ffs_fs_kill_sb(struct super_block *sb)
{
ENTER();
+ ffs_log("enter");
+
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
ffs_data_closed(sb->s_fs_info);
}
+
+ ffs_log("exit");
}
static struct file_system_type ffs_fs_type = {
@@ -1380,7 +1700,6 @@ static void functionfs_cleanup(void)
unregister_filesystem(&ffs_fs_type);
}
-
/* ffs_data and ffs_function construction and destruction code **************/
static void ffs_data_clear(struct ffs_data *ffs);
@@ -1390,39 +1709,72 @@ static void ffs_data_get(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter");
+
+ smp_mb__before_atomic();
atomic_inc(&ffs->ref);
+
+ ffs_log("exit");
}
static void ffs_data_opened(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ smp_mb__before_atomic();
atomic_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_put(struct ffs_data *ffs)
{
+ struct ffs_inst_status *inst_status;
+ const char *dev_name;
+
ENTER();
+ ffs_log("enter");
+
+ smp_mb__before_atomic();
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
+ /* Clear ffs from global structure */
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (!IS_ERR(inst_status)) {
+ ffs_dev_lock();
+ inst_status->ffs_data = NULL;
+ ffs_dev_unlock();
+ }
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
+ dev_name = ffs->dev_name;
kfree(ffs);
+ ffs_inst_clean_delay(dev_name);
+ kfree(dev_name);
}
+
+ ffs_log("exit");
}
static void ffs_data_closed(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
+ smp_mb__before_atomic();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
@@ -1438,11 +1790,16 @@ static void ffs_data_closed(struct ffs_data *ffs)
ffs_data_reset(ffs);
}
}
+
+ smp_mb__before_atomic();
if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_put(ffs);
}
@@ -1454,6 +1811,8 @@ static struct ffs_data *ffs_data_new(void)
ENTER();
+ ffs_log("enter");
+
atomic_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
@@ -1461,10 +1820,14 @@ static struct ffs_data *ffs_data_new(void)
spin_lock_init(&ffs->eps_lock);
init_waitqueue_head(&ffs->ev.waitq);
init_completion(&ffs->ep0req_completion);
+ init_completion(&ffs->epout_completion);
+ init_completion(&ffs->epin_completion);
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
+ ffs_log("exit");
+
return ffs;
}
@@ -1472,8 +1835,16 @@ static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
+ pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
+ __func__, ffs->gadget, ffs->flags);
ffs_closed(ffs);
+ if (ffs->gadget)
+ pr_err("%s: ffs:%pK ffs->gadget= %pK, ffs->flags= %lu\n",
+ __func__, ffs, ffs->gadget, ffs->flags);
BUG_ON(ffs->gadget);
if (ffs->epfiles)
@@ -1485,12 +1856,18 @@ static void ffs_data_clear(struct ffs_data *ffs)
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_data_reset(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_clear(ffs);
ffs->epfiles = NULL;
@@ -1517,6 +1894,9 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->ms_os_descs_ext_prop_count = 0;
ffs->ms_os_descs_ext_prop_name_len = 0;
ffs->ms_os_descs_ext_prop_data_len = 0;
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
@@ -1527,6 +1907,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (WARN_ON(ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
@@ -1552,6 +1935,10 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
}
ffs->gadget = cdev->gadget;
+
+ ffs_log("exit: state %d setup_state %d flag %lu gadget %pK\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
+
ffs_data_get(ffs);
return 0;
}
@@ -1565,6 +1952,8 @@ static void functionfs_unbind(struct ffs_data *ffs)
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
ffs_data_put(ffs);
}
}
@@ -1576,6 +1965,9 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
count = ffs->eps_count;
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
@@ -1586,6 +1978,7 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
init_waitqueue_head(&epfile->wait);
+ atomic_set(&epfile->opened, 0);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
@@ -1600,6 +1993,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
}
ffs->epfiles = epfiles;
+
+ ffs_log("exit: eps_count %u state %d setup_state %d flag %lu",
+ count, ffs->state, ffs->setup_state, ffs->flags);
+
return 0;
}
@@ -1609,6 +2006,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
ENTER();
+ ffs_log("enter: count %u", count);
+
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex) ||
waitqueue_active(&epfile->wait));
@@ -1620,6 +2019,8 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
}
kfree(epfiles);
+
+ ffs_log("exit");
}
static void ffs_func_eps_disable(struct ffs_function *func)
@@ -1629,19 +2030,30 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
+
+ smp_mb__before_atomic();
+ if (epfile)
+ atomic_set(&epfile->error, 1);
+
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
if (epfile) {
+ atomic_set(&epfile->error, 1);
epfile->ep = NULL;
++epfile;
}
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+ ffs_log("exit");
}
static int ffs_func_eps_enable(struct ffs_function *func)
@@ -1653,17 +2065,17 @@ static int ffs_func_eps_enable(struct ffs_function *func)
unsigned long flags;
int ret = 0;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
- struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
- int needs_comp_desc = false;
int desc_idx;
- if (ffs->gadget->speed == USB_SPEED_SUPER) {
+ if (ffs->gadget->speed == USB_SPEED_SUPER)
desc_idx = 2;
- needs_comp_desc = true;
- } else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ else if (ffs->gadget->speed == USB_SPEED_HIGH)
desc_idx = 1;
else
desc_idx = 0;
@@ -1681,11 +2093,11 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ep->ep->driver_data = ep;
ep->ep->desc = ds;
- if (needs_comp_desc) {
- comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
- USB_DT_ENDPOINT_SIZE);
- ep->ep->maxburst = comp_desc->bMaxBurst + 1;
- ep->ep->comp_desc = comp_desc;
+ ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
+ if (ret) {
+ pr_err("%s(): config_ep_by_speed(%d) err for %s\n",
+ __func__, ret, ep->ep->name);
+ break;
}
ret = usb_ep_enable(ep->ep);
@@ -1693,6 +2105,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ds);
epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ ffs_log("usb_ep_enable %s", ep->ep->name);
} else {
break;
}
@@ -1704,6 +2117,8 @@ static int ffs_func_eps_enable(struct ffs_function *func)
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -1744,6 +2159,8 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
/* At least two bytes are required: length and type */
if (len < 2) {
pr_vdebug("descriptor too short\n");
@@ -1860,6 +2277,8 @@ inv_length:
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
+ ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
return length;
}
@@ -1871,6 +2290,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (;;) {
int ret;
@@ -1898,6 +2319,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
data += ret;
++num;
}
+
+ ffs_log("exit: len %u", len);
}
static int __ffs_data_do_entity(enum ffs_entity_type type,
@@ -1909,6 +2332,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
ENTER();
+ ffs_log("enter: type %u", type);
+
switch (type) {
case FFS_DESCRIPTOR:
break;
@@ -1947,6 +2372,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
break;
}
+ ffs_log("exit");
+
return 0;
}
@@ -1956,6 +2383,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
u16 w_index = le16_to_cpu(desc->wIndex);
+ ffs_log("enter");
+
if (bcd_version != 1) {
pr_vdebug("unsupported os descriptors version: %d",
bcd_version);
@@ -1973,6 +2402,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
return -EINVAL;
}
+ ffs_log("exit: size of desc %zu", sizeof(*desc));
+
return sizeof(*desc);
}
@@ -1992,6 +2423,8 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u os desc type %d", len, type);
+
/* loop over all ext compat/ext prop descriptors */
while (feature_count--) {
ret = entity(type, h, data, len, priv);
@@ -2002,6 +2435,9 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
data += ret;
len -= ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2015,6 +2451,8 @@ static int __must_check ffs_do_os_descs(unsigned count,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (num = 0; num < count; ++num) {
int ret;
enum ffs_os_desc_type type;
@@ -2064,6 +2502,9 @@ static int __must_check ffs_do_os_descs(unsigned count,
len -= ret;
data += ret;
}
+
+ ffs_log("exit");
+
return _len - len;
}
@@ -2079,6 +2520,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
ENTER();
+ ffs_log("enter: len %u", len);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *d = data;
@@ -2086,11 +2529,17 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ d->Reserved1 != 1) {
+ pr_err("%s(): Invalid os_desct_ext_compat\n",
+ __func__);
return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
- if (d->Reserved2[i])
+ if (d->Reserved2[i]) {
+ pr_err("%s(): Invalid Reserved2 of ext_compat\n",
+ __func__);
return -EINVAL;
+ }
length = sizeof(struct usb_ext_compat_desc);
}
@@ -2134,6 +2583,9 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
return -EINVAL;
}
+
+ ffs_log("exit");
+
return length;
}
@@ -2147,6 +2599,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (get_unaligned_le32(data + 4) != len)
goto error;
@@ -2260,10 +2714,13 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ffs->ss_descs_count = counts[2];
ffs->ms_os_descs_count = os_descs_count;
+ ffs_log("exit");
+
return 0;
error:
kfree(_data);
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -2277,6 +2734,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (unlikely(len < 16 ||
get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
@@ -2393,12 +2852,14 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ffs->stringtabs = stringtabs;
ffs->raw_strings = _data;
+ ffs_log("exit");
return 0;
error_free:
kfree(stringtabs);
error:
kfree(_data);
+ ffs_log("exit: -EINVAL");
return -EINVAL;
}
@@ -2411,6 +2872,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type rem_type1, rem_type2 = type;
int neg = 0;
+ ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/*
* Abort any unhandled setup
*
@@ -2470,6 +2934,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
eventfd_signal(ffs->ffs_eventfd, 1);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static void ffs_event_add(struct ffs_data *ffs,
@@ -2504,6 +2971,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
int idx;
static const char *speed_names[] = { "full", "high", "super" };
+ ffs_log("enter");
+
if (type != FFS_DESCRIPTOR)
return 0;
@@ -2579,6 +3048,8 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
}
ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+ ffs_log("exit");
+
return 0;
}
@@ -2590,6 +3061,8 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
unsigned idx;
u8 newValue;
+ ffs_log("enter: type %d", type);
+
switch (type) {
default:
case FFS_DESCRIPTOR:
@@ -2634,6 +3107,9 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
pr_vdebug("%02x -> %02x\n", *valuep, newValue);
*valuep = newValue;
+
+ ffs_log("exit: newValue %d", newValue);
+
return 0;
}
@@ -2644,6 +3120,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
struct ffs_function *func = priv;
u8 length = 0;
+ ffs_log("enter: type %d", type);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *desc = data;
@@ -2713,6 +3191,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
}
+ ffs_log("exit");
+
return length;
}
@@ -2726,6 +3206,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
/*
* Legacy gadget triggers binding in functionfs_ready_callback,
* which already uses locking; taking the same lock here would
@@ -2760,6 +3242,8 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
ffs_opts->refcnt++;
func->function.strings = func->ffs->stringtabs;
+ ffs_log("exit");
+
return ffs_opts;
}
@@ -2803,6 +3287,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
/* Has descriptors only for speeds gadget does not support */
if (unlikely(!(full | high | super)))
return -ENOTSUPP;
@@ -2894,7 +3381,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
- if (c->cdev->use_os_string)
+ if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
@@ -2905,22 +3392,29 @@ static int _ffs_func_bind(struct usb_configuration *c,
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
- vla_ptr(vlabuf, d, raw_descs) +
- fs_len + hs_len + ss_len,
- d_raw_descs__sz - fs_len - hs_len - ss_len,
- __ffs_func_bind_do_os_desc, func);
- if (unlikely(ret < 0))
- goto error;
+ ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) +
+ fs_len + hs_len + ss_len,
+ d_raw_descs__sz - fs_len - hs_len -
+ ss_len,
+ __ffs_func_bind_do_os_desc, func);
+ if (unlikely(ret < 0))
+ goto error;
+ }
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
/* And we're done */
ffs_event_add(ffs, FUNCTIONFS_BIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return 0;
error:
/* XXX Do we need to release all claimed endpoints here? */
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -2931,6 +3425,8 @@ static int ffs_func_bind(struct usb_configuration *c,
struct ffs_function *func = ffs_func_from_usb(f);
int ret;
+ ffs_log("enter");
+
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
@@ -2938,6 +3434,8 @@ static int ffs_func_bind(struct usb_configuration *c,
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -2948,7 +3446,12 @@ static void ffs_reset_work(struct work_struct *work)
{
struct ffs_data *ffs = container_of(work,
struct ffs_data, reset_work);
+
+ ffs_log("enter");
+
ffs_data_reset(ffs);
+
+ ffs_log("exit");
}
static int ffs_func_set_alt(struct usb_function *f,
@@ -2958,14 +3461,20 @@ static int ffs_func_set_alt(struct usb_function *f,
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
+ ffs_log("enter");
+
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (unlikely(intf < 0))
return intf;
}
- if (ffs->func)
+ if (ffs->func) {
ffs_func_eps_disable(ffs->func);
+ ffs->func = NULL;
+ /* matching put to allow LPM on disconnect */
+ usb_gadget_autopm_put_async(ffs->gadget);
+ }
if (ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
@@ -2985,14 +3494,24 @@ static int ffs_func_set_alt(struct usb_function *f,
ffs->func = func;
ret = ffs_func_eps_enable(func);
- if (likely(ret >= 0))
+ if (likely(ret >= 0)) {
ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+ /* Disable USB LPM later on bus_suspend */
+ usb_gadget_autopm_get_async(ffs->gadget);
+ }
+
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
static void ffs_func_disable(struct usb_function *f)
{
+ ffs_log("enter");
+
ffs_func_set_alt(f, 0, (unsigned)-1);
+
+ ffs_log("exit");
}
static int ffs_func_setup(struct usb_function *f,
@@ -3005,6 +3524,8 @@ static int ffs_func_setup(struct usb_function *f,
ENTER();
+ ffs_log("enter");
+
pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
@@ -3048,19 +3569,31 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+ ffs_log("exit");
+
return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static void ffs_func_suspend(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+
+ ffs_log("exit");
}
static void ffs_func_resume(struct usb_function *f)
{
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+
+ ffs_log("exit");
}
@@ -3077,11 +3610,15 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
short *nums = func->interfaces_nums;
unsigned count = func->ffs->interfaces_count;
+ ffs_log("enter");
+
for (; count; --count, ++nums) {
if (*nums >= 0 && *nums == intf)
return nums - func->interfaces_nums;
}
+ ffs_log("exit");
+
return -EDOM;
}
@@ -3094,6 +3631,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
list_for_each_entry(dev, &ffs_devices, entry) {
if (!dev->name || !name)
continue;
@@ -3101,6 +3640,8 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3111,12 +3652,16 @@ static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
if (list_is_singular(&ffs_devices)) {
dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
if (dev->single)
return dev;
}
+ ffs_log("exit");
+
return NULL;
}
@@ -3127,11 +3672,17 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
+ ffs_log("enter");
+
dev = _ffs_get_single_dev();
if (dev)
return dev;
- return _ffs_do_find_dev(name);
+ dev = _ffs_do_find_dev(name);
+
+ ffs_log("exit");
+
+ return dev;
}
/* Configfs support *********************************************************/
@@ -3161,25 +3712,146 @@ static struct config_item_type ffs_func_type = {
/* Function registration interface ******************************************/
-static void ffs_free_inst(struct usb_function_instance *f)
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst)
{
- struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
+
+	list_for_each_entry(inst_status, &inst_list, list) {
+		/* require a full-name match, not just a prefix match,
+		 * so e.g. looking up "ffs" cannot return "ffs1"
+		 */
+		if (!strcasecmp(inst_status->inst_name, inst_name))
+			return inst_status;
+	}
+
+ if (!create_inst)
+ return ERR_PTR(-ENODEV);
+
+ inst_status = kzalloc(sizeof(struct ffs_inst_status),
+ GFP_KERNEL);
+ if (!inst_status)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&inst_status->ffs_lock);
+	snprintf(inst_status->inst_name, INST_NAME_SIZE, "%s", inst_name);
+ list_add_tail(&inst_status->list, &inst_list);
+
+ return inst_status;
+}
+
+static int ffs_inst_exist_check(const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ mutex_unlock(&inst_status->ffs_lock);
+ pr_err_ratelimited(
+ "%s: f_fs instance (%s) has been freed already.\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_unlock(&inst_status->ffs_lock);
+
+ return 0;
+}
+
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return;
+ }
+
+ inst_status->opts = NULL;
- opts = to_f_fs_opts(f);
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
+static void ffs_inst_clean_delay(const char *inst_name)
+{
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find (%s) instance\n",
+ __func__, inst_name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ if (inst_status->opts) {
+ ffs_inst_clean(inst_status->opts, inst_name);
+ pr_err_ratelimited("%s: Delayed free memory\n",
+ __func__);
+ }
+ mutex_unlock(&inst_status->ffs_lock);
+ return;
+ }
+
+ mutex_unlock(&inst_status->ffs_lock);
+}
+
+static void ffs_free_inst(struct usb_function_instance *f)
+{
+ struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
+
+ opts = to_f_fs_opts(f);
+
+ inst_status = name_to_inst_status(opts->dev->name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find (%s) instance\n",
+ opts->dev->name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+ if (opts->dev->ffs_data
+ && atomic_read(&opts->dev->ffs_data->opened)) {
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+ ffs_log("Dev is open, free mem when dev (%s) close\n",
+ opts->dev->name);
+ return;
+ }
+
+ ffs_inst_clean(opts, opts->dev->name);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+}
+
#define MAX_INST_NAME_LEN 40
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- struct f_fs_opts *opts;
+ struct f_fs_opts *opts, *opts_prev;
+ struct ffs_data *ffs_data_tmp;
char *ptr;
const char *tmp;
int name_len, ret;
+ struct ffs_inst_status *inst_status;
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
@@ -3189,6 +3861,23 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
if (!ptr)
return -ENOMEM;
+	inst_status = name_to_inst_status(ptr, true);
+	if (IS_ERR(inst_status)) {
+		ffs_log("failed to create status struct for (%s) instance\n",
+				ptr);
+		kfree(ptr);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst_status->ffs_lock);
+	opts_prev = inst_status->opts;
+	if (opts_prev) {
+		mutex_unlock(&inst_status->ffs_lock);
+		ffs_log("instance (%s): prev inst do not freed yet\n",
+				inst_status->inst_name);
+		kfree(ptr);
+		return -EBUSY;
+	}
+	mutex_unlock(&inst_status->ffs_lock);
+
opts = to_f_fs_opts(fi);
tmp = NULL;
@@ -3203,10 +3892,28 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
}
opts->dev->name_allocated = true;
+ /*
+ * If ffs instance is freed and created once, new allocated
+ * opts->dev need to initialize opts->dev->ffs_data, and
+ * ffs_private_data also need to update new allocated opts->dev
+ * address.
+ */
+ ffs_data_tmp = inst_status->ffs_data;
+ if (ffs_data_tmp)
+ opts->dev->ffs_data = ffs_data_tmp;
+
+ if (opts->dev->ffs_data)
+ opts->dev->ffs_data->private_data = opts->dev;
+
ffs_dev_unlock();
kfree(tmp);
+ mutex_lock(&inst_status->ffs_lock);
+ inst_status->inst_exist = true;
+ inst_status->opts = opts;
+ mutex_unlock(&inst_status->ffs_lock);
+
return 0;
}
@@ -3253,6 +3960,10 @@ static void ffs_func_unbind(struct usb_configuration *c,
unsigned long flags;
ENTER();
+
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (ffs->func == func) {
ffs_func_eps_disable(func);
ffs->func = NULL;
@@ -3267,6 +3978,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
+ ep->ep = NULL;
++ep;
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
@@ -3282,6 +3994,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3344,12 +4059,16 @@ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
+ ffs_log("enter");
+
existing = _ffs_do_find_dev(name);
if (existing)
return -EBUSY;
dev->name = name;
+ ffs_log("exit");
+
return 0;
}
@@ -3360,10 +4079,14 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
int ret;
+ ffs_log("enter");
+
ffs_dev_lock();
ret = _ffs_name_dev(dev, name);
ffs_dev_unlock();
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
@@ -3372,6 +4095,8 @@ int ffs_single_dev(struct ffs_dev *dev)
{
int ret;
+ ffs_log("enter");
+
ret = 0;
ffs_dev_lock();
@@ -3381,6 +4106,9 @@ int ffs_single_dev(struct ffs_dev *dev)
dev->single = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3390,12 +4118,17 @@ EXPORT_SYMBOL_GPL(ffs_single_dev);
*/
static void _ffs_free_dev(struct ffs_dev *dev)
{
+
+ ffs_log("enter");
+
list_del(&dev->entry);
if (dev->name_allocated)
kfree(dev->name);
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
+
+ ffs_log("exit");
}
static void *ffs_acquire_dev(const char *dev_name)
@@ -3403,6 +4136,9 @@ static void *ffs_acquire_dev(const char *dev_name)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
@@ -3417,6 +4153,9 @@ static void *ffs_acquire_dev(const char *dev_name)
ffs_dev->mounted = true;
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ffs_dev;
}
@@ -3425,6 +4164,9 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
struct ffs_dev *ffs_dev;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
@@ -3436,6 +4178,8 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
}
ffs_dev_unlock();
+
+ ffs_log("exit");
}
static int ffs_ready(struct ffs_data *ffs)
@@ -3444,6 +4188,9 @@ static int ffs_ready(struct ffs_data *ffs)
int ret = 0;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
@@ -3468,6 +4215,9 @@ static int ffs_ready(struct ffs_data *ffs)
set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
+
+ ffs_log("exit");
+
return ret;
}
@@ -3478,11 +4228,16 @@ static void ffs_closed(struct ffs_data *ffs)
struct config_item *ci;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
- if (!ffs_obj)
+ if (!ffs_obj) {
+ ffs_dev_unlock();
goto done;
+ }
ffs_obj->desc_ready = false;
@@ -3490,23 +4245,30 @@ static void ffs_closed(struct ffs_data *ffs)
ffs_obj->ffs_closed_callback)
ffs_obj->ffs_closed_callback(ffs);
- if (ffs_obj->opts)
+ if (ffs_obj->opts) {
opts = ffs_obj->opts;
- else
+ } else {
+ ffs_dev_unlock();
goto done;
+ }
+ smp_mb__before_atomic();
if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
- || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) {
+ ffs_dev_unlock();
goto done;
+ }
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
- if (test_bit(FFS_FL_BOUND, &ffs->flags))
+ if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
unregister_gadget_item(ci);
+ ffs_log("unreg gadget done");
+ }
return;
done:
- ffs_dev_unlock();
+ ffs_log("exit");
}
/* Misc helper functions ****************************************************/
@@ -3541,5 +4303,39 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
}
DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
+
+static int __init ffs_init(void)
+{
+ ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0);
+ if (IS_ERR_OR_NULL(ffs_ipc_log))
+ ffs_ipc_log = NULL;
+
+ return 0;
+}
+module_init(ffs_init);
+
+static void __exit ffs_exit(void)
+{
+ struct ffs_inst_status *inst_status, *inst_status_tmp = NULL;
+
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+ inst_status_tmp = inst_status;
+ }
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+
+ if (ffs_ipc_log) {
+ ipc_log_context_destroy(ffs_ipc_log);
+ ffs_ipc_log = NULL;
+ }
+}
+module_exit(ffs_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
new file mode 100644
index 000000000000..266d19049986
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -0,0 +1,3302 @@
+/* Copyright (c) 2015-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "f_gsi.h"
+#include "rndis.h"
+#include "debug.h"
+
+static unsigned int gsi_in_aggr_size;
+module_param(gsi_in_aggr_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gsi_in_aggr_size,
+ "Aggr size of bus transfer to host");
+
+static unsigned int gsi_out_aggr_size;
+module_param(gsi_out_aggr_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gsi_out_aggr_size,
+ "Aggr size of bus transfer to device");
+
+static unsigned int num_in_bufs = GSI_NUM_IN_BUFFERS;
+module_param(num_in_bufs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_in_bufs,
+ "Number of IN buffers");
+
+static unsigned int num_out_bufs = GSI_NUM_OUT_BUFFERS;
+module_param(num_out_bufs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_out_bufs,
+ "Number of OUT buffers");
+
+static bool qti_packet_debug;
+module_param(qti_packet_debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
+
+static struct workqueue_struct *ipa_usb_wq;
+
+static struct gsi_inst_status {
+ struct mutex gsi_lock;
+ bool inst_exist;
+ struct gsi_opts *opts;
+} inst_status[IPA_USB_MAX_TETH_PROT_SIZE];
+
+/* Deregister misc device and free instance structures */
+static void gsi_inst_clean(struct gsi_opts *opts);
+
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port);
+static void ipa_disconnect_handler(struct gsi_data_port *d_port);
+static int gsi_ctrl_send_notification(struct f_gsi *gsi);
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
+static void gsi_free_trb_buffer(struct f_gsi *gsi);
+static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned len, gfp_t flags);
+static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
+
+static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
+{
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ log_event_dbg("%s: remote_wakeup_allowed:%s", __func__,
+ remote_wakeup_allowed ? "true" : "false");
+ return remote_wakeup_allowed;
+}
+
+void post_event(struct gsi_data_port *port, u8 event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+
+	port->evt_q.tail++;
+	/* Check for wraparound and make room */
+	port->evt_q.tail = port->evt_q.tail % MAXQUEUELEN;
+
+	/*
+	 * Check for overflow; undo the tail advance so the queue is not
+	 * left with tail == head (which reads as empty) and all pending
+	 * events silently lost.
+	 */
+	if (port->evt_q.tail == port->evt_q.head) {
+		log_event_err("%s: event queue overflow error", __func__);
+		port->evt_q.tail = (port->evt_q.tail + MAXQUEUELEN - 1)
+							% MAXQUEUELEN;
+		spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+		return;
+	}
+ /* Add event to queue */
+ port->evt_q.event[port->evt_q.tail] = event;
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
+
+void post_event_to_evt_queue(struct gsi_data_port *port, u8 event)
+{
+ post_event(port, event);
+ queue_work(port->ipa_usb_wq, &port->usb_ipa_w);
+}
+
+u8 read_event(struct gsi_data_port *port)
+{
+ u8 event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ if (port->evt_q.head == port->evt_q.tail) {
+ log_event_dbg("%s: event queue empty", __func__);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+ return EVT_NONE;
+ }
+
+ port->evt_q.head++;
+ /* Check for wraparound and make room */
+ port->evt_q.head = port->evt_q.head % MAXQUEUELEN;
+
+ event = port->evt_q.event[port->evt_q.head];
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+ return event;
+}
+
+u8 peek_event(struct gsi_data_port *port)
+{
+ u8 event;
+ unsigned long flags;
+ u8 peek_index = 0;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ if (port->evt_q.head == port->evt_q.tail) {
+ log_event_dbg("%s: event queue empty", __func__);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+ return EVT_NONE;
+ }
+
+ peek_index = (port->evt_q.head + 1) % MAXQUEUELEN;
+ event = port->evt_q.event[peek_index];
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+
+ return event;
+}
+
+void reset_event_queue(struct gsi_data_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->evt_q.q_lock, flags);
+ port->evt_q.head = port->evt_q.tail = MAXQUEUELEN - 1;
+ memset(&port->evt_q.event[0], EVT_NONE, MAXQUEUELEN);
+ spin_unlock_irqrestore(&port->evt_q.q_lock, flags);
+}
+
+int gsi_wakeup_host(struct f_gsi *gsi)
+{
+
+ int ret;
+ struct usb_gadget *gadget;
+ struct usb_function *func;
+
+ func = &gsi->function;
+ gadget = gsi->gadget;
+
+ log_event_dbg("Entering %s", __func__);
+
+ /*
+ * In Super-Speed mode, remote wakeup is not allowed for suspended
+ * functions which have been disallowed by the host to issue Function
+ * Remote Wakeup.
+ * Note - We deviate here from the USB 3.0 spec and allow
+ * non-suspended functions to issue remote-wakeup even if they were not
+ * allowed to do so by the host. This is done in order to support non
+ * fully USB 3.0 compatible hosts.
+ */
+ if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended)) {
+ log_event_dbg("%s: Calling usb_func_wakeup", __func__);
+ ret = usb_func_wakeup(func);
+ } else {
+ log_event_dbg("%s: Calling usb_gadget_wakeup", __func__);
+ ret = usb_gadget_wakeup(gadget);
+ }
+
+ if ((ret == -EBUSY) || (ret == -EAGAIN))
+ log_event_dbg("RW delayed due to LPM exit.");
+ else if (ret)
+ log_event_err("wakeup failed. ret=%d.", ret);
+
+ return ret;
+}
+
+/*
+ * Callback for when when network interface is up
+ * and userspace is ready to answer DHCP requests, or remote wakeup
+ */
+int ipa_usb_notify_cb(enum ipa_usb_notify_event event,
+ void *driver_data)
+{
+ struct f_gsi *gsi = driver_data;
+ unsigned long flags;
+ struct gsi_ctrl_pkt *cpkt_notify_connect, *cpkt_notify_speed;
+
+ if (!gsi) {
+ log_event_err("%s: invalid driver data", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&gsi->d_port.lock, flags);
+
+ switch (event) {
+ case IPA_USB_DEVICE_READY:
+
+ if (gsi->d_port.net_ready_trigger) {
+ spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+ log_event_dbg("%s: Already triggered", __func__);
+ return 1;
+ }
+
+ log_event_err("%s: Set net_ready_trigger", __func__);
+ gsi->d_port.net_ready_trigger = true;
+
+ if (gsi->prot_id == IPA_USB_ECM) {
+ cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt_notify_connect)) {
+ spin_unlock_irqrestore(&gsi->d_port.lock,
+ flags);
+ log_event_dbg("%s: err cpkt_notify_connect\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cpkt_notify_connect->type = GSI_CTRL_NOTIFY_CONNECT;
+
+ cpkt_notify_speed = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+ if (IS_ERR(cpkt_notify_speed)) {
+ spin_unlock_irqrestore(&gsi->d_port.lock,
+ flags);
+ gsi_ctrl_pkt_free(cpkt_notify_connect);
+ log_event_dbg("%s: err cpkt_notify_speed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cpkt_notify_speed->type = GSI_CTRL_NOTIFY_SPEED;
+			/*
+			 * d_port.lock is already held with IRQs disabled and
+			 * 'flags' holds its saved state; reusing 'flags' for
+			 * the nested irqsave would clobber it and the outer
+			 * irqrestore would then restore the wrong IRQ state.
+			 * A plain spin_lock is sufficient here.
+			 */
+			spin_lock(&gsi->c_port.lock);
+			list_add_tail(&cpkt_notify_connect->list,
+					&gsi->c_port.cpkt_resp_q);
+			list_add_tail(&cpkt_notify_speed->list,
+					&gsi->c_port.cpkt_resp_q);
+			spin_unlock(&gsi->c_port.lock);
+ gsi_ctrl_send_notification(gsi);
+ }
+
+ /* Do not post EVT_CONNECTED for RNDIS.
+ Data path for RNDIS is enabled on EVT_HOST_READY.
+ */
+ if (gsi->prot_id != IPA_USB_RNDIS) {
+ post_event(&gsi->d_port, EVT_CONNECTED);
+ queue_work(gsi->d_port.ipa_usb_wq,
+ &gsi->d_port.usb_ipa_w);
+ }
+ break;
+
+ case IPA_USB_REMOTE_WAKEUP:
+ gsi_wakeup_host(gsi);
+ break;
+
+ case IPA_USB_SUSPEND_COMPLETED:
+ post_event(&gsi->d_port, EVT_IPA_SUSPEND);
+ queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+ break;
+ }
+
+ spin_unlock_irqrestore(&gsi->d_port.lock, flags);
+ return 1;
+}
+
+static int ipa_connect_channels(struct gsi_data_port *d_port)
+{
+ int ret;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+ struct ipa_usb_xdci_chan_params *in_params =
+ &d_port->ipa_in_channel_params;
+ struct ipa_usb_xdci_chan_params *out_params =
+ &d_port->ipa_out_channel_params;
+ struct ipa_usb_xdci_connect_params *conn_params =
+ &d_port->ipa_conn_pms;
+ struct usb_gadget *gadget = gsi->gadget;
+ struct gsi_channel_info gsi_channel_info;
+ struct ipa_req_chan_out_params ipa_in_channel_out_params;
+ struct ipa_req_chan_out_params ipa_out_channel_out_params;
+
+ log_event_dbg("%s: USB GSI IN OPS", __func__);
+ usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+ GSI_EP_OP_PREPARE_TRBS);
+ usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
+ GSI_EP_OP_STARTXFER);
+ d_port->in_xfer_rsc_index = usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_GET_XFER_IDX);
+
+ memset(in_params, 0x0, sizeof(*in_params));
+ gsi_channel_info.ch_req = &d_port->in_request;
+ usb_gsi_ep_op(d_port->in_ep, (void *)&gsi_channel_info,
+ GSI_EP_OP_GET_CH_INFO);
+
+ log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
+ in_params->client =
+ (gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+ IPA_CLIENT_USB_DPL_CONS;
+ in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+ in_params->teth_prot = gsi->prot_id;
+ in_params->gevntcount_low_addr =
+ gsi_channel_info.gevntcount_low_addr;
+ in_params->gevntcount_hi_addr =
+ gsi_channel_info.gevntcount_hi_addr;
+ in_params->dir = GSI_CHAN_DIR_FROM_GSI;
+ in_params->xfer_ring_len = gsi_channel_info.xfer_ring_len;
+ in_params->xfer_ring_base_addr = gsi_channel_info.xfer_ring_base_addr;
+ in_params->xfer_scratch.last_trb_addr_iova =
+ gsi_channel_info.last_trb_addr;
+ in_params->xfer_ring_base_addr = in_params->xfer_ring_base_addr_iova =
+ gsi_channel_info.xfer_ring_base_addr;
+ in_params->data_buff_base_len = d_port->in_request.buf_len *
+ d_port->in_request.num_bufs;
+ in_params->data_buff_base_addr = in_params->data_buff_base_addr_iova =
+ d_port->in_request.dma;
+ in_params->xfer_scratch.const_buffer_size =
+ gsi_channel_info.const_buffer_size;
+ in_params->xfer_scratch.depcmd_low_addr =
+ gsi_channel_info.depcmd_low_addr;
+ in_params->xfer_scratch.depcmd_hi_addr =
+ gsi_channel_info.depcmd_hi_addr;
+
+ if (d_port->out_ep) {
+ log_event_dbg("%s: USB GSI OUT OPS", __func__);
+ usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+ GSI_EP_OP_PREPARE_TRBS);
+ usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
+ GSI_EP_OP_STARTXFER);
+ d_port->out_xfer_rsc_index =
+ usb_gsi_ep_op(d_port->out_ep,
+ NULL, GSI_EP_OP_GET_XFER_IDX);
+ memset(out_params, 0x0, sizeof(*out_params));
+ gsi_channel_info.ch_req = &d_port->out_request;
+ usb_gsi_ep_op(d_port->out_ep, (void *)&gsi_channel_info,
+ GSI_EP_OP_GET_CH_INFO);
+ log_event_dbg("%s: USB GSI OUT OPS Completed", __func__);
+ out_params->client = IPA_CLIENT_USB_PROD;
+ out_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
+ out_params->teth_prot = gsi->prot_id;
+ out_params->gevntcount_low_addr =
+ gsi_channel_info.gevntcount_low_addr;
+ out_params->gevntcount_hi_addr =
+ gsi_channel_info.gevntcount_hi_addr;
+ out_params->dir = GSI_CHAN_DIR_TO_GSI;
+ out_params->xfer_ring_len =
+ gsi_channel_info.xfer_ring_len;
+ out_params->xfer_ring_base_addr =
+ out_params->xfer_ring_base_addr_iova =
+ gsi_channel_info.xfer_ring_base_addr;
+ out_params->data_buff_base_len = d_port->out_request.buf_len *
+ d_port->out_request.num_bufs;
+ out_params->data_buff_base_addr =
+ out_params->data_buff_base_addr_iova =
+ d_port->out_request.dma;
+ out_params->xfer_scratch.last_trb_addr_iova =
+ gsi_channel_info.last_trb_addr;
+ out_params->xfer_scratch.const_buffer_size =
+ gsi_channel_info.const_buffer_size;
+ out_params->xfer_scratch.depcmd_low_addr =
+ gsi_channel_info.depcmd_low_addr;
+ out_params->xfer_scratch.depcmd_hi_addr =
+ gsi_channel_info.depcmd_hi_addr;
+ }
+
+ /* Populate connection params */
+ conn_params->max_pkt_size =
+ (gadget->speed == USB_SPEED_SUPER) ?
+ IPA_USB_SUPER_SPEED_1024B : IPA_USB_HIGH_SPEED_512B;
+ conn_params->ipa_to_usb_xferrscidx =
+ d_port->in_xfer_rsc_index;
+ conn_params->usb_to_ipa_xferrscidx =
+ d_port->out_xfer_rsc_index;
+ conn_params->usb_to_ipa_xferrscidx_valid =
+ (gsi->prot_id != IPA_USB_DIAG) ? true : false;
+ conn_params->ipa_to_usb_xferrscidx_valid = true;
+ conn_params->teth_prot = gsi->prot_id;
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
+ if (gsi_out_aggr_size)
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+ = gsi_out_aggr_size;
+ else
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_dev
+ = d_port->out_aggr_size;
+ if (gsi_in_aggr_size)
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+ = gsi_in_aggr_size;
+ else
+ conn_params->teth_prot_params.max_xfer_size_bytes_to_host
+ = d_port->in_aggr_size;
+ conn_params->teth_prot_params.max_packet_number_to_dev =
+ DEFAULT_MAX_PKT_PER_XFER;
+ conn_params->max_supported_bandwidth_mbps =
+ (gadget->speed == USB_SPEED_SUPER) ? 3600 : 400;
+
+ memset(&ipa_in_channel_out_params, 0x0,
+ sizeof(ipa_in_channel_out_params));
+ memset(&ipa_out_channel_out_params, 0x0,
+ sizeof(ipa_out_channel_out_params));
+
+ log_event_dbg("%s: Calling xdci_connect", __func__);
+ ret = ipa_usb_xdci_connect(out_params, in_params,
+ &ipa_out_channel_out_params,
+ &ipa_in_channel_out_params,
+ conn_params);
+ if (ret) {
+ log_event_err("%s: IPA connect failed %d", __func__, ret);
+ return ret;
+ }
+ log_event_dbg("%s: xdci_connect done", __func__);
+
+ log_event_dbg("%s: IN CH HDL %x", __func__,
+ ipa_in_channel_out_params.clnt_hdl);
+ log_event_dbg("%s: IN CH DBL addr %x", __func__,
+ ipa_in_channel_out_params.db_reg_phs_addr_lsb);
+
+ log_event_dbg("%s: OUT CH HDL %x", __func__,
+ ipa_out_channel_out_params.clnt_hdl);
+ log_event_dbg("%s: OUT CH DBL addr %x", __func__,
+ ipa_out_channel_out_params.db_reg_phs_addr_lsb);
+
+ d_port->in_channel_handle = ipa_in_channel_out_params.clnt_hdl;
+ d_port->in_db_reg_phs_addr_lsb =
+ ipa_in_channel_out_params.db_reg_phs_addr_lsb;
+ d_port->in_db_reg_phs_addr_msb =
+ ipa_in_channel_out_params.db_reg_phs_addr_msb;
+
+ if (gsi->prot_id != IPA_USB_DIAG) {
+ d_port->out_channel_handle =
+ ipa_out_channel_out_params.clnt_hdl;
+ d_port->out_db_reg_phs_addr_lsb =
+ ipa_out_channel_out_params.db_reg_phs_addr_lsb;
+ d_port->out_db_reg_phs_addr_msb =
+ ipa_out_channel_out_params.db_reg_phs_addr_msb;
+ }
+ return ret;
+}
+
+/*
+ * ipa_data_path_enable - arm the GSI data path after IPA channels connect.
+ *
+ * Stores the IN (and, when present, OUT) doorbell register address in the
+ * gadget endpoint, enables GSI mode on the OUT endpoint, unblocks the IN
+ * doorbell, rings the IN doorbell with the full 64-bit register address,
+ * and resubmits pending OUT transfers. Order matters: the doorbell must
+ * be unblocked before it is rung.
+ */
+static void ipa_data_path_enable(struct gsi_data_port *d_port)
+{
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	struct usb_gsi_request req;
+	u64 dbl_register_addr;
+	bool block_db = false;
+
+
+	log_event_dbg("in_db_reg_phs_addr_lsb = %x",
+			gsi->d_port.in_db_reg_phs_addr_lsb);
+	usb_gsi_ep_op(gsi->d_port.in_ep,
+			(void *)&gsi->d_port.in_db_reg_phs_addr_lsb,
+			GSI_EP_OP_STORE_DBL_INFO);
+
+	/* OUT endpoint is absent for IN-only protocols (e.g. DPL) */
+	if (gsi->d_port.out_ep) {
+		log_event_dbg("out_db_reg_phs_addr_lsb = %x",
+				gsi->d_port.out_db_reg_phs_addr_lsb);
+		usb_gsi_ep_op(gsi->d_port.out_ep,
+				(void *)&gsi->d_port.out_db_reg_phs_addr_lsb,
+				GSI_EP_OP_STORE_DBL_INFO);
+
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+				GSI_EP_OP_ENABLE_GSI);
+	}
+
+	/* Unblock doorbell to GSI */
+	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+			GSI_EP_OP_SET_CLR_BLOCK_DBL);
+
+	/* combine MSB/LSB halves into the 64-bit doorbell address */
+	dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
+	dbl_register_addr = dbl_register_addr << 32;
+	dbl_register_addr =
+		dbl_register_addr | gsi->d_port.in_db_reg_phs_addr_lsb;
+
+	/* use temp gsi request to pass 64 bit dbl reg addr and num_bufs */
+	req.buf_base_addr = &dbl_register_addr;
+
+	req.num_bufs = gsi->d_port.in_request.num_bufs;
+	usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_IN_DB);
+
+	if (gsi->d_port.out_ep) {
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+			GSI_EP_OP_UPDATEXFER);
+	}
+}
+
+/*
+ * ipa_disconnect_handler - quiesce the USB side of the data path.
+ *
+ * Blocks the IN doorbell first (so the controller cannot ring GSI while
+ * IPA clocks may be off), disables both endpoints, and clears the RNDIS
+ * flow-control trigger flag.
+ */
+static void ipa_disconnect_handler(struct gsi_data_port *d_port)
+{
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	bool block_db = true;
+
+	log_event_dbg("%s: EP Disable for data", __func__);
+
+	if (gsi->d_port.in_ep) {
+		/*
+		 * Block doorbell to GSI to avoid USB wrapper from
+		 * ringing doorbell in case IPA clocks are OFF.
+		 */
+		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+				GSI_EP_OP_SET_CLR_BLOCK_DBL);
+		usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
+	}
+
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
+
+	gsi->d_port.net_ready_trigger = false;
+}
+
+/*
+ * ipa_disconnect_work_handler - tear down the IPA side of the data path.
+ *
+ * Disconnects the xDCI channels, invalidates the cached channel handles
+ * (EP_LOOKUP tests them against -EINVAL), frees the TRB rings on both
+ * endpoints and then the per-TRB data buffers.
+ */
+static void ipa_disconnect_work_handler(struct gsi_data_port *d_port)
+{
+	int ret;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+
+	log_event_dbg("%s: Calling xdci_disconnect", __func__);
+
+	ret = ipa_usb_xdci_disconnect(gsi->d_port.out_channel_handle,
+			gsi->d_port.in_channel_handle, gsi->prot_id);
+	if (ret)
+		log_event_err("%s: IPA disconnect failed %d",
+				__func__, ret);
+
+	log_event_dbg("%s: xdci_disconnect done", __func__);
+
+	/* invalidate channel handles*/
+	gsi->d_port.in_channel_handle = -EINVAL;
+	gsi->d_port.out_channel_handle = -EINVAL;
+
+	usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+
+	/* free buffers allocated with each TRB */
+	gsi_free_trb_buffer(gsi);
+}
+
+/*
+ * ipa_suspend_work_handler - attempt to suspend the IPA xDCI channels.
+ *
+ * Return: 0 when suspended (sm_state set to STATE_SUSPENDED),
+ * -EINPROGRESS when IPA will complete asynchronously (sm_state set to
+ * STATE_SUSPEND_IN_PROGRESS), -EFAULT when suspend must be aborted (the
+ * IN doorbell is unblocked again and remote wakeup is signalled), or any
+ * other IPA error (logged, state unchanged).
+ */
+static int ipa_suspend_work_handler(struct gsi_data_port *d_port)
+{
+	int ret = 0;
+	bool block_db, f_suspend;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	struct usb_function *f = &gsi->function;
+
+	f_suspend = f->func_wakeup_allowed;
+	log_event_dbg("%s: f_suspend:%d", __func__, f_suspend);
+
+	/* abort if the controller says this endpoint cannot suspend now */
+	if (!usb_gsi_ep_op(gsi->d_port.in_ep, (void *) &f_suspend,
+				GSI_EP_OP_CHECK_FOR_SUSPEND)) {
+		ret = -EFAULT;
+		block_db = false;
+		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+				GSI_EP_OP_SET_CLR_BLOCK_DBL);
+		goto done;
+	}
+
+	log_event_dbg("%s: Calling xdci_suspend", __func__);
+	ret = ipa_usb_xdci_suspend(gsi->d_port.out_channel_handle,
+				gsi->d_port.in_channel_handle, gsi->prot_id,
+				usb_gsi_remote_wakeup_allowed(f));
+	if (!ret) {
+		d_port->sm_state = STATE_SUSPENDED;
+		log_event_dbg("%s: STATE SUSPENDED", __func__);
+		goto done;
+	}
+
+	if (ret == -EFAULT) {
+		/* IPA refused: re-enable the doorbell and wake the host */
+		block_db = false;
+		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+					GSI_EP_OP_SET_CLR_BLOCK_DBL);
+		gsi_wakeup_host(gsi);
+	} else if (ret == -EINPROGRESS) {
+		d_port->sm_state = STATE_SUSPEND_IN_PROGRESS;
+	} else {
+		log_event_err("%s: Error %d for %d", __func__, ret,
+							gsi->prot_id);
+	}
+done:
+	log_event_dbg("%s: xdci_suspend ret %d", __func__, ret);
+	return ret;
+}
+
+/*
+ * ipa_resume_work_handler - resume the IPA xDCI channels after suspend.
+ *
+ * A resume failure is only logged; the IN doorbell is unconditionally
+ * unblocked afterwards so the data path can make progress.
+ */
+static void ipa_resume_work_handler(struct gsi_data_port *d_port)
+{
+	bool block_db;
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	int ret;
+
+	log_event_dbg("%s: Calling xdci_resume", __func__);
+
+	ret = ipa_usb_xdci_resume(gsi->d_port.out_channel_handle,
+					gsi->d_port.in_channel_handle,
+					gsi->prot_id);
+	if (ret)
+		log_event_dbg("%s: xdci_resume ret %d", __func__, ret);
+
+	log_event_dbg("%s: xdci_resume done", __func__);
+
+	block_db = false;
+	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+			GSI_EP_OP_SET_CLR_BLOCK_DBL);
+}
+
+/*
+ * ipa_work_handler - IPA/USB data-port state machine.
+ *
+ * Runs on d_port->ipa_usb_wq. Consumes one event from the port's event
+ * queue, advances sm_state accordingly, and requeues itself when more
+ * events are pending. Runtime-PM usage counts are taken on connect /
+ * resume and dropped on disconnect / suspend so the gadget's parent
+ * controller cannot runtime-suspend while IPA channels are active; the
+ * numbered get/put debug prints trace those pairings.
+ */
+static void ipa_work_handler(struct work_struct *w)
+{
+	struct gsi_data_port *d_port = container_of(w, struct gsi_data_port,
+						  usb_ipa_w);
+	struct f_gsi *gsi = d_port_to_gsi(d_port);
+	u8 event;
+	int ret = 0;
+	struct usb_gadget *gadget = gsi->gadget;
+	struct device *dev;
+	struct device *gad_dev;
+	bool block_db;
+
+	event = read_event(d_port);
+
+	log_event_dbg("%s: event = %x sm_state %x", __func__,
+			event, d_port->sm_state);
+
+	if (gadget) {
+		dev = &gadget->dev;
+		/* NOTE(review): !dev is always false (address-of a member) */
+		if (!dev || !dev->parent) {
+			log_event_err("%s(): dev or dev->parent is NULL.\n",
+					__func__);
+			return;
+		}
+		gad_dev = dev->parent;
+	} else {
+		log_event_err("%s(): gadget is NULL.\n", __func__);
+		return;
+	}
+
+	/* NOTE(review): redundant — gsi already initialized above */
+	gsi = d_port_to_gsi(d_port);
+
+	switch (d_port->sm_state) {
+	case STATE_UNINITIALIZED:
+		break;
+	case STATE_INITIALIZED:
+		if (event == EVT_CONNECT_IN_PROGRESS) {
+			/* hold the controller awake while channels connect */
+			usb_gadget_autopm_get(gadget);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			/* allocate buffers used with each TRB */
+			ret = gsi_alloc_trb_buffer(gsi);
+			if (ret) {
+				log_event_err("%s: gsi_alloc_trb_failed\n",
+							__func__);
+				break;
+			}
+			ipa_connect_channels(d_port);
+			d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+			log_event_dbg("%s: ST_INIT_EVT_CONN_IN_PROG",
+					__func__);
+		} else if (event == EVT_HOST_READY) {
+			/*
+			 * When in a composition such as RNDIS + ADB,
+			 * RNDIS host sends a GEN_CURRENT_PACKET_FILTER msg
+			 * to enable/disable flow control eg. during RNDIS
+			 * adaptor disable/enable from device manager.
+			 * In the case of the msg to disable flow control,
+			 * connect IPA channels and enable data path.
+			 * EVT_HOST_READY is posted to the state machine
+			 * in the handler for this msg.
+			 */
+			usb_gadget_autopm_get(gadget);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			/* allocate buffers used with each TRB */
+			ret = gsi_alloc_trb_buffer(gsi);
+			if (ret) {
+				log_event_err("%s: gsi_alloc_trb_failed\n",
+							__func__);
+				break;
+			}
+
+			ipa_connect_channels(d_port);
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_INIT_EVT_HOST_READY", __func__);
+		}
+		break;
+	case STATE_CONNECT_IN_PROGRESS:
+		if (event == EVT_HOST_READY) {
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY",
+					 __func__);
+		} else if (event == EVT_CONNECTED) {
+			ipa_data_path_enable(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d",
+					__func__, __LINE__);
+		} else if (event == EVT_SUSPEND) {
+			/* coalesce SUSPEND immediately followed by DISCONNECT */
+			if (peek_event(d_port) == EVT_DISCONNECTED) {
+				read_event(d_port);
+				ipa_disconnect_work_handler(d_port);
+				d_port->sm_state = STATE_INITIALIZED;
+				usb_gadget_autopm_put_async(gadget);
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS_DIS",
+						__func__);
+				log_event_dbg("%s: put_async1 = %d", __func__,
+						atomic_read(
+						&gad_dev->power.usage_count));
+				break;
+			}
+			ret = ipa_suspend_work_handler(d_port);
+			if (!ret) {
+				usb_gadget_autopm_put_async(gadget);
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUS",
+						__func__);
+				log_event_dbg("%s: put_async2 = %d", __func__,
+						atomic_read(
+						&gad_dev->power.usage_count));
+			}
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(gadget);
+			log_event_dbg("%s: ST_CON_IN_PROG_EVT_DIS",
+					__func__);
+			log_event_dbg("%s: put_async3 = %d",
+					__func__, atomic_read(
+					&gad_dev->power.usage_count));
+		}
+		break;
+	case STATE_CONNECTED:
+		if (event == EVT_DISCONNECTED || event == EVT_HOST_NRDY) {
+			/* NRDY immediately followed by READY is a no-op */
+			if (peek_event(d_port) == EVT_HOST_READY) {
+				read_event(d_port);
+				log_event_dbg("%s: NO_OP NRDY_RDY", __func__);
+				break;
+			}
+
+			if (event == EVT_HOST_NRDY) {
+				log_event_dbg("%s: ST_CON_HOST_NRDY\n",
+								__func__);
+				block_db = true;
+				/* stop USB ringing doorbell to GSI(OUT_EP) */
+				usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+						GSI_EP_OP_SET_CLR_BLOCK_DBL);
+				gsi_rndis_ipa_reset_trigger(d_port);
+				usb_gsi_ep_op(d_port->in_ep, NULL,
+						GSI_EP_OP_ENDXFER);
+				usb_gsi_ep_op(d_port->out_ep, NULL,
+						GSI_EP_OP_ENDXFER);
+			}
+
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(gadget);
+			log_event_dbg("%s: ST_CON_EVT_DIS", __func__);
+			log_event_dbg("%s: put_async4 = %d",
+					__func__, atomic_read(
+					&gad_dev->power.usage_count));
+		} else if (event == EVT_SUSPEND) {
+			if (peek_event(d_port) == EVT_DISCONNECTED) {
+				read_event(d_port);
+				ipa_disconnect_work_handler(d_port);
+				d_port->sm_state = STATE_INITIALIZED;
+				usb_gadget_autopm_put_async(gadget);
+				log_event_dbg("%s: ST_CON_EVT_SUS_DIS",
+						__func__);
+				log_event_dbg("%s: put_async5 = %d",
+						__func__, atomic_read(
+						&gad_dev->power.usage_count));
+				break;
+			}
+			ret = ipa_suspend_work_handler(d_port);
+			if (!ret) {
+				usb_gadget_autopm_put_async(gadget);
+				log_event_dbg("%s: ST_CON_EVT_SUS",
+						__func__);
+				log_event_dbg("%s: put_async6 = %d",
+						__func__, atomic_read(
+						&gad_dev->power.usage_count));
+			}
+		} else if (event == EVT_CONNECTED) {
+			d_port->sm_state = STATE_CONNECTED;
+			log_event_dbg("%s: ST_CON_EVT_CON", __func__);
+		}
+		break;
+	case STATE_DISCONNECTED:
+		if (event == EVT_CONNECT_IN_PROGRESS) {
+			ipa_connect_channels(d_port);
+			d_port->sm_state = STATE_CONNECT_IN_PROGRESS;
+			log_event_dbg("%s: ST_DIS_EVT_CON_IN_PROG", __func__);
+		} else if (event == EVT_UNINITIALIZED) {
+			d_port->sm_state = STATE_UNINITIALIZED;
+			log_event_dbg("%s: ST_DIS_EVT_UNINIT", __func__);
+		}
+		break;
+	case STATE_SUSPEND_IN_PROGRESS:
+		if (event == EVT_IPA_SUSPEND) {
+			d_port->sm_state = STATE_SUSPENDED;
+			usb_gadget_autopm_put_async(gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_IPA_SUS",
+					__func__);
+			log_event_dbg("%s: put_async6 = %d",
+					__func__, atomic_read(
+					&gad_dev->power.usage_count));
+		} else if (event == EVT_RESUMED) {
+			ipa_resume_work_handler(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+			/*
+			 * Increment usage count here to disallow gadget
+			 * parent suspend. This counter will decrement
+			 * after IPA disconnect is done in disconnect work
+			 * (due to cable disconnect) or in suspended state.
+			 */
+			usb_gadget_autopm_get_noresume(gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_RES", __func__);
+			log_event_dbg("%s: get_nores1 = %d", __func__,
+					atomic_read(
+					&gad_dev->power.usage_count));
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			usb_gadget_autopm_put_async(gadget);
+			log_event_dbg("%s: ST_SUS_IN_PROG_EVT_DIS", __func__);
+			log_event_dbg("%s: put_async7 = %d", __func__,
+					atomic_read(
+					&gad_dev->power.usage_count));
+		}
+		break;
+
+	case STATE_SUSPENDED:
+		if (event == EVT_RESUMED) {
+			usb_gadget_autopm_get(gadget);
+			log_event_dbg("%s: ST_SUS_EVT_RES", __func__);
+			log_event_dbg("%s: get = %d", __func__,
+				atomic_read(&gad_dev->power.usage_count));
+			ipa_resume_work_handler(d_port);
+			d_port->sm_state = STATE_CONNECTED;
+		} else if (event == EVT_DISCONNECTED) {
+			ipa_disconnect_work_handler(d_port);
+			d_port->sm_state = STATE_INITIALIZED;
+			log_event_dbg("%s: ST_SUS_EVT_DIS", __func__);
+		}
+		break;
+	default:
+		log_event_dbg("%s: Invalid state to SM", __func__);
+	}
+
+	/* self-requeue while events remain queued */
+	if (peek_event(d_port) != EVT_NONE) {
+		log_event_dbg("%s: New events to process", __func__);
+		queue_work(d_port->ipa_usb_wq, &d_port->usb_ipa_w);
+	}
+}
+
+/*
+ * Allocate a control packet plus a payload buffer of @len bytes.
+ * Returns the packet or ERR_PTR(-ENOMEM); free with gsi_ctrl_pkt_free().
+ */
+static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned len, gfp_t flags)
+{
+	struct gsi_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(*pkt), flags);
+	if (!pkt)
+		goto err_pkt;
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf)
+		goto err_buf;
+
+	pkt->len = len;
+	return pkt;
+
+err_buf:
+	kfree(pkt);
+err_pkt:
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Release a control packet and its payload buffer; NULL is a no-op. */
+static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt)
+{
+	if (!pkt)
+		return;
+
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/*
+ * Drain and free queued control packets under the control-port lock.
+ * The response queue is always flushed; the request queue is flushed
+ * only when @skip_req_q is false.
+ */
+static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q)
+{
+	struct gsi_ctrl_pkt *cpkt, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsi->c_port.lock, flags);
+
+	if (!skip_req_q) {
+		list_for_each_entry_safe(cpkt, n, &gsi->c_port.cpkt_req_q,
+					 list) {
+			list_del(&cpkt->list);
+			gsi_ctrl_pkt_free(cpkt);
+		}
+	}
+
+	list_for_each_entry_safe(cpkt, n, &gsi->c_port.cpkt_resp_q, list) {
+		list_del(&cpkt->list);
+		gsi_ctrl_pkt_free(cpkt);
+	}
+
+	spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+}
+
+/*
+ * gsi_ctrl_send_cpkt_tomodem - queue a host control packet for userspace.
+ *
+ * Copies @len bytes from @buf into a new packet on cpkt_req_q and wakes
+ * readers blocked in gsi_ctrl_dev_read(). Packets are dropped (and
+ * counted) while the control device is not open.
+ *
+ * Return: 0 on success, -ENODEV if the port is closed, -ENOMEM on
+ * allocation failure.
+ */
+static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len)
+{
+	unsigned long flags;
+	struct gsi_ctrl_port *c_port = &gsi->c_port;
+	struct gsi_ctrl_pkt *cpkt;
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	/* drop cpkt if port is not open */
+	if (!gsi->c_port.is_open) {
+		log_event_dbg("%s: ctrl device %s is not open",
+			   __func__, gsi->c_port.name);
+		c_port->cpkt_drop_cnt++;
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		return -ENODEV;
+	}
+
+	/* GFP_ATOMIC: allocated while holding the port spinlock */
+	cpkt = gsi_ctrl_pkt_alloc(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		log_event_err("%s: Reset func pkt allocation failed", __func__);
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		return -ENOMEM;
+	}
+
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	list_add_tail(&cpkt->list, &c_port->cpkt_req_q);
+	c_port->host_to_modem++;
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	log_event_dbg("%s: Wake up read queue", __func__);
+	wake_up(&c_port->read_wq);
+
+	return 0;
+}
+
+/*
+ * gsi_ctrl_dev_open - open() handler for the rmnet/mbim/dpl misc device.
+ *
+ * Resolves the owning f_gsi from the miscdevice embedded in the port,
+ * then repoints fp->private_data at the protocol id so the remaining
+ * fops can survive the function instance being freed while the fd is
+ * still open. Only one opener is allowed at a time.
+ */
+static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp)
+{
+	struct gsi_ctrl_port *c_port = container_of(fp->private_data,
+						struct gsi_ctrl_port,
+						ctrl_device);
+	struct f_gsi *gsi;
+	struct gsi_inst_status *inst_cur;
+
+	/* NOTE(review): container_of of a valid pointer is never NULL */
+	if (!c_port) {
+		pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port);
+		return -ENODEV;
+	}
+
+	pr_devel_ratelimited("%s: open ctrl dev %s", __func__, c_port->name);
+
+	gsi = container_of(c_port, struct f_gsi, c_port);
+	inst_cur = &inst_status[gsi->prot_id];
+
+	mutex_lock(&inst_cur->gsi_lock);
+
+	/* later fops dereference private_data as a prot_id pointer */
+	fp->private_data = &gsi->prot_id;
+
+	if (!inst_cur->inst_exist) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: [prot_id = %d], GSI instance freed already\n",
+			__func__, gsi->prot_id);
+		return -ENODEV;
+	}
+
+	if (c_port->is_open) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		log_event_err("%s: Already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	c_port->is_open = true;
+
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	return 0;
+}
+
+/*
+ * gsi_ctrl_dev_release - release() handler for the control misc device.
+ *
+ * If the function instance was freed while this fd was open, the
+ * deferred instance cleanup happens here instead; otherwise the port is
+ * simply marked closed.
+ */
+static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
+{
+	enum ipa_usb_teth_prot prot_id =
+		*(enum ipa_usb_teth_prot *)(fp->private_data);
+	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+
+	mutex_lock(&inst_cur->gsi_lock);
+
+	if (unlikely(inst_cur->inst_exist == false)) {
+		if (inst_cur->opts) {
+			/* GSI instance clean up */
+			gsi_inst_clean(inst_cur->opts);
+			inst_cur->opts = NULL;
+		}
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: [prot_id = %d], Delayed free instance memory\n",
+			__func__, prot_id);
+		return -ENODEV;
+	}
+
+	inst_cur->opts->gsi->c_port.is_open = false;
+
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	/*
+	 * NOTE(review): opts is dereferenced after dropping gsi_lock; a
+	 * concurrent free_inst could race this log — confirm ordering.
+	 */
+	log_event_dbg("close ctrl dev %s\n",
+			inst_cur->opts->gsi->c_port.name);
+
+	return 0;
+}
+
+/*
+ * gsi_ctrl_dev_read - read() handler: hand one host control packet to
+ * userspace.
+ *
+ * Blocks (interruptibly) until cpkt_req_q is non-empty, then copies the
+ * oldest packet to @buf. Exactly one packet is consumed per read; the
+ * packet is freed whether or not the copy succeeds.
+ *
+ * Return: packet length on success, -ERESTARTSYS on signal, -ENOMEM if
+ * the user buffer is smaller than the packet, -EFAULT on copy failure,
+ * -ENODEV/-EINVAL on bad state or arguments.
+ */
+static ssize_t
+gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct gsi_ctrl_port *c_port;
+	struct gsi_ctrl_pkt *cpkt = NULL;
+	enum ipa_usb_teth_prot prot_id =
+		*(enum ipa_usb_teth_prot *)(fp->private_data);
+	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+	unsigned long flags;
+	int ret = 0;
+
+	log_event_dbg("%s: Enter %zu", __func__, count);
+
+	/* bail out if the function instance was freed behind this fd */
+	mutex_lock(&inst_cur->gsi_lock);
+	if (unlikely(inst_cur->inst_exist == false)) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: free_inst is called, free memory until dev is closed\n",
+			__func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	c_port = &inst_cur->opts->gsi->c_port;
+	/* NOTE(review): address-of a member — this check is always false */
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+		return -ENODEV;
+	}
+
+	if (count > GSI_MAX_CTRL_PKT_SIZE) {
+		log_event_err("Large buff size %zu, should be %d",
+			count, GSI_MAX_CTRL_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	/* block until a new packet is available */
+	spin_lock_irqsave(&c_port->lock, flags);
+	while (list_empty(&c_port->cpkt_req_q)) {
+		log_event_dbg("Requests list is empty. Wait.");
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		ret = wait_event_interruptible(c_port->read_wq,
+			!list_empty(&c_port->cpkt_req_q));
+		if (ret < 0) {
+			log_event_err("Waiting failed");
+			return -ERESTARTSYS;
+		}
+		log_event_dbg("Received request packet");
+		spin_lock_irqsave(&c_port->lock, flags);
+	}
+
+	cpkt = list_first_entry(&c_port->cpkt_req_q, struct gsi_ctrl_pkt,
+							list);
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	if (cpkt->len > count) {
+		log_event_err("cpkt size large:%d > buf size:%zu",
+				cpkt->len, count);
+		gsi_ctrl_pkt_free(cpkt);
+		return -ENOMEM;
+	}
+
+	log_event_dbg("%s: cpkt size:%d", __func__, cpkt->len);
+	if (qti_packet_debug)
+		print_hex_dump(KERN_DEBUG, "READ:", DUMP_PREFIX_OFFSET, 16, 1,
+			cpkt->buf, min_t(int, 30, cpkt->len), false);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		log_event_err("copy_to_user failed: err %d", ret);
+		ret = -EFAULT;
+	} else {
+		log_event_dbg("%s: copied %d bytes to user", __func__,
+							cpkt->len);
+		ret = cpkt->len;
+		c_port->copied_to_modem++;
+	}
+
+	gsi_ctrl_pkt_free(cpkt);
+
+	log_event_dbg("%s: Exit %zu", __func__, count);
+
+	return ret;
+}
+
+/*
+ * gsi_ctrl_dev_write - write() handler: queue an encapsulated response
+ * from userspace toward the USB host.
+ *
+ * The payload is queued on cpkt_resp_q and a RESPONSE_AVAILABLE
+ * notification is pushed on the interrupt endpoint.
+ *
+ * Return: @count on success, or a negative errno (-ENODEV, -EINVAL,
+ * -ECONNRESET, -ENOTSUPP, -ENOMEM, -EFAULT).
+ */
+static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct gsi_ctrl_pkt *cpkt;
+	struct gsi_ctrl_port *c_port;
+	struct usb_request *req;
+	enum ipa_usb_teth_prot prot_id =
+		*(enum ipa_usb_teth_prot *)(fp->private_data);
+	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+	struct f_gsi *gsi;
+
+	log_event_dbg("Enter %zu", count);
+
+	/* bail out if the function instance was freed behind this fd */
+	mutex_lock(&inst_cur->gsi_lock);
+	if (unlikely(inst_cur->inst_exist == false)) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: free_inst is called, free memory until dev is closed\n",
+			__func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	gsi = inst_cur->opts->gsi;
+	c_port = &gsi->c_port;
+	req = c_port->notify_req;
+
+	if (!c_port || !req || !req->buf) {
+		log_event_err("%s: c_port %pK req %pK req->buf %pK",
+			__func__, c_port, req, req ? req->buf : req);
+		return -ENODEV;
+	}
+
+	if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
+		log_event_err("error: ctrl pkt length %zu", count);
+		return -EINVAL;
+	}
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_err("USB cable not connected\n");
+		return -ECONNRESET;
+	}
+
+	/* drop packets while function-suspended without remote wakeup */
+	if (gsi->function.func_is_suspended &&
+			!gsi->function.func_wakeup_allowed) {
+		c_port->cpkt_drop_cnt++;
+		log_event_err("drop ctrl pkt of len %zu", count);
+		return -ENOTSUPP;
+	}
+
+	cpkt = gsi_ctrl_pkt_alloc(count, GFP_KERNEL);
+	if (IS_ERR(cpkt)) {
+		log_event_err("failed to allocate ctrl pkt");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cpkt->buf, buf, count);
+	if (ret) {
+		/*
+		 * copy_from_user() returns the number of bytes NOT copied;
+		 * returning that positive count from write() would be
+		 * misinterpreted as a short successful write. Report
+		 * -EFAULT instead.
+		 */
+		log_event_err("copy_from_user failed err:%d", ret);
+		gsi_ctrl_pkt_free(cpkt);
+		return -EFAULT;
+	}
+	cpkt->type = GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE;
+	c_port->copied_from_modem++;
+	if (qti_packet_debug)
+		print_hex_dump(KERN_DEBUG, "WRITE:", DUMP_PREFIX_OFFSET, 16, 1,
+			cpkt->buf, min_t(int, 30, count), false);
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	if (!gsi_ctrl_send_notification(gsi))
+		c_port->modem_to_host++;
+
+	log_event_dbg("Exit %zu", count);
+
+	return count;
+}
+
+/*
+ * gsi_ctrl_dev_ioctl - ioctl() handler for the control misc device.
+ *
+ * Supports modem online/offline signalling, cable line-state query,
+ * IPA endpoint-pair lookup for QTI/MBIM, and MBIM NTB size/datagram
+ * queries. Returns 0 on success or a negative errno.
+ */
+static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned cmd,
+		unsigned long arg)
+{
+	struct gsi_ctrl_port *c_port;
+	struct f_gsi *gsi;
+	struct gsi_ctrl_pkt *cpkt;
+	struct ep_info info;
+	enum ipa_usb_teth_prot prot_id =
+		*(enum ipa_usb_teth_prot *)(fp->private_data);
+	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+	int val, ret = 0;
+	unsigned long flags;
+
+	/* bail out if the function instance was freed behind this fd */
+	mutex_lock(&inst_cur->gsi_lock);
+	if (unlikely(inst_cur->inst_exist == false)) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: free_inst is called, free memory until dev is closed\n",
+			__func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	gsi = inst_cur->opts->gsi;
+	c_port = &gsi->c_port;
+
+	/* NOTE(review): address-of a member — this check is always false */
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QTI_CTRL_MODEM_OFFLINE:
+		if (gsi->prot_id == IPA_USB_DIAG) {
+			log_event_dbg("%s:Modem Offline not handled", __func__);
+			goto exit_ioctl;
+		}
+		atomic_set(&c_port->ctrl_online, 0);
+		/* flush stale responses; keep pending host requests */
+		gsi_ctrl_clear_cpkt_queues(gsi, true);
+		cpkt = gsi_ctrl_pkt_alloc(0, GFP_KERNEL);
+		if (IS_ERR(cpkt)) {
+			log_event_err("%s: err allocating cpkt\n", __func__);
+			return -ENOMEM;
+		}
+		cpkt->type = GSI_CTRL_NOTIFY_OFFLINE;
+		spin_lock_irqsave(&c_port->lock, flags);
+		list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
+		spin_unlock_irqrestore(&c_port->lock, flags);
+		gsi_ctrl_send_notification(gsi);
+		break;
+	case QTI_CTRL_MODEM_ONLINE:
+		if (gsi->prot_id == IPA_USB_DIAG) {
+			log_event_dbg("%s:Modem Online not handled", __func__);
+			goto exit_ioctl;
+		}
+
+		atomic_set(&c_port->ctrl_online, 1);
+		break;
+	case QTI_CTRL_GET_LINE_STATE:
+		val = atomic_read(&gsi->connected);
+		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+		if (ret) {
+			log_event_err("copy_to_user fail LINE_STATE");
+			ret = -EFAULT;
+		}
+		log_event_dbg("%s: Sent line_state: %d for prot id:%d",
+				__func__,
+				atomic_read(&gsi->connected), gsi->prot_id);
+		break;
+	case QTI_CTRL_EP_LOOKUP:
+	case GSI_MBIM_EP_LOOKUP:
+		log_event_dbg("%s: EP_LOOKUP for prot id:%d", __func__,
+							gsi->prot_id);
+		if (!atomic_read(&gsi->connected)) {
+			log_event_dbg("EP_LOOKUP failed: not connected");
+			ret = -EAGAIN;
+			break;
+		}
+
+		/* DIAG has no OUT channel; only the IN handle matters */
+		if (gsi->prot_id == IPA_USB_DIAG &&
+				(gsi->d_port.in_channel_handle == -EINVAL)) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		/*
+		 * NOTE(review): this only fails when BOTH handles are
+		 * invalid; an invalid IN with a valid OUT passes through —
+		 * confirm that is intended.
+		 */
+		if (gsi->d_port.in_channel_handle == -EINVAL &&
+			gsi->d_port.out_channel_handle == -EINVAL) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+		info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+		info.ipa_ep_pair.cons_pipe_num =
+		(gsi->prot_id == IPA_USB_DIAG) ? -1 :
+				gsi->d_port.out_channel_handle;
+		info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
+
+		log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
+				__func__, gsi->prot_id, info.ph_ep_info.ep_type,
+				info.ph_ep_info.peripheral_iface_id);
+
+		log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+				__func__, info.ipa_ep_pair.cons_pipe_num,
+				info.ipa_ep_pair.prod_pipe_num);
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			log_event_err("copy_to_user fail MBIM");
+			ret = -EFAULT;
+		}
+		break;
+	case GSI_MBIM_GET_NTB_SIZE:
+		ret = copy_to_user((void __user *)arg,
+			&gsi->d_port.ntb_info.ntb_input_size,
+			sizeof(gsi->d_port.ntb_info.ntb_input_size));
+		if (ret) {
+			log_event_err("copy_to_user failNTB_SIZE");
+			ret = -EFAULT;
+		}
+		log_event_dbg("Sent NTB size %d",
+				gsi->d_port.ntb_info.ntb_input_size);
+		break;
+	case GSI_MBIM_GET_DATAGRAM_COUNT:
+		ret = copy_to_user((void __user *)arg,
+			&gsi->d_port.ntb_info.ntb_max_datagrams,
+			sizeof(gsi->d_port.ntb_info.ntb_max_datagrams));
+		if (ret) {
+			log_event_err("copy_to_user fail DATAGRAM");
+			ret = -EFAULT;
+		}
+		log_event_dbg("Sent NTB datagrams count %d",
+			gsi->d_port.ntb_info.ntb_max_datagrams);
+		break;
+	default:
+		log_event_err("wrong parameter");
+		ret = -EINVAL;
+	}
+
+exit_ioctl:
+	return ret;
+}
+
+/*
+ * gsi_ctrl_dev_poll - poll() handler for the control misc device.
+ *
+ * Reports POLLIN | POLLRDNORM when a host control packet is queued,
+ * POLLERR when the backing function instance is gone.
+ */
+static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait)
+{
+	struct gsi_ctrl_port *c_port;
+	enum ipa_usb_teth_prot prot_id =
+		*(enum ipa_usb_teth_prot *)(fp->private_data);
+	struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+	unsigned long flags;
+	unsigned int mask = 0;
+
+	mutex_lock(&inst_cur->gsi_lock);
+	if (unlikely(inst_cur->inst_exist == false)) {
+		mutex_unlock(&inst_cur->gsi_lock);
+		pr_err_ratelimited(
+			"%s: free_inst is called, free memory until dev is closed\n",
+			__func__);
+		/*
+		 * ->poll returns an event mask, not an errno; a negative
+		 * value like -ENODEV would be misread as a bogus mask by
+		 * poll/select. Report an error condition instead.
+		 */
+		return POLLERR;
+	}
+	mutex_unlock(&inst_cur->gsi_lock);
+
+	c_port = &inst_cur->opts->gsi->c_port;
+	if (!c_port) {
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+		return POLLERR;
+	}
+
+	poll_wait(fp, &c_port->read_wq, wait);
+
+	spin_lock_irqsave(&c_port->lock, flags);
+	if (!list_empty(&c_port->cpkt_req_q)) {
+		mask |= POLLIN | POLLRDNORM;
+		log_event_dbg("%s sets POLLIN for %s", __func__, c_port->name);
+	}
+	spin_unlock_irqrestore(&c_port->lock, flags);
+
+	return mask;
+}
+
+/* file operations for rmnet/mbim/dpl devices */
+static const struct file_operations gsi_ctrl_dev_fops = {
+	.owner = THIS_MODULE,
+	.open = gsi_ctrl_dev_open,
+	.release = gsi_ctrl_dev_release,
+	.read = gsi_ctrl_dev_read,
+	.write = gsi_ctrl_dev_write,
+	.unlocked_ioctl = gsi_ctrl_dev_ioctl,
+#ifdef CONFIG_COMPAT
+	/* ioctl payloads are layout-compatible, so reuse the same handler */
+	.compat_ioctl = gsi_ctrl_dev_ioctl,
+#endif
+	.poll = gsi_ctrl_dev_poll,
+};
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int gsi_xfer_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+
+	/* full speed */
+	return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*
+ * gsi_function_ctrl_port_init - set up the per-instance control port.
+ *
+ * Initializes the packet queues, lock and read waitqueue, then registers
+ * a misc character device for protocols that expose one (RMNET, MBIM,
+ * DIAG/DPL). Other protocols (e.g. RNDIS/ECM) get no control device.
+ *
+ * Return: 0 on success or a negative errno from misc_register().
+ */
+int gsi_function_ctrl_port_init(struct f_gsi *gsi)
+{
+	int ret;
+	int sz = GSI_CTRL_NAME_LEN;
+	bool ctrl_dev_create = true;
+
+	if (!gsi) {
+		log_event_err("%s: gsi prot ctx is NULL", __func__);
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
+	INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
+
+	spin_lock_init(&gsi->c_port.lock);
+
+	init_waitqueue_head(&gsi->c_port.read_wq);
+
+	/* pick the device node name from the tethering protocol */
+	if (gsi->prot_id == IPA_USB_RMNET)
+		strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
+	else if (gsi->prot_id == IPA_USB_MBIM)
+		strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
+	else if (gsi->prot_id == IPA_USB_DIAG)
+		strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
+	else
+		ctrl_dev_create = false;
+
+	if (!ctrl_dev_create)
+		return 0;
+
+	gsi->c_port.ctrl_device.name = gsi->c_port.name;
+	gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
+	gsi->c_port.ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&gsi->c_port.ctrl_device);
+	if (ret) {
+		log_event_err("%s: misc register failed prot id %d",
+				__func__, gsi->prot_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * gsi_rndis_get_netdev - look up the RNDIS backing net_device by name.
+ *
+ * Return: the net_device, or ERR_PTR(-EINVAL) if no such interface.
+ *
+ * NOTE(review): the reference taken by dev_get_by_name() is dropped
+ * before returning, so the caller holds a borrowed pointer with no
+ * refcount — safe only while the interface cannot be unregistered.
+ */
+struct net_device *gsi_rndis_get_netdev(const char *netname)
+{
+	struct net_device *net_dev;
+
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Decrement net_dev refcount as it was incremented in
+	 * dev_get_by_name().
+	 */
+	dev_put(net_dev);
+	return net_dev;
+}
+
+/* Report link-up and the current link speed to the RNDIS host. */
+static void gsi_rndis_open(struct f_gsi *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->function.config->cdev;
+	unsigned int speed;
+
+	log_event_dbg("%s", __func__);
+
+	/* RNDIS medium speed is expressed in units of 100 bps */
+	speed = gsi_xfer_bitrate(cdev->gadget) / 100;
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, speed);
+	rndis_signal_connect(rndis->params);
+}
+
+/*
+ * gsi_rndis_ipa_reset_trigger - clear the RNDIS flow-control trigger.
+ *
+ * Clears net_ready_trigger under the data-port lock so the flag stays
+ * consistent with readers that test it while holding the same lock.
+ */
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port)
+{
+	struct f_gsi *rndis = d_port_to_gsi(d_port);
+	unsigned long flags;
+
+	if (!rndis) {
+		log_event_err("%s: gsi prot ctx is %pK", __func__, rndis);
+		return;
+	}
+
+	/*
+	 * The original code re-checked !rndis after taking the spinlock
+	 * through rndis itself; that branch was unreachable dead code and
+	 * has been removed.
+	 */
+	spin_lock_irqsave(&rndis->d_port.lock, flags);
+	rndis->d_port.net_ready_trigger = false;
+	spin_unlock_irqrestore(&rndis->d_port.lock, flags);
+}
+
+/*
+ * gsi_rndis_flow_ctrl_enable - RNDIS flow-control callback.
+ *
+ * Invoked when the host toggles flow control (GEN_CURRENT_PACKET_FILTER).
+ * Posts EVT_HOST_NRDY (flow control on) or EVT_HOST_READY (off) to the
+ * data-port state machine and kicks its workqueue.
+ */
+void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
+{
+	struct f_gsi *rndis = param->v;
+	struct gsi_data_port *d_port;
+
+	if (!rndis) {
+		log_event_err("%s: gsi prot ctx is %pK", __func__, rndis);
+		return;
+	}
+
+	d_port = &rndis->d_port;
+
+	if (enable) {
+		log_event_dbg("%s: posting HOST_NRDY\n", __func__);
+		post_event(d_port, EVT_HOST_NRDY);
+	} else {
+		log_event_dbg("%s: posting HOST_READY\n", __func__);
+		post_event(d_port, EVT_HOST_READY);
+	}
+
+	queue_work(rndis->d_port.ipa_usb_wq, &rndis->d_port.usb_ipa_w);
+}
+
+/*
+ * queue_notification_request - submit the shared notify request on the
+ * interrupt endpoint.
+ *
+ * On queueing failure the notify_req_queued flag (set by the caller
+ * under c_port.lock) is rolled back so a later notification can retry.
+ *
+ * Return: usb_func_ep_queue() result.
+ */
+static int queue_notification_request(struct f_gsi *gsi)
+{
+	int ret;
+	unsigned long flags;
+
+	ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify,
+			   gsi->c_port.notify_req, GFP_ATOMIC);
+	if (ret < 0) {
+		spin_lock_irqsave(&gsi->c_port.lock, flags);
+		gsi->c_port.notify_req_queued = false;
+		spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+	}
+
+	log_event_dbg("%s: ret:%d req_queued:%d",
+		__func__, ret, gsi->c_port.notify_req_queued);
+
+	return ret;
+}
+
+/*
+ * gsi_ctrl_send_notification - send a CDC notification for the oldest
+ * packet on cpkt_resp_q.
+ *
+ * Builds the usb_cdc_notification in the shared notify request buffer
+ * and queues it on the interrupt endpoint. At most one notify request
+ * is in flight (notify_req_queued); completion re-arms via
+ * gsi_ctrl_notify_resp_complete().
+ *
+ * Return: 0 when nothing was sent (queue empty or already in flight),
+ * -ENODEV when disconnected, -EINVAL on an unknown packet type, or the
+ * queueing result.
+ */
+static int gsi_ctrl_send_notification(struct f_gsi *gsi)
+{
+	__le32 *data;
+	struct usb_cdc_notification *event;
+	struct usb_request *req = gsi->c_port.notify_req;
+	struct gsi_ctrl_pkt *cpkt;
+	unsigned long flags;
+	bool del_free_cpkt = false;
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("%s: cable disconnect", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&gsi->c_port.lock, flags);
+	if (list_empty(&gsi->c_port.cpkt_resp_q)) {
+		spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+		log_event_dbg("%s: cpkt_resp_q is empty\n", __func__);
+		return 0;
+	}
+
+	log_event_dbg("%s: notify_req_queued:%d\n",
+		__func__, gsi->c_port.notify_req_queued);
+
+	if (gsi->c_port.notify_req_queued) {
+		spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+		log_event_dbg("%s: notify_req is already queued.\n", __func__);
+		return 0;
+	}
+
+	cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+			struct gsi_ctrl_pkt, list);
+	log_event_dbg("%s: cpkt->type:%d\n", __func__, cpkt->type);
+
+	event = req->buf;
+
+	switch (cpkt->type) {
+	case GSI_CTRL_NOTIFY_CONNECT:
+		del_free_cpkt = true;
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		event->wValue = cpu_to_le16(1);
+		event->wLength = cpu_to_le16(0);
+		break;
+	case GSI_CTRL_NOTIFY_SPEED:
+		del_free_cpkt = true;
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof(*event);
+		data[0] = cpu_to_le32(gsi_xfer_bitrate(gsi->gadget));
+		data[1] = data[0];
+
+		log_event_dbg("notify speed %d",
+				gsi_xfer_bitrate(gsi->gadget));
+		break;
+	case GSI_CTRL_NOTIFY_OFFLINE:
+		del_free_cpkt = true;
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(0);
+		break;
+	case GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE:
+		event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(0);
+
+		if (gsi->prot_id == IPA_USB_RNDIS) {
+			data = req->buf;
+			data[0] = cpu_to_le32(1);
+			data[1] = cpu_to_le32(0);
+			/*
+			 * we need to free dummy packet for RNDIS as sending
+			 * notification about response available multiple time,
+			 * RNDIS host driver doesn't like. All SEND/GET
+			 * ENCAPSULATED response is one-to-one for RNDIS case
+			 * and host expects to have below sequence:
+			 * ep0: USB_CDC_SEND_ENCAPSULATED_COMMAND
+			 * int_ep: device->host: RESPONSE_AVAILABLE
+			 * ep0: USB_GET_SEND_ENCAPSULATED_COMMAND
+			 * For RMNET case: host ignores multiple notification.
+			 */
+			del_free_cpkt = true;
+		}
+		break;
+	default:
+		spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+		log_event_err("%s:unknown notify state", __func__);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/*
+	 * Delete and free cpkt related to non NOTIFY_RESPONSE_AVAILABLE
+	 * notification whereas NOTIFY_RESPONSE_AVAILABLE related cpkt is
+	 * deleted from USB_CDC_GET_ENCAPSULATED_RESPONSE setup request
+	 */
+	if (del_free_cpkt) {
+		list_del(&cpkt->list);
+		gsi_ctrl_pkt_free(cpkt);
+	}
+
+	gsi->c_port.notify_req_queued = true;
+	spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+	log_event_dbg("send Notify type %02x", event->bNotificationType);
+
+	return queue_notification_request(gsi);
+}
+
+/*
+ * gsi_ctrl_notify_resp_complete - completion callback for the notify
+ * request on the interrupt endpoint.
+ *
+ * Clears the in-flight flag; on disconnect-style errors it flushes the
+ * response queue and posts an empty packet to wake userspace readers.
+ */
+static void gsi_ctrl_notify_resp_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+	struct usb_cdc_notification *event = req->buf;
+	int status = req->status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsi->c_port.lock, flags);
+	gsi->c_port.notify_req_queued = false;
+	spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		log_event_dbg("ESHUTDOWN/ECONNRESET, connection gone");
+		gsi_ctrl_clear_cpkt_queues(gsi, false);
+		/* zero-length packet wakes blocked readers */
+		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+		break;
+	default:
+		log_event_err("Unknown event %02x --> %d",
+			event->bNotificationType, req->status);
+		/* FALLTHROUGH */
+	case 0:
+		break;
+	}
+}
+
+/*
+ * RNDIS "response available" callback: enqueue a RESPONSE_AVAILABLE
+ * control packet and push a notification toward the host.
+ */
+static void gsi_rndis_response_available(void *_rndis)
+{
+	struct f_gsi *gsi = _rndis;
+	struct gsi_ctrl_pkt *pkt;
+	unsigned long flags;
+
+	pkt = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
+	if (IS_ERR(pkt)) {
+		log_event_err("%s: err allocating cpkt\n", __func__);
+		return;
+	}
+	pkt->type = GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE;
+
+	spin_lock_irqsave(&gsi->c_port.lock, flags);
+	list_add_tail(&pkt->list, &gsi->c_port.cpkt_resp_q);
+	spin_unlock_irqrestore(&gsi->c_port.lock, flags);
+
+	gsi_ctrl_send_notification(gsi);
+}
+
+/*
+ * ep0 OUT completion for RNDIS SEND_ENCAPSULATED_COMMAND: parse the
+ * message; on RNDIS_MSG_INIT, cap the IN aggregation size to what the
+ * host advertised (dl_max_xfer_size).
+ */
+static void gsi_rndis_command_complete(struct usb_ep *ep,
+				       struct usb_request *req)
+{
+	struct f_gsi *gsi = req->context;
+	rndis_init_msg_type *msg;
+	int ret;
+
+	if (req->status != 0) {
+		log_event_err("RNDIS command completion error %d\n",
+			      req->status);
+		return;
+	}
+
+	ret = rndis_msg_parser(gsi->params, (u8 *)req->buf);
+	if (ret < 0)
+		log_event_err("RNDIS command error %d, %d/%d",
+			      ret, req->actual, req->length);
+
+	msg = (rndis_init_msg_type *)req->buf;
+	if (msg->MessageType != RNDIS_MSG_INIT)
+		return;
+
+	/* never aggregate more than the host said it can receive */
+	gsi->d_port.in_aggr_size = min_t(u32,
+					 gsi->d_port.in_aggr_size,
+					 gsi->params->dl_max_xfer_size);
+	log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+		      gsi->params->dl_max_xfer_size,
+		      gsi->d_port.in_aggr_size);
+}
+
+/*
+ * ep0 OUT completion for SET_NTB_INPUT_SIZE (MBIM/NCM): validate the
+ * 4-byte (dwNtbInMaxSize only) or 8-byte (plus wNtbInMaxDatagrams)
+ * payload and store it; any malformed payload halts ep0.
+ */
+static void
+gsi_ctrl_set_ntb_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* now for SET_NTB_INPUT_SIZE only */
+	unsigned in_size = 0;
+	struct f_gsi *gsi = req->context;
+	struct gsi_ntb_info *ntb = NULL;
+
+	log_event_dbg("dev:%pK", gsi);
+
+	req->context = NULL;
+	if (req->status || req->actual != req->length) {
+		log_event_err("Bad control-OUT transfer");
+		goto invalid;
+	}
+
+	if (req->length == 4) {
+		/* short form: just the NTB input size */
+		in_size = get_unaligned_le32(req->buf);
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+			goto invalid;
+	} else if (req->length == 8) {
+		/* long form: size plus max datagrams per NTB */
+		ntb = (struct gsi_ntb_info *)req->buf;
+		in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+		if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+		in_size > le32_to_cpu(mbim_gsi_ntb_parameters.dwNtbInMaxSize))
+			goto invalid;
+
+		gsi->d_port.ntb_info.ntb_max_datagrams =
+			get_unaligned_le16(&(ntb->ntb_max_datagrams));
+	} else {
+		goto invalid;
+	}
+
+	/* %u: in_size is unsigned; %d would misprint values > INT_MAX */
+	log_event_dbg("Set NTB INPUT SIZE %u", in_size);
+
+	gsi->d_port.ntb_info.ntb_input_size = in_size;
+	return;
+
+invalid:
+	log_event_err("Illegal NTB INPUT SIZE %u from host", in_size);
+	usb_ep_set_halt(ep);
+}
+
+/* ep0 OUT data stage done: hand the received payload to the modem. */
+static void gsi_ctrl_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	gsi_ctrl_send_cpkt_tomodem(req->context, req->buf, req->actual);
+}
+
+/* RESET_FUNCTION handled: notify the modem with a zero-length packet. */
+static void gsi_ctrl_reset_cmd_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	gsi_ctrl_send_cpkt_tomodem(req->context, req->buf, 0);
+}
+
+/* Encapsulated response delivered: try to queue the next notification. */
+static void gsi_ctrl_send_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	gsi_ctrl_send_notification(req->context);
+}
+
+/*
+ * gsi_setup - handle CDC class-specific ep0 requests for this function.
+ * Returns the number of bytes queued for the ep0 data stage (>= 0) or a
+ * negative errno; a negative return makes the composite core stall ep0.
+ * Uses the shared composite request cdev->req for all data stages.
+ */
+static int
+gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int id, value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ struct gsi_ctrl_pkt *cpkt;
+ u8 *buf;
+ u32 n;
+
+ /* ignore class requests while no host session is active */
+ if (!atomic_read(&gsi->connected)) {
+ log_event_dbg("usb cable is not connected");
+ return -ENOTCONN;
+ }
+
+ /* rmnet and dpl does not have ctrl_id */
+ if (gsi->ctrl_id == -ENODEV)
+ id = gsi->data_id;
+ else
+ id = gsi->ctrl_id;
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_RESET_FUNCTION:
+
+ log_event_dbg("USB_CDC_RESET_FUNCTION");
+ value = 0;
+ req->complete = gsi_ctrl_reset_cmd_complete;
+ req->context = gsi;
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ log_event_dbg("USB_CDC_SEND_ENCAPSULATED_COMMAND");
+
+ if (w_value || w_index != id)
+ goto invalid;
+ /* read the request; process it later */
+ value = w_length;
+ req->context = gsi;
+ if (gsi->prot_id == IPA_USB_RNDIS)
+ req->complete = gsi_rndis_command_complete;
+ else
+ req->complete = gsi_ctrl_cmd_complete;
+ /* later, rndis_response_available() sends a notification */
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ log_event_dbg("USB_CDC_GET_ENCAPSULATED_RESPONSE");
+ if (w_value || w_index != id)
+ goto invalid;
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ /* return the result */
+ buf = rndis_get_next_response(gsi->params, &n);
+ if (buf) {
+ memcpy(req->buf, buf, n);
+ rndis_free_response(gsi->params, buf);
+ value = n;
+ }
+ break;
+ }
+
+ /* non-RNDIS: pop the oldest queued modem response; the
+ * packet is freed right after its payload is copied out.
+ */
+ spin_lock(&gsi->c_port.lock);
+ if (list_empty(&gsi->c_port.cpkt_resp_q)) {
+ log_event_dbg("ctrl resp queue empty");
+ spin_unlock(&gsi->c_port.lock);
+ break;
+ }
+
+ cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
+ struct gsi_ctrl_pkt, list);
+ list_del(&cpkt->list);
+ gsi->c_port.get_encap_cnt++;
+ spin_unlock(&gsi->c_port.lock);
+
+ value = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, value);
+ gsi_ctrl_pkt_free(cpkt);
+
+ req->complete = gsi_ctrl_send_response_complete;
+ req->context = gsi;
+ log_event_dbg("copied encap_resp %d bytes",
+ value);
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+ log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
+ __func__, w_value & GSI_CTRL_DTR ? 1 : 0);
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+ value = 0;
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_ETHERNET_PACKET_FILTER:
+ /* see 6.2.30: no data, wIndex = interface,
+ * wValue = packet filter bitmap
+ */
+ if (w_length != 0 || w_index != id)
+ goto invalid;
+ log_event_dbg("packet filter %02x", w_value);
+ /* REVISIT locking of cdc_filter. This assumes the UDC
+ * driver won't have a concurrent packet TX irq running on
+ * another CPU; or that if it does, this write is atomic...
+ */
+ gsi->d_port.cdc_filter = w_value;
+ value = 0;
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_PARAMETERS:
+ log_event_dbg("USB_CDC_GET_NTB_PARAMETERS");
+
+ if (w_length == 0 || w_value != 0 || w_index != id)
+ break;
+
+ value = w_length > sizeof(mbim_gsi_ntb_parameters) ?
+ sizeof(mbim_gsi_ntb_parameters) : w_length;
+ memcpy(req->buf, &mbim_gsi_ntb_parameters, value);
+ break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_INPUT_SIZE:
+
+ log_event_dbg("USB_CDC_GET_NTB_INPUT_SIZE");
+
+ if (w_length < 4 || w_value != 0 || w_index != id)
+ break;
+
+ put_unaligned_le32(gsi->d_port.ntb_info.ntb_input_size,
+ req->buf);
+ value = 4;
+ log_event_dbg("Reply to host INPUT SIZE %d",
+ gsi->d_port.ntb_info.ntb_input_size);
+ break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_NTB_INPUT_SIZE:
+ log_event_dbg("USB_CDC_SET_NTB_INPUT_SIZE");
+
+ if (w_length != 4 && w_length != 8) {
+ log_event_err("wrong NTB length %d", w_length);
+ break;
+ }
+
+ if (w_value != 0 || w_index != id)
+ break;
+
+ /* payload is validated in gsi_ctrl_set_ntb_cmd_complete() */
+ req->complete = gsi_ctrl_set_ntb_cmd_complete;
+ req->length = w_length;
+ req->context = gsi;
+
+ value = req->length;
+ break;
+ default:
+invalid:
+ log_event_err("inval ctrl req%02x.%02x v%04x i%04x l%d",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ log_event_dbg("req%02x.%02x v%04x i%04x l%d",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ log_event_err("response on err %d", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * function *MUST* implement a get_alt() method.
+ */
+/*
+ * get_alt() — required because the data interface is multi-altsetting.
+ * Control interface and single-alt protocols always report alt 0; the
+ * data interface reports whether it is currently up.
+ */
+static int gsi_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	/* RNDIS, RMNET and DPL only support alt 0*/
+	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
+			gsi->prot_id == IPA_USB_RMNET ||
+			gsi->prot_id == IPA_USB_DIAG)
+		return 0;
+
+	if (intf == gsi->data_id)
+		return gsi->data_interface_up;
+
+	return -EINVAL;
+}
+
+/*
+ * Allocate the DMA-coherent TRB buffer pools for the IN and OUT data
+ * endpoints (each pool allocated once; skipped if already present).
+ * On OUT-pool failure, an IN pool allocated in this call is rolled back.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int gsi_alloc_trb_buffer(struct f_gsi *gsi)
+{
+	u32 len_in = 0;
+	u32 len_out = 0;
+
+	log_event_dbg("allocate trb's buffer\n");
+
+	/* IN (device->host) pool */
+	if (gsi->d_port.in_ep && !gsi->d_port.in_request.buf_base_addr) {
+		log_event_dbg("IN: num_bufs:=%zu, buf_len=%zu\n",
+			gsi->d_port.in_request.num_bufs,
+			gsi->d_port.in_request.buf_len);
+
+		len_in = gsi->d_port.in_request.buf_len *
+				gsi->d_port.in_request.num_bufs;
+		gsi->d_port.in_request.buf_base_addr =
+			dma_zalloc_coherent(gsi->gadget->dev.parent,
+				len_in, &gsi->d_port.in_request.dma,
+				GFP_KERNEL);
+		if (!gsi->d_port.in_request.buf_base_addr) {
+			dev_err(&gsi->gadget->dev,
+				"IN buf_base_addr allocate failed %s\n",
+				gsi->function.name);
+			return -ENOMEM;
+		}
+	}
+
+	/* OUT (host->device) pool */
+	if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
+		log_event_dbg("OUT: num_bufs:=%zu, buf_len=%zu\n",
+			gsi->d_port.out_request.num_bufs,
+			gsi->d_port.out_request.buf_len);
+
+		len_out = gsi->d_port.out_request.buf_len *
+				gsi->d_port.out_request.num_bufs;
+		gsi->d_port.out_request.buf_base_addr =
+			dma_zalloc_coherent(gsi->gadget->dev.parent,
+				len_out, &gsi->d_port.out_request.dma,
+				GFP_KERNEL);
+		if (!gsi->d_port.out_request.buf_base_addr) {
+			dev_err(&gsi->gadget->dev,
+				"OUT buf_base_addr allocate failed %s\n",
+				gsi->function.name);
+			/* undo the IN pool only if we created it above */
+			if (len_in) {
+				dma_free_coherent(gsi->gadget->dev.parent,
+					len_in,
+					gsi->d_port.in_request.buf_base_addr,
+					gsi->d_port.in_request.dma);
+				gsi->d_port.in_request.buf_base_addr = NULL;
+			}
+			return -ENOMEM;
+		}
+	}
+
+	log_event_dbg("finished allocating trb's buffer\n");
+	return 0;
+}
+
+/*
+ * Release the DMA-coherent TRB buffer pools allocated by
+ * gsi_alloc_trb_buffer(); safe to call when a pool was never allocated.
+ */
+static void gsi_free_trb_buffer(struct f_gsi *gsi)
+{
+	u32 sz;
+
+	log_event_dbg("freeing trb's buffer\n");
+
+	if (gsi->d_port.out_ep && gsi->d_port.out_request.buf_base_addr) {
+		sz = gsi->d_port.out_request.buf_len *
+			gsi->d_port.out_request.num_bufs;
+		dma_free_coherent(gsi->gadget->dev.parent, sz,
+			gsi->d_port.out_request.buf_base_addr,
+			gsi->d_port.out_request.dma);
+		gsi->d_port.out_request.buf_base_addr = NULL;
+	}
+
+	if (gsi->d_port.in_ep && gsi->d_port.in_request.buf_base_addr) {
+		sz = gsi->d_port.in_request.buf_len *
+			gsi->d_port.in_request.num_bufs;
+		dma_free_coherent(gsi->gadget->dev.parent, sz,
+			gsi->d_port.in_request.buf_base_addr,
+			gsi->d_port.in_request.dma);
+		gsi->d_port.in_request.buf_base_addr = NULL;
+	}
+}
+
+/*
+ * gsi_set_alt - activate the control and/or data interface.
+ * The control interface accepts only alt 0 (notify ep re-enabled per
+ * current speed). For the data interface, alt 1 configures the GSI
+ * endpoints and queues the IPA connect work; alt 0 tears the data path
+ * down. Returns 0 on success, -EINVAL on failure (notify ep disabled).
+ */
+static int gsi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct net_device *net;
+ int ret;
+
+ log_event_dbg("intf=%u, alt=%u", intf, alt);
+
+ /* Control interface has only altsetting 0 */
+ if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+ if (alt != 0)
+ goto fail;
+
+ if (!gsi->c_port.notify)
+ goto fail;
+
+ /* non-NULL driver_data means the ep is currently enabled */
+ if (gsi->c_port.notify->driver_data) {
+ log_event_dbg("reset gsi control %d", intf);
+ usb_ep_disable(gsi->c_port.notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ gsi->c_port.notify);
+ if (ret) {
+ gsi->c_port.notify->desc = NULL;
+ log_event_err("Config-fail notify ep %s: err %d",
+ gsi->c_port.notify->name, ret);
+ goto fail;
+ }
+
+ ret = usb_ep_enable(gsi->c_port.notify);
+ if (ret) {
+ log_event_err("usb ep#%s enable failed, err#%d",
+ gsi->c_port.notify->name, ret);
+ goto fail;
+ }
+ gsi->c_port.notify->driver_data = gsi;
+ }
+
+ /* Data interface has two altsettings, 0 and 1 */
+ if (intf == gsi->data_id) {
+ gsi->d_port.net_ready_trigger = false;
+ /* for rndis and rmnet alt is always 0 update alt accordingly */
+ if (gsi->prot_id == IPA_USB_RNDIS ||
+ gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->d_port.in_ep &&
+ !gsi->d_port.in_ep->driver_data)
+ alt = 1;
+ else
+ alt = 0;
+ }
+
+ if (alt > 1)
+ goto notify_ep_disable;
+
+ /* nothing to do if the requested state is already active */
+ if (gsi->data_interface_up == alt)
+ return 0;
+
+ if (gsi->d_port.in_ep && gsi->d_port.in_ep->driver_data)
+ gsi->d_port.ntb_info.ntb_input_size =
+ MBIM_NTB_DEFAULT_IN_SIZE;
+ if (alt == 1) {
+ /* pick speed-specific descriptors for both data eps */
+ if (gsi->d_port.in_ep && !gsi->d_port.in_ep->desc
+ && config_ep_by_speed(cdev->gadget, f,
+ gsi->d_port.in_ep)) {
+ gsi->d_port.in_ep->desc = NULL;
+ goto notify_ep_disable;
+ }
+
+ if (gsi->d_port.out_ep && !gsi->d_port.out_ep->desc
+ && config_ep_by_speed(cdev->gadget, f,
+ gsi->d_port.out_ep)) {
+ gsi->d_port.out_ep->desc = NULL;
+ goto notify_ep_disable;
+ }
+
+ /* Configure EPs for GSI */
+ if (gsi->d_port.in_ep) {
+ if (gsi->prot_id == IPA_USB_DIAG)
+ gsi->d_port.in_ep->ep_intr_num = 3;
+ else
+ gsi->d_port.in_ep->ep_intr_num = 2;
+ usb_gsi_ep_op(gsi->d_port.in_ep,
+ &gsi->d_port.in_request,
+ GSI_EP_OP_CONFIG);
+ }
+
+ if (gsi->d_port.out_ep) {
+ gsi->d_port.out_ep->ep_intr_num = 1;
+ usb_gsi_ep_op(gsi->d_port.out_ep,
+ &gsi->d_port.out_request,
+ GSI_EP_OP_CONFIG);
+ }
+
+ gsi->gadget = cdev->gadget;
+
+ if (gsi->prot_id == IPA_USB_RNDIS) {
+ gsi_rndis_open(gsi);
+ net = gsi_rndis_get_netdev("rndis0");
+ if (IS_ERR(net))
+ goto notify_ep_disable;
+
+ log_event_dbg("RNDIS RX/TX early activation");
+ gsi->d_port.cdc_filter = 0;
+ rndis_set_param_dev(gsi->params, net,
+ &gsi->d_port.cdc_filter);
+ }
+
+ if (gsi->prot_id == IPA_USB_ECM)
+ gsi->d_port.cdc_filter = DEFAULT_FILTER;
+
+ /*
+ * For RNDIS the event is posted from the flow control
+ * handler which is invoked when the host sends the
+ * GEN_CURRENT_PACKET_FILTER message.
+ */
+ if (gsi->prot_id != IPA_USB_RNDIS)
+ post_event(&gsi->d_port,
+ EVT_CONNECT_IN_PROGRESS);
+ queue_work(gsi->d_port.ipa_usb_wq,
+ &gsi->d_port.usb_ipa_w);
+ }
+ if (alt == 0 && ((gsi->d_port.in_ep &&
+ !gsi->d_port.in_ep->driver_data) ||
+ (gsi->d_port.out_ep &&
+ !gsi->d_port.out_ep->driver_data))) {
+ ipa_disconnect_handler(&gsi->d_port);
+ }
+
+ gsi->data_interface_up = alt;
+ log_event_dbg("DATA_INTERFACE id = %d, status = %d",
+ gsi->data_id, gsi->data_interface_up);
+ }
+
+ /* mark the session active for gsi_setup()/gsi_get_alt() */
+ atomic_set(&gsi->connected, 1);
+
+ /* send 0 len pkt to qti to notify state change */
+ if (gsi->prot_id == IPA_USB_DIAG)
+ gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+
+ return 0;
+
+notify_ep_disable:
+ if (gsi->c_port.notify && gsi->c_port.notify->driver_data)
+ usb_ep_disable(gsi->c_port.notify);
+fail:
+ return -EINVAL;
+}
+
+/*
+ * gsi_disable - tear the function down on disconnect/reset: mark the
+ * session inactive, disable the notify endpoint, flush control packet
+ * queues, tell the modem, and (if the data path was up) disconnect IPA.
+ */
+static void gsi_disable(struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	atomic_set(&gsi->connected, 0);
+
+	if (gsi->prot_id == IPA_USB_RNDIS)
+		rndis_uninit(gsi->params);
+
+	/* Disable Control Path */
+	if (gsi->c_port.notify && gsi->c_port.notify->driver_data) {
+		usb_ep_disable(gsi->c_port.notify);
+		gsi->c_port.notify->driver_data = NULL;
+	}
+
+	gsi_ctrl_clear_cpkt_queues(gsi, false);
+	/* send 0 len pkt to qti/qbi to notify state change */
+	gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
+	gsi->c_port.notify_req_queued = false;
+
+	/* Disable Data Path - only if it was initialized already (alt=1) */
+	if (gsi->data_interface_up) {
+		gsi->data_interface_up = false;
+
+		log_event_dbg("%s deactivated", gsi->function.name);
+		ipa_disconnect_handler(&gsi->d_port);
+		post_event(&gsi->d_port, EVT_DISCONNECTED);
+		queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+	} else {
+		log_event_dbg("%s: data intf is closed", __func__);
+	}
+}
+
+/*
+ * gsi_suspend - bus-suspend hook: block the IN doorbell and hand the
+ * suspend event to the IPA worker. Skipped when USB3 function suspend
+ * already ran via gsi_func_suspend().
+ */
+static void gsi_suspend(struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+	bool block_db = true;
+
+	/* Check if function is already suspended in gsi_func_suspend() */
+	if (f->func_is_suspended) {
+		log_event_dbg("%s: func already suspended, return\n", __func__);
+		return;
+	}
+
+	usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
+			GSI_EP_OP_SET_CLR_BLOCK_DBL);
+	post_event(&gsi->d_port, EVT_SUSPEND);
+	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+	log_event_dbg("gsi suspended");
+}
+
+/*
+ * gsi_resume - bus-resume hook: reconfigure the notify ep if needed,
+ * flush any pending control notification, and hand the resume event to
+ * the IPA worker.
+ */
+static void gsi_resume(struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	log_event_dbg("%s", __func__);
+
+	/*
+	 * If the function is in USB3 Function Suspend state, resume is
+	 * canceled. In this case resume is done by a Function Resume request.
+	 */
+	if (f->func_is_suspended &&
+			(cdev->gadget->speed == USB_SPEED_SUPER))
+		return;
+
+	if (gsi->c_port.notify && !gsi->c_port.notify->desc)
+		config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify);
+
+	/* Check any pending cpkt, and queue immediately on resume */
+	gsi_ctrl_send_notification(gsi);
+
+	/*
+	 * Linux host does not send RNDIS_MSG_INIT or non-zero
+	 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+	 * Trigger state machine explicitly on resume.
+	 */
+	if (gsi->prot_id == IPA_USB_RNDIS &&
+			!usb_gsi_remote_wakeup_allowed(f))
+		rndis_flow_control(gsi->params, false);
+
+	post_event(&gsi->d_port, EVT_RESUMED);
+	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
+
+	log_event_dbg("%s: completed", __func__);
+}
+
+/*
+ * gsi_func_suspend - USB3 SET_FEATURE(FUNCTION_SUSPEND) handler.
+ * Bit FUNC_SUSPEND_OPT_SUSP_MASK selects suspend vs. resume; bit
+ * FUNC_SUSPEND_OPT_RW_EN_MASK records whether remote wakeup is allowed.
+ * Always returns 0.
+ */
+static int gsi_func_suspend(struct usb_function *f, u8 options)
+{
+	bool wakeup_allowed = (options & FUNC_SUSPEND_OPT_RW_EN_MASK) != 0;
+
+	log_event_dbg("func susp %u cmd for %s",
+		options, f->name ? f->name : "");
+
+	if (options & FUNC_SUSPEND_OPT_SUSP_MASK) {
+		f->func_wakeup_allowed = wakeup_allowed;
+		if (!f->func_is_suspended) {
+			gsi_suspend(f);
+			f->func_is_suspended = true;
+		}
+	} else {
+		if (f->func_is_suspended) {
+			f->func_is_suspended = false;
+			gsi_resume(f);
+		}
+		f->func_wakeup_allowed = wakeup_allowed;
+	}
+
+	return 0;
+}
+
+/*
+ * gsi_update_function_bind_params - shared bind helper: allocates the
+ * device-global string IDs, patches interface numbers into the
+ * protocol-specific descriptors, claims the data/notify endpoints,
+ * allocates the notify request and buffer, and copies FS/HS/SS
+ * descriptor sets. Returns 0 or a negative errno; the fail path frees
+ * everything allocated here.
+ */
+static int gsi_update_function_bind_params(struct f_gsi *gsi,
+ struct usb_composite_dev *cdev,
+ struct gsi_function_bind_info *info)
+{
+ struct usb_ep *ep;
+ struct usb_cdc_notification *event;
+ struct usb_function *f = &gsi->function;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (info->string_defs[0].id != 0)
+ goto skip_string_id_alloc;
+
+ if (info->ctrl_str_idx >= 0 && info->ctrl_desc) {
+ /* ctrl interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->ctrl_str_idx].id = status;
+ info->ctrl_desc->iInterface = status;
+ }
+
+ if (info->data_str_idx >= 0 && info->data_desc) {
+ /* data interface label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->data_str_idx].id = status;
+ info->data_desc->iInterface = status;
+ }
+
+ if (info->iad_str_idx >= 0 && info->iad_desc) {
+ /* IAD iFunction label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->iad_str_idx].id = status;
+ info->iad_desc->iFunction = status;
+ }
+
+ if (info->mac_str_idx >= 0 && info->cdc_eth_desc) {
+ /* IAD iFunction label */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ info->string_defs[info->mac_str_idx].id = status;
+ info->cdc_eth_desc->iMACAddress = status;
+ }
+
+skip_string_id_alloc:
+ /* stamp the interface numbers assigned in gsi_bind() */
+ if (info->ctrl_desc)
+ info->ctrl_desc->bInterfaceNumber = gsi->ctrl_id;
+
+ if (info->iad_desc)
+ info->iad_desc->bFirstInterface = gsi->ctrl_id;
+
+ if (info->union_desc) {
+ info->union_desc->bMasterInterface0 = gsi->ctrl_id;
+ info->union_desc->bSlaveInterface0 = gsi->data_id;
+ }
+
+ if (info->data_desc)
+ info->data_desc->bInterfaceNumber = gsi->data_id;
+
+ if (info->data_nop_desc)
+ info->data_nop_desc->bInterfaceNumber = gsi->data_id;
+
+ /* allocate instance-specific endpoints */
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig_by_name
+ (cdev->gadget, info->fs_in_desc, info->in_epname);
+ if (!ep)
+ goto fail;
+ gsi->d_port.in_ep = ep;
+ /* NOTE(review): msm_ep_config() result ignored — confirm OK */
+ msm_ep_config(gsi->d_port.in_ep, NULL);
+ ep->driver_data = cdev; /* claim */
+ }
+
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig_by_name
+ (cdev->gadget, info->fs_out_desc, info->out_epname);
+ if (!ep)
+ goto fail;
+ gsi->d_port.out_ep = ep;
+ msm_ep_config(gsi->d_port.out_ep, NULL);
+ ep->driver_data = cdev; /* claim */
+ }
+
+ if (info->fs_notify_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gsi->c_port.notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* allocate notification request and buffer */
+ gsi->c_port.notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!gsi->c_port.notify_req)
+ goto fail;
+
+ gsi->c_port.notify_req->buf =
+ kmalloc(info->notify_buf_len, GFP_KERNEL);
+ if (!gsi->c_port.notify_req->buf)
+ goto fail;
+
+ gsi->c_port.notify_req->length = info->notify_buf_len;
+ gsi->c_port.notify_req->context = gsi;
+ gsi->c_port.notify_req->complete =
+ gsi_ctrl_notify_resp_complete;
+ event = gsi->c_port.notify_req->buf;
+ event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+
+ /* rmnet/dpl have no ctrl interface; notify targets data_id */
+ if (gsi->ctrl_id == -ENODEV)
+ event->wIndex = cpu_to_le16(gsi->data_id);
+ else
+ event->wIndex = cpu_to_le16(gsi->ctrl_id);
+
+ event->wLength = cpu_to_le16(0);
+ }
+
+ gsi->d_port.in_request.buf_len = info->in_req_buf_len;
+ gsi->d_port.in_request.num_bufs = info->in_req_num_buf;
+ if (gsi->d_port.out_ep) {
+ gsi->d_port.out_request.buf_len = info->out_req_buf_len;
+ gsi->d_port.out_request.num_bufs = info->out_req_num_buf;
+ }
+
+ /* Initialize event queue */
+ spin_lock_init(&gsi->d_port.evt_q.q_lock);
+ gsi->d_port.evt_q.head = gsi->d_port.evt_q.tail = MAXQUEUELEN - 1;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+ if (!gsi->function.fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(cdev->gadget)) {
+ if (info->fs_in_desc)
+ info->hs_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+ if (info->fs_out_desc)
+ info->hs_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc)
+ info->hs_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(cdev->gadget)) {
+ if (info->fs_in_desc)
+ info->ss_in_desc->bEndpointAddress =
+ info->fs_in_desc->bEndpointAddress;
+
+ if (info->fs_out_desc)
+ info->ss_out_desc->bEndpointAddress =
+ info->fs_out_desc->bEndpointAddress;
+ if (info->fs_notify_desc)
+ info->ss_notify_desc->bEndpointAddress =
+ info->fs_notify_desc->bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ /* free everything allocated above, in reverse order */
+ if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+ if (gsi->c_port.notify_req) {
+ kfree(gsi->c_port.notify_req->buf);
+ usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+ }
+ /* we might as well release our claims on endpoints */
+ if (gsi->c_port.notify)
+ gsi->c_port.notify->driver_data = NULL;
+ if (gsi->d_port.out_ep && gsi->d_port.out_ep->desc)
+ gsi->d_port.out_ep->driver_data = NULL;
+ if (gsi->d_port.in_ep && gsi->d_port.in_ep->desc)
+ gsi->d_port.in_ep->driver_data = NULL;
+ log_event_err("%s: bind failed for %s", __func__, f->name);
+ return -ENOMEM;
+}
+
+/* IPA readiness callback: set the flag and wake the bind-time waiter. */
+static void ipa_ready_callback(void *user_data)
+{
+	struct f_gsi *gsi = user_data;
+
+	log_event_info("%s: ipa is ready\n", __func__);
+
+	gsi->d_port.ipa_ready = true;
+	wake_up_interruptible(&gsi->d_port.wait_for_ipa_ready);
+}
+
+static int gsi_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct gsi_function_bind_info info = {0};
+ struct f_gsi *gsi = func_to_gsi(f);
+ struct rndis_params *params;
+ int status;
+
+ if (gsi->prot_id == IPA_USB_RMNET ||
+ gsi->prot_id == IPA_USB_DIAG)
+ gsi->ctrl_id = -ENODEV;
+ else {
+ status = gsi->ctrl_id = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ }
+
+ status = gsi->data_id = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+
+ switch (gsi->prot_id) {
+ case IPA_USB_RNDIS:
+ info.string_defs = rndis_gsi_string_defs;
+ info.ctrl_desc = &rndis_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &rndis_gsi_data_intf;
+ info.data_str_idx = 1;
+ info.iad_desc = &rndis_gsi_iad_descriptor;
+ info.iad_str_idx = 2;
+ info.union_desc = &rndis_gsi_union_desc;
+ info.fs_in_desc = &rndis_gsi_fs_in_desc;
+ info.fs_out_desc = &rndis_gsi_fs_out_desc;
+ info.fs_notify_desc = &rndis_gsi_fs_notify_desc;
+ info.hs_in_desc = &rndis_gsi_hs_in_desc;
+ info.hs_out_desc = &rndis_gsi_hs_out_desc;
+ info.hs_notify_desc = &rndis_gsi_hs_notify_desc;
+ info.ss_in_desc = &rndis_gsi_ss_in_desc;
+ info.ss_out_desc = &rndis_gsi_ss_out_desc;
+ info.ss_notify_desc = &rndis_gsi_ss_notify_desc;
+ info.fs_desc_hdr = gsi_eth_fs_function;
+ info.hs_desc_hdr = gsi_eth_hs_function;
+ info.ss_desc_hdr = gsi_eth_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ gsi->d_port.in_aggr_size = GSI_IN_RNDIS_AGGR_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_AGGR_SIZE;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+
+ params = rndis_register(gsi_rndis_response_available, gsi,
+ gsi_rndis_flow_ctrl_enable);
+ if (IS_ERR(params))
+ goto fail;
+
+ gsi->params = params;
+
+ rndis_set_param_medium(gsi->params, RNDIS_MEDIUM_802_3, 0);
+
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+ random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+ gsi->d_port.ipa_init_params.host_ethaddr,
+ gsi->d_port.ipa_init_params.device_ethaddr);
+ memcpy(gsi->ethaddr, &gsi->d_port.ipa_init_params.host_ethaddr,
+ ETH_ALEN);
+ rndis_set_host_mac(gsi->params, gsi->ethaddr);
+
+ if (gsi->manufacturer && gsi->vendorID &&
+ rndis_set_param_vendor(gsi->params, gsi->vendorID,
+ gsi->manufacturer))
+ goto dereg_rndis;
+
+ log_event_dbg("%s: max_pkt_per_xfer : %d", __func__,
+ DEFAULT_MAX_PKT_PER_XFER);
+ rndis_set_max_pkt_xfer(gsi->params, DEFAULT_MAX_PKT_PER_XFER);
+
+ /* In case of aggregated packets QC device will request
+ * aliment to 4 (2^2).
+ */
+ log_event_dbg("%s: pkt_alignment_factor : %d", __func__,
+ DEFAULT_PKT_ALIGNMENT_FACTOR);
+ rndis_set_pkt_alignment_factor(gsi->params,
+ DEFAULT_PKT_ALIGNMENT_FACTOR);
+ if (gsi->rndis_use_wceis) {
+ info.iad_desc->bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ info.iad_desc->bFunctionSubClass = 0x01;
+ info.iad_desc->bFunctionProtocol = 0x03;
+ info.ctrl_desc->bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ info.ctrl_desc->bInterfaceSubClass = 0x1;
+ info.ctrl_desc->bInterfaceProtocol = 0x03;
+ }
+ break;
+ case IPA_USB_MBIM:
+ info.string_defs = mbim_gsi_string_defs;
+ info.ctrl_desc = &mbim_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &mbim_gsi_data_intf;
+ info.data_str_idx = 1;
+ info.data_nop_desc = &mbim_gsi_data_nop_intf;
+ info.iad_desc = &mbim_gsi_iad_desc;
+ info.iad_str_idx = -1;
+ info.union_desc = &mbim_gsi_union_desc;
+ info.fs_in_desc = &mbim_gsi_fs_in_desc;
+ info.fs_out_desc = &mbim_gsi_fs_out_desc;
+ info.fs_notify_desc = &mbim_gsi_fs_notify_desc;
+ info.hs_in_desc = &mbim_gsi_hs_in_desc;
+ info.hs_out_desc = &mbim_gsi_hs_out_desc;
+ info.hs_notify_desc = &mbim_gsi_hs_notify_desc;
+ info.ss_in_desc = &mbim_gsi_ss_in_desc;
+ info.ss_out_desc = &mbim_gsi_ss_out_desc;
+ info.ss_notify_desc = &mbim_gsi_ss_notify_desc;
+ info.fs_desc_hdr = mbim_gsi_fs_function;
+ info.hs_desc_hdr = mbim_gsi_hs_function;
+ info.ss_desc_hdr = mbim_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_IN_MBIM_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_MBIM_AGGR_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_MBIM_BUF_LEN;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ mbim_gsi_desc.wMaxSegmentSize = cpu_to_le16(0x800);
+
+ /*
+ * If MBIM is bound in a config other than the first, tell
+ * Windows about it by returning the num as a string in the
+ * OS descriptor's subCompatibleID field. Windows only supports
+ * up to config #4.
+ */
+ if (c->bConfigurationValue >= 2 &&
+ c->bConfigurationValue <= 4) {
+ log_event_dbg("MBIM in configuration %d",
+ c->bConfigurationValue);
+ mbim_gsi_ext_config_desc.function.subCompatibleID[0] =
+ c->bConfigurationValue + '0';
+ }
+ break;
+ case IPA_USB_RMNET:
+ info.string_defs = rmnet_gsi_string_defs;
+ info.data_desc = &rmnet_gsi_interface_desc;
+ info.data_str_idx = 0;
+ info.fs_in_desc = &rmnet_gsi_fs_in_desc;
+ info.fs_out_desc = &rmnet_gsi_fs_out_desc;
+ info.fs_notify_desc = &rmnet_gsi_fs_notify_desc;
+ info.hs_in_desc = &rmnet_gsi_hs_in_desc;
+ info.hs_out_desc = &rmnet_gsi_hs_out_desc;
+ info.hs_notify_desc = &rmnet_gsi_hs_notify_desc;
+ info.ss_in_desc = &rmnet_gsi_ss_in_desc;
+ info.ss_out_desc = &rmnet_gsi_ss_out_desc;
+ info.ss_notify_desc = &rmnet_gsi_ss_notify_desc;
+ info.fs_desc_hdr = rmnet_gsi_fs_function;
+ info.hs_desc_hdr = rmnet_gsi_hs_function;
+ info.ss_desc_hdr = rmnet_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_IN_RMNET_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
+ info.out_req_num_buf = num_out_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ break;
+ case IPA_USB_ECM:
+ info.string_defs = ecm_gsi_string_defs;
+ info.ctrl_desc = &ecm_gsi_control_intf;
+ info.ctrl_str_idx = 0;
+ info.data_desc = &ecm_gsi_data_intf;
+ info.data_str_idx = 2;
+ info.data_nop_desc = &ecm_gsi_data_nop_intf;
+ info.cdc_eth_desc = &ecm_gsi_desc;
+ info.mac_str_idx = 1;
+ info.union_desc = &ecm_gsi_union_desc;
+ info.fs_in_desc = &ecm_gsi_fs_in_desc;
+ info.fs_out_desc = &ecm_gsi_fs_out_desc;
+ info.fs_notify_desc = &ecm_gsi_fs_notify_desc;
+ info.hs_in_desc = &ecm_gsi_hs_in_desc;
+ info.hs_out_desc = &ecm_gsi_hs_out_desc;
+ info.hs_notify_desc = &ecm_gsi_hs_notify_desc;
+ info.ss_in_desc = &ecm_gsi_ss_in_desc;
+ info.ss_out_desc = &ecm_gsi_ss_out_desc;
+ info.ss_notify_desc = &ecm_gsi_ss_notify_desc;
+ info.fs_desc_hdr = ecm_gsi_fs_function;
+ info.hs_desc_hdr = ecm_gsi_hs_function;
+ info.ss_desc_hdr = ecm_gsi_ss_function;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "gsi-epout";
+ gsi->d_port.in_aggr_size = GSI_ECM_AGGR_SIZE;
+ info.in_req_buf_len = GSI_IN_BUFF_SIZE;
+ info.in_req_num_buf = num_in_bufs;
+ gsi->d_port.out_aggr_size = GSI_ECM_AGGR_SIZE;
+ info.out_req_buf_len = GSI_OUT_ECM_BUF_LEN;
+ info.out_req_num_buf = GSI_ECM_NUM_OUT_BUFFERS;
+ info.notify_buf_len = GSI_CTRL_NOTIFY_BUFF_LEN;
+
+ /* export host's Ethernet address in CDC format */
+ random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
+ random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
+ gsi->d_port.ipa_init_params.host_ethaddr,
+ gsi->d_port.ipa_init_params.device_ethaddr);
+
+ snprintf(gsi->ethaddr, sizeof(gsi->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ gsi->d_port.ipa_init_params.host_ethaddr[0],
+ gsi->d_port.ipa_init_params.host_ethaddr[1],
+ gsi->d_port.ipa_init_params.host_ethaddr[2],
+ gsi->d_port.ipa_init_params.host_ethaddr[3],
+ gsi->d_port.ipa_init_params.host_ethaddr[4],
+ gsi->d_port.ipa_init_params.host_ethaddr[5]);
+ info.string_defs[1].s = gsi->ethaddr;
+ break;
+ case IPA_USB_DIAG:
+ info.string_defs = qdss_gsi_string_defs;
+ info.data_desc = &qdss_gsi_data_intf_desc;
+ info.data_str_idx = 0;
+ info.fs_in_desc = &qdss_gsi_hs_data_desc;
+ info.hs_in_desc = &qdss_gsi_hs_data_desc;
+ info.ss_in_desc = &qdss_gsi_ss_data_desc;
+ info.fs_desc_hdr = qdss_gsi_hs_data_only_desc;
+ info.hs_desc_hdr = qdss_gsi_hs_data_only_desc;
+ info.ss_desc_hdr = qdss_gsi_ss_data_only_desc;
+ info.in_epname = "gsi-epin";
+ info.out_epname = "";
+ info.in_req_buf_len = 16384;
+ info.in_req_num_buf = num_in_bufs;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ break;
+ default:
+ log_event_err("%s: Invalid prot id %d", __func__,
+ gsi->prot_id);
+ return -EINVAL;
+ }
+
+ status = gsi_update_function_bind_params(gsi, cdev, &info);
+ if (status)
+ goto dereg_rndis;
+
+ status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
+ if (!status) {
+ log_event_info("%s: ipa is not ready", __func__);
+ status = wait_event_interruptible_timeout(
+ gsi->d_port.wait_for_ipa_ready, gsi->d_port.ipa_ready,
+ msecs_to_jiffies(GSI_IPA_READY_TIMEOUT));
+ if (!status) {
+ log_event_err("%s: ipa ready timeout", __func__);
+ status = -ETIMEDOUT;
+ goto dereg_rndis;
+ }
+ }
+
+ gsi->d_port.ipa_usb_notify_cb = ipa_usb_notify_cb;
+ status = ipa_usb_init_teth_prot(gsi->prot_id,
+ &gsi->d_port.ipa_init_params, gsi->d_port.ipa_usb_notify_cb,
+ gsi);
+ if (status) {
+ log_event_err("%s: failed to init teth prot %d",
+ __func__, gsi->prot_id);
+ goto dereg_rndis;
+ }
+
+ gsi->d_port.sm_state = STATE_INITIALIZED;
+
+ DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ f->name,
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ gsi->d_port.in_ep->name, gsi->d_port.out_ep->name,
+ gsi->c_port.notify->name);
+ return 0;
+
+dereg_rndis:
+ rndis_deregister(gsi->params);
+fail:
+ return status;
+}
+
+/*
+ * gsi_unbind - undo gsi_bind() for one configuration.
+ *
+ * Tear-down order matters: the IPA workqueue is drained and the teth
+ * protocol de-initialized before any USB-side resources are released.
+ */
+static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_gsi *gsi = func_to_gsi(f);
+
+	/*
+	 * Use drain_workqueue to accomplish below conditions:
+	 * 1. Make sure that any running work completed
+	 * 2. Make sure to wait until all pending work completed i.e. workqueue
+	 * is not having any pending work.
+	 * Above conditions are making sure that ipa_usb_deinit_teth_prot()
+	 * with ipa driver shall not fail due to unexpected state.
+	 */
+	drain_workqueue(gsi->d_port.ipa_usb_wq);
+	ipa_usb_deinit_teth_prot(gsi->prot_id);
+
+	/* RNDIS keeps an extra registration (see gsi_bind); drop it here */
+	if (gsi->prot_id == IPA_USB_RNDIS) {
+		gsi->d_port.sm_state = STATE_UNINITIALIZED;
+		rndis_deregister(gsi->params);
+	}
+
+	/* clear the MS OS compat ID that MBIM advertised at bind time */
+	if (gsi->prot_id == IPA_USB_MBIM)
+		mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->fs_descriptors);
+
+	/* release the notify request allocated on the interrupt endpoint */
+	if (gsi->c_port.notify) {
+		kfree(gsi->c_port.notify_req->buf);
+		usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req);
+	}
+}
+
+
+/*
+ * gsi_free_func - .free_func callback; intentionally a no-op.
+ * The f_gsi instance is owned by gsi_opts and freed in gsi_inst_clean().
+ */
+static void gsi_free_func(struct usb_function *f)
+{
+	log_event_dbg("%s\n", __func__);
+}
+
+/*
+ * gsi_bind_config - fill in per-protocol name/strings and the
+ * usb_function callback table for @gsi.
+ *
+ * Returns 0 on success, -EINVAL for an unknown protocol id.
+ */
+int gsi_bind_config(struct f_gsi *gsi)
+{
+	int status = 0;
+	enum ipa_usb_teth_prot prot_id = gsi->prot_id;
+
+	log_event_dbg("%s: prot id %d", __func__, prot_id);
+
+	switch (prot_id) {
+	case IPA_USB_RNDIS:
+		gsi->function.name = "rndis";
+		gsi->function.strings = rndis_gsi_strings;
+		break;
+	case IPA_USB_ECM:
+		gsi->function.name = "cdc_ethernet";
+		gsi->function.strings = ecm_gsi_strings;
+		break;
+	case IPA_USB_RMNET:
+		gsi->function.name = "rmnet";
+		gsi->function.strings = rmnet_gsi_strings;
+		break;
+	case IPA_USB_MBIM:
+		gsi->function.name = "mbim";
+		gsi->function.strings = mbim_gsi_strings;
+		break;
+	case IPA_USB_DIAG:
+		gsi->function.name = "dpl";
+		gsi->function.strings = qdss_gsi_strings;
+		break;
+	default:
+		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		return -EINVAL;
+	}
+
+	/* descriptors are per-instance copies */
+	gsi->function.bind = gsi_bind;
+	gsi->function.unbind = gsi_unbind;
+	gsi->function.set_alt = gsi_set_alt;
+	gsi->function.get_alt = gsi_get_alt;
+	gsi->function.setup = gsi_setup;
+	gsi->function.disable = gsi_disable;
+	gsi->function.free_func = gsi_free_func;
+	gsi->function.suspend = gsi_suspend;
+	gsi->function.func_suspend = gsi_func_suspend;
+	gsi->function.resume = gsi_resume;
+
+	INIT_WORK(&gsi->d_port.usb_ipa_w, ipa_work_handler);
+
+	return status;
+}
+
+/*
+ * gsi_function_init - allocate and initialize one f_gsi instance for
+ * @prot_id.
+ *
+ * Returns the new instance, or ERR_PTR(-EINVAL) for a bad protocol id,
+ * ERR_PTR(-ENOMEM) on allocation failure, or the error from
+ * gsi_function_ctrl_port_init().
+ */
+static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id)
+{
+	struct f_gsi *gsi;
+	int ret = 0;
+
+	if (prot_id >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+	if (!gsi) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	spin_lock_init(&gsi->d_port.lock);
+
+	init_waitqueue_head(&gsi->d_port.wait_for_ipa_ready);
+
+	/* negative handle == "no channel allocated yet" */
+	gsi->d_port.in_channel_handle = -EINVAL;
+	gsi->d_port.out_channel_handle = -EINVAL;
+
+	gsi->prot_id = prot_id;
+
+	gsi->d_port.ipa_usb_wq = ipa_usb_wq;
+
+	ret = gsi_function_ctrl_port_init(gsi);
+	if (ret) {
+		kfree(gsi);
+		goto error;
+	}
+
+	return gsi;
+error:
+	return ERR_PTR(ret);
+}
+
+/* configfs item release: drop the reference taken on the function instance */
+static void gsi_opts_release(struct config_item *item)
+{
+	struct gsi_opts *opts = to_gsi_opts(item);
+
+	log_event_dbg("Release GSI: %s\n", __func__);
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations gsi_item_ops = {
+	.release = gsi_opts_release,
+};
+
+/*
+ * gsi_info_show - configfs "info" attribute: dump control-port counters
+ * and IPA channel parameters for a connected instance into @page.
+ *
+ * Returns the number of bytes written to @page (0 when the instance is
+ * not connected), or -ENOMEM if the scratch buffer cannot be allocated.
+ */
+static ssize_t gsi_info_show(struct config_item *item, char *page)
+{
+	struct ipa_usb_xdci_chan_params *ipa_chnl_params;
+	struct ipa_usb_xdci_connect_params *con_pms;
+	struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+	int ret, j = 0;
+	unsigned int len = 0;
+	char *buf;
+
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (gsi && atomic_read(&gsi->connected)) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "Info: Prot_id:%d\n",
+			gsi->prot_id);
+		ipa_chnl_params = &gsi->d_port.ipa_in_channel_params;
+		con_pms = &gsi->d_port.ipa_conn_pms;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%55s\n",
+		"==================================================");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10s\n", "Ctrl Name: ", gsi->c_port.name);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Online: ",
+				gsi->c_port.ctrl_online.counter);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Open: ",
+				gsi->c_port.is_open);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Host to Modem: ",
+				gsi->c_port.host_to_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Modem to Host: ",
+				gsi->c_port.modem_to_host);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Cpd to Modem: ",
+				gsi->c_port.copied_to_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Cpd From Modem: ",
+				gsi->c_port.copied_from_modem);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Ctrl Pkt Drops: ",
+				gsi->c_port.cpkt_drop_cnt);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"==============");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Protocol ID: ", gsi->prot_id);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "SM State: ", gsi->d_port.sm_state);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN XferRscIndex: ",
+				gsi->d_port.in_xfer_rsc_index);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10d\n", "IN Chnl Hdl: ",
+				gsi->d_port.in_channel_handle);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN Chnl Dbl Addr: ",
+				gsi->d_port.in_db_reg_phs_addr_lsb);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN TRB Ring Len: ",
+				ipa_chnl_params->xfer_ring_len);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN TRB Base Addr: ", (unsigned int)
+				ipa_chnl_params->xfer_ring_base_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "GEVENTCNTLO IN Addr: ",
+				ipa_chnl_params->gevntcount_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "DEPCMDLO IN Addr: ",
+			ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "IN LastTRB Addr Off: ",
+			ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN Buffer Size: ",
+			ipa_chnl_params->xfer_scratch.const_buffer_size);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "IN/DL Aggr Size: ",
+			con_pms->teth_prot_params.max_xfer_size_bytes_to_host);
+
+		ipa_chnl_params = &gsi->d_port.ipa_out_channel_params;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"==============");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT XferRscIndex: ",
+			gsi->d_port.out_xfer_rsc_index);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10d\n", "OUT Channel Hdl: ",
+			gsi->d_port.out_channel_handle);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT Channel Dbl Addr: ",
+			gsi->d_port.out_db_reg_phs_addr_lsb);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT TRB Ring Len: ",
+			ipa_chnl_params->xfer_ring_len);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT TRB Base Addr: ", (unsigned int)
+			ipa_chnl_params->xfer_ring_base_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "GEVENTCNTLO OUT Addr: ",
+			ipa_chnl_params->gevntcount_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "DEPCMDLO OUT Addr: ",
+			ipa_chnl_params->xfer_scratch.depcmd_low_addr);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10x\n", "OUT LastTRB Addr Off: ",
+			ipa_chnl_params->xfer_scratch.last_trb_addr_iova);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT Buffer Size: ",
+			ipa_chnl_params->xfer_scratch.const_buffer_size);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT/UL Aggr Size: ",
+			con_pms->teth_prot_params.max_xfer_size_bytes_to_dev);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "OUT/UL Packets to dev: ",
+			con_pms->teth_prot_params.max_packet_number_to_dev);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Net_ready_trigger:",
+			gsi->d_port.net_ready_trigger);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%25s\n",
+		"USB Bus Events");
+		for (j = 0; j < MAXQUEUELEN; j++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%d\t", gsi->d_port.evt_q.event[j]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Eventq head: ",
+				gsi->d_port.evt_q.head);
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+		"%25s %10u\n", "Eventq tail: ",
+				gsi->d_port.evt_q.tail);
+	}
+
+	/*
+	 * Copy with an explicit "%s" format: the original passed buf as the
+	 * format string, so any '%' in the collected data would have been
+	 * interpreted as a conversion specifier (format-string bug). scnprintf
+	 * also already guarantees len < PAGE_SIZE, so no extra clamp is needed.
+	 */
+	ret = scnprintf(page, PAGE_SIZE, "%s", buf);
+
+	kfree(buf);
+
+	return ret;
+}
+
+CONFIGFS_ATTR_RO(gsi_, info);
+
+/* configfs "rndis_wceis" read: report the current WCEIS-class flag */
+static ssize_t gsi_rndis_wceis_show(struct config_item *item, char *page)
+{
+	struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+
+	return snprintf(page, PAGE_SIZE, "%d\n", gsi->rndis_use_wceis);
+}
+
+/*
+ * configfs "rndis_wceis" write: accept any kstrtobool-parseable value
+ * ("0"/"1"/"y"/"n"...). Returns @len on success, -EINVAL on bad input.
+ */
+static ssize_t gsi_rndis_wceis_store(struct config_item *item,
+			const char *page, size_t len)
+{
+	struct f_gsi *gsi = to_gsi_opts(item)->gsi;
+	bool val;
+
+	if (kstrtobool(page, &val))
+		return -EINVAL;
+
+	gsi->rndis_use_wceis = val;
+
+	return len;
+}
+
+CONFIGFS_ATTR(gsi_, rndis_wceis);
+
+/* RNDIS instances expose "info" plus the extra "rndis_wceis" knob */
+static struct configfs_attribute *gsi_rndis_attrs[] = {
+	&gsi_attr_info,
+	&gsi_attr_rndis_wceis,
+	NULL,
+};
+
+static struct config_item_type gsi_func_rndis_type = {
+	.ct_item_ops	= &gsi_item_ops,
+	.ct_attrs	= gsi_rndis_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+
+/* all other protocols expose only the read-only "info" attribute */
+static struct configfs_attribute *gsi_attrs[] = {
+	&gsi_attr_info,
+	NULL,
+};
+
+static struct config_item_type gsi_func_type = {
+	.ct_item_ops	= &gsi_item_ops,
+	.ct_attrs	= gsi_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+/*
+ * gsi_inst_clean - free one instance: deregister its misc ctrl device
+ * (if it was ever registered; .fops is only set on registration) and
+ * release both the f_gsi object and the opts wrapper.
+ */
+static void gsi_inst_clean(struct gsi_opts *opts)
+{
+	if (opts->gsi->c_port.ctrl_device.fops)
+		misc_deregister(&opts->gsi->c_port.ctrl_device);
+
+	kfree(opts->gsi);
+	kfree(opts);
+}
+
+/*
+ * gsi_set_inst_name - configfs instance-name hook: map @name to a
+ * protocol id, allocate the backing f_gsi, and publish it in
+ * inst_status[] so the QTI ctrl device can find it.
+ *
+ * Returns 0 on success, -ENAMETOOLONG/-EINVAL for a bad name, -EBUSY if
+ * an instance of the same protocol still exists, or the error from
+ * gsi_function_init().
+ */
+static int gsi_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	int prot_id, name_len;
+	struct f_gsi *gsi;
+	struct gsi_opts *opts, *opts_prev;
+
+	opts = container_of(fi, struct gsi_opts, func_inst);
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	prot_id = name_to_prot_id(name);
+	if (prot_id < 0) {
+		log_event_err("%s: failed to find prot id for %s instance\n",
+						__func__, name);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): the busy-check drops gsi_lock before the instance is
+	 * published below, so two concurrent writers could both pass this
+	 * check for the same prot_id — confirm configfs serializes this path.
+	 */
+	mutex_lock(&inst_status[prot_id].gsi_lock);
+	opts_prev = inst_status[prot_id].opts;
+	if (opts_prev) {
+		mutex_unlock(&inst_status[prot_id].gsi_lock);
+		log_event_err("%s: prot_id = %d, prev inst do not freed yet\n",
+				__func__, prot_id);
+		return -EBUSY;
+	}
+	mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+	/* RNDIS gets the extended attr group (adds "rndis_wceis") */
+	if (prot_id == IPA_USB_RNDIS)
+		config_group_init_type_name(&opts->func_inst.group, "",
+					    &gsi_func_rndis_type);
+	gsi = gsi_function_init(prot_id);
+	if (IS_ERR(gsi))
+		return PTR_ERR(gsi);
+
+	opts->gsi = gsi;
+
+	/* Set instance status */
+	mutex_lock(&inst_status[prot_id].gsi_lock);
+	inst_status[prot_id].inst_exist = true;
+	inst_status[prot_id].opts = opts;
+	mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+	return 0;
+}
+
+/*
+ * gsi_free_inst - configfs instance teardown.
+ *
+ * If the ctrl device is still open from user space, only mark the
+ * instance gone and defer the actual free to the device-close path;
+ * otherwise free everything now and clear the inst_status slot.
+ */
+static void gsi_free_inst(struct usb_function_instance *f)
+{
+	struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+	enum ipa_usb_teth_prot prot_id;
+
+	if (!opts->gsi)
+		return;
+
+	prot_id = opts->gsi->prot_id;
+
+	mutex_lock(&inst_status[prot_id].gsi_lock);
+	if (opts->gsi->c_port.is_open) {
+		/* Mark instance exist as false */
+		inst_status[prot_id].inst_exist = false;
+		mutex_unlock(&inst_status[prot_id].gsi_lock);
+		log_event_err(
+			"%s: [prot_id = %d] Dev is open, free mem when dev close\n",
+			__func__, prot_id);
+		return;
+	}
+
+	/* Clear instance status */
+	gsi_inst_clean(opts);
+	inst_status[prot_id].inst_exist = false;
+	inst_status[prot_id].opts = NULL;
+	mutex_unlock(&inst_status[prot_id].gsi_lock);
+}
+
+/*
+ * gsi_alloc_inst - allocate the configfs function instance wrapper.
+ * Starts with the generic attr group; gsi_set_inst_name() may switch it
+ * to the RNDIS-specific group once the protocol is known.
+ */
+static struct usb_function_instance *gsi_alloc_inst(void)
+{
+	struct gsi_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = gsi_set_inst_name;
+	opts->func_inst.free_func_inst = gsi_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+					&gsi_func_type);
+
+	return &opts->func_inst;
+}
+
+/*
+ * gsi_alloc - allocate a usb_function for the instance: wire up the
+ * per-protocol callbacks/strings via gsi_bind_config() and hand back
+ * the embedded usb_function.
+ */
+static struct usb_function *gsi_alloc(struct usb_function_instance *fi)
+{
+	struct gsi_opts *opts;
+	int ret;
+
+	opts = container_of(fi, struct gsi_opts, func_inst);
+
+	ret = gsi_bind_config(opts->gsi);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &opts->gsi->function;
+}
+
+DECLARE_USB_FUNCTION(gsi, gsi_alloc_inst, gsi_alloc);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("GSI function driver");
+
+/*
+ * fgsi_init - module init: create the ordered IPA workqueue, initialize
+ * the per-protocol instance locks, and register the gadget function.
+ *
+ * Returns 0 on success or a negative errno. The workqueue is torn down
+ * again if usb_function_register() fails (the original leaked it).
+ */
+static int fgsi_init(void)
+{
+	int i;
+	int ret;
+
+	ipa_usb_wq = alloc_workqueue("k_ipa_usb",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
+	if (!ipa_usb_wq) {
+		log_event_err("Failed to create workqueue for IPA");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+		mutex_init(&inst_status[i].gsi_lock);
+
+	ret = usb_function_register(&gsiusb_func);
+	if (ret) {
+		/* don't leak the workqueue when registration fails */
+		destroy_workqueue(ipa_usb_wq);
+		ipa_usb_wq = NULL;
+	}
+
+	return ret;
+}
+module_init(fgsi_init);
+
+/*
+ * fgsi_exit - module exit.
+ *
+ * Unregister the function first so no instance can queue new work, and
+ * only then destroy the workqueue those instances were using. The
+ * original destroyed the workqueue before unregistering, leaving a
+ * window where live instances could queue work onto a dead workqueue.
+ */
+static void __exit fgsi_exit(void)
+{
+	usb_function_unregister(&gsiusb_func);
+	if (ipa_usb_wq)
+		destroy_workqueue(ipa_usb_wq);
+}
+module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
new file mode 100644
index 000000000000..96f1b5011960
--- /dev/null
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef _F_GSI_H
+#define _F_GSI_H
+
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa.h>
+#include <uapi/linux/usb/cdc.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb/msm_hsusb.h>
+
+#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
+#define GSI_MBIM_CTRL_NAME "android_mbim"
+#define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
+#define GSI_MAX_CTRL_PKT_SIZE 8192
+#define GSI_CTRL_DTR (1 << 0)
+
+
+#define GSI_NUM_IN_BUFFERS 15
+#define GSI_IN_BUFF_SIZE 2048
+#define GSI_NUM_OUT_BUFFERS 15
+#define GSI_ECM_NUM_OUT_BUFFERS 31
+#define GSI_OUT_AGGR_SIZE 24576
+
+#define GSI_IN_RNDIS_AGGR_SIZE 9216
+#define GSI_IN_MBIM_AGGR_SIZE 16384
+#define GSI_IN_RMNET_AGGR_SIZE 16384
+#define GSI_ECM_AGGR_SIZE 2048
+
+#define GSI_OUT_MBIM_BUF_LEN 16384
+#define GSI_OUT_RMNET_BUF_LEN 16384
+#define GSI_OUT_ECM_BUF_LEN 2048
+
+#define GSI_IPA_READY_TIMEOUT 5000
+
+#define ETH_ADDR_STR_LEN 14
+
+/* mbim and ecm */
+#define GSI_CTRL_NOTIFY_BUFF_LEN 16
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define GSI_MBIM_IOCTL_MAGIC 'o'
+#define GSI_MBIM_GET_NTB_SIZE _IOR(GSI_MBIM_IOCTL_MAGIC, 2, u32)
+#define GSI_MBIM_GET_DATAGRAM_COUNT _IOR(GSI_MBIM_IOCTL_MAGIC, 3, u16)
+#define GSI_MBIM_EP_LOOKUP _IOR(GSI_MBIM_IOCTL_MAGIC, 4, struct ep_info)
+#define GSI_MBIM_DATA_EP_TYPE_HSUSB 0x2
+/* ID for Microsoft OS String */
+#define GSI_MBIM_OS_STRING_ID 0xEE
+
+#define EVT_NONE 0
+#define EVT_UNINITIALIZED 1
+#define EVT_INITIALIZED 2
+#define EVT_CONNECT_IN_PROGRESS 3
+#define EVT_CONNECTED 4
+#define EVT_HOST_NRDY 5
+#define EVT_HOST_READY 6
+#define EVT_DISCONNECTED 7
+#define EVT_SUSPEND 8
+#define EVT_IPA_SUSPEND 9
+#define EVT_RESUMED 10
+
+/* data-port state machine states (see sm_state in struct gsi_data_port) */
+enum connection_state {
+	STATE_UNINITIALIZED,
+	STATE_INITIALIZED,
+	STATE_CONNECT_IN_PROGRESS,
+	STATE_CONNECTED,
+	STATE_DISCONNECTED,
+	STATE_SUSPEND_IN_PROGRESS,
+	STATE_SUSPENDED
+};
+
+/* kind of notification to send on the control interrupt endpoint */
+enum gsi_ctrl_notify_state {
+	GSI_CTRL_NOTIFY_NONE,
+	GSI_CTRL_NOTIFY_CONNECT,
+	GSI_CTRL_NOTIFY_SPEED,
+	GSI_CTRL_NOTIFY_OFFLINE,
+	GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+#define MAXQUEUELEN 128
+/* fixed-size ring of USB bus event codes, kept for debugfs/state machine */
+struct event_queue {
+	u8 event[MAXQUEUELEN];
+	u8 head, tail;
+	spinlock_t q_lock;
+};
+
+/* MBIM NTB parameters negotiated with the host */
+struct gsi_ntb_info {
+	u32 ntb_input_size;
+	u16 ntb_max_datagrams;
+	u16 reserved;
+};
+
+/* one queued control packet plus its notification type */
+struct gsi_ctrl_pkt {
+	void				*buf;
+	int				len;
+	enum gsi_ctrl_notify_state	type;
+	struct list_head		list;
+};
+
+/*
+ * Per-protocol bind parameters: string table indices, descriptor
+ * pointers for each speed, endpoint names, and request buffer sizing.
+ * Filled in by gsi_bind() and consumed by
+ * gsi_update_function_bind_params().
+ */
+struct gsi_function_bind_info {
+	struct usb_string *string_defs;
+	int ctrl_str_idx;
+	int data_str_idx;
+	int iad_str_idx;
+	int mac_str_idx;
+	struct usb_interface_descriptor *ctrl_desc;
+	struct usb_interface_descriptor *data_desc;
+	struct usb_interface_assoc_descriptor *iad_desc;
+	struct usb_cdc_ether_desc *cdc_eth_desc;
+	struct usb_cdc_union_desc *union_desc;
+	struct usb_interface_descriptor *data_nop_desc;
+	struct usb_endpoint_descriptor *fs_in_desc;
+	struct usb_endpoint_descriptor *fs_out_desc;
+	struct usb_endpoint_descriptor *fs_notify_desc;
+	struct usb_endpoint_descriptor *hs_in_desc;
+	struct usb_endpoint_descriptor *hs_out_desc;
+	struct usb_endpoint_descriptor *hs_notify_desc;
+	struct usb_endpoint_descriptor *ss_in_desc;
+	struct usb_endpoint_descriptor *ss_out_desc;
+	struct usb_endpoint_descriptor *ss_notify_desc;
+
+	struct usb_descriptor_header **fs_desc_hdr;
+	struct usb_descriptor_header **hs_desc_hdr;
+	struct usb_descriptor_header **ss_desc_hdr;
+	const char *in_epname;
+	const char *out_epname;
+
+	u32 in_req_buf_len;
+	u32 in_req_num_buf;
+	u32 out_req_buf_len;
+	u32 out_req_num_buf;
+	u32 notify_buf_len;
+};
+
+/*
+ * Control-path state: the misc char device exposed to user space, the
+ * interrupt notify endpoint, the request/response packet queues, and
+ * the counters reported through the configfs "info" attribute.
+ */
+struct gsi_ctrl_port {
+	char name[GSI_CTRL_NAME_LEN];
+	struct miscdevice ctrl_device;
+
+	struct usb_ep *notify;
+	struct usb_request *notify_req;
+	bool notify_req_queued;
+
+	atomic_t ctrl_online;
+
+	bool is_open;
+
+	wait_queue_head_t read_wq;
+
+	/* host->modem and modem->host control packet queues */
+	struct list_head cpkt_req_q;
+	struct list_head cpkt_resp_q;
+	unsigned long cpkts_len;
+
+	spinlock_t lock;
+
+	int ipa_cons_clnt_hdl;
+	int ipa_prod_clnt_hdl;
+
+	/* statistics for debugfs/configfs */
+	unsigned host_to_modem;
+	unsigned copied_to_modem;
+	unsigned copied_from_modem;
+	unsigned modem_to_host;
+	unsigned cpkt_drop_cnt;
+	unsigned get_encap_cnt;
+};
+
+/*
+ * Data-path state: the bulk endpoints handed over to the GSI/IPA
+ * hardware path, channel handles and doorbell addresses, aggregation
+ * sizing, and the state machine driven from ipa_usb_wq.
+ */
+struct gsi_data_port {
+	struct usb_ep *in_ep;
+	struct usb_ep *out_ep;
+	struct usb_gsi_request in_request;
+	struct usb_gsi_request out_request;
+	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *driver_data);
+	struct ipa_usb_teth_params ipa_init_params;
+	/* -EINVAL until a channel is allocated (see gsi_function_init) */
+	int in_channel_handle;
+	int out_channel_handle;
+	u32 in_db_reg_phs_addr_lsb;
+	u32 in_db_reg_phs_addr_msb;
+	u32 out_db_reg_phs_addr_lsb;
+	u32 out_db_reg_phs_addr_msb;
+	u32 in_xfer_rsc_index;
+	u32 out_xfer_rsc_index;
+	u16 in_last_trb_addr;
+	u16 cdc_filter;
+	u32 in_aggr_size;
+	u32 out_aggr_size;
+
+	bool ipa_ready;
+	bool net_ready_trigger;
+	struct gsi_ntb_info ntb_info;
+
+	spinlock_t lock;
+
+	struct work_struct usb_ipa_w;
+	struct workqueue_struct *ipa_usb_wq;
+	enum connection_state sm_state;
+	struct event_queue evt_q;
+	wait_queue_head_t wait_for_ipa_ready;
+
+	/* Track these for debugfs */
+	struct ipa_usb_xdci_chan_params ipa_in_channel_params;
+	struct ipa_usb_xdci_chan_params ipa_out_channel_params;
+	struct ipa_usb_xdci_connect_params ipa_conn_pms;
+};
+
+/* one GSI function instance: usb_function plus its control/data ports */
+struct f_gsi {
+	struct usb_function function;
+	struct usb_gadget *gadget;
+	enum ipa_usb_teth_prot prot_id;
+	int ctrl_id;
+	int data_id;
+	u32 vendorID;
+	u8 ethaddr[ETH_ADDR_STR_LEN];
+	const char *manufacturer;
+	struct rndis_params *params;
+	atomic_t connected;
+	bool data_interface_up;
+	bool rndis_use_wceis;
+
+	/* saved endpoint descriptors for restore after function suspend */
+	const struct usb_endpoint_descriptor *in_ep_desc_backup;
+	const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+	struct gsi_data_port d_port;
+	struct gsi_ctrl_port c_port;
+};
+
+/* container_of helpers: recover the f_gsi from its embedded members */
+static inline struct f_gsi *func_to_gsi(struct usb_function *f)
+{
+	return container_of(f, struct f_gsi, function);
+}
+
+static inline struct f_gsi *d_port_to_gsi(struct gsi_data_port *d)
+{
+	return container_of(d, struct f_gsi, d_port);
+}
+
+static inline struct f_gsi *c_port_to_gsi(struct gsi_ctrl_port *d)
+{
+	return container_of(d, struct f_gsi, c_port);
+}
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN      40
+
+/* configfs wrapper pairing a function instance with its f_gsi */
+struct gsi_opts {
+	struct usb_function_instance func_inst;
+	struct f_gsi *gsi;
+};
+
+static inline struct gsi_opts *to_gsi_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct gsi_opts,
+						func_inst.group);
+}
+
+/*
+ * name_to_prot_id - map a configfs instance name to its teth protocol.
+ * Returns -EINVAL for an unknown/NULL name; callers store the result in
+ * an int and test for < 0, relying on the enum being int-backed.
+ */
+static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+{
+	if (!name)
+		goto error;
+
+	if (!strncmp("rndis", name, MAX_INST_NAME_LEN))
+		return IPA_USB_RNDIS;
+	if (!strncmp("ecm", name, MAX_INST_NAME_LEN))
+		return IPA_USB_ECM;
+	if (!strncmp("rmnet", name, MAX_INST_NAME_LEN))
+		return IPA_USB_RMNET;
+	if (!strncasecmp("mbim", name, MAX_INST_NAME_LEN))
+		return IPA_USB_MBIM;
+	if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN))
+		return IPA_USB_DIAG;
+
+error:
+	return -EINVAL;
+}
+
+/* device descriptors */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5
+#define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+/* rmnet device descriptors */
+
+/* vendor-specific rmnet interface: notify + bulk IN + bulk OUT */
+static struct usb_interface_descriptor rmnet_gsi_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_gsi_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_hs_out_desc,
+	NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_gsi_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_notify_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_in_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =		2,
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_gsi_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_gsi_ss_out_comp_desc = {
+	.bLength =		sizeof(rmnet_gsi_ss_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	.bMaxBurst =		2,
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *rmnet_gsi_ss_function[] = {
+	(struct usb_descriptor_header *) &rmnet_gsi_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_notify_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_in_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_out_desc,
+	(struct usb_descriptor_header *) &rmnet_gsi_ss_out_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+static struct usb_string rmnet_gsi_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_gsi_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_gsi_strings[] = {
+	&rmnet_gsi_string_table,
+	NULL,
+};
+
+/* rndis device descriptors */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows*/
+static struct usb_interface_descriptor rndis_gsi_control_intf = {
+	.bLength =		sizeof(rndis_gsi_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_MISC,
+	.bInterfaceSubClass =	0x04,
+	.bInterfaceProtocol =	0x01,   /* RNDIS over Ethernet */
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_gsi_header_desc = {
+	.bLength =		sizeof(rndis_gsi_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_gsi_call_mgmt_descriptor = {
+	.bLength =		sizeof(rndis_gsi_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_gsi_acm_descriptor = {
+	.bLength =		sizeof(rndis_gsi_acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_gsi_union_desc = {
+	.bLength =		sizeof(rndis_gsi_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_gsi_data_intf = {
+	.bLength =		sizeof(rndis_gsi_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_gsi_iad_descriptor = {
+	.bLength =		sizeof(rndis_gsi_iad_descriptor),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_MISC,
+	.bFunctionSubClass =	0x04,
+	.bFunctionProtocol =	0x01, /* RNDIS over Ethernet */
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *gsi_eth_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *gsi_eth_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+static struct usb_endpoint_descriptor rndis_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_intr_comp_desc = {
+ .bLength = sizeof(rndis_gsi_ss_intr_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_gsi_ss_bulk_comp_desc = {
+ .bLength = sizeof(rndis_gsi_ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *gsi_eth_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_gsi_control_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_header_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_gsi_union_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_gsi_data_intf,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &rndis_gsi_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string rndis_gsi_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_gsi_strings[] = {
+ &rndis_gsi_string_table,
+ NULL,
+};
+
+/* mbim device descriptors */
+#define MBIM_NTB_DEFAULT_IN_SIZE (0x4000)
+
+static struct usb_cdc_ncm_ntb_parameters mbim_gsi_ntb_parameters = {
+ .wLength = sizeof(mbim_gsi_ntb_parameters),
+ .bmNtbFormatsSupported = cpu_to_le16(USB_CDC_NCM_NTB16_SUPPORTED),
+ .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+ .wNdpInDivisor = cpu_to_le16(4),
+ .wNdpInPayloadRemainder = cpu_to_le16(0),
+ .wNdpInAlignment = cpu_to_le16(4),
+
+ .dwNtbOutMaxSize = cpu_to_le32(0x4000),
+ .wNdpOutDivisor = cpu_to_le16(4),
+ .wNdpOutPayloadRemainder = cpu_to_le16(0),
+ .wNdpOutAlignment = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = cpu_to_le16(16), /* __le16 field: convert like siblings */
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation;
+ */
+#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor mbim_gsi_iad_desc = {
+ .bLength = sizeof(mbim_gsi_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = 2,
+ .bFunctionSubClass = 0x0e,
+ .bFunctionProtocol = 0,
+ /* .iFunction = DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_gsi_control_intf = {
+ .bLength = sizeof(mbim_gsi_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0x02,
+ .bInterfaceSubClass = 0x0e,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_gsi_header_desc = {
+ .bLength = sizeof(mbim_gsi_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_gsi_union_desc = {
+ .bLength = sizeof(mbim_gsi_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_mbim_desc mbim_gsi_desc = {
+ .bLength = sizeof(mbim_gsi_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_TYPE,
+
+ .bcdMBIMVersion = cpu_to_le16(0x0100),
+
+ .wMaxControlMessage = cpu_to_le16(0x1000),
+ .bNumberFilters = 0x20,
+ .bMaxFilterSize = 0x80,
+ .wMaxSegmentSize = cpu_to_le16(0xfe0),
+ .bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc mbim_gsi_ext_mbb_desc = {
+ .bLength = sizeof(mbim_gsi_ext_mbb_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_EXTENDED_TYPE,
+
+ .bcdMBIMExtendedVersion = cpu_to_le16(0x0100),
+ .bMaxOutstandingCommandMessages = 64,
+ .wMTU = cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_gsi_data_nop_intf = {
+ .bLength = sizeof(mbim_gsi_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_gsi_data_intf = {
+ .bLength = sizeof(mbim_gsi_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT), /* multiply in host order, then convert */
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *mbim_gsi_fs_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor mbim_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT), /* multiply in host order, then convert */
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor mbim_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *mbim_gsi_hs_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_hs_out_desc,
+ NULL,
+};
+
+/* Super Speed Support */
+static struct usb_endpoint_descriptor mbim_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT), /* multiply in host order, then convert */
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_notify_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT), /* multiply in host order, then convert */
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_in_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor mbim_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mbim_gsi_ss_out_comp_desc = {
+ .bLength = sizeof(mbim_gsi_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *mbim_gsi_ss_function[] = {
+ (struct usb_descriptor_header *) &mbim_gsi_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_gsi_control_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_header_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_union_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ext_mbb_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_data_intf,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &mbim_gsi_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string mbim_gsi_string_defs[] = {
+ [0].s = "MBIM Control",
+ [1].s = "MBIM Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = mbim_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_gsi_strings[] = {
+ &mbim_gsi_string_table,
+ NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE 0xA5
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_gsi_ext_config_desc_header {
+ __le32 dwLength;
+ __le16 bcdVersion; /* initialized with cpu_to_le16(); annotate as little-endian */
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_gsi_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* Microsoft Extended Configuration Descriptor */
+static struct {
+ struct mbim_gsi_ext_config_desc_header header;
+ struct mbim_gsi_ext_config_desc_function function;
+} mbim_gsi_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mbim_gsi_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
+ /* .subCompatibleID = DYNAMIC */
+ },
+};
+/* ecm device descriptors */
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* interface descriptor: */
+static struct usb_interface_descriptor ecm_gsi_control_intf = {
+ .bLength = sizeof(ecm_gsi_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_gsi_header_desc = {
+ .bLength = sizeof(ecm_gsi_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_gsi_union_desc = {
+ .bLength = sizeof(ecm_gsi_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_gsi_desc = {
+ .bLength = sizeof(ecm_gsi_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+ /* this descriptor actually adds value, surprise! */
+ /* .iMACAddress = DYNAMIC */
+ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+ .wNumberMCFilters = cpu_to_le16(0),
+ .bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_gsi_data_nop_intf = {
+ .bLength = sizeof(ecm_gsi_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_gsi_data_intf = {
+ .bLength = sizeof(ecm_gsi_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_gsi_fs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_fs_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+static struct usb_endpoint_descriptor ecm_gsi_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_gsi_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_gsi_hs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_hs_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_hs_out_desc,
+ NULL,
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_notify_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_in_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ecm_gsi_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ecm_gsi_ss_out_comp_desc = {
+ .bLength = sizeof(ecm_gsi_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ecm_gsi_ss_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_gsi_control_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_header_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_union_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_gsi_ss_notify_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_gsi_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_data_intf,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_in_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_out_desc,
+ (struct usb_descriptor_header *) &ecm_gsi_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string ecm_gsi_string_defs[] = {
+ [0].s = "CDC Ethernet Control Model (ECM)",
+ [1].s = NULL /* DYNAMIC */,
+ [2].s = "CDC Ethernet Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_gsi_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = ecm_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_gsi_strings[] = {
+ &ecm_gsi_string_table,
+ NULL,
+};
+
+/* qdss device descriptor */
+
+static struct usb_interface_descriptor qdss_gsi_data_intf_desc = {
+ .bLength = sizeof(qdss_gsi_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_gsi_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_gsi_data_ep_comp_desc = {
+ .bLength = sizeof(qdss_gsi_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *qdss_gsi_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *qdss_gsi_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &qdss_gsi_data_intf_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_ss_data_desc,
+ (struct usb_descriptor_header *) &qdss_gsi_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+static struct usb_string qdss_gsi_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings qdss_gsi_string_table = {
+ .language = 0x0409,
+ .strings = qdss_gsi_string_defs,
+};
+
+static struct usb_gadget_strings *qdss_gsi_strings[] = {
+ &qdss_gsi_string_table,
+ NULL,
+};
+#endif
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 590e056d3618..e01d20939449 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -44,18 +44,19 @@ struct f_hidg {
/* configuration */
unsigned char bInterfaceSubClass;
unsigned char bInterfaceProtocol;
+ unsigned char protocol;
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
/* recv report */
struct list_head completed_out_req;
- spinlock_t spinlock;
+ spinlock_t read_spinlock;
wait_queue_head_t read_queue;
unsigned int qlen;
/* send report */
- struct mutex lock;
+ spinlock_t write_spinlock;
bool write_pending;
wait_queue_head_t write_queue;
struct usb_request *req;
@@ -98,6 +99,60 @@ static struct hid_descriptor hidg_desc = {
/*.desc[0].wDescriptorLenght = DYNAMIC */
};
+/* Super-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
+ .bLength = sizeof(hidg_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+ .bLength = sizeof(hidg_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
+ NULL,
+};
+
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -204,20 +259,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND (!list_empty(&hidg->completed_out_req))
/* wait for at least one buffer to complete */
while (!READ_COND) {
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND))
return -ERESTARTSYS;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
}
/* pick the first one */
@@ -232,7 +287,7 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
req = list->req;
count = min_t(unsigned int, count, req->actual - list->pos);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
/* copy to user outside spinlock */
count -= copy_to_user(buffer, req->buf + list->pos, count);
@@ -254,9 +309,9 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
return ret;
}
} else {
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add(&list->list, &hidg->completed_out_req);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
}
@@ -267,13 +322,16 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+ unsigned long flags;
if (req->status != 0) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
@@ -281,18 +339,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
+ struct usb_request *req;
+ unsigned long flags;
ssize_t status = -ENOMEM;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
-
+try_again:
/* write queue */
while (!WRITE_COND) {
- mutex_unlock(&hidg->lock);
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
@@ -300,37 +360,59 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
}
+ hidg->write_pending = 1;
+ req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
- status = copy_from_user(hidg->req->buf, buffer, count);
+
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+ status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
- mutex_unlock(&hidg->lock);
- return -EINVAL;
+ status = -EINVAL;
+ goto release_write_pending;
}
- hidg->req->status = 0;
- hidg->req->zero = 0;
- hidg->req->length = count;
- hidg->req->complete = f_hidg_req_complete;
- hidg->req->context = hidg;
- hidg->write_pending = 1;
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
- status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+ /* when our function has been disabled by host */
+ if (!hidg->req) {
+ free_ep_req(hidg->in_ep, req);
+ /*
+ * TODO
+ * Should we fail with error here?
+ */
+ goto try_again;
+ }
+
+ req->status = 0;
+ req->zero = 0;
+ req->length = count;
+ req->complete = f_hidg_req_complete;
+ req->context = hidg;
+
+ status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- hidg->write_pending = 0;
- wake_up(&hidg->write_queue);
+ goto release_write_pending_unlocked;
} else {
status = count;
}
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
- mutex_unlock(&hidg->lock);
+ return status;
+release_write_pending:
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+release_write_pending_unlocked:
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
return status;
}
@@ -377,26 +459,42 @@ static int f_hidg_open(struct inode *inode, struct file *fd)
static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
struct f_hidg_req_list *req_list;
unsigned long flags;
- req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
- if (!req_list)
- return;
+ switch (req->status) {
+ case 0:
+ req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
+ if (!req_list) {
+ ERROR(cdev, "Unable to allocate mem for req_list\n");
+ goto free_req;
+ }
- req_list->req = req;
+ req_list->req = req;
- spin_lock_irqsave(&hidg->spinlock, flags);
- list_add_tail(&req_list->list, &hidg->completed_out_req);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ list_add_tail(&req_list->list, &hidg->completed_out_req);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
- wake_up(&hidg->read_queue);
+ wake_up(&hidg->read_queue);
+ break;
+ default:
+ ERROR(cdev, "Set report failed %d\n", req->status);
+ /* FALLTHROUGH */
+ case -ECONNABORTED: /* hardware forced ep reset */
+ case -ECONNRESET: /* request dequeued */
+ case -ESHUTDOWN: /* disconnect from host */
+free_req:
+ free_ep_req(ep, req);
+ return;
+ }
}
static int hidg_setup(struct usb_function *f,
@@ -430,7 +528,9 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
VDBG(cdev, "get_protocol\n");
- goto stall;
+ length = min_t(unsigned int, length, 1);
+ ((u8 *) req->buf)[0] = hidg->protocol;
+ goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
@@ -442,6 +542,17 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_PROTOCOL):
VDBG(cdev, "set_protocol\n");
+ if (value > HID_REPORT_PROTOCOL)
+ goto stall;
+ length = 0;
+ /*
+ * We assume that programs implementing the Boot protocol
+ * are also compatible with the Report Protocol
+ */
+ if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
+ hidg->protocol = value;
+ goto respond;
+ }
goto stall;
break;
@@ -507,19 +618,30 @@ static void hidg_disable(struct usb_function *f)
usb_ep_disable(hidg->in_ep);
usb_ep_disable(hidg->out_ep);
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
free_ep_req(hidg->out_ep, list->req);
list_del(&list->list);
kfree(list);
}
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+ free_ep_req(hidg->in_ep, hidg->req);
+ hidg->write_pending = 1;
+ }
+
+ hidg->req = NULL;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_request *req_in = NULL;
+ unsigned long flags;
int i, status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
@@ -540,6 +662,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
goto fail;
}
hidg->in_ep->driver_data = hidg;
+
+ req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
+ if (!req_in) {
+ status = -ENOMEM;
+ goto disable_ep_in;
+ }
}
@@ -551,12 +679,12 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
hidg->out_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
- goto fail;
+ goto free_req_in;
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
ERROR(cdev, "Enable OUT endpoint FAILED!\n");
- goto fail;
+ goto free_req_in;
}
hidg->out_ep->driver_data = hidg;
@@ -572,17 +700,37 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
- if (status)
+ if (status) {
ERROR(cdev, "%s queue req --> %d\n",
hidg->out_ep->name, status);
+ free_ep_req(hidg->out_ep, req);
+ }
} else {
- usb_ep_disable(hidg->out_ep);
status = -ENOMEM;
- goto fail;
+ goto disable_out_ep;
}
}
}
+ if (hidg->in_ep != NULL) {
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ hidg->req = req_in;
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
+ }
+ return 0;
+disable_out_ep:
+ usb_ep_disable(hidg->out_ep);
+free_req_in:
+ if (req_in)
+ free_ep_req(hidg->in_ep, req_in);
+
+disable_ep_in:
+ if (hidg->in_ep)
+ usb_ep_disable(hidg->in_ep);
+
fail:
return status;
}
@@ -631,21 +779,18 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
hidg->out_ep = ep;
- /* preallocate request and buffer */
- status = -ENOMEM;
- hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
- if (!hidg->req)
- goto fail;
-
- hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
- if (!hidg->req->buf)
- goto fail;
-
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg->protocol = HID_REPORT_PROTOCOL;
+ hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_in_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
/*
@@ -661,13 +806,20 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
+ hidg_ss_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ hidg_ss_out_ep_desc.bEndpointAddress =
+ hidg_fs_out_ep_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors);
if (status)
goto fail;
- mutex_init(&hidg->lock);
- spin_lock_init(&hidg->spinlock);
+ spin_lock_init(&hidg->write_spinlock);
+ hidg->write_pending = 1;
+ hidg->req = NULL;
+ spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
@@ -693,11 +845,8 @@ fail_free_descs:
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
- if (hidg->req != NULL) {
- kfree(hidg->req->buf);
- if (hidg->in_ep != NULL)
- usb_ep_free_request(hidg->in_ep, hidg->req);
- }
+ if (hidg->req != NULL)
+ free_ep_req(hidg->in_ep, hidg->req);
return status;
}
@@ -825,11 +974,21 @@ end:
CONFIGFS_ATTR(f_hid_opts_, report_desc);
+static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
+
+ return sprintf(page, "%d:%d\n", major, opts->minor);
+}
+
+CONFIGFS_ATTR_RO(f_hid_opts_, dev);
+
static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_subclass,
&f_hid_opts_attr_protocol,
&f_hid_opts_attr_report_length,
&f_hid_opts_attr_report_desc,
+ &f_hid_opts_attr_dev,
NULL,
};
@@ -853,7 +1012,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
mutex_lock(&hidg_ida_lock);
hidg_put_minor(opts->minor);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
mutex_unlock(&hidg_ida_lock);
@@ -879,7 +1038,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
mutex_lock(&hidg_ida_lock);
- if (idr_is_empty(&hidg_ida.idr)) {
+ if (ida_is_empty(&hidg_ida)) {
status = ghid_setup(NULL, HIDG_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -892,7 +1051,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
goto unlock;
}
@@ -924,11 +1083,6 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
device_destroy(hidg_class, MKDEV(major, hidg->minor));
cdev_del(&hidg->cdev);
- /* disable/free request and end point */
- usb_ep_disable(hidg->in_ep);
- kfree(hidg->req->buf);
- usb_ep_free_request(hidg->in_ep, hidg->req);
-
usb_free_all_descriptors(f);
}
@@ -980,6 +1134,20 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
}
DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
+
+static int __init afunc_init(void)
+{
+ return usb_function_register(&hidusb_func);
+}
+
+static void __exit afunc_exit(void)
+{
+ usb_function_unregister(&hidusb_func);
+}
+
+module_init(afunc_init);
+module_exit(afunc_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabien Chouteau");
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index ddc3aad886b7..e775f89053ea 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -308,9 +308,7 @@ static void disable_loopback(struct f_loopback *loop)
static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_loopback *loop = ep->driver_data;
-
- return alloc_ep_req(ep, len, loop->buflen);
+ return alloc_ep_req(ep, len);
}
static int alloc_requests(struct usb_composite_dev *cdev,
@@ -333,7 +331,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
if (!in_req)
goto fail;
- out_req = lb_alloc_ep_req(loop->out_ep, 0);
+ out_req = lb_alloc_ep_req(loop->out_ep, loop->buflen);
if (!out_req)
goto fail_in;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 25488c89308a..01e25ae0fb25 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -458,13 +458,23 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_buffhd *bh = req->context;
if (req->status || req->actual != req->length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, req->length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+ * is set to NULL during ep disable. So, add a check if that is case.
+ */
+ if (!common) {
+ bh->inreq_busy = 0;
+ bh->state = BUF_STATE_EMPTY;
+ return;
+ }
+
spin_lock(&common->lock);
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
@@ -477,15 +487,24 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
- dump_msg(common, "bulk-out", req->buf, req->actual);
if (req->status || req->actual != bh->bulk_out_intended_length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, bh->bulk_out_intended_length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+ * is set to NULL during ep disable. So, add a check if that is case.
+ */
+ if (!common) {
+ bh->outreq_busy = 0;
+ return;
+ }
+
+ dump_msg(common, "bulk-out", req->buf, req->actual);
spin_lock(&common->lock);
bh->outreq_busy = 0;
bh->state = BUF_STATE_FULL;
@@ -2274,6 +2293,8 @@ reset:
fsg->bulk_out_enabled = 0;
}
+ /* allow usb LPM after eps are disabled */
+ usb_gadget_autopm_put_async(common->gadget);
common->fsg = NULL;
wake_up(&common->fsg_wait);
}
@@ -2338,6 +2359,10 @@ static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
fsg->common->new_fsg = fsg;
+
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_async(fsg->common->gadget);
+
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
}
@@ -2460,9 +2485,13 @@ static void handle_exception(struct fsg_common *common)
&common->fsg->atomic_bitflags))
usb_ep_clear_halt(common->fsg->bulk_in);
- if (common->ep0_req_tag == exception_req_tag)
- ep0_queue(common); /* Complete the status stage */
-
+ if (common->ep0_req_tag == exception_req_tag) {
+ /* Complete the status stage */
+ if (common->cdev)
+ usb_composite_setup_continue(common->cdev);
+ else
+ ep0_queue(common);
+ }
/*
* Technically this should go here, but it would only be
* a waste of time. Ditto for the INTERFACE_CHANGE and
@@ -2476,8 +2505,14 @@ static void handle_exception(struct fsg_common *common)
case FSG_STATE_CONFIG_CHANGE:
do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ if (common->new_fsg) {
+ /*
+ * make sure delayed_status flag updated when set_alt
+ * returned.
+ */
+ msleep(200);
usb_composite_setup_continue(common->cdev);
+ }
break;
case FSG_STATE_EXIT:
diff --git a/drivers/usb/gadget/function/f_mbim.c b/drivers/usb/gadget/function/f_mbim.c
new file mode 100644
index 000000000000..e7c3278f66d4
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mbim.c
@@ -0,0 +1,2147 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <linux/usb/cdc.h>
+
+#include <linux/usb/composite.h>
+#include <linux/platform_device.h>
+
+#include <linux/spinlock.h>
+
+/*
+ * This function is a "Mobile Broadband Interface Model" (MBIM) link.
+ * MBIM is intended to be used with high-speed network attachments.
+ *
+ * Note that MBIM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+#define MBIM_BULK_BUFFER_SIZE 4096
+#define MAX_CTRL_PKT_SIZE 4096
+
+enum mbim_peripheral_ep_type {
+ MBIM_DATA_EP_TYPE_RESERVED = 0x0,
+ MBIM_DATA_EP_TYPE_HSIC = 0x1,
+ MBIM_DATA_EP_TYPE_HSUSB = 0x2,
+ MBIM_DATA_EP_TYPE_PCIE = 0x3,
+ MBIM_DATA_EP_TYPE_EMBEDDED = 0x4,
+ MBIM_DATA_EP_TYPE_BAM_DMUX = 0x5,
+};
+
+/*
+ * Peripheral-side endpoint description handed to userspace via MBIM_EP_LOOKUP.
+ * NOTE(review): the field type is `enum peripheral_ep_type`, not the
+ * `enum mbim_peripheral_ep_type` declared just above — confirm the former is
+ * defined in a shared header, otherwise this looks like a rename leftover.
+ */
+struct mbim_peripheral_ep_info {
+ enum peripheral_ep_type ep_type;
+ u32 peripheral_iface_id;
+};
+
+struct mbim_ipa_ep_pair {
+ u32 cons_pipe_num;
+ u32 prod_pipe_num;
+};
+
+struct mbim_ipa_ep_info {
+ struct mbim_peripheral_ep_info ph_ep_info;
+ struct mbim_ipa_ep_pair ipa_ep_pair;
+};
+
+#define MBIM_IOCTL_MAGIC 'o'
+#define MBIM_GET_NTB_SIZE _IOR(MBIM_IOCTL_MAGIC, 2, u32)
+#define MBIM_GET_DATAGRAM_COUNT _IOR(MBIM_IOCTL_MAGIC, 3, u16)
+
+#define MBIM_EP_LOOKUP _IOR(MBIM_IOCTL_MAGIC, 4, struct mbim_ipa_ep_info)
+
+
+#define NR_MBIM_PORTS 1
+#define MBIM_DEFAULT_PORT 0
+
+/* ID for Microsoft OS String */
+#define MBIM_OS_STRING_ID 0xEE
+
+struct ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+struct mbim_ep_descs {
+ struct usb_endpoint_descriptor *in;
+ struct usb_endpoint_descriptor *out;
+ struct usb_endpoint_descriptor *notify;
+};
+
+struct mbim_notify_port {
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ atomic_t notify_count;
+};
+
+enum mbim_notify_state {
+ MBIM_NOTIFY_NONE,
+ MBIM_NOTIFY_CONNECT,
+ MBIM_NOTIFY_SPEED,
+ MBIM_NOTIFY_RESPONSE_AVAILABLE,
+};
+
+/* Per-instance state for one MBIM function. */
+struct f_mbim {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+
+ atomic_t online; /* nonzero while configured by the host */
+
+ /* single-user guards for the character-device interface */
+ atomic_t open_excl;
+ atomic_t ioctl_excl;
+ atomic_t read_excl;
+ atomic_t write_excl;
+
+ wait_queue_head_t read_wq;
+
+ enum transport_type xport;
+ u8 port_num;
+ struct data_port bam_port;
+ struct mbim_notify_port not_port;
+
+ /* endpoint descriptors captured per speed — assumed filled at bind;
+ * TODO confirm against mbim_bind (not visible here) */
+ struct mbim_ep_descs fs;
+ struct mbim_ep_descs hs;
+
+ u8 ctrl_id, data_id; /* control / data interface numbers */
+ bool data_interface_up;
+
+ spinlock_t lock; /* presumably guards cpkt_req_q/cpkt_resp_q — verify */
+
+ struct list_head cpkt_req_q;
+ struct list_head cpkt_resp_q;
+
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+
+ atomic_t error;
+ unsigned int cpkt_drop_cnt; /* dropped control-packet counter */
+ bool remote_wakeup_enabled;
+};
+
+struct mbim_ntb_input_size {
+ u32 ntb_input_size;
+ u16 ntb_max_datagrams;
+ u16 reserved;
+};
+
+/* temporary variable used between mbim_open() and mbim_gadget_bind() */
+static struct f_mbim *_mbim_dev;
+
+static unsigned int nr_mbim_ports;
+
+static struct mbim_ports {
+ struct f_mbim *port;
+ unsigned port_num;
+} mbim_ports[NR_MBIM_PORTS];
+
+/* Map the embedded usb_function back to its containing f_mbim instance. */
+static inline struct f_mbim *func_to_mbim(struct usb_function *f)
+{
+ return container_of(f, struct f_mbim, function);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define MBIM_NTB_DEFAULT_IN_SIZE (0x4000)
+#define MBIM_NTB_OUT_SIZE (0x1000)
+#define MBIM_NDP_IN_DIVISOR (0x4)
+
+#define NTB_DEFAULT_IN_SIZE_IPA (0x4000)
+#define MBIM_NTB_OUT_SIZE_IPA (0x4000)
+
+#define MBIM_FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED
+
+static struct usb_cdc_ncm_ntb_parameters mbim_ntb_parameters = {
+ .wLength = sizeof(mbim_ntb_parameters),
+ .bmNtbFormatsSupported = cpu_to_le16(MBIM_FORMATS_SUPPORTED),
+ .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE),
+ .wNdpInDivisor = cpu_to_le16(MBIM_NDP_IN_DIVISOR),
+ .wNdpInPayloadRemainder = cpu_to_le16(0),
+ .wNdpInAlignment = cpu_to_le16(4),
+
+ .dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE),
+ .wNdpOutDivisor = cpu_to_le16(4),
+ .wNdpOutPayloadRemainder = cpu_to_le16(0),
+ .wNdpOutAlignment = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = 0,
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor mbim_iad_desc = {
+ .bLength = sizeof(mbim_iad_desc),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = 2,
+ .bFunctionSubClass = 0x0e,
+ .bFunctionProtocol = 0,
+ /* .iFunction = DYNAMIC */
+};
+
+/* interface descriptor: */
+static struct usb_interface_descriptor mbim_control_intf = {
+ .bLength = sizeof(mbim_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0x02,
+ .bInterfaceSubClass = 0x0e,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc mbim_header_desc = {
+ .bLength = sizeof(mbim_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc mbim_union_desc = {
+ .bLength = sizeof(mbim_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_mbim_desc mbim_desc = {
+ .bLength = sizeof(mbim_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_TYPE,
+
+ .bcdMBIMVersion = cpu_to_le16(0x0100),
+
+ .wMaxControlMessage = cpu_to_le16(0x1000),
+ .bNumberFilters = 0x20,
+ .bMaxFilterSize = 0x80,
+ .wMaxSegmentSize = cpu_to_le16(0x800),
+ .bmNetworkCapabilities = 0x20,
+};
+
+static struct usb_cdc_mbim_extended_desc ext_mbb_desc = {
+ .bLength = sizeof(ext_mbb_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MBIM_EXTENDED_TYPE,
+
+ .bcdMBIMExtendedVersion = cpu_to_le16(0x0100),
+ .bMaxOutstandingCommandMessages = 64,
+ .wMTU = cpu_to_le16(1500),
+};
+
+/* the default data interface has no endpoints ... */
+static struct usb_interface_descriptor mbim_data_nop_intf = {
+ .bLength = sizeof(mbim_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+static struct usb_interface_descriptor mbim_data_intf = {
+ .bLength = sizeof(mbim_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = 0x0a,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0x02,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+/* Full-speed interrupt IN endpoint used for CDC notifications. */
+static struct usb_endpoint_descriptor fs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /* multiply before the cpu->le16 swap; 4*cpu_to_le16() is wrong on BE */
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *mbim_fs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &fs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &fs_mbim_in_desc,
+ (struct usb_descriptor_header *) &fs_mbim_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+/* High-speed interrupt IN endpoint used for CDC notifications. */
+static struct usb_endpoint_descriptor hs_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /* multiply before the cpu->le16 swap; 4*cpu_to_le16() is wrong on BE */
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor hs_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *mbim_hs_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &hs_mbim_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &hs_mbim_in_desc,
+ (struct usb_descriptor_header *) &hs_mbim_out_desc,
+ NULL,
+};
+
+/* Super Speed Support */
+/* SuperSpeed interrupt IN endpoint used for CDC notifications. */
+static struct usb_endpoint_descriptor ss_mbim_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /* multiply before the cpu->le16 swap; 4*cpu_to_le16() is wrong on BE */
+ .wMaxPacketSize = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+/* SuperSpeed companion descriptor for the notify endpoint. */
+static struct usb_ss_ep_comp_descriptor ss_mbim_notify_comp_desc = {
+ .bLength = sizeof(ss_mbim_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* multiply before the cpu->le16 swap; 4*cpu_to_le16() is wrong on BE */
+ .wBytesPerInterval = cpu_to_le16(4 * NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_mbim_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_in_comp_desc = {
+ .bLength = sizeof(ss_mbim_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ss_mbim_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_mbim_out_comp_desc = {
+ .bLength = sizeof(ss_mbim_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *mbim_ss_function[] = {
+ (struct usb_descriptor_header *) &mbim_iad_desc,
+ /* MBIM control descriptors */
+ (struct usb_descriptor_header *) &mbim_control_intf,
+ (struct usb_descriptor_header *) &mbim_header_desc,
+ (struct usb_descriptor_header *) &mbim_union_desc,
+ (struct usb_descriptor_header *) &mbim_desc,
+ (struct usb_descriptor_header *) &ext_mbb_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_desc,
+ (struct usb_descriptor_header *) &ss_mbim_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &mbim_data_nop_intf,
+ (struct usb_descriptor_header *) &mbim_data_intf,
+ (struct usb_descriptor_header *) &ss_mbim_in_desc,
+ (struct usb_descriptor_header *) &ss_mbim_in_comp_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_desc,
+ (struct usb_descriptor_header *) &ss_mbim_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+#define STRING_CTRL_IDX 0
+#define STRING_DATA_IDX 1
+
+static struct usb_string mbim_string_defs[] = {
+ [STRING_CTRL_IDX].s = "MBIM Control",
+ [STRING_DATA_IDX].s = "MBIM Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings mbim_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = mbim_string_defs,
+};
+
+static struct usb_gadget_strings *mbim_strings[] = {
+ &mbim_string_table,
+ NULL,
+};
+
+/* Microsoft OS Descriptors */
+
+/*
+ * We specify our own bMS_VendorCode byte which Windows will use
+ * as the bRequest value in subsequent device get requests.
+ */
+#define MBIM_VENDOR_CODE 0xA5
+
+/* Microsoft OS String */
+/* Microsoft OS String descriptor (string index 0xEE), 18 bytes total. */
+static u8 mbim_os_string[] = {
+ 18, /* sizeof(mbim_os_string) — bLength */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ MBIM_VENDOR_CODE,
+ /* padding */
+ 0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mbim_ext_config_desc_header {
+ __le32 dwLength;
+ __u16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mbim_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
/* Microsoft Extended Configuration Descriptor */
static struct {
	struct mbim_ext_config_desc_header	header;
	struct mbim_ext_config_desc_function	function;
} mbim_ext_config_desc = {
	.header = {
		.dwLength = cpu_to_le32(sizeof(mbim_ext_config_desc)),
		.bcdVersion = cpu_to_le16(0x0100),
		.wIndex = cpu_to_le16(4),
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' },
		/* .subCompatibleID = DYNAMIC - set in mbim_bind() when the
		 * function lives in a configuration other than the first
		 */
	},
};
+
+static inline int mbim_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+
+ atomic_dec(excl);
+ return -EBUSY;
+}
+
/* Release ownership taken by a successful mbim_lock() */
static inline void mbim_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
+
+static struct ctrl_pkt *mbim_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+ struct ctrl_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct ctrl_pkt), flags);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+
+ pkt->buf = kmalloc(len, flags);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->len = len;
+
+ return pkt;
+}
+
+static void mbim_free_ctrl_pkt(struct ctrl_pkt *pkt)
+{
+ if (pkt) {
+ kfree(pkt->buf);
+ kfree(pkt);
+ }
+}
+
+static struct usb_request *mbim_alloc_req(struct usb_ep *ep, int buffer_size,
+ size_t extra_buf)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size + extra_buf, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+void fmbim_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* ---------------------------- BAM INTERFACE ----------------------------- */
+
+static int mbim_bam_setup(int no_ports)
+{
+ int ret;
+
+ pr_info("no_ports:%d\n", no_ports);
+
+ ret = bam_data_setup(USB_FUNC_MBIM, no_ports);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ pr_info("Initialized %d ports\n", no_ports);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------*/
+
/* Restore connection-scoped state to its defaults and mark us offline */
static inline void mbim_reset_values(struct f_mbim *mbim)
{
	mbim->ntb_input_size = MBIM_NTB_DEFAULT_IN_SIZE;

	atomic_set(&mbim->online, 0);
}
+
+static void mbim_reset_function_queue(struct f_mbim *dev)
+{
+ struct ctrl_pkt *cpkt = NULL;
+
+ pr_debug("Queue empty packet for QBI\n");
+
+ spin_lock(&dev->lock);
+
+ cpkt = mbim_alloc_ctrl_pkt(0, GFP_ATOMIC);
+ if (!cpkt) {
+ pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+ spin_unlock(&dev->lock);
+ return;
+ }
+
+ list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+ spin_unlock(&dev->lock);
+
+ pr_debug("%s: Wake up read queue\n", __func__);
+ wake_up(&dev->read_wq);
+}
+
/* ep0 completion for USB_CDC_RESET_FUNCTION: signal the reader */
static void fmbim_reset_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_mbim *dev = req->context;

	mbim_reset_function_queue(dev);
}
+
+static void mbim_clear_queues(struct f_mbim *mbim)
+{
+ struct ctrl_pkt *cpkt = NULL;
+ struct list_head *act, *tmp;
+
+ spin_lock(&mbim->lock);
+ list_for_each_safe(act, tmp, &mbim->cpkt_req_q) {
+ cpkt = list_entry(act, struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ mbim_free_ctrl_pkt(cpkt);
+ }
+ list_for_each_safe(act, tmp, &mbim->cpkt_resp_q) {
+ cpkt = list_entry(act, struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ mbim_free_ctrl_pkt(cpkt);
+ }
+ spin_unlock(&mbim->lock);
+}
+
/*
 * Context: mbim->lock held
 *
 * Drive the interrupt (notify) endpoint state machine: either re-queue
 * the RESPONSE_AVAILABLE notification while responses are pending, or
 * build and queue a fresh notification.  The lock is dropped around
 * usb_func_ep_queue() because the completion callback (which takes the
 * same lock) can run synchronously from inside the queue call.
 */
static void mbim_do_notify(struct f_mbim *mbim)
{
	struct usb_request *req = mbim->not_port.notify_req;
	struct usb_cdc_notification *event;
	int status;

	pr_debug("notify_state: %d\n", mbim->not_port.notify_state);

	/* notify_req is NULL while the request is in flight elsewhere */
	if (!req)
		return;

	event = req->buf;

	switch (mbim->not_port.notify_state) {

	case MBIM_NOTIFY_NONE:
		if (atomic_read(&mbim->not_port.notify_count) > 0)
			pr_err("Pending notifications in MBIM_NOTIFY_NONE\n");
		else
			pr_debug("No pending notifications\n");

		return;

	case MBIM_NOTIFY_RESPONSE_AVAILABLE:
		pr_debug("Notification %02x sent\n", event->bNotificationType);

		/* nothing outstanding: stop re-queuing */
		if (atomic_read(&mbim->not_port.notify_count) <= 0) {
			pr_debug("notify_response_available: done\n");
			return;
		}

		spin_unlock(&mbim->lock);
		status = usb_func_ep_queue(&mbim->function,
			   mbim->not_port.notify,
			   req, GFP_ATOMIC);
		spin_lock(&mbim->lock);
		if (status) {
			atomic_dec(&mbim->not_port.notify_count);
			pr_err("Queue notify request failed, err: %d\n",
				status);
		}

		return;
	}

	/*
	 * Reached only for notify_state values not handled above:
	 * (re)stamp the notification header and queue it.
	 */
	event->bmRequestType = 0xA1;
	event->wIndex = cpu_to_le16(mbim->ctrl_id);

	/*
	 * In double buffering if there is a space in FIFO,
	 * completion callback can be called right after the call,
	 * so unlocking
	 */
	atomic_inc(&mbim->not_port.notify_count);
	pr_debug("queue request: notify_count = %d\n",
		atomic_read(&mbim->not_port.notify_count));
	spin_unlock(&mbim->lock);
	status = usb_func_ep_queue(&mbim->function, mbim->not_port.notify, req,
			GFP_ATOMIC);
	spin_lock(&mbim->lock);
	if (status) {
		atomic_dec(&mbim->not_port.notify_count);
		pr_err("usb_func_ep_queue failed, err: %d\n", status);
	}
}
+
/*
 * Completion callback for the interrupt (notify) endpoint.
 * Accounts the finished notification, handles disconnect-style errors,
 * and kicks mbim_do_notify() to send whatever is pending next.
 */
static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_mbim *mbim = req->context;
	struct usb_cdc_notification *event = req->buf;

	pr_debug("dev:%pK\n", mbim);

	spin_lock(&mbim->lock);
	switch (req->status) {
	case 0:
		atomic_dec(&mbim->not_port.notify_count);
		pr_debug("notify_count = %d\n",
			atomic_read(&mbim->not_port.notify_count));
		break;

	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		mbim->not_port.notify_state = MBIM_NOTIFY_NONE;
		atomic_set(&mbim->not_port.notify_count, 0);
		pr_info("ESHUTDOWN/ECONNRESET, connection gone\n");
		/* drop the lock: the cleanup helpers take it themselves */
		spin_unlock(&mbim->lock);
		mbim_clear_queues(mbim);
		mbim_reset_function_queue(mbim);
		spin_lock(&mbim->lock);
		break;
	default:
		pr_err("Unknown event %02x --> %d\n",
			event->bNotificationType, req->status);
		break;
	}

	/* send the next pending notification, if any */
	mbim_do_notify(mbim);
	spin_unlock(&mbim->lock);

	pr_debug("dev:%pK Exit\n", mbim);
}
+
+static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* now for SET_NTB_INPUT_SIZE only */
+ unsigned in_size = 0;
+ struct usb_function *f = req->context;
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct mbim_ntb_input_size *ntb = NULL;
+
+ pr_debug("dev:%pK\n", mbim);
+
+ req->context = NULL;
+ if (req->status || req->actual != req->length) {
+ pr_err("Bad control-OUT transfer\n");
+ goto invalid;
+ }
+
+ if (req->length == 4) {
+ in_size = get_unaligned_le32(req->buf);
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+ pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+ goto invalid;
+ }
+ } else if (req->length == 8) {
+ ntb = (struct mbim_ntb_input_size *)req->buf;
+ in_size = get_unaligned_le32(&(ntb->ntb_input_size));
+ if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+ in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) {
+ pr_err("Illegal INPUT SIZE (%d) from host\n", in_size);
+ goto invalid;
+ }
+ mbim->ntb_max_datagrams =
+ get_unaligned_le16(&(ntb->ntb_max_datagrams));
+ } else {
+ pr_err("Illegal NTB length %d\n", in_size);
+ goto invalid;
+ }
+
+ pr_debug("Set NTB INPUT SIZE %d\n", in_size);
+
+ mbim->ntb_input_size = in_size;
+ return;
+
+invalid:
+ usb_ep_set_halt(ep);
+
+ pr_err("dev:%pK Failed\n", mbim);
+}
+
+static void
+fmbim_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_mbim *dev = req->context;
+ struct ctrl_pkt *cpkt = NULL;
+ int len = req->actual;
+ static bool first_command_sent;
+
+ if (!dev) {
+ pr_err("mbim dev is null\n");
+ return;
+ }
+
+ if (req->status < 0) {
+ pr_err("mbim command error %d\n", req->status);
+ return;
+ }
+
+ /*
+ * Wait for user to process prev MBIM_OPEN cmd before handling new one.
+ * However don't drop first command during bootup as file may not be
+ * opened by now. Queue the command in this case.
+ */
+ if (!atomic_read(&dev->open_excl) && first_command_sent) {
+ pr_err("mbim not opened yet, dropping cmd pkt = %d\n", len);
+ return;
+ }
+ if (!first_command_sent)
+ first_command_sent = true;
+
+ pr_debug("dev:%pK port#%d\n", dev, dev->port_num);
+
+ cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
+ if (!cpkt) {
+ pr_err("Unable to allocate ctrl pkt\n");
+ return;
+ }
+
+ pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
+ memcpy(cpkt->buf, req->buf, len);
+
+ spin_lock(&dev->lock);
+
+ list_add_tail(&cpkt->list, &dev->cpkt_req_q);
+ spin_unlock(&dev->lock);
+
+ /* wakeup read thread */
+ pr_debug("Wake up read queue\n");
+ wake_up(&dev->read_wq);
+}
+
+static int
+mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+ struct usb_request *req = cdev->req;
+ struct ctrl_pkt *cpkt = NULL;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /*
+ * composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+
+ if (!atomic_read(&mbim->online)) {
+ pr_warn("usb cable is not connected\n");
+ return -ENOTCONN;
+ }
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_RESET_FUNCTION:
+
+ pr_debug("USB_CDC_RESET_FUNCTION\n");
+ value = 0;
+ req->complete = fmbim_reset_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+
+ pr_debug("USB_CDC_SEND_ENCAPSULATED_COMMAND\n");
+
+ if (w_length > req->length) {
+ pr_debug("w_length > req->length: %d > %d\n",
+ w_length, req->length);
+ }
+ value = w_length;
+ req->complete = fmbim_cmd_complete;
+ req->context = mbim;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+
+ pr_debug("USB_CDC_GET_ENCAPSULATED_RESPONSE\n");
+
+ if (w_value) {
+ pr_err("w_length > 0: %d\n", w_length);
+ break;
+ }
+
+ pr_debug("req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ spin_lock(&mbim->lock);
+ if (list_empty(&mbim->cpkt_resp_q)) {
+ pr_err("ctrl resp queue empty\n");
+ spin_unlock(&mbim->lock);
+ break;
+ }
+
+ cpkt = list_first_entry(&mbim->cpkt_resp_q,
+ struct ctrl_pkt, list);
+ list_del(&cpkt->list);
+ spin_unlock(&mbim->lock);
+
+ value = min_t(unsigned, w_length, cpkt->len);
+ memcpy(req->buf, cpkt->buf, value);
+ mbim_free_ctrl_pkt(cpkt);
+
+ pr_debug("copied encapsulated_response %d bytes\n",
+ value);
+
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_PARAMETERS:
+
+ pr_debug("USB_CDC_GET_NTB_PARAMETERS\n");
+
+ if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ value = w_length > sizeof(mbim_ntb_parameters) ?
+ sizeof(mbim_ntb_parameters) : w_length;
+ memcpy(req->buf, &mbim_ntb_parameters, value);
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_GET_NTB_INPUT_SIZE\n");
+
+ if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ put_unaligned_le32(mbim->ntb_input_size, req->buf);
+ value = 4;
+ pr_debug("Reply to host INPUT SIZE %d\n",
+ mbim->ntb_input_size);
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_NTB_INPUT_SIZE:
+
+ pr_debug("USB_CDC_SET_NTB_INPUT_SIZE\n");
+
+ if (w_length != 4 && w_length != 8) {
+ pr_err("wrong NTB length %d\n", w_length);
+ break;
+ }
+
+ if (w_value != 0 || w_index != mbim->ctrl_id)
+ break;
+
+ req->complete = mbim_ep0out_complete;
+ req->length = w_length;
+ req->context = f;
+
+ value = req->length;
+ break;
+
+ /* optional in mbim descriptor: */
+ /* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
+ /* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */
+
+ default:
+ pr_err("invalid control req: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("control request: %02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+
+ if (value < 0) {
+ pr_err("queueing req failed: %02x.%02x, err %d\n",
+ ctrl->bRequestType,
+ ctrl->bRequest, value);
+ }
+ } else {
+ pr_err("ctrl req err %d: %02x.%02x v%04x i%04x l%d\n",
+ value, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*
+ * This function handles the Microsoft-specific OS descriptor control
+ * requests that are issued by Windows host drivers to determine the
+ * configuration containing the MBIM function.
+ *
+ * Unlike mbim_setup() this function handles two specific device requests,
+ * and only when a configuration has not yet been selected.
+ */
+static int mbim_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* only respond to OS descriptors when no configuration selected */
+ if (cdev->config || !mbim_ext_config_desc.function.subCompatibleID[0])
+ return value;
+
+ pr_debug("%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MSFT OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MBIM_OS_STRING_ID) {
+
+ value = (w_length < sizeof(mbim_os_string) ?
+ w_length : sizeof(mbim_os_string));
+ memcpy(cdev->req->buf, mbim_os_string, value);
+
+ } else if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+ && ctrl->bRequest == MBIM_VENDOR_CODE && w_index == 4) {
+
+ /* Handle Extended OS descriptor */
+ value = (w_length < sizeof(mbim_ext_config_desc) ?
+ w_length : sizeof(mbim_ext_config_desc));
+ memcpy(cdev->req->buf, &mbim_ext_config_desc, value);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ pr_err("response queue error: %d\n", rc);
+ }
+ return value;
+}
+
+static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+ struct usb_composite_dev *cdev = mbim->cdev;
+ int ret = 0;
+
+ pr_debug("intf=%u, alt=%u\n", intf, alt);
+
+ /* Control interface has only altsetting 0 */
+ if (intf == mbim->ctrl_id) {
+
+ pr_info("CONTROL_INTERFACE\n");
+
+ if (alt != 0)
+ goto fail;
+
+ if (mbim->not_port.notify->driver_data) {
+ pr_info("reset mbim control %d\n", intf);
+ usb_ep_disable(mbim->not_port.notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->not_port.notify);
+ if (ret) {
+ mbim->not_port.notify->desc = NULL;
+ pr_err("Failed configuring notify ep %s: err %d\n",
+ mbim->not_port.notify->name, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(mbim->not_port.notify);
+ if (ret) {
+ pr_err("usb ep#%s enable failed, err#%d\n",
+ mbim->not_port.notify->name, ret);
+ return ret;
+ }
+ mbim->not_port.notify->driver_data = mbim;
+
+ /* Data interface has two altsettings, 0 and 1 */
+ } else if (intf == mbim->data_id) {
+
+ pr_info("DATA_INTERFACE id %d, data interface status %d\n",
+ mbim->data_id, mbim->data_interface_up);
+
+ if (alt > 1)
+ goto fail;
+
+ if (mbim->data_interface_up == alt)
+ return 0;
+
+ if (mbim->bam_port.in->driver_data) {
+ pr_info("reset mbim, alt-%d\n", alt);
+ mbim_reset_values(mbim);
+ }
+
+ if (alt == 0) {
+ /*
+ * perform bam data disconnect handshake upon usb
+ * disconnect
+ */
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ gbam_mbim_disconnect();
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ bam_data_disconnect(&mbim->bam_port,
+ USB_FUNC_MBIM, mbim->port_num);
+ if (!gadget_is_dwc3(cdev->gadget))
+ break;
+
+ if (msm_ep_unconfig(mbim->bam_port.in) ||
+ msm_ep_unconfig(mbim->bam_port.out)) {
+ pr_err("ep_unconfig failed\n");
+ goto fail;
+ }
+ default:
+ pr_err("unknown transport\n");
+ }
+ goto notify_ready;
+ }
+
+ pr_info("Alt set 1, initialize ports\n");
+
+ /*
+ * CDC Network only sends data in non-default altsettings.
+ * Changing altsettings resets filters, statistics, etc.
+ */
+ pr_info("Choose endpoints\n");
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->bam_port.in);
+ if (ret) {
+ mbim->bam_port.in->desc = NULL;
+ pr_err("IN ep %s failed: %d\n",
+ mbim->bam_port.in->name, ret);
+ return ret;
+ }
+
+ pr_info("Set mbim port in_desc = 0x%pK\n",
+ mbim->bam_port.in->desc);
+
+ ret = config_ep_by_speed(cdev->gadget, f,
+ mbim->bam_port.out);
+ if (ret) {
+ mbim->bam_port.out->desc = NULL;
+ pr_err("OUT ep %s failed: %d\n",
+ mbim->bam_port.out->name, ret);
+ return ret;
+ }
+
+ pr_info("Set mbim port out_desc = 0x%pK\n",
+ mbim->bam_port.out->desc);
+
+ pr_debug("Activate mbim\n");
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM_DMUX:
+ gbam_mbim_connect(cdev->gadget, mbim->bam_port.in,
+ mbim->bam_port.out);
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ ret = bam_data_connect(&mbim->bam_port,
+ mbim->xport, mbim->port_num,
+ USB_FUNC_MBIM);
+ if (ret) {
+ pr_err("bam_data_setup failed:err:%d\n",
+ ret);
+ goto fail;
+ }
+ break;
+ default:
+ pr_err("unknown transport\n");
+ }
+notify_ready:
+ mbim->data_interface_up = alt;
+ spin_lock(&mbim->lock);
+ mbim->not_port.notify_state = MBIM_NOTIFY_RESPONSE_AVAILABLE;
+ spin_unlock(&mbim->lock);
+ } else {
+ goto fail;
+ }
+
+ atomic_set(&mbim->online, 1);
+
+ pr_info("SET DEVICE ONLINE\n");
+
+ return 0;
+
+fail:
+ pr_err("ERROR: Illegal Interface\n");
+ return -EINVAL;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * this MBIM function *MUST* implement a get_alt() method.
+ */
+static int mbim_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ if (intf == mbim->ctrl_id)
+ return 0;
+ else if (intf == mbim->data_id)
+ return mbim->data_interface_up;
+
+ return -EINVAL;
+}
+
/*
 * disable() handler: mark the function offline, tear down the control
 * (notify) path, flush all queued control packets, then disconnect the
 * data path if altsetting 1 had been selected.
 */
static void mbim_disable(struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);
	struct usb_composite_dev *cdev = mbim->cdev;

	pr_info("SET DEVICE OFFLINE\n");
	atomic_set(&mbim->online, 0);
	mbim->remote_wakeup_enabled = 0;

	/* Disable Control Path */
	if (mbim->not_port.notify->driver_data) {
		usb_ep_disable(mbim->not_port.notify);
		mbim->not_port.notify->driver_data = NULL;
	}
	atomic_set(&mbim->not_port.notify_count, 0);
	mbim->not_port.notify_state = MBIM_NOTIFY_NONE;

	/* drop pending packets and signal a reset to the reader */
	mbim_clear_queues(mbim);
	mbim_reset_function_queue(mbim);

	/* Disable Data Path - only if it was initialized already (alt=1) */
	if (!mbim->data_interface_up) {
		pr_debug("MBIM data interface is not opened. Returning\n");
		return;
	}

	switch (mbim->xport) {
	case USB_GADGET_XPORT_BAM_DMUX:
		gbam_mbim_disconnect();
		break;
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		/* dwc3 controllers need the MSM eps unconfigured first */
		if (gadget_is_dwc3(cdev->gadget)) {
			msm_ep_unconfig(mbim->bam_port.out);
			msm_ep_unconfig(mbim->bam_port.in);
		}
		bam_data_disconnect(&mbim->bam_port, USB_FUNC_MBIM,
				mbim->port_num);
		break;
	default:
		pr_err("unknown transport\n");
	}

	mbim->data_interface_up = false;
	pr_info("mbim deactivated\n");
}
+
#define MBIM_ACTIVE_PORT	0

/*
 * Bus-suspend handler: record the effective remote-wakeup policy and
 * hand the data path to bam_data_suspend().  Skipped entirely for the
 * BAM_DMUX transport and when a USB3 Function Suspend already parked
 * this function.
 */
static void mbim_suspend(struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);

	pr_info("mbim suspended\n");

	pr_debug("%s(): remote_wakeup:%d\n:", __func__,
			mbim->cdev->gadget->remote_wakeup);

	if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
		return;

	/* If the function is in Function Suspend state, avoid suspending the
	 * MBIM function again.
	 */
	if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
		f->func_is_suspended)
		return;

	/* SS uses per-function wakeup; HS/FS uses the device-level flag */
	if (mbim->cdev->gadget->speed == USB_SPEED_SUPER)
		mbim->remote_wakeup_enabled = f->func_wakeup_allowed;
	else
		mbim->remote_wakeup_enabled = mbim->cdev->gadget->remote_wakeup;

	/* MBIM data interface is up only when alt setting is set to 1. */
	if (!mbim->data_interface_up) {
		pr_debug("MBIM data interface is not opened. Returning\n");
		return;
	}

	/* without remote wakeup the host cannot resume us: go offline */
	if (!mbim->remote_wakeup_enabled)
		atomic_set(&mbim->online, 0);

	bam_data_suspend(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
			mbim->remote_wakeup_enabled);
}
+
/*
 * Bus-resume handler: re-arm the notify path and resume the data path.
 * Mirrors mbim_suspend(); no-op for BAM_DMUX and while the function is
 * held in USB3 Function Suspend (a Function Resume will arrive then).
 */
static void mbim_resume(struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);

	pr_info("mbim resumed\n");

	if (mbim->xport == USB_GADGET_XPORT_BAM_DMUX)
		return;

	/*
	 * If the function is in USB3 Function Suspend state, resume is
	 * canceled. In this case resume is done by a Function Resume request.
	 */
	if ((mbim->cdev->gadget->speed == USB_SPEED_SUPER) &&
		f->func_is_suspended)
		return;

	/* resume control path by queuing notify req */
	spin_lock(&mbim->lock);
	mbim_do_notify(mbim);
	spin_unlock(&mbim->lock);

	/* MBIM data interface is up only when alt setting is set to 1. */
	if (!mbim->data_interface_up) {
		pr_debug("MBIM data interface is not opened. Returning\n");
		return;
	}

	/* undo the offline transition made by mbim_suspend() */
	if (!mbim->remote_wakeup_enabled)
		atomic_set(&mbim->online, 1);

	bam_data_resume(&mbim->bam_port, mbim->port_num, USB_FUNC_MBIM,
			mbim->remote_wakeup_enabled);
}
+
+static int mbim_func_suspend(struct usb_function *f, unsigned char options)
+{
+ enum {
+ MBIM_FUNC_SUSPEND_MASK = 0x1,
+ MBIM_FUNC_WAKEUP_EN_MASK = 0x2
+ };
+
+ bool func_wakeup_allowed;
+ struct f_mbim *mbim = func_to_mbim(f);
+
+ if (f == NULL)
+ return -EINVAL;
+
+ pr_debug("Got Function Suspend(%u) command for %s function\n",
+ options, f->name ? f->name : "");
+
+ /* Function Suspend is supported by Super Speed devices only */
+ if (mbim->cdev->gadget->speed != USB_SPEED_SUPER)
+ return -ENOTSUPP;
+
+ func_wakeup_allowed =
+ ((options & MBIM_FUNC_WAKEUP_EN_MASK) != 0);
+
+ if (options & MBIM_FUNC_SUSPEND_MASK) {
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ if (!f->func_is_suspended) {
+ mbim_suspend(f);
+ f->func_is_suspended = true;
+ }
+ } else {
+ if (f->func_is_suspended) {
+ f->func_is_suspended = false;
+ mbim_resume(f);
+ }
+ f->func_wakeup_allowed = func_wakeup_allowed;
+ }
+
+ return 0;
+}
+
+static int mbim_get_status(struct usb_function *f)
+{
+ enum {
+ MBIM_STS_FUNC_WAKEUP_CAP_SHIFT = 0,
+ MBIM_STS_FUNC_WAKEUP_EN_SHIFT = 1
+ };
+
+ unsigned remote_wakeup_enabled_bit;
+ const unsigned remote_wakeup_capable_bit = 1;
+
+ remote_wakeup_enabled_bit = f->func_wakeup_allowed ? 1 : 0;
+ return (remote_wakeup_enabled_bit << MBIM_STS_FUNC_WAKEUP_EN_SHIFT) |
+ (remote_wakeup_capable_bit << MBIM_STS_FUNC_WAKEUP_CAP_SHIFT);
+}
+
+/*---------------------- function driver setup/binding ---------------------*/
+
/*
 * bind() handler: allocate the two interface IDs, autoconfigure the
 * bulk IN/OUT and interrupt (notify) endpoints, pre-build the
 * RESPONSE_AVAILABLE notification request, and copy the FS/HS/SS
 * descriptor sets.  On any failure everything acquired so far is
 * released before returning the error.
 */
static int
mbim_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_mbim *mbim = func_to_mbim(f);
	int status;
	struct usb_ep *ep;
	struct usb_cdc_notification *event;

	pr_info("Enter\n");

	mbim->cdev = cdev;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	mbim->ctrl_id = status;
	mbim_iad_desc.bFirstInterface = status;

	/* patch the control interface number into every descriptor using it */
	mbim_control_intf.bInterfaceNumber = status;
	mbim_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	mbim->data_id = status;
	mbim->data_interface_up = false;

	mbim_data_nop_intf.bInterfaceNumber = status;
	mbim_data_intf.bInterfaceNumber = status;
	mbim_union_desc.bSlaveInterface0 = status;

	mbim->bam_port.cdev = cdev;
	mbim->bam_port.func = &mbim->function;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_in_desc);
	if (!ep) {
		pr_err("usb epin autoconfig failed\n");
		goto fail;
	}
	pr_info("usb epin autoconfig succeeded\n");
	ep->driver_data = cdev;	/* claim */
	mbim->bam_port.in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_out_desc);
	if (!ep) {
		pr_err("usb epout autoconfig failed\n");
		goto fail;
	}
	pr_info("usb epout autoconfig succeeded\n");
	ep->driver_data = cdev;	/* claim */
	mbim->bam_port.out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_notify_desc);
	if (!ep) {
		pr_err("usb notify ep autoconfig failed\n");
		goto fail;
	}
	pr_info("usb notify ep autoconfig succeeded\n");
	mbim->not_port.notify = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* allocate notification request and buffer */
	mbim->not_port.notify_req = mbim_alloc_req(ep, NCM_STATUS_BYTECOUNT,
			cdev->gadget->extra_buf_alloc);
	if (!mbim->not_port.notify_req) {
		pr_info("failed to allocate notify request\n");
		goto fail;
	}
	pr_info("allocated notify ep request & request buffer\n");

	/* pre-fill the (constant) RESPONSE_AVAILABLE notification */
	mbim->not_port.notify_req->context = mbim;
	mbim->not_port.notify_req->complete = mbim_notify_complete;
	mbim->not_port.notify_req->length = sizeof(*event);
	event = mbim->not_port.notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(mbim->ctrl_id);
	event->wLength = cpu_to_le16(0);

	/* copy descriptors, and track endpoint copies */
	f->fs_descriptors = usb_copy_descriptors(mbim_fs_function);
	if (!f->fs_descriptors)
		goto fail;

	/*
	 * support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_mbim_in_desc.bEndpointAddress =
				fs_mbim_in_desc.bEndpointAddress;
		hs_mbim_out_desc.bEndpointAddress =
				fs_mbim_out_desc.bEndpointAddress;
		hs_mbim_notify_desc.bEndpointAddress =
				fs_mbim_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(mbim_hs_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		ss_mbim_in_desc.bEndpointAddress =
				fs_mbim_in_desc.bEndpointAddress;
		ss_mbim_out_desc.bEndpointAddress =
				fs_mbim_out_desc.bEndpointAddress;
		ss_mbim_notify_desc.bEndpointAddress =
				fs_mbim_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(mbim_ss_function);
		if (!f->ss_descriptors)
			goto fail;
	}

	/*
	 * If MBIM is bound in a config other than the first, tell Windows
	 * about it by returning the num as a string in the OS descriptor's
	 * subCompatibleID field. Windows only supports up to config #4.
	 */
	if (c->bConfigurationValue >= 2 && c->bConfigurationValue <= 4) {
		pr_debug("MBIM in configuration %d\n", c->bConfigurationValue);
		mbim_ext_config_desc.function.subCompatibleID[0] =
			c->bConfigurationValue + '0';
	}

	pr_info("mbim(%d): %s speed IN/%s OUT/%s NOTIFY/%s\n",
			mbim->port_num,
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			mbim->bam_port.in->name, mbim->bam_port.out->name,
			mbim->not_port.notify->name);

	return 0;

fail:
	pr_err("%s failed to bind, err %d\n", f->name, status);

	/* unwind in reverse order of acquisition */
	if (f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->fs_descriptors)
		usb_free_descriptors(f->fs_descriptors);

	if (mbim->not_port.notify_req) {
		kfree(mbim->not_port.notify_req->buf);
		usb_ep_free_request(mbim->not_port.notify,
				mbim->not_port.notify_req);
	}

	/* we might as well release our claims on endpoints */
	if (mbim->not_port.notify)
		mbim->not_port.notify->driver_data = NULL;
	if (mbim->bam_port.out)
		mbim->bam_port.out->driver_data = NULL;
	if (mbim->bam_port.in)
		mbim->bam_port.in->driver_data = NULL;

	return status;
}
+
/*
 * unbind() handler: release the descriptor sets allocated in
 * mbim_bind() (guarded by the same gadget-speed conditions used
 * there), free the notify request, and disarm the MSFT OS descriptor.
 */
static void mbim_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);

	pr_debug("unbinding mbim\n");

	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->fs_descriptors);

	kfree(mbim->not_port.notify_req->buf);
	usb_ep_free_request(mbim->not_port.notify, mbim->not_port.notify_req);

	/* stop answering MSFT OS descriptor requests for this function */
	mbim_ext_config_desc.function.subCompatibleID[0] = 0;
}
+
+/**
+ * mbim_bind_config - add MBIM link to a configuration
+ * @c: the configuration to support the network link
+ * Context: single threaded during gadget setup
+ * Returns zero on success, else negative errno.
+ */
+int mbim_bind_config(struct usb_configuration *c, unsigned portno,
+ char *xport_name)
+{
+ struct f_mbim *mbim = NULL;
+ int status = 0;
+
+ pr_info("port number %u\n", portno);
+
+ if (portno >= nr_mbim_ports) {
+ pr_err("Can not add port %u. Max ports = %d\n",
+ portno, nr_mbim_ports);
+ return -ENODEV;
+ }
+
+ /* allocate and initialize one new instance */
+ mbim = mbim_ports[portno].port;
+ if (!mbim) {
+ pr_err("mbim struct not allocated\n");
+ return -ENOMEM;
+ }
+
+ mbim->xport = str_to_xport(xport_name);
+ switch (mbim->xport) {
+ case USB_GADGET_XPORT_BAM2BAM:
+ /* Override BAM2BAM to BAM_DMUX for old ABI compatibility */
+ mbim->xport = USB_GADGET_XPORT_BAM_DMUX;
+ /* fall-through */
+ case USB_GADGET_XPORT_BAM_DMUX:
+ status = gbam_mbim_setup();
+ if (status)
+ break;
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ status = mbim_bam_setup(nr_mbim_ports);
+ if (status)
+ break;
+ mbim_ntb_parameters.wNtbOutMaxDatagrams = 16;
+ /* For IPA this is proven to give maximum throughput */
+ mbim_ntb_parameters.dwNtbInMaxSize =
+ cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
+ mbim_ntb_parameters.dwNtbOutMaxSize =
+ cpu_to_le32(MBIM_NTB_OUT_SIZE_IPA);
+ /* update rx buffer size to be used by usb rx request buffer */
+ mbim->bam_port.rx_buffer_size = MBIM_NTB_OUT_SIZE_IPA;
+ mbim_ntb_parameters.wNdpInDivisor = 1;
+ pr_debug("MBIM: dwNtbOutMaxSize:%d\n", MBIM_NTB_OUT_SIZE_IPA);
+ break;
+ default:
+ status = -EINVAL;
+ }
+
+ if (status) {
+ pr_err("%s transport setup failed\n", xport_name);
+ return status;
+ }
+
+
+ /* maybe allocate device-global string IDs */
+ if (mbim_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ mbim_string_defs[STRING_CTRL_IDX].id = status;
+ mbim_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ mbim_string_defs[STRING_DATA_IDX].id = status;
+ mbim_data_nop_intf.iInterface = status;
+ mbim_data_intf.iInterface = status;
+ }
+
+ mbim->cdev = c->cdev;
+
+ mbim_reset_values(mbim);
+
+ mbim->function.name = "usb_mbim";
+ mbim->function.strings = mbim_strings;
+ mbim->function.bind = mbim_bind;
+ mbim->function.unbind = mbim_unbind;
+ mbim->function.set_alt = mbim_set_alt;
+ mbim->function.get_alt = mbim_get_alt;
+ mbim->function.setup = mbim_setup;
+ mbim->function.disable = mbim_disable;
+ mbim->function.suspend = mbim_suspend;
+ mbim->function.func_suspend = mbim_func_suspend;
+ mbim->function.get_status = mbim_get_status;
+ mbim->function.resume = mbim_resume;
+
+ INIT_LIST_HEAD(&mbim->cpkt_req_q);
+ INIT_LIST_HEAD(&mbim->cpkt_resp_q);
+
+ status = usb_add_function(c, &mbim->function);
+
+ pr_info("Exit status %d\n", status);
+
+ return status;
+}
+
+/* ------------ MBIM DRIVER File Operations API for USER SPACE ------------ */
+
+/*
+ * mbim_read() - deliver a queued encapsulated command to user space.
+ *
+ * Blocks (interruptibly) until a control packet is present on
+ * cpkt_req_q, then copies it into @buf and frees it.  Only one reader
+ * is allowed at a time (read_excl).  Returns the packet length on
+ * success or a negative errno.
+ */
+static ssize_t
+mbim_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct f_mbim *dev = fp->private_data;
+	struct ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("Enter(%zu)\n", count);
+
+	if (!dev) {
+		pr_err("Received NULL mbim pointer\n");
+		return -ENODEV;
+	}
+
+	if (count > MBIM_BULK_BUFFER_SIZE) {
+		pr_err("Buffer size is too big %zu, should be at most %d\n",
+			count, MBIM_BULK_BUFFER_SIZE);
+		return -EINVAL;
+	}
+
+	if (mbim_lock(&dev->read_excl)) {
+		pr_err("Previous reading is not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (atomic_read(&dev->error)) {
+		mbim_unlock(&dev->read_excl);
+		return -EIO;
+	}
+
+	/* wait until a request packet shows up on the queue */
+	spin_lock_irqsave(&dev->lock, flags);
+	while (list_empty(&dev->cpkt_req_q)) {
+		pr_debug("Requests list is empty. Wait.\n");
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = wait_event_interruptible(dev->read_wq,
+			!list_empty(&dev->cpkt_req_q));
+		if (ret < 0) {
+			pr_err("Waiting failed\n");
+			mbim_unlock(&dev->read_excl);
+			return -ERESTARTSYS;
+		}
+		pr_debug("Received request packet\n");
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+
+	cpkt = list_first_entry(&dev->cpkt_req_q, struct ctrl_pkt,
+							list);
+	if (cpkt->len > count) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		mbim_unlock(&dev->read_excl);
+		pr_err("cpkt size too big:%d > buf size:%zu\n",
+				cpkt->len, count);
+		return -ENOMEM;
+	}
+
+	pr_debug("cpkt size:%d\n", cpkt->len);
+
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	mbim_unlock(&dev->read_excl);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		pr_err("copy_to_user failed: err %d\n", ret);
+		/* copy_to_user faults are -EFAULT, not an allocation error */
+		ret = -EFAULT;
+	} else {
+		pr_debug("copied %d bytes to user\n", cpkt->len);
+		ret = cpkt->len;
+	}
+
+	mbim_free_ctrl_pkt(cpkt);
+
+	return ret;
+}
+
+/*
+ * mbim_write() - queue an encapsulated response from user space.
+ *
+ * Copies @count bytes into a freshly allocated control packet, links
+ * it on cpkt_resp_q and, when no notification is already in flight,
+ * queues a RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ * Serialized by write_excl.  Returns @count on success or -errno.
+ */
+static ssize_t
+mbim_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+	struct f_mbim *dev = fp->private_data;
+	struct ctrl_pkt *cpkt = NULL;
+	struct usb_request *req;
+	int ret = 0;
+	unsigned long flags;
+
+	pr_debug("Enter(%zu)\n", count);
+
+	/* validate dev before dereferencing it for the notify request */
+	if (!dev) {
+		pr_err("Received NULL mbim pointer\n");
+		return -ENODEV;
+	}
+
+	req = dev->not_port.notify_req;
+	if (!req || !req->buf) {
+		pr_err("%s: dev %pK req %pK req->buf %pK\n",
+			__func__, dev, req, req ? req->buf : req);
+		return -ENODEV;
+	}
+
+	if (!count || count > MAX_CTRL_PKT_SIZE) {
+		pr_err("error: ctrl pkt length %zu\n", count);
+		return -EINVAL;
+	}
+
+	if (mbim_lock(&dev->write_excl)) {
+		pr_err("Previous writing not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("USB cable not connected\n");
+		mbim_unlock(&dev->write_excl);
+		return -EPIPE;
+	}
+
+	if (dev->not_port.notify_state != MBIM_NOTIFY_RESPONSE_AVAILABLE) {
+		pr_err("dev:%pK state=%d error\n", dev,
+			dev->not_port.notify_state);
+		mbim_unlock(&dev->write_excl);
+		return -EINVAL;
+	}
+
+	if (dev->function.func_is_suspended &&
+			!dev->function.func_wakeup_allowed) {
+		dev->cpkt_drop_cnt++;
+		pr_err("drop ctrl pkt of len %zu\n", count);
+		/* this path previously returned without dropping write_excl */
+		mbim_unlock(&dev->write_excl);
+		return -ENOTSUPP;
+	}
+
+	cpkt = mbim_alloc_ctrl_pkt(count, GFP_KERNEL);
+	if (!cpkt) {
+		pr_err("failed to allocate ctrl pkt\n");
+		mbim_unlock(&dev->write_excl);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cpkt->buf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed err:%d\n", ret);
+		mbim_free_ctrl_pkt(cpkt);
+		mbim_unlock(&dev->write_excl);
+		/* copy_from_user returns bytes NOT copied; don't pass that up */
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+
+	/* only the first pending response actually queues a notification */
+	if (atomic_inc_return(&dev->not_port.notify_count) != 1) {
+		pr_debug("delay ep_queue: notifications queue is busy[%d]\n",
+			atomic_read(&dev->not_port.notify_count));
+		spin_unlock_irqrestore(&dev->lock, flags);
+		mbim_unlock(&dev->write_excl);
+		return count;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_func_ep_queue(&dev->function, dev->not_port.notify,
+			   req, GFP_ATOMIC);
+	if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+		spin_lock_irqsave(&dev->lock, flags);
+		/* check if device disconnected while we dropped lock */
+		if (atomic_read(&dev->online)) {
+			list_del(&cpkt->list);
+			atomic_dec(&dev->not_port.notify_count);
+			mbim_free_ctrl_pkt(cpkt);
+		}
+		dev->cpkt_drop_cnt++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		/* log count, not cpkt->len: cpkt may already be freed here */
+		pr_err("drop ctrl pkt of len %zu error %d\n", count, ret);
+	} else {
+		ret = 0;
+	}
+	mbim_unlock(&dev->write_excl);
+
+	pr_debug("Exit(%zu)\n", count);
+
+	return ret ? ret : count;
+}
+
+/*
+ * mbim_open() - open /dev/android_mbim.
+ *
+ * Enforces single-open via open_excl, stashes the port in
+ * fp->private_data and clears any latched error.  Opening while the
+ * cable is disconnected is allowed (only a warning is logged).
+ */
+static int mbim_open(struct inode *ip, struct file *fp)
+{
+	pr_info("Open mbim driver\n");
+
+	if (!_mbim_dev) {
+		pr_err("mbim_dev not created yet\n");
+		return -ENODEV;
+	}
+
+	if (mbim_lock(&_mbim_dev->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	pr_info("Lock mbim_dev->open_excl for open\n");
+
+	if (!atomic_read(&_mbim_dev->online))
+		pr_err("USB cable not connected\n");
+
+	fp->private_data = _mbim_dev;
+	atomic_set(&_mbim_dev->error, 0);
+
+	pr_info("Exit, mbim file opened\n");
+	return 0;
+}
+
+/*
+ * mbim_release() - release /dev/android_mbim.
+ *
+ * Only drops the single-open lock; the device state itself persists
+ * until fmbim_cleanup().
+ * NOTE(review): uses the global _mbim_dev rather than fp->private_data
+ * and assumes open() succeeded, so _mbim_dev is non-NULL here — verify
+ * against teardown ordering.
+ */
+static int mbim_release(struct inode *ip, struct file *fp)
+{
+	pr_info("Close mbim file\n");
+
+	mbim_unlock(&_mbim_dev->open_excl);
+
+	return 0;
+}
+
+#define BAM_DMUX_CHANNEL_ID 8
+/*
+ * mbim_ioctl() - user-space control interface.
+ *
+ * MBIM_GET_NTB_SIZE / MBIM_GET_DATAGRAM_COUNT report the negotiated
+ * NTB parameters; MBIM_EP_LOOKUP reports the data-path endpoint info
+ * for the active transport.  Serialized by ioctl_excl, which must be
+ * released on every exit path.
+ */
+static long mbim_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	struct f_mbim *mbim = fp->private_data;
+	struct data_port *port;
+	struct mbim_ipa_ep_info info;
+	int ret = 0;
+
+	pr_debug("Received command %d\n", cmd);
+
+	if (!mbim) {
+		pr_err("Bad parameter\n");
+		return -EINVAL;
+	}
+
+	if (mbim_lock(&mbim->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case MBIM_GET_NTB_SIZE:
+		ret = copy_to_user((void __user *)arg,
+			&mbim->ntb_input_size, sizeof(mbim->ntb_input_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent NTB size %d\n", mbim->ntb_input_size);
+		break;
+	case MBIM_GET_DATAGRAM_COUNT:
+		ret = copy_to_user((void __user *)arg,
+			&mbim->ntb_max_datagrams,
+			sizeof(mbim->ntb_max_datagrams));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent NTB datagrams count %d\n",
+			mbim->ntb_max_datagrams);
+		break;
+
+	case MBIM_EP_LOOKUP:
+		if (!atomic_read(&mbim->online)) {
+			pr_warn("usb cable is not connected\n");
+			/* was "return -ENOTCONN", which leaked ioctl_excl */
+			ret = -ENOTCONN;
+			goto fail;
+		}
+
+		switch (mbim->xport) {
+		case USB_GADGET_XPORT_BAM_DMUX:
+			/*
+			 * Rmnet and MBIM share the same BAM-DMUX channel.
+			 * This channel number 8 should be in sync with
+			 * the one defined in u_bam.c.
+			 */
+			info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_BAM_DMUX;
+			info.ph_ep_info.peripheral_iface_id =
+						BAM_DMUX_CHANNEL_ID;
+			info.ipa_ep_pair.cons_pipe_num = 0;
+			info.ipa_ep_pair.prod_pipe_num = 0;
+			break;
+		case USB_GADGET_XPORT_BAM2BAM_IPA:
+			port = &mbim->bam_port;
+			if ((port->ipa_producer_ep == -1) ||
+				(port->ipa_consumer_ep == -1)) {
+				pr_err("EP_LOOKUP failed - IPA pipes not updated\n");
+				ret = -EAGAIN;
+				break;
+			}
+
+			info.ph_ep_info.ep_type = MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = mbim->data_id;
+			info.ipa_ep_pair.cons_pipe_num = port->ipa_consumer_ep;
+			info.ipa_ep_pair.prod_pipe_num = port->ipa_producer_ep;
+			break;
+		default:
+			ret = -ENODEV;
+			pr_err("unknown transport\n");
+			goto fail;
+		}
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		break;
+
+	default:
+		pr_err("wrong parameter\n");
+		ret = -EINVAL;
+	}
+
+fail:
+	mbim_unlock(&mbim->ioctl_excl);
+
+	return ret;
+}
+
+/* file operations for MBIM device /dev/android_mbim */
+static const struct file_operations mbim_fops = {
+	.owner = THIS_MODULE,
+	.open = mbim_open,
+	.release = mbim_release,
+	.read = mbim_read,		/* encapsulated commands from host */
+	.write = mbim_write,		/* encapsulated responses to host */
+	.unlocked_ioctl = mbim_ioctl,
+};
+
+/* dynamic-minor misc device; node appears as /dev/android_mbim */
+static struct miscdevice mbim_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_mbim",
+	.fops = &mbim_fops,
+};
+
+/*
+ * mbim_init() - allocate up to NR_MBIM_PORTS port structures and
+ * register the /dev/android_mbim misc device.
+ *
+ * Returns 0 on success or a negative errno; on failure all allocated
+ * ports are freed and the module-global state is reset.
+ * NOTE(review): _mbim_dev ends up pointing at the LAST allocated
+ * instance, so the char device only ever drives that port — confirm
+ * this is intentional for multi-instance configurations.
+ */
+static int mbim_init(int instances)
+{
+	int i;
+	struct f_mbim *dev = NULL;
+	int ret;
+
+	pr_info("initialize %d instances\n", instances);
+
+	if (instances > NR_MBIM_PORTS) {
+		pr_err("Max-%d instances supported\n", NR_MBIM_PORTS);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < instances; i++) {
+		dev = kzalloc(sizeof(struct f_mbim), GFP_KERNEL);
+		if (!dev) {
+			pr_err("Failed to allocate mbim dev\n");
+			ret = -ENOMEM;
+			goto fail_probe;
+		}
+
+		dev->port_num = i;
+		/* -1 marks "IPA pipe not yet resolved" for EP_LOOKUP */
+		dev->bam_port.ipa_consumer_ep = -1;
+		dev->bam_port.ipa_producer_ep = -1;
+
+		spin_lock_init(&dev->lock);
+		INIT_LIST_HEAD(&dev->cpkt_req_q);
+		INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+		mbim_ports[i].port = dev;
+		mbim_ports[i].port_num = i;
+
+		init_waitqueue_head(&dev->read_wq);
+
+		atomic_set(&dev->open_excl, 0);
+		atomic_set(&dev->ioctl_excl, 0);
+		atomic_set(&dev->read_excl, 0);
+		atomic_set(&dev->write_excl, 0);
+
+		nr_mbim_ports++;
+	}
+
+	_mbim_dev = dev;
+	ret = misc_register(&mbim_device);
+	if (ret) {
+		pr_err("mbim driver failed to register\n");
+		goto fail_probe;
+	}
+
+	pr_info("Initialized %d ports\n", nr_mbim_ports);
+
+	return ret;
+
+fail_probe:
+	pr_err("Failed\n");
+	for (i = 0; i < nr_mbim_ports; i++) {
+		kfree(mbim_ports[i].port);
+		mbim_ports[i].port = NULL;
+	}
+	/* don't leave stale counters or a dangling _mbim_dev behind */
+	nr_mbim_ports = 0;
+	_mbim_dev = NULL;
+
+	return ret;
+}
+
+/*
+ * fmbim_cleanup() - undo mbim_init(): remove the misc device, then
+ * free every allocated port.
+ */
+static void fmbim_cleanup(void)
+{
+	int i = 0;
+
+	pr_info("Enter\n");
+
+	/*
+	 * Deregister the device node BEFORE freeing the ports so that a
+	 * concurrent open() cannot grab _mbim_dev while it is being
+	 * kfree()d (the previous order freed first, then deregistered).
+	 */
+	misc_deregister(&mbim_device);
+	_mbim_dev = NULL;
+
+	for (i = 0; i < nr_mbim_ports; i++) {
+		kfree(mbim_ports[i].port);
+		mbim_ports[i].port = NULL;
+	}
+	nr_mbim_ports = 0;
+}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 0380f260b092..b942f38ab10b 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -167,6 +167,15 @@ static struct usb_endpoint_descriptor bulk_in_desc = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
/* .bLength = DYNAMIC */
@@ -198,7 +207,7 @@ static struct usb_gadget_strings *midi_strings[] = {
static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static const uint8_t f_midi_cin_length[] = {
@@ -718,6 +727,7 @@ fail:
static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
+ struct usb_descriptor_header **midi_ss_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
@@ -725,7 +735,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
- int status, n, jack = 1, i = 0;
+ int status, n, jack = 1, i = 0, j = 0;
midi->gadget = cdev->gadget;
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
@@ -765,11 +775,20 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
if (!midi->out_ep)
goto fail;
+ /* allocate temporary function list for ss */
+ midi_ss_function = kcalloc((MAX_PORTS * 4) + 11,
+ sizeof(*midi_ss_function), GFP_KERNEL);
+ if (!midi_ss_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* allocate temporary function list */
midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
+ kfree(midi_ss_function);
goto fail;
}
@@ -783,6 +802,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_interface_desc;
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
@@ -791,6 +816,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_header_desc;
/* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
@@ -804,6 +831,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_ext->bJackID = jack++;
in_ext->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_ext;
out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -815,6 +843,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_emb->pins[0].baSourceID = in_ext->bJackID;
out_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_emb;
/* link it to the endpoint */
ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
@@ -832,6 +862,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
in_emb->bJackID = jack++;
in_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_emb;
out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
@@ -843,6 +874,8 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
out_ext->pins[0].baSourceID = in_emb->bJackID;
out_ext->pins[0].baSourcePin = 1;
midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_ext;
/* link it to the endpoint */
ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
@@ -862,6 +895,16 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
midi_function[i++] = NULL;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_out_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_out_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_in_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_in_desc;
+ midi_ss_function[j++] = NULL;
+
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -880,13 +923,23 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
goto fail_f_midi;
}
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ f->ss_descriptors = usb_copy_descriptors(midi_ss_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
+ }
+
kfree(midi_function);
+ kfree(midi_ss_function);
return 0;
fail_f_midi:
kfree(midi_function);
usb_free_descriptors(f->hs_descriptors);
+ kfree(midi_ss_function);
fail:
f_midi_unregister_card(midi);
fail_register:
@@ -1109,7 +1162,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
@@ -1137,6 +1190,7 @@ static void f_midi_free(struct usb_function *f)
mutex_lock(&opts->lock);
for (i = opts->in_ports - 1; i >= 0; --i)
kfree(midi->in_port[i]);
+ opts->func_inst.f = NULL;
kfree(midi);
opts->func_inst.f = NULL;
--opts->refcnt;
@@ -1157,7 +1211,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
card = midi->card;
midi->card = NULL;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
usb_free_all_descriptors(f);
}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index b25cb3594d01..2d8d5e28ec39 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -26,6 +26,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
@@ -40,6 +42,8 @@
#include "configfs.h"
+#define MTP_RX_BUFFER_INIT_SIZE 1048576
+#define MTP_TX_BUFFER_INIT_SIZE 1048576
#define MTP_BULK_BUFFER_SIZE 16384
#define INTR_BUFFER_SIZE 28
#define MAX_INST_NAME_LEN 40
@@ -56,7 +60,7 @@
#define STATE_ERROR 4 /* error from completion routine */
/* number of tx and rx requests to allocate */
-#define TX_REQ_MAX 4
+#define MTP_TX_REQ_MAX 8
#define RX_REQ_MAX 2
#define INTR_REQ_MAX 5
@@ -74,6 +78,17 @@
#define MTP_RESPONSE_DEVICE_BUSY 0x2019
#define DRIVER_NAME "mtp"
+#define MAX_ITERATION 100
+
+unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
+module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
+
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
+module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
+
+unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
+module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
+
static const char mtp_shortname[] = DRIVER_NAME "_usb";
struct mtp_dev {
@@ -114,6 +129,16 @@ struct mtp_dev {
uint16_t xfer_command;
uint32_t xfer_transaction_id;
int xfer_result;
+ struct {
+ unsigned long vfs_rbytes;
+ unsigned long vfs_wbytes;
+ unsigned vfs_rtime;
+ unsigned vfs_wtime;
+ } perf[MAX_ITERATION];
+ unsigned dbg_read_index;
+ unsigned dbg_write_index;
+ bool is_ptp;
+ struct mutex read_mutex;
};
static struct usb_interface_descriptor mtp_interface_desc = {
@@ -141,27 +166,34 @@ static struct usb_endpoint_descriptor mtp_ss_in_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(1024),
+ .wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
- .bLength = sizeof(mtp_ss_in_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- /* .bMaxBurst = DYNAMIC, */
+ .bLength = sizeof(mtp_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
};
+
static struct usb_endpoint_descriptor mtp_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = __constant_cpu_to_le16(1024),
+ .wMaxPacketSize = cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
- .bLength = sizeof(mtp_ss_out_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- /* .bMaxBurst = DYNAMIC, */
+ .bLength = sizeof(mtp_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ .bMaxBurst = 2,
+ /* .bmAttributes = 0, */
};
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
@@ -204,9 +236,13 @@ static struct usb_endpoint_descriptor mtp_intr_desc = {
};
static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
- .bLength = sizeof(mtp_intr_ss_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
+ .bLength = sizeof(mtp_intr_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
};
static struct usb_descriptor_header *fs_mtp_descs[] = {
@@ -310,10 +346,12 @@ struct mtp_ext_config_desc_function {
};
/* MTP Extended Configuration Descriptor */
-struct {
+struct ext_mtp_desc {
struct mtp_ext_config_desc_header header;
struct mtp_ext_config_desc_function function;
-} mtp_ext_config_desc = {
+};
+
+struct ext_mtp_desc mtp_ext_config_desc = {
.header = {
.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
.bcdVersion = __constant_cpu_to_le16(0x0100),
@@ -327,6 +365,20 @@ struct {
},
};
+struct ext_mtp_desc ptp_ext_config_desc = {
+ .header = {
+ .dwLength = cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = cpu_to_le16(0x0100),
+ .wIndex = cpu_to_le16(4),
+ .bCount = cpu_to_le16(1),
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'P', 'T', 'P' },
+ },
+};
+
struct mtp_device_status {
__le16 wLength;
__le16 wCode;
@@ -432,7 +484,7 @@ static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->tx_idle, req);
@@ -445,7 +497,7 @@ static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
struct mtp_dev *dev = _mtp_dev;
dev->rx_done = 1;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
wake_up(&dev->read_wq);
@@ -455,7 +507,7 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
struct mtp_dev *dev = _mtp_dev;
- if (req->status != 0)
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
mtp_req_put(dev, &dev->intr_idle, req);
@@ -473,7 +525,7 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
struct usb_ep *ep;
int i;
- DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+ DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
ep = usb_ep_autoconfig(cdev->gadget, in_desc);
if (!ep) {
@@ -502,18 +554,43 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
ep->driver_data = dev; /* claim the endpoint */
dev->ep_intr = ep;
+retry_tx_alloc:
/* now allocate requests for our endpoints */
- for (i = 0; i < TX_REQ_MAX; i++) {
- req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ for (i = 0; i < mtp_tx_reqs; i++) {
+ req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
+ if (!req) {
+ if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+ mtp_tx_reqs = MTP_TX_REQ_MAX;
+ goto retry_tx_alloc;
+ }
req->complete = mtp_complete_in;
mtp_req_put(dev, &dev->tx_idle, req);
}
+
+ /*
+ * The RX buffer should be aligned to EP max packet for
+ * some controllers. At bind time, we don't know the
+ * operational speed. Hence assuming super speed max
+ * packet size.
+ */
+ if (mtp_rx_req_len % 1024)
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+
+retry_rx_alloc:
for (i = 0; i < RX_REQ_MAX; i++) {
- req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
- if (!req)
- goto fail;
+ req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+ if (!req) {
+ if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ for (--i; i >= 0; i--)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+ goto retry_rx_alloc;
+ }
req->complete = mtp_complete_out;
dev->rx_req[i] = req;
}
@@ -538,12 +615,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
struct mtp_dev *dev = fp->private_data;
struct usb_composite_dev *cdev = dev->cdev;
struct usb_request *req;
- ssize_t r = count;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
- size_t len = 0;
- DBG(cdev, "mtp_read(%zu)\n", count);
+ DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
/* we will block until we're online */
DBG(cdev, "mtp_read: waiting for online state\n");
@@ -553,15 +628,12 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
r = ret;
goto done;
}
- spin_lock_irq(&dev->lock);
- if (dev->ep_out->desc) {
- len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
- if (len > MTP_BULK_BUFFER_SIZE) {
- spin_unlock_irq(&dev->lock);
- return -EINVAL;
- }
- }
+ len = ALIGN(count, dev->ep_out->maxpacket);
+ if (len > mtp_rx_req_len)
+ return -EINVAL;
+
+ spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
/* report cancelation to userspace */
dev->state = STATE_READY;
@@ -571,32 +643,50 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
dev->state = STATE_BUSY;
spin_unlock_irq(&dev->lock);
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ goto done;
+ }
requeue_req:
/* queue a request */
req = dev->rx_req[0];
req->length = len;
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
goto done;
} else {
- DBG(cdev, "rx %p queue\n", req);
+ DBG(cdev, "rx %pK queue\n", req);
}
/* wait for a request to complete */
- ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, req);
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_CANCELED;
+ spin_unlock_irq(&dev->lock);
+ goto done;
+ }
if (ret < 0) {
r = ret;
usb_ep_dequeue(dev->ep_out, req);
goto done;
}
+ mutex_lock(&dev->read_mutex);
if (dev->state == STATE_BUSY) {
/* If we got a 0-len packet, throw it back and try again. */
if (req->actual == 0)
goto requeue_req;
- DBG(cdev, "rx %p %d\n", req, req->actual);
+ DBG(cdev, "rx %pK %d\n", req, req->actual);
xfer = (req->actual < count) ? req->actual : count;
r = xfer;
if (copy_to_user(buf, req->buf, xfer))
@@ -604,6 +694,7 @@ requeue_req:
} else
r = -EIO;
+ mutex_unlock(&dev->read_mutex);
done:
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED)
@@ -612,7 +703,7 @@ done:
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_read returning %zd\n", r);
+ DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -627,7 +718,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
int sendZLP = 0;
int ret;
- DBG(cdev, "mtp_write(%zu)\n", count);
+ DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED) {
@@ -666,12 +757,14 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
((req = mtp_req_get(dev, &dev->tx_idle))
|| dev->state != STATE_BUSY));
if (!req) {
+ DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -704,7 +797,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
dev->state = STATE_READY;
spin_unlock_irq(&dev->lock);
- DBG(cdev, "mtp_write returning %zd\n", r);
+ DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
return r;
}
@@ -722,6 +815,7 @@ static void send_file_work(struct work_struct *data)
int xfer, ret, hdr_size;
int r = 0;
int sendZLP = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -729,6 +823,11 @@ static void send_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_send_header) {
@@ -759,12 +858,15 @@ static void send_file_work(struct work_struct *data)
break;
}
if (!req) {
+ DBG(cdev,
+ "send_file_work request NULL ret:%d state:%d\n",
+ ret, dev->state);
r = ret;
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
@@ -782,21 +884,27 @@ static void send_file_work(struct work_struct *data)
header->transaction_id =
__cpu_to_le32(dev->xfer_transaction_id);
}
-
+ start_time = ktime_get();
ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
&offset);
if (ret < 0) {
r = ret;
break;
}
+
xfer = ret + hdr_size;
+ dev->perf[dev->dbg_read_index].vfs_rtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+ dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
hdr_size = 0;
req->length = xfer;
ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
if (ret < 0) {
DBG(cdev, "send_file_work: xfer error %d\n", ret);
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
r = -EIO;
break;
}
@@ -810,7 +918,7 @@ static void send_file_work(struct work_struct *data)
if (req)
mtp_req_put(dev, &dev->tx_idle, req);
- DBG(cdev, "send_file_work returning %d\n", r);
+ DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
/* write the result */
dev->xfer_result = r;
smp_wmb();
@@ -828,6 +936,7 @@ static void receive_file_work(struct work_struct *data)
int64_t count;
int ret, cur_buf = 0;
int r = 0;
+ ktime_t start_time;
/* read our parameters */
smp_rmb();
@@ -835,35 +944,67 @@ static void receive_file_work(struct work_struct *data)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "receive_file_work(%lld)\n", count);
+ if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+ DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+ count, dev->ep_out->maxpacket);
while (count > 0 || write_req) {
if (count > 0) {
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
/* queue a request */
read_req = dev->rx_req[cur_buf];
cur_buf = (cur_buf + 1) % RX_REQ_MAX;
- read_req->length = (count > MTP_BULK_BUFFER_SIZE
- ? MTP_BULK_BUFFER_SIZE : count);
+ /* some h/w expects size to be aligned to ep's MTU */
+ read_req->length = mtp_rx_req_len;
+
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
}
if (write_req) {
- DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
+ start_time = ktime_get();
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
ret = vfs_write(filp, write_req->buf, write_req->actual,
&offset);
DBG(cdev, "vfs_write %d\n", ret);
if (ret != write_req->actual) {
r = -EIO;
- dev->state = STATE_ERROR;
+ mutex_unlock(&dev->read_mutex);
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
+ mutex_unlock(&dev->read_mutex);
+ dev->perf[dev->dbg_write_index].vfs_wtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+ dev->dbg_write_index =
+ (dev->dbg_write_index + 1) % MAX_ITERATION;
write_req = NULL;
}
@@ -871,8 +1012,12 @@ static void receive_file_work(struct work_struct *data)
/* wait for our last read to complete */
ret = wait_event_interruptible(dev->read_wq,
dev->rx_done || dev->state != STATE_BUSY);
- if (dev->state == STATE_CANCELED) {
- r = -ECANCELED;
+ if (dev->state == STATE_CANCELED
+ || dev->state == STATE_OFFLINE) {
+ if (dev->state == STATE_OFFLINE)
+ r = -EIO;
+ else
+ r = -ECANCELED;
if (!dev->rx_done)
usb_ep_dequeue(dev->ep_out, read_req);
break;
@@ -881,6 +1026,17 @@ static void receive_file_work(struct work_struct *data)
r = read_req->status;
break;
}
+
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
+ /* Check if we aligned the size due to MTU constraint */
+ if (count < read_req->length)
+ read_req->actual = (read_req->actual > count ?
+ count : read_req->actual);
/* if xfer_file_length is 0xFFFFFFFF, then we read until
* we get a zero length packet
*/
@@ -897,6 +1053,7 @@ static void receive_file_work(struct work_struct *data)
write_req = read_req;
read_req = NULL;
+ mutex_unlock(&dev->read_mutex);
}
}
@@ -937,85 +1094,107 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
return ret;
}
-static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
+ struct mtp_file_range *mfr)
{
struct mtp_dev *dev = fp->private_data;
struct file *filp = NULL;
+ struct work_struct *work;
int ret = -EINVAL;
- if (mtp_lock(&dev->ioctl_excl))
+ if (mtp_lock(&dev->ioctl_excl)) {
+ DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
return -EBUSY;
+ }
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancellation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr->fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr->offset;
+ dev->xfer_file_length = mfr->length;
+ /* make sure write is done before parameters are read */
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr->command;
+ dev->xfer_transaction_id = mfr->transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct mtp_event event;
+ int ret = -EINVAL;
switch (code) {
case MTP_SEND_FILE:
case MTP_RECEIVE_FILE:
case MTP_SEND_FILE_WITH_HEADER:
- {
- struct mtp_file_range mfr;
- struct work_struct *work;
-
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED) {
- /* report cancelation to userspace */
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
- ret = -ECANCELED;
- goto out;
- }
- if (dev->state == STATE_OFFLINE) {
- spin_unlock_irq(&dev->lock);
- ret = -ENODEV;
- goto out;
- }
- dev->state = STATE_BUSY;
- spin_unlock_irq(&dev->lock);
-
if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
ret = -EFAULT;
goto fail;
}
- /* hold a reference to the file while we are working with it */
- filp = fget(mfr.fd);
- if (!filp) {
- ret = -EBADF;
- goto fail;
- }
-
- /* write the parameters */
- dev->xfer_file = filp;
- dev->xfer_file_offset = mfr.offset;
- dev->xfer_file_length = mfr.length;
- smp_wmb();
-
- if (code == MTP_SEND_FILE_WITH_HEADER) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 1;
- dev->xfer_command = mfr.command;
- dev->xfer_transaction_id = mfr.transaction_id;
- } else if (code == MTP_SEND_FILE) {
- work = &dev->send_file_work;
- dev->xfer_send_header = 0;
- } else {
- work = &dev->receive_file_work;
- }
-
- /* We do the file transfer on a work queue so it will run
- * in kernel context, which is necessary for vfs_read and
- * vfs_write to use our buffers in the kernel address space.
- */
- queue_work(dev->wq, work);
- /* wait for operation to complete */
- flush_workqueue(dev->wq);
- fput(filp);
-
- /* read the result */
- smp_rmb();
- ret = dev->xfer_result;
- break;
- }
+ ret = mtp_send_receive_ioctl(fp, code, &mfr);
+ break;
case MTP_SEND_EVENT:
- {
- struct mtp_event event;
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
/* return here so we don't change dev->state below,
* which would interfere with bulk transfer state.
*/
@@ -1023,28 +1202,93 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
ret = -EFAULT;
else
ret = mtp_send_event(dev, &event);
- goto out;
+ mtp_unlock(&dev->ioctl_excl);
+ break;
+ default:
+ DBG(dev->cdev, "unknown ioctl code: %d\n", code);
}
+fail:
+ return ret;
+}
+
+/*
+ * 32 bit userspace calling into 64 bit kernel. handle ioctl code
+ * and userspace pointer
+*/
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned code,
+ unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct __compat_mtp_file_range cmfr;
+ struct mtp_event event;
+ struct __compat_mtp_event cevent;
+ unsigned cmd;
+ bool send_file = false;
+ int ret = -EINVAL;
+
+ switch (code) {
+ case COMPAT_MTP_SEND_FILE:
+ cmd = MTP_SEND_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_RECEIVE_FILE:
+ cmd = MTP_RECEIVE_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+ cmd = MTP_SEND_FILE_WITH_HEADER;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_EVENT:
+ cmd = MTP_SEND_EVENT;
+ break;
+ default:
+ DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+ ret = -ENOIOCTLCMD;
+ goto fail;
}
+ if (send_file) {
+ if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ mfr.fd = cmfr.fd;
+ mfr.offset = cmfr.offset;
+ mfr.length = cmfr.length;
+ mfr.command = cmfr.command;
+ mfr.transaction_id = cmfr.transaction_id;
+ ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+ } else {
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&cevent, (void __user *)value,
+ sizeof(cevent))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ event.length = cevent.length;
+ event.data = compat_ptr(cevent.data);
+ ret = mtp_send_event(dev, &event);
+ mtp_unlock(&dev->ioctl_excl);
+ }
fail:
- spin_lock_irq(&dev->lock);
- if (dev->state == STATE_CANCELED)
- ret = -ECANCELED;
- else if (dev->state != STATE_OFFLINE)
- dev->state = STATE_READY;
- spin_unlock_irq(&dev->lock);
-out:
- mtp_unlock(&dev->ioctl_excl);
- DBG(dev->cdev, "ioctl returning %d\n", ret);
return ret;
}
+#endif
static int mtp_open(struct inode *ip, struct file *fp)
{
printk(KERN_INFO "mtp_open\n");
- if (mtp_lock(&_mtp_dev->open_excl))
+ if (mtp_lock(&_mtp_dev->open_excl)) {
+ pr_err("%s mtp_release not called returning EBUSY\n", __func__);
return -EBUSY;
+ }
/* clear any error condition */
if (_mtp_dev->state != STATE_OFFLINE)
@@ -1068,6 +1312,9 @@ static const struct file_operations mtp_fops = {
.read = mtp_read,
.write = mtp_write,
.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_mtp_ioctl,
+#endif
.open = mtp_open,
.release = mtp_release,
};
@@ -1110,9 +1357,21 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
if (ctrl->bRequest == 1
&& (ctrl->bRequestType & USB_DIR_IN)
&& (w_index == 4 || w_index == 5)) {
- value = (w_length < sizeof(mtp_ext_config_desc) ?
- w_length : sizeof(mtp_ext_config_desc));
- memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ if (!dev->is_ptp) {
+ value = (w_length <
+ sizeof(mtp_ext_config_desc) ?
+ w_length :
+ sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc,
+ value);
+ } else {
+ value = (w_length <
+ sizeof(ptp_ext_config_desc) ?
+ w_length :
+ sizeof(ptp_ext_config_desc));
+ memcpy(cdev->req->buf, &ptp_ext_config_desc,
+ value);
+ }
}
} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
@@ -1181,7 +1440,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
struct mtp_instance *fi_mtp;
dev->cdev = cdev;
- DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+ DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
/* allocate interface ID(s) */
id = usb_interface_id(c, f);
@@ -1235,6 +1494,15 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
mtp_ss_out_comp_desc.bMaxBurst = max_burst;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ mtp_ss_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_ss_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ fi_mtp->func_inst.f = &dev->function;
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_superspeed(c->cdev->gadget) ? "super" :
(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
@@ -1246,19 +1514,24 @@ static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct mtp_dev *dev = func_to_mtp(f);
+ struct mtp_instance *fi_mtp;
struct usb_request *req;
int i;
-
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+ mutex_lock(&dev->read_mutex);
while ((req = mtp_req_get(dev, &dev->tx_idle)))
mtp_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
mtp_request_free(dev->rx_req[i], dev->ep_out);
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
+ mutex_unlock(&dev->read_mutex);
dev->state = STATE_OFFLINE;
+ dev->is_ptp = false;
kfree(f->os_desc_table);
f->os_desc_n = 0;
+ fi_mtp->func_inst.f = NULL;
}
static int mtp_function_set_alt(struct usb_function *f,
@@ -1322,6 +1595,120 @@ static void mtp_function_disable(struct usb_function *f)
VDBG(cdev, "%s disabled\n", dev->function.name);
}
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int i;
+ unsigned long flags;
+ unsigned min, max = 0, sum = 0, iteration = 0;
+
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Write Stats:\n");
+ seq_puts(s, "\n=======================\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ min = dev->perf[0].vfs_wtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_wbytes,
+ dev->perf[i].vfs_wtime);
+ if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+ sum += dev->perf[i].vfs_wtime;
+ if (min > dev->perf[i].vfs_wtime)
+ min = dev->perf[i].vfs_wtime;
+ if (max < dev->perf[i].vfs_wtime)
+ max = dev->perf[i].vfs_wtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, sum / iteration);
+ min = max = sum = iteration = 0;
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Read Stats:\n");
+ seq_puts(s, "\n=======================\n");
+
+ min = dev->perf[0].vfs_rtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_rbytes,
+ dev->perf[i].vfs_rtime);
+ if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+ sum += dev->perf[i].vfs_rtime;
+ if (min > dev->perf[i].vfs_rtime)
+ min = dev->perf[i].vfs_rtime;
+ if (max < dev->perf[i].vfs_rtime)
+ max = dev->perf[i].vfs_rtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, sum / iteration);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int clear_stats;
+ unsigned long flags;
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (buf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
+ pr_err("Wrong value. To clear stats, enter value as 0.\n");
+ goto done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+ dev->dbg_read_index = 0;
+ dev->dbg_write_index = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+done:
+ return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+ .open = debug_mtp_open,
+ .read = seq_read,
+ .write = debug_mtp_reset_stats,
+};
+
+struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+ struct dentry *dent_mtp_status;
+
+ dent_mtp = debugfs_create_dir("usb_mtp", 0);
+ if (!dent_mtp || IS_ERR(dent_mtp))
+ return;
+
+ dent_mtp_status = debugfs_create_file("status", S_IRUGO | S_IWUSR,
+ dent_mtp, 0, &debug_mtp_ops);
+ if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+ debugfs_remove(dent_mtp);
+ dent_mtp = NULL;
+ return;
+ }
+}
+
+static void mtp_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dent_mtp);
+}
+
static int __mtp_setup(struct mtp_instance *fi_mtp)
{
struct mtp_dev *dev;
@@ -1358,6 +1745,7 @@ static int __mtp_setup(struct mtp_instance *fi_mtp)
if (ret)
goto err2;
+ mtp_debugfs_init();
return 0;
err2:
@@ -1382,6 +1770,7 @@ static void mtp_cleanup(void)
if (!dev)
return;
+ mtp_debugfs_remove();
misc_deregister(&mtp_device);
destroy_workqueue(dev->wq);
_mtp_dev = NULL;
@@ -1480,6 +1869,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
config_group_init_type_name(&fi_mtp->func_inst.group,
"", &mtp_func_type);
+ mutex_init(&fi_mtp->dev->read_mutex);
+
return &fi_mtp->func_inst;
}
EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
@@ -1539,6 +1930,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
dev->function.disable = mtp_function_disable;
dev->function.setup = mtp_ctrlreq_configfs;
dev->function.free_func = mtp_free;
+ dev->is_ptp = !mtp_config;
return &dev->function;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 93086efef5a8..a1332f77f173 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -334,6 +334,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1359,17 +1430,39 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
*/
if (!ncm_opts->bound) {
mutex_lock(&ncm_opts->lock);
+ ncm_opts->net = gether_setup_default();
+ if (IS_ERR(ncm_opts->net)) {
+ status = PTR_ERR(ncm_opts->net);
+ mutex_unlock(&ncm_opts->lock);
+ goto error;
+ }
gether_set_gadget(ncm_opts->net, cdev->gadget);
status = gether_register_netdev(ncm_opts->net);
mutex_unlock(&ncm_opts->lock);
- if (status)
- return status;
+ if (status) {
+ free_netdev(ncm_opts->net);
+ goto error;
+ }
ncm_opts->bound = true;
}
+
+ /* export host's Ethernet address in CDC format */
+ status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+ sizeof(ncm->ethaddr));
+ if (status < 12) { /* strlen("01234567890a") */
+ ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+ __func__, status);
+ status = -EINVAL;
+ goto netdev_cleanup;
+ }
+ ncm->port.ioport = netdev_priv(ncm_opts->net);
+
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
- if (IS_ERR(us))
- return PTR_ERR(us);
+ if (IS_ERR(us)) {
+ status = PTR_ERR(us);
+ goto netdev_cleanup;
+ }
ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1435,8 +1528,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
@@ -1464,7 +1566,10 @@ fail:
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
+netdev_cleanup:
+ gether_cleanup(netdev_priv(ncm_opts->net));
+error:
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
@@ -1512,8 +1617,6 @@ static void ncm_free_inst(struct usb_function_instance *f)
opts = container_of(f, struct f_ncm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
- else
- free_netdev(opts->net);
kfree(opts);
}
@@ -1526,12 +1629,6 @@ static struct usb_function_instance *ncm_alloc_inst(void)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
- opts->net = gether_setup_default();
- if (IS_ERR(opts->net)) {
- struct net_device *net = opts->net;
- kfree(opts);
- return ERR_CAST(net);
- }
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
@@ -1554,9 +1651,13 @@ static void ncm_free(struct usb_function *f)
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_ncm *ncm = func_to_ncm(f);
+ struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+ func_inst);
DBG(c->cdev, "ncm unbind\n");
+ opts->bound = false;
+
hrtimer_cancel(&ncm->task_timer);
tasklet_kill(&ncm->tx_tasklet);
@@ -1570,13 +1671,14 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+ gether_cleanup(netdev_priv(opts->net));
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
{
struct f_ncm *ncm;
struct f_ncm_opts *opts;
- int status;
/* allocate and initialize one new instance */
ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1586,20 +1688,9 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
opts = container_of(fi, struct f_ncm_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt++;
-
- /* export host's Ethernet address in CDC format */
- status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
- sizeof(ncm->ethaddr));
- if (status < 12) { /* strlen("01234567890a") */
- kfree(ncm);
- mutex_unlock(&opts->lock);
- return ERR_PTR(-EINVAL);
- }
ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
spin_lock_init(&ncm->lock);
ncm_reset_values(ncm);
- ncm->port.ioport = netdev_priv(opts->net);
mutex_unlock(&opts->lock);
ncm->port.is_fixed = true;
ncm->port.supports_multi_frame = true;
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d6396e0909ee..98a72b7d6b6a 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -376,7 +376,7 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+ ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
return status;
}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 8e32b41fc129..2f509f8bcd4b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1272,7 +1272,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
mutex_lock(&printer_ida_lock);
gprinter_put_minor(opts->minor);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
mutex_unlock(&printer_ida_lock);
@@ -1296,7 +1296,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
mutex_lock(&printer_ida_lock);
- if (idr_is_empty(&printer_ida.idr)) {
+ if (ida_is_empty(&printer_ida)) {
status = gprinter_setup(PRINTER_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -1309,7 +1309,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
goto unlock;
}
diff --git a/drivers/usb/gadget/function/f_qc_ecm.c b/drivers/usb/gadget/function/f_qc_ecm.c
new file mode 100644
index 000000000000..d96f727b2da4
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_ecm.c
@@ -0,0 +1,1166 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+#include "u_bam_data.h"
+#include <linux/ecm_ipa.h>
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link. The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support. It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.) A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on USB CDC Ethernet link function driver and
+ * contains MSM specific implementation.
+ */
+
+
+enum ecm_qc_notify_state {
+ ECM_QC_NOTIFY_NONE, /* don't notify */
+ ECM_QC_NOTIFY_CONNECT, /* issue CONNECT next */
+ ECM_QC_NOTIFY_SPEED, /* issue SPEED_CHANGE next */
+};
+
+struct f_ecm_qc {
+ struct qc_gether port;
+ u8 ctrl_id, data_id;
+ enum transport_type xport;
+ u8 port_num;
+ char ethaddr[14];
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ bool is_open;
+ struct data_port bam_port;
+ bool ecm_mdm_ready_trigger;
+
+ bool data_interface_up;
+};
+
+static struct f_ecm_qc *__ecm;
+
+static struct ecm_ipa_params ipa_params;
+
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+ return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide. More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* Currently only one std ecm instance is supported - port index 0. */
+#define ECM_QC_NO_PORTS 1
+#define ECM_QC_DEFAULT_PORT 0
+#define ECM_QC_ACTIVE_PORT 0
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+ .bLength = sizeof(ecm_qc_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+ .bLength = sizeof(ecm_qc_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+ .bLength = sizeof(ecm_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+ .bLength = sizeof(ecm_qc_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+ /* this descriptor actually adds value, surprise! */
+ /* .iMACAddress = DYNAMIC */
+ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+ .wNumberMCFilters = cpu_to_le16(0),
+ .bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+ .bLength = sizeof(ecm_qc_data_nop_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+ .bLength = sizeof(ecm_qc_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+/* INT notify endpoint, ECM_QC_STATUS_BYTECOUNT-byte packets; the +4
+ * presumably rescales the log2-ms interval to HS microframe units —
+ * matches upstream f_ecm, but confirm against the interval macro. */
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+/* Bulk IN endpoint, 512-byte max packet (USB 2.0 high-speed bulk). */
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+/* Bulk OUT endpoint, 512-byte max packet (USB 2.0 high-speed bulk). */
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+/* High-speed descriptor set; same layout as the FS table but with the
+ * HS endpoint descriptors substituted. */
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+ NULL,
+};
+
+/* Super-speed INT notify endpoint; packet size and interval mirror the
+ * HS descriptor. */
+static struct usb_endpoint_descriptor ecm_qc_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+/* SS endpoint companion for the notify endpoint (USB 3.0 requires one
+ * per endpoint). */
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_notify_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+};
+
+/* Super-speed bulk IN endpoint, 1024-byte max packet (USB 3.0 bulk). */
+static struct usb_endpoint_descriptor ecm_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+/* SS endpoint companion for bulk IN; burst left at the default of 0. */
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_in_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+/* Super-speed bulk OUT endpoint, 1024-byte max packet (USB 3.0 bulk).
+ * Fix: the initializer was missing its terminating semicolon (plain
+ * '}'), which is a compile error; every sibling descriptor ends '};'. */
+static struct usb_endpoint_descriptor ecm_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+/* SS endpoint companion for bulk OUT; burst left at the default of 0. */
+static struct usb_ss_ep_comp_descriptor ecm_qc_ss_out_comp_desc = {
+ .bLength = sizeof(ecm_qc_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+/* Super-speed descriptor set: each endpoint descriptor is immediately
+ * followed by its companion descriptor, as USB 3.0 requires. */
+static struct usb_descriptor_header *ecm_qc_ss_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_ss_notify_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_ss_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_out_desc,
+ (struct usb_descriptor_header *) &ecm_qc_ss_out_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+/* [1] is filled in at bind_config time with the host MAC address. */
+static struct usb_string ecm_qc_string_defs[] = {
+ [0].s = "CDC Ethernet Control Model (ECM)",
+ [1].s = NULL /* DYNAMIC */,
+ [2].s = "CDC Ethernet Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = ecm_qc_string_defs,
+};
+
+/* NULL-terminated per-language table list handed to the composite core. */
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+ &ecm_qc_string_table,
+ NULL,
+};
+
+/*
+ * Queue the next CDC notification on the INT endpoint, driven by
+ * ecm->notify_state: NETWORK_CONNECTION (open -> then SPEED_CHANGE,
+ * closed -> done), then SPEED_CHANGE, then NONE.  ecm->notify_req
+ * doubles as the "in flight" flag: it is NULLed while a request is
+ * queued and restored by the completion handler (or here on failure).
+ */
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+ struct usb_request *req = ecm->notify_req;
+ struct usb_cdc_notification *event;
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+ __le32 *data;
+ int status;
+
+ /* notification already in flight? */
+ if (!req)
+ return;
+
+ event = req->buf;
+ switch (ecm->notify_state) {
+ case ECM_QC_NOTIFY_NONE:
+ return;
+
+ case ECM_QC_NOTIFY_CONNECT:
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ if (ecm->is_open) {
+ event->wValue = cpu_to_le16(1);
+ ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+ } else {
+ event->wValue = cpu_to_le16(0);
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ }
+ event->wLength = 0;
+ req->length = sizeof(*event);
+
+ DBG(cdev, "notify connect %s\n",
+ ecm->is_open ? "true" : "false");
+ break;
+
+ case ECM_QC_NOTIFY_SPEED:
+ event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+ event->wValue = cpu_to_le16(0);
+ /* 8 bytes of payload: upstream and downstream bitrate */
+ event->wLength = cpu_to_le16(8);
+ req->length = ECM_QC_STATUS_BYTECOUNT;
+
+ /* SPEED_CHANGE data is up/down speeds in bits/sec */
+ data = req->buf + sizeof(*event);
+ data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+ data[1] = data[0];
+
+ DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ }
+ /* class-specific, interface-directed, device-to-host request */
+ event->bmRequestType = 0xA1;
+ event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+ /* mark in-flight before queueing; restore on immediate failure */
+ ecm->notify_req = NULL;
+ status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+ if (status < 0) {
+ ecm->notify_req = req;
+ DBG(cdev, "notify --> %d\n", status);
+ }
+}
+
+/* Restart the notification sequence from the CONNECT state and kick
+ * the state machine; safe to call whether or not one is in flight. */
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+ /* NOTE on most versions of Linux, host side cdc-ethernet
+ * won't listen for notifications until its netdevice opens.
+ * The first notification then sits in the FIFO for a long
+ * time, and the second one is queued.
+ */
+ ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+ ecm_qc_do_notify(ecm);
+}
+
+/* Accessors exposing the ECM-IPA callback/context fields stashed in the
+ * file-scope ipa_params; presumably consumed by the BAM/IPA data path
+ * (u_bam_data) — confirm against callers. */
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+ return ipa_params.ecm_ipa_rx_dp_notify;
+}
+
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+ return ipa_params.ecm_ipa_tx_dp_notify;
+}
+
+void *ecm_qc_get_ipa_priv(void)
+{
+ return ipa_params.private;
+}
+
+/* True when endpoint configuration should be skipped for the IPA path. */
+bool ecm_qc_get_skip_ep_config(void)
+{
+ return ipa_params.skip_ep_cfg;
+}
+/*-------------------------------------------------------------------------*/
+
+
+
+/*
+ * Completion handler for the INT notify request: hand the request back
+ * (clearing the in-flight marker) and advance the state machine so any
+ * pending SPEED_CHANGE follows the CONNECT notification.
+ */
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ecm_qc *ecm = req->context;
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+ struct usb_cdc_notification *event = req->buf;
+
+ switch (req->status) {
+ case 0:
+ /* no fault */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* endpoint was disabled/flushed: stop the sequence */
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ default:
+ DBG(cdev, "event %02x --> %d\n",
+ event->bNotificationType, req->status);
+ break;
+ }
+ ecm->notify_req = req;
+ ecm_qc_do_notify(ecm);
+}
+
+/*
+ * Handle CDC class control requests on ep0.  Only
+ * SET_ETHERNET_PACKET_FILTER is implemented; everything else falls
+ * through to "invalid" and the function returns -EOPNOTSUPP, which the
+ * composite core turns into a protocol stall.
+ */
+static int ecm_qc_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ pr_debug("Enter\n");
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_ETHERNET_PACKET_FILTER:
+ /* see 6.2.30: no data, wIndex = interface,
+ * wValue = packet filter bitmap
+ */
+ if (w_length != 0 || w_index != ecm->ctrl_id)
+ goto invalid;
+ DBG(cdev, "packet filter %02x\n", w_value);
+ /* REVISIT locking of cdc_filter. This assumes the UDC
+ * driver won't have a concurrent packet TX irq running on
+ * another CPU; or that if it does, this write is atomic...
+ */
+ ecm->port.cdc_filter = w_value;
+ value = 0;
+ break;
+
+ /* and optionally:
+ * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+ * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_STATISTIC:
+ */
+
+ default:
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("ecm req %02x.%02x response err %d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+
+/*
+ * SET_INTERFACE handler.  The control interface only accepts alt 0
+ * (re-enabling the notify endpoint); the data interface toggles between
+ * alt 0 (BAM data path torn down) and alt 1 (endpoints configured and
+ * the BAM/BAM2BAM_IPA data path connected).  Returns 0 or -EINVAL.
+ */
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ /* Control interface has only altsetting 0 */
+ if (intf == ecm->ctrl_id) {
+ if (alt != 0) {
+ pr_warn("fail, alt setting is not 0\n");
+ goto fail;
+ }
+
+ if (ecm->notify->driver_data) {
+ VDBG(cdev, "reset ecm control %d\n", intf);
+ usb_ep_disable(ecm->notify);
+ }
+ if (!(ecm->notify->desc)) {
+ VDBG(cdev, "init ecm ctrl %d\n", intf);
+ if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+ goto fail;
+ }
+ usb_ep_enable(ecm->notify);
+ ecm->notify->driver_data = ecm;
+
+ /* Data interface has two altsettings, 0 and 1 */
+ } else if (intf == ecm->data_id) {
+ if (alt > 1)
+ goto fail;
+
+ /* no change in altsetting: nothing to do */
+ if (ecm->data_interface_up == alt)
+ return 0;
+
+ if (!ecm->port.in_ep->desc ||
+ !ecm->port.out_ep->desc) {
+ DBG(cdev, "init ecm\n");
+ /* NOTE(review): clears the global instance's MDM-ready
+ * trigger; presumably intentional — confirm vs ecm-> */
+ __ecm->ecm_mdm_ready_trigger = false;
+ if (config_ep_by_speed(cdev->gadget, f,
+ ecm->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ ecm->port.out_ep)) {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ goto fail;
+ }
+ }
+
+ if (alt == 0 && ecm->port.in_ep->driver_data) {
+ DBG(cdev, "reset ecm\n");
+ __ecm->ecm_mdm_ready_trigger = false;
+ /* ecm->port is needed for disconnecting the BAM data
+ * path. Only after the BAM data path is disconnected,
+ * we can disconnect the port from the network layer.
+ */
+ bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+ ecm->port_num);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
+ } else if (ecm->data_interface_up &&
+ gadget_is_dwc3(cdev->gadget)) {
+ if (msm_ep_unconfig(ecm->port.in_ep) ||
+ msm_ep_unconfig(ecm->port.out_ep)) {
+ pr_err("%s: ep_unconfig failed\n",
+ __func__);
+ goto fail;
+ }
+ }
+ }
+ /* CDC Ethernet only sends data in non-default altsettings.
+ * Changing altsettings resets filters, statistics, etc.
+ */
+ if (alt == 1) {
+ struct net_device *net;
+
+ /* Enable zlps by default for ECM conformance;
+ * override for musb_hdrc (avoids txdma ovhead).
+ */
+ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
+ );
+ ecm->port.cdc_filter = DEFAULT_FILTER;
+ DBG(cdev, "activate ecm\n");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ net = gether_qc_connect_name(&ecm->port,
+ "ecm0", true);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ }
+
+ /* hand our endpoints to the BAM data port */
+ ecm->bam_port.cdev = cdev;
+ ecm->bam_port.func = &ecm->port.func;
+ ecm->bam_port.in = ecm->port.in_ep;
+ ecm->bam_port.out = ecm->port.out_ep;
+ if (bam_data_connect(&ecm->bam_port, ecm->xport,
+ ecm->port_num, USB_FUNC_ECM))
+ goto fail;
+ }
+
+ ecm->data_interface_up = alt;
+ /* NOTE this can be a minor disagreement with the ECM spec,
+ * which says speed notifications will "always" follow
+ * connection notifications. But we allow one connect to
+ * follow another (if the first is in flight), and instead
+ * just guarantee that a speed notification is always sent.
+ */
+ ecm_qc_notify(ecm);
+ } else
+ goto fail;
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+ /* control interface only has altsetting 0 */
+ if (intf == ecm->ctrl_id)
+ return 0;
+ /* data interface: claimed IN endpoint implies active alt 1 */
+ return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+/*
+ * Function disable (e.g. on unconfigure/disconnect): tear down the BAM
+ * data path and/or the network link, release endpoint descriptors, and
+ * quiesce the notify endpoint.
+ */
+static void ecm_qc_disable(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+
+ DBG(cdev, "ecm deactivated\n");
+
+ if (ecm->port.in_ep->driver_data) {
+ bam_data_disconnect(&ecm->bam_port, USB_FUNC_ECM,
+ ecm->port_num);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
+ } else {
+ /* release EPs incase no set_alt(1) yet */
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ }
+
+ /* dwc3 + IPA transport needs explicit MSM endpoint unconfig */
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA &&
+ gadget_is_dwc3(cdev->gadget)) {
+ msm_ep_unconfig(ecm->port.out_ep);
+ msm_ep_unconfig(ecm->port.in_ep);
+ }
+
+ if (ecm->notify->driver_data) {
+ usb_ep_disable(ecm->notify);
+ ecm->notify->driver_data = NULL;
+ ecm->notify->desc = NULL;
+ }
+
+ ecm->data_interface_up = false;
+}
+
+/*
+ * Bus suspend handler: determine whether remote wakeup is permitted
+ * (per-function for SS, device-level otherwise) and suspend the BAM
+ * data path accordingly.  Without remote wakeup the MDM-ready trigger
+ * is cleared so the host is re-notified after resume.
+ *
+ * Fix: the pr_debug format string had a stray ':' after the newline
+ * ("...%d\n:"), producing a dangling ':' in the next log line.
+ */
+static void ecm_qc_suspend(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ bool remote_wakeup_allowed;
+
+ /* Is DATA interface initialized? */
+ if (!ecm->data_interface_up) {
+ pr_err("%s(): data interface not up\n", __func__);
+ return;
+ }
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed =
+ f->config->cdev->gadget->remote_wakeup;
+
+ pr_debug("%s(): remote_wakeup:%d\n", __func__, remote_wakeup_allowed);
+ if (!remote_wakeup_allowed)
+ __ecm->ecm_mdm_ready_trigger = false;
+
+ bam_data_suspend(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+ remote_wakeup_allowed);
+
+ pr_debug("ecm suspended\n");
+}
+
+/*
+ * Bus resume handler: resume the BAM data path and, when remote wakeup
+ * was not allowed during suspend, re-announce link state to the host
+ * (connected only if the MDM-ready trigger fired meanwhile).
+ */
+static void ecm_qc_resume(struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ bool remote_wakeup_allowed;
+
+ if (!ecm->data_interface_up) {
+ pr_err("%s(): data interface was not up\n", __func__);
+ return;
+ }
+
+ /* same wakeup-capability selection as ecm_qc_suspend() */
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed =
+ f->config->cdev->gadget->remote_wakeup;
+
+ bam_data_resume(&ecm->bam_port, ecm->port_num, USB_FUNC_ECM,
+ remote_wakeup_allowed);
+
+ if (!remote_wakeup_allowed) {
+ ecm->is_open = ecm->ecm_mdm_ready_trigger ? true : false;
+ ecm_qc_notify(ecm);
+ }
+
+ pr_debug("ecm resumed\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ * - disconnected/unconfigured
+ * - configured but inactive (data alt 0)
+ * - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+/* Ethernet-link "open" callback: report carrier-up to the host. */
+static void ecm_qc_open(struct qc_gether *geth)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+
+ DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+ ecm->is_open = true;
+ ecm_qc_notify(ecm);
+}
+
+/* Ethernet-link "close" callback: report carrier-down to the host. */
+static void ecm_qc_close(struct qc_gether *geth)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func);
+
+ DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+ ecm->is_open = false;
+ ecm_qc_notify(ecm);
+}
+
+/* Callback to let ECM_IPA trigger us when network interface is up.
+ * Latches the one-shot ready trigger, notifies the host that the link
+ * is connected, and kicks BAM rx/tx on the active data port. */
+void ecm_mdm_ready(void)
+{
+ struct f_ecm_qc *ecm = __ecm;
+ int port_num;
+
+ if (!ecm) {
+ pr_err("can't set ecm_ready_trigger, no ecm instance\n");
+ return;
+ }
+
+ /* one-shot: ignore a second trigger until it is cleared again */
+ if (ecm->ecm_mdm_ready_trigger) {
+ pr_err("already triggered - can't set ecm_ready_trigger\n");
+ return;
+ }
+
+ pr_debug("set ecm_ready_trigger\n");
+ ecm->ecm_mdm_ready_trigger = true;
+ ecm->is_open = true;
+ ecm_qc_notify(ecm);
+ port_num = (u_bam_data_func_to_port(USB_FUNC_ECM,
+ ECM_QC_ACTIVE_PORT));
+ if (port_num < 0)
+ return;
+ bam_data_start_rx_tx(port_num);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+/*
+ * Bind the ECM function into a configuration: allocate the two
+ * interface IDs, autoconfigure the FS endpoints, allocate the
+ * notification request/buffer, and build per-speed descriptor copies.
+ *
+ * Fix: the superspeed branch assigned f->ss_descriptors but then
+ * tested f->hs_descriptors (copy-paste from the dual-speed branch),
+ * so a failed SS descriptor allocation went undetected.
+ */
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+
+ ecm->ctrl_id = status;
+
+ ecm_qc_control_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0) {
+ pr_debug("no more interface IDs can be allocated\n");
+ goto fail;
+ }
+
+ ecm->data_id = status;
+
+ ecm_qc_data_nop_intf.bInterfaceNumber = status;
+ ecm_qc_data_intf.bInterfaceNumber = status;
+ ecm_qc_union_desc.bSlaveInterface0 = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_in)\n");
+ goto fail;
+ }
+
+ ecm->port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_out)\n");
+ goto fail;
+ }
+
+ ecm->port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* NOTE: a status/notification endpoint is *OPTIONAL* but we
+ * don't treat it that way. It's simpler, and some newer CDC
+ * profiles (wireless handsets) no longer treat it as optional.
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+ if (!ep) {
+ pr_debug("can not allocate endpoint (fs_notify)\n");
+ goto fail;
+ }
+ ecm->notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!ecm->notify_req) {
+ pr_debug("can not allocate notification request\n");
+ goto fail;
+ }
+ ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!ecm->notify_req->buf)
+ goto fail;
+ ecm->notify_req->context = ecm;
+ ecm->notify_req->complete = ecm_qc_notify_complete;
+
+ /* copy descriptors, and track endpoint copies */
+ f->fs_descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+ if (!f->fs_descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ ecm_qc_hs_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_hs_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_hs_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ecm_qc_ss_in_desc.bEndpointAddress =
+ ecm_qc_fs_in_desc.bEndpointAddress;
+ ecm_qc_ss_out_desc.bEndpointAddress =
+ ecm_qc_fs_out_desc.bEndpointAddress;
+ ecm_qc_ss_notify_desc.bEndpointAddress =
+ ecm_qc_fs_notify_desc.bEndpointAddress;
+
+ f->ss_descriptors = usb_copy_descriptors(ecm_qc_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+ */
+
+ ecm->port.open = ecm_qc_open;
+ ecm->port.close = ecm_qc_close;
+
+ DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ ecm->port.in_ep->name, ecm->port.out_ep->name,
+ ecm->notify->name);
+ return 0;
+
+fail:
+
+ if (f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (ecm->notify_req) {
+ kfree(ecm->notify_req->buf);
+ usb_ep_free_request(ecm->notify, ecm->notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+ if (ecm->port.out_ep->desc)
+ ecm->port.out_ep->driver_data = NULL;
+ if (ecm->port.in_ep->desc)
+ ecm->port.in_ep->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+/*
+ * Unbind counterpart of ecm_qc_bind(): free per-speed descriptor
+ * copies, the notification request/buffer, any IPA resources, and the
+ * instance itself (also clearing the global __ecm pointer).
+ */
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+ DBG(c->cdev, "ecm unbind\n");
+
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->fs_descriptors);
+
+ kfree(ecm->notify_req->buf);
+ usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+ /* MAC-address string pointed into the freed instance */
+ ecm_qc_string_defs[1].s = NULL;
+
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ /*
+ * call flush_workqueue to make sure that any pending
+ * disconnect_work() from u_bam_data.c file is being
+ * flushed before calling this rndis_ipa_cleanup API
+ * as rndis ipa disconnect API is required to be
+ * called before this.
+ */
+ bam_data_flush_workqueue();
+ ecm_ipa_cleanup(ipa_params.private);
+ }
+
+ kfree(ecm);
+ __ecm = NULL;
+}
+
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ * side of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name)
+{
+ struct f_ecm_qc *ecm;
+ int status;
+
+ if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+ return -EINVAL;
+
+ pr_debug("data transport type is %s\n", xport_name);
+
+ /* maybe allocate device-global string IDs */
+ if (ecm_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[0].id = status;
+ ecm_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[2].id = status;
+ ecm_qc_data_intf.iInterface = status;
+
+ /* MAC address */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_qc_string_defs[1].id = status;
+ ecm_qc_desc.iMACAddress = status;
+ }
+
+ /* allocate and initialize one new instance */
+ ecm = kzalloc(sizeof(*ecm), GFP_KERNEL);
+ if (!ecm)
+ return -ENOMEM;
+ __ecm = ecm;
+
+ ecm->xport = str_to_xport(xport_name);
+ pr_debug("set xport = %d\n", ecm->xport);
+
+ /* export host's Ethernet address in CDC format */
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ /* IPA path: MACs come from the gether layer, and IPA is
+ * told to call ecm_mdm_ready() when the device is up */
+ gether_qc_get_macs(ipa_params.device_ethaddr,
+ ipa_params.host_ethaddr);
+ snprintf(ecm->ethaddr, sizeof(ecm->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ ipa_params.host_ethaddr[0], ipa_params.host_ethaddr[1],
+ ipa_params.host_ethaddr[2], ipa_params.host_ethaddr[3],
+ ipa_params.host_ethaddr[4], ipa_params.host_ethaddr[5]);
+ ipa_params.device_ready_notify = ecm_mdm_ready;
+ } else
+ snprintf(ecm->ethaddr, sizeof(ecm->ethaddr),
+ "%02X%02X%02X%02X%02X%02X",
+ ethaddr[0], ethaddr[1], ethaddr[2],
+ ethaddr[3], ethaddr[4], ethaddr[5]);
+
+ ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+ ecm->port.cdc_filter = DEFAULT_FILTER;
+
+ ecm->port.func.name = "cdc_ethernet";
+ ecm->port.func.strings = ecm_qc_strings;
+ /* descriptors are per-instance copies */
+ ecm->port.func.bind = ecm_qc_bind;
+ ecm->port.func.unbind = ecm_qc_unbind;
+ ecm->port.func.set_alt = ecm_qc_set_alt;
+ ecm->port.func.get_alt = ecm_qc_get_alt;
+ ecm->port.func.setup = ecm_qc_setup;
+ ecm->port.func.disable = ecm_qc_disable;
+ ecm->port.func.suspend = ecm_qc_suspend;
+ ecm->port.func.resume = ecm_qc_resume;
+ ecm->ecm_mdm_ready_trigger = false;
+
+ status = usb_add_function(c, &ecm->port.func);
+ if (status) {
+ pr_err("failed to add function\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ __ecm = NULL;
+ return status;
+ }
+
+ /* non-IPA transports are fully set up at this point */
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return status;
+
+ pr_debug("setting ecm_ipa, host_ethaddr=%pM, device_ethaddr=%pM",
+ ipa_params.host_ethaddr, ipa_params.device_ethaddr);
+ status = ecm_ipa_init(&ipa_params);
+ if (status) {
+ pr_err("failed to initialize ecm_ipa\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ __ecm = NULL;
+
+ } else {
+ pr_debug("ecm_ipa successful created\n");
+ }
+
+ return status;
+}
+
+/* One-time setup of the BAM data port(s) backing the QC ECM function;
+ * returns 0 on success or the bam_data_setup() error code. */
+static int ecm_qc_init(void)
+{
+ int status;
+
+ pr_debug("initialize ecm qc port instance\n");
+
+ status = bam_data_setup(USB_FUNC_ECM, ECM_QC_NO_PORTS);
+ if (status)
+ pr_err("bam_data_setup failed err: %d\n", status);
+
+ return status;
+}
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 000000000000..2d62b07cb3f6
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1552 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "u_data_ipa.h"
+#include <linux/rndis_ipa.h>
+#include "configfs.h"
+
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+ "Max size of bus transfer to host");
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet. The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex. Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short: it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets. Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data. The control model is built around
+ * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored). RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface. That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely. Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ * - Power management ... references data that's scattered around lots
+ * of other documentation, which is incorrect/incomplete there too.
+ *
+ * - There are various undocumented protocol requirements, like the need
+ * to send garbage in some control-OUT messages.
+ *
+ * - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+/* Per-instance state for the RNDIS-over-IPA USB function. */
+struct f_rndis_qc {
+	struct usb_function func;
+	u8 ctrl_id, data_id;		/* control/data interface numbers */
+	u8 ethaddr[ETH_ALEN];		/* host-visible MAC address */
+	u32 vendorID;
+	u8 ul_max_pkt_per_xfer;	/* max aggregated packets per UL transfer */
+	u8 pkt_alignment_factor;
+	u32 max_pkt_size;
+	const char *manufacturer;
+	struct rndis_params *params;	/* handle from rndis_register() */
+	atomic_t ioctl_excl;		/* single-user guard (see rndis_qc_lock) */
+	atomic_t open_excl;		/* single-user guard (see rndis_qc_lock) */
+
+	struct usb_ep *notify;		/* interrupt-IN status endpoint */
+	struct usb_request *notify_req;	/* the one request queued on notify */
+	atomic_t notify_count;		/* pending RESPONSE_AVAILABLE events */
+	struct gadget_ipa_port bam_port;	/* bulk IN/OUT routed to IPA */
+	u8 port_num;
+	u16 cdc_filter;
+	bool net_ready_trigger;
+	bool use_wceis;		/* advertise Wireless-Controller class codes */
+};
+
+/* Module-wide state: IPA init parameters and the lock guarding _rndis_qc. */
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct f_rndis_qc *rndis);
+
+/* Map a generic usb_function back to its containing f_rndis_qc. */
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+	/* SS: 13 packets/uframe * 1024 bytes; HS: 13 * 512; FS: 19 * 64 */
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5	/* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS			1
+#define RNDIS_QC_ACTIVE_PORT			0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER			15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR			4
+
+#define RNDIS_QC_IOCTL_MAGIC		'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER	_IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE	_IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows*/
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+	.bLength =		sizeof(rndis_qc_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_MISC,
+	.bInterfaceSubClass =	0x04,
+	.bInterfaceProtocol =	0x01,	/* RNDIS over ethernet */
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+	.bLength =		sizeof(rndis_qc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+	.bLength =		sizeof(rndis_qc_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+	.bLength =		sizeof(rndis_qc_acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+	.bLength =		sizeof(rndis_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+	.bLength =		sizeof(rndis_qc_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+	.bLength =		sizeof(rndis_qc_iad_descriptor),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_MISC,
+	.bFunctionSubClass =	0x04,
+	.bFunctionProtocol =	0x01, /* RNDIS over ethernet */
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+/* full-speed descriptor set; endpoint addresses are patched by autoconfig */
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	/* HS bInterval is log2(microframes): msec exponent + 4 */
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+/* high-speed descriptor set; mirrors the full-speed layout */
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+/* shared companion descriptor for both bulk endpoints */
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+/* super-speed descriptor set; each endpoint is followed by its companion */
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+/* indices [0..2] are assigned device-global IDs in rndis_qc_bind() */
+static struct usb_string rndis_qc_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+	&rndis_qc_string_table,
+	NULL,
+};
+
+/* Singleton instance pointer, guarded by rndis_lock in completion paths.
+ * NOTE(review): not static — presumably referenced from another file; verify.
+ */
+struct f_rndis_qc *_rndis_qc;
+
+/* Try-lock built on an atomic counter: returns 0 on success, -EBUSY if
+ * another user already holds it.
+ */
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+	atomic_dec(excl);
+	return -EBUSY;
+}
+
+/* Release a try-lock taken with rndis_qc_lock(). */
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callback from the RNDIS core when an encapsulated response is ready:
+ * queue a RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ * Runs in atomic context (GFP_ATOMIC queueing).
+ */
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc *rndis = _rndis;
+	struct usb_request *req = rndis->notify_req;
+	__le32 *data = req->buf;
+	int status;
+
+	/* only the first pending response queues the request; the
+	 * completion handler re-queues while notify_count stays non-zero
+	 */
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Endpoint not enabled: undo the increment, otherwise notify_count
+	 * stays elevated forever and no notification would ever be queued
+	 * again once the endpoint comes back up.
+	 */
+	if (!rndis->notify->driver_data) {
+		atomic_dec(&rndis->notify_count);
+		return;
+	}
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
+
+/*
+ * Completion handler shared by the notify endpoint and the ep0 data stage
+ * (rndis_qc_setup() installs it on cdev->req as well; the "ep != notify"
+ * check below filters that case).  Drains pending RESPONSE_AVAILABLE
+ * notifications by re-queueing the request until notify_count reaches 0.
+ * rndis_lock protects against a concurrent teardown of _rndis_qc.
+ */
+static void rndis_qc_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc *rndis;
+	int status = req->status;
+	struct usb_composite_dev *cdev;
+	struct usb_ep *notify_ep;
+
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	if (!rndis->func.config || !rndis->func.config->cdev) {
+		pr_err("%s(): cdev or config is NULL.\n", __func__);
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	cdev = rndis->func.config->cdev;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		goto out;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			goto out;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			goto out;
+		/* drop the lock around usb_ep_queue(), then re-validate
+		 * _rndis_qc before touching it again
+		 */
+		notify_ep = rndis->notify;
+		spin_unlock(&rndis_lock);
+		status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
+		if (status) {
+			spin_lock(&rndis_lock);
+			if (!_rndis_qc)
+				goto out;
+			atomic_dec(&_rndis_qc->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+			spin_unlock(&rndis_lock);
+		}
+	}
+
+	return;
+
+out:
+	spin_unlock(&rndis_lock);
+}
+
+/*
+ * ep0 data-stage completion for USB_CDC_SEND_ENCAPSULATED_COMMAND: parse
+ * the RNDIS command and, on RNDIS_MSG_INIT, program the IPA UL/DL max
+ * transfer sizes negotiated with the host.
+ */
+static void rndis_qc_command_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis;
+	int				status;
+	rndis_init_msg_type		*buf;
+	u32				ul_max_xfer_size, dl_max_xfer_size;
+
+	if (req->status != 0) {
+		pr_err("%s: RNDIS command completion error %d\n",
+				__func__, req->status);
+		return;
+	}
+
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	/* NOTE(review): MessageType is little-endian on the wire and is
+	 * compared without le32_to_cpu(); fine on LE targets — confirm.
+	 */
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+		ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
+		/*
+		 * For consistent data throughput from IPA, it is required to
+		 * fine tune aggregation byte limit as 7KB. RNDIS IPA driver
+		 * use provided this value to calculate aggregation byte limit
+		 * and program IPA hardware for aggregation.
+		 * Host provides 8KB or 16KB as Max Transfer size, hence select
+		 * minimum out of host provided value and optimum transfer size
+		 * to get 7KB as aggregation byte limit.
+		 */
+		if (rndis_dl_max_xfer_size)
+			dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+				rndis_get_dl_max_xfer_size(rndis->params));
+		else
+			dl_max_xfer_size =
+				rndis_get_dl_max_xfer_size(rndis->params);
+		ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
+	}
+	spin_unlock(&rndis_lock);
+}
+
+/*
+ * Class-specific ep0 request handler.  Implements the two CDC requests
+ * RNDIS uses for its RPC scheme: SEND_ENCAPSULATED_COMMAND (host->device,
+ * parsed later in rndis_qc_command_complete) and GET_ENCAPSULATED_RESPONSE
+ * (device->host, returns the next queued RNDIS response).
+ * Returns the data-stage length queued, or a negative value to stall.
+ */
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	pr_debug("%s: Enter\n", __func__);
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->params, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->params, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->context = rndis;
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+/*
+ * Look up a network device by name in init_net.
+ * Returns the net_device, or ERR_PTR(-EINVAL) if not found.
+ *
+ * NOTE(review): the reference taken by dev_get_by_name() is dropped
+ * before returning, so the caller receives an unreferenced pointer —
+ * the device must be pinned by other means (e.g. the rndis_ipa driver
+ * owning rndis0) for this to be safe; confirm against the caller.
+ */
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+	struct net_device *net_dev;
+
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Decrement net_dev refcount as it was incremented in
+	 * dev_get_by_name().
+	 */
+	dev_put(net_dev);
+	return net_dev;
+}
+
+/*
+ * SET_INTERFACE handler.  For the control interface it (re)enables the
+ * notify endpoint; for the data interface it tears down and reconnects
+ * the BAM/IPA data path and hooks the rndis0 net_device into the RNDIS
+ * state machine.  Returns 0 on success or a negative errno.
+ */
+static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis_qc	 *rndis = func_to_rndis_qc(f);
+	struct f_rndis_qc_opts *opts;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	/*
+	 * usb_bam_get_connection_idx() reports failure with a negative
+	 * errno, so these must be signed ints; the previous u8 declaration
+	 * made the "< 0" error check below unreachable.
+	 */
+	int src_connection_idx;
+	int dst_connection_idx;
+	enum usb_ctrl usb_bam_type;
+
+	/* we know alt == 0 */
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		rndis->net_ready_trigger = false;
+		if (rndis->bam_port.in->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			/* bam_port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			ipa_data_disconnect(&rndis->bam_port,
+						USB_IPA_FUNC_RNDIS);
+		}
+
+		if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					rndis->bam_port.in) ||
+				config_ep_by_speed(cdev->gadget, f,
+					rndis->bam_port.out)) {
+				rndis->bam_port.in->desc = NULL;
+				rndis->bam_port.out->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->cdc_filter = 0;
+
+		rndis->bam_port.cdev = cdev;
+		rndis->bam_port.func = &rndis->func;
+		ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+		usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+		src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+			rndis->port_num);
+		dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+			rndis->port_num);
+		if (src_connection_idx < 0 || dst_connection_idx < 0) {
+			pr_err("%s: usb_bam_get_connection_idx failed\n",
+				__func__);
+			/* was "return ret" with ret never initialized */
+			return -EINVAL;
+		}
+		if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				src_connection_idx, dst_connection_idx))
+			goto fail;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		rndis_qc_open(rndis);
+		net = rndis_qc_get_net("rndis0");
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+		opts->net = net;
+
+		rndis_set_param_dev(rndis->params, net,
+				&rndis->cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Deactivate the function: reset the RNDIS state machine, tear down the
+ * IPA data path, unconfigure the MSM endpoints and disable notify.
+ * No-op if the notify endpoint was never enabled.
+ */
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+
+	/* rndis_lock also guards _rndis_qc access in completion handlers */
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis_uninit(rndis->params);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
+
+	msm_ep_unconfig(rndis->bam_port.out);
+	msm_ep_unconfig(rndis->bam_port.in);
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*
+ * Bus-suspend handler.  If remote wakeup is not permitted, force RNDIS
+ * flow control on (mimicking the PACKET_FILTER a Windows host would have
+ * sent), then suspend the IPA data path.
+ */
+static void rndis_qc_suspend(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	/* SS uses per-function wakeup; HS/FS use the device-level flag */
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	/* fixed malformed format string (was "...%d\n:" with a stray colon
+	 * after the newline)
+	 */
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+					__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		/* This is required as Linux host side RNDIS driver doesn't
+		 * send RNDIS_MESSAGE_PACKET_FILTER before suspending USB bus.
+		 * Hence we perform same operations explicitly here for Linux
+		 * host case. In case of windows, this RNDIS state machine is
+		 * already updated due to receiving of PACKET_FILTER.
+		 */
+		rndis_flow_control(rndis->params, true);
+		pr_debug("%s(): Disconnecting\n", __func__);
+	}
+
+	ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+	pr_debug("rndis suspended\n");
+}
+
+/*
+ * Bus-resume handler: resume the IPA data path and, when remote wakeup
+ * was not allowed (so rndis_qc_suspend() forced flow control on), undo
+ * the forced flow control and re-open the link.
+ */
+static void rndis_qc_resume(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	pr_debug("%s: rndis resumed\n", __func__);
+
+	/* Nothing to do if DATA interface wasn't initialized */
+	if (!rndis->bam_port.cdev) {
+		pr_debug("data interface was not up\n");
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		rndis_qc_open(rndis);
+		/*
+		 * Linux host doesn't send RNDIS_MSG_INIT or a non-zero value
+		 * set with RNDIS_MESSAGE_PACKET_FILTER after performing bus
+		 * resume. Hence trigger USB IPA transfer functionality
+		 * explicitly here. The Windows host case is also being
+		 * handled with the RNDIS state machine.
+		 */
+		rndis_flow_control(rndis->params, false);
+	}
+
+	pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested. A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+/* Report the link as up to the RNDIS state machine: set the medium with
+ * the (scaled) theoretical bitrate and signal "connected" to the host.
+ */
+static void rndis_qc_open(struct f_rndis_qc *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	/* RNDIS expects the speed in units of 100 bps */
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->params);
+}
+
+/* Flow-control hook handed to rndis_register(): stop or (re)start the
+ * RNDIS-IPA data pipe.  @param is unused; the RNDIS function is global.
+ */
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
+{
+	if (!enable)
+		ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
+	else
+		ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+/*
+ * bind() handler: initialize the RNDIS-IPA link, allocate string IDs,
+ * interface numbers, endpoints, the notification request and per-speed
+ * descriptor copies, then register with the RNDIS core.
+ * Returns 0 on success or a negative errno (all partial allocations are
+ * released on the fail path).
+ */
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	struct rndis_params *params;
+	int		status;
+	struct usb_ep		*ep;
+
+	status = rndis_ipa_init(&rndis_ipa_params);
+	if (status) {
+		pr_err("%s: failed to init rndis_ipa\n", __func__);
+		return status;
+	}
+
+	rndis_ipa_supported = true;
+	/* maybe allocate device-global string IDs */
+	if (rndis_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[0].id = status;
+		rndis_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[1].id = status;
+		rndis_qc_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[2].id = status;
+		rndis_qc_iad_descriptor.iFunction = status;
+	}
+
+	/* optionally present as a Wireless Controller (WCEIS) device */
+	if (rndis->use_wceis) {
+		rndis_qc_iad_descriptor.bFunctionClass =
+				USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_qc_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_qc_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_qc_control_intf.bInterfaceClass =
+				USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_qc_control_intf.bInterfaceSubClass = 0x1;
+		rndis_qc_control_intf.bInterfaceProtocol = 0x03;
+	}
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	params = rndis_register(rndis_qc_response_available, rndis,
+			ipa_data_flow_control_enable);
+	/*
+	 * rndis_register() returns a pointer and reports errors via
+	 * ERR_PTR(); the old "params < 0" pointer/integer comparison
+	 * could never detect a failure.
+	 */
+	if (IS_ERR(params)) {
+		status = PTR_ERR(params);
+		goto fail;
+	}
+	rndis->params = params;
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->params, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+		rndis_set_param_vendor(rndis->params, rndis->vendorID,
+			rndis->manufacturer))
+		goto fail;
+
+	pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+				rndis->ul_max_pkt_per_xfer);
+	rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets QC device will request
+	 * alignment to 4 (2^2).
+	 */
+	pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+				rndis->pkt_alignment_factor);
+	rndis_set_pkt_alignment_factor(rndis->params,
+				rndis->pkt_alignment_factor);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->bam_port.in->name, rndis->bam_port.out->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints; the bam_port
+	 * pointers are still NULL if endpoint autoconfig failed above, so
+	 * guard against dereferencing them
+	 */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->bam_port.out && rndis->bam_port.out->desc)
+		rndis->bam_port.out->driver_data = NULL;
+	if (rndis->bam_port.in && rndis->bam_port.in->desc)
+		rndis->bam_port.in->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+/* free_func callback: drop the reference taken on the opts instance
+ * when this function was allocated.
+ */
+static void rndis_qc_free(struct usb_function *f)
+{
+	struct f_rndis_qc_opts *opts =
+		container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+
+	opts->refcnt--;
+}
+
+/*
+ * rndis_qc_unbind - undo everything rndis_qc_bind() set up.
+ *
+ * Frees the per-speed descriptor copies, the notification request and
+ * its buffer, and tears down the RNDIS/IPA state.  Fix: the original
+ * code never freed f->ss_descriptors, leaking the super-speed
+ * descriptor copy made in bind on super-speed capable gadgets.
+ */
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+
+	pr_debug("rndis_qc_unbind: free\n");
+	rndis_deregister(rndis->params);
+
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	/*
+	 * call flush_workqueue to make sure that any pending
+	 * disconnect_work() from u_bam_data.c file is being
+	 * flushed before calling this rndis_ipa_cleanup API
+	 * as rndis ipa disconnect API is required to be
+	 * called before this.
+	 */
+	ipa_data_flush_workqueue();
+	rndis_ipa_cleanup(rndis_ipa_params.private);
+	rndis_ipa_supported = false;
+
+}
+
+/*
+ * rndis_ipa_reset_trigger - clear the "network ready" trigger flag.
+ *
+ * Fix: take rndis_lock around the _rndis_qc access.  The instance is
+ * freed under rndis_lock in qcrndis_free_inst(), and every other
+ * accessor (e.g. rndis_net_ready_notify()) holds the lock, so the
+ * original unlocked dereference raced with instance teardown.
+ */
+void rndis_ipa_reset_trigger(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	rndis->net_ready_trigger = false;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests.
+ *
+ * Sets net_ready_trigger exactly once per session (a second call is
+ * rejected) and then kicks off IPA RX/TX data flow for the RNDIS
+ * function.  Runs under rndis_lock while touching the shared
+ * _rndis_qc instance, since that instance may be freed concurrently.
+ */
+void rndis_net_ready_notify(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+	/* only the first notification per session is acted upon */
+	if (rndis->net_ready_trigger) {
+		pr_err("%s: Already triggered", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	pr_debug("%s: Set net_ready_trigger", __func__);
+	rndis->net_ready_trigger = true;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	/* start data path outside the spinlock */
+	ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
+}
+
+/**
+ * rndis_qc_bind_config_vendor - set up an RNDIS (QC/IPA) usb_function
+ * @fi: the function instance holding the per-instance opts
+ * @vendorID: optional RNDIS vendor ID (0 = unset)
+ * @manufacturer: optional manufacturer string (NULL = unset)
+ * @max_pkt_per_xfer: UL aggregation limit; 0 selects the default
+ * @pkt_alignment_factor: alignment exponent; 0 selects the default
+ * Context: single threaded during gadget setup
+ *
+ * Initializes the pre-allocated f_rndis_qc instance from @fi (host and
+ * device MAC addresses, IPA callbacks, aggregation parameters) and
+ * fills in the usb_function callbacks.  Returns the embedded
+ * usb_function, never NULL.
+ */
+
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+				u32 vendorID, const char *manufacturer,
+				u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc *rndis;
+
+	/* allocate and initialize one new instance */
+	opts = container_of(fi, struct f_rndis_qc_opts, func_inst);
+
+	opts->refcnt++;
+	rndis = opts->rndis;
+
+	rndis->vendorID = opts->vendor_id;
+	rndis->manufacturer = opts->manufacturer;
+	/* export host's Ethernet address in CDC format */
+	random_ether_addr(rndis_ipa_params.host_ethaddr);
+	random_ether_addr(rndis_ipa_params.device_ethaddr);
+	pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+		rndis_ipa_params.host_ethaddr,
+		rndis_ipa_params.device_ethaddr);
+	ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+	/* IPA will call us back once the network interface is ready */
+	rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+
+	/* if max_pkt_per_xfer was not configured set to default value */
+	rndis->ul_max_pkt_per_xfer =
+			max_pkt_per_xfer ? max_pkt_per_xfer :
+			DEFAULT_MAX_PKT_PER_XFER;
+	ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+	/*
+	 * Check no RNDIS aggregation, and alignment if not mentioned,
+	 * use alignment factor as zero. If aggregated RNDIS data transfer,
+	 * max packet per transfer would be default if it is not set
+	 * explicitly, and same way use alignment factor as 2 by default.
+	 * This would eliminate need of writing to sysfs if default RNDIS
+	 * aggregation setting required. Writing to both sysfs entries,
+	 * those values will always override default values.
+	 */
+	/* NOTE(review): the "then" branch re-assigns 0 to an already-zero
+	 * field, i.e. it only exists to suppress the default in the
+	 * no-aggregation case — confirm this is intentional.
+	 */
+	if ((rndis->pkt_alignment_factor == 0) &&
+			(rndis->ul_max_pkt_per_xfer == 1))
+		rndis->pkt_alignment_factor = 0;
+	else
+		rndis->pkt_alignment_factor = pkt_alignment_factor ?
+				pkt_alignment_factor :
+				DEFAULT_PKT_ALIGNMENT_FACTOR;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->cdc_filter = 0;
+
+	rndis->func.name = "rndis";
+	rndis->func.strings = rndis_qc_strings;
+	/* descriptors are per-instance copies */
+	rndis->func.bind = rndis_qc_bind;
+	rndis->func.unbind = rndis_qc_unbind;
+	rndis->func.set_alt = rndis_qc_set_alt;
+	rndis->func.setup = rndis_qc_setup;
+	rndis->func.disable = rndis_qc_disable;
+	rndis->func.suspend = rndis_qc_suspend;
+	rndis->func.resume = rndis_qc_resume;
+	rndis->func.free_func = rndis_qc_free;
+
+	/* publish the instance for the misc-device/ioctl path */
+	_rndis_qc = rndis;
+
+	return &rndis->func;
+}
+
+/* usb_function_instance alloc callback: build the function with all
+ * vendor parameters left at their defaults.
+ */
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+	struct usb_function *func;
+
+	func = rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
+	return func;
+}
+
+/*
+ * Character-device open: allow a single opener at a time.
+ *
+ * Fails with -ENODEV if the RNDIS instance has not been created yet,
+ * or -EBUSY if open_excl is already taken.  On success the instance
+ * pointer is stashed in fp->private_data.
+ */
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+	int ret = 0;
+	unsigned long flags;
+	pr_info("Open rndis QC driver\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not created yet\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* single-open exclusion; released in rndis_qc_release_dev() */
+	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+		pr_err("Already opened\n");
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	fp->private_data = _rndis_qc;
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	if (!ret)
+		pr_info("rndis QC file opened\n");
+
+	return ret;
+}
+
+/* Character-device release: drop the single-open exclusion taken in
+ * rndis_qc_open_dev().  Returns -ENODEV if the instance is gone.
+ */
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	pr_info("Close rndis QC file\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+	} else {
+		rndis_qc_unlock(&_rndis_qc->open_excl);
+	}
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	return ret;
+}
+
+/*
+ * ioctl handler: exports the UL max-packets-per-transfer and max
+ * packet size values to userspace.
+ *
+ * The values are snapshotted under rndis_lock, then copy_to_user()
+ * runs with the lock dropped (it may sleep/fault).  ioctl_excl
+ * serializes concurrent ioctls; the lock is re-taken afterwards to
+ * release it, re-checking _rndis_qc because the instance may have
+ * been freed while the lock was dropped.
+ */
+static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	u8 qc_max_pkt_per_xfer = 0;
+	u32 qc_max_pkt_size = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* snapshot values while the instance is known to be alive */
+	qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+	qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+	if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	/* drop the spinlock: copy_to_user() below may fault */
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	pr_info("Received command %d\n", cmd);
+
+	switch (cmd) {
+	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_per_xfer,
+					sizeof(qc_max_pkt_per_xfer));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent UL max packets per xfer %d\n",
+				qc_max_pkt_per_xfer);
+		break;
+	case RNDIS_QC_GET_MAX_PKT_SIZE:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_size,
+					sizeof(qc_max_pkt_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_debug("Sent max packet size %d\n",
+				qc_max_pkt_size);
+		break;
+	default:
+		pr_err("Unsupported IOCTL\n");
+		ret = -EINVAL;
+	}
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	/* instance may have been freed while unlocked; if so, its
+	 * ioctl_excl is gone with it and there is nothing to release
+	 */
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return ret;
+}
+
+/* file_operations for the /dev/android_rndis_qc control node */
+static const struct file_operations rndis_qc_fops = {
+	.owner = THIS_MODULE,
+	.open = rndis_qc_open_dev,
+	.release = rndis_qc_release_dev,
+	.unlocked_ioctl	= rndis_qc_ioctl,
+};
+
+/* misc device exposing the ioctl interface; registered from
+ * qcrndis_set_inst_name(), deregistered from qcrndis_free_inst()
+ */
+static struct miscdevice rndis_qc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_rndis_qc",
+	.fops = &rndis_qc_fops,
+};
+
+/*
+ * Instance teardown: deregister the misc device, release IPA
+ * resources, and free the f_rndis_qc and opts structures.
+ *
+ * The frees and the _rndis_qc reset are done under rndis_lock so that
+ * concurrent ioctl/open/notify paths either see a valid instance or
+ * NULL, never a freed pointer.
+ */
+static void qcrndis_free_inst(struct usb_function_instance *f)
+{
+	struct f_rndis_qc_opts *opts = container_of(f,
+				struct f_rndis_qc_opts, func_inst);
+	unsigned long flags;
+
+	misc_deregister(&rndis_qc_device);
+
+	ipa_data_free(USB_IPA_FUNC_RNDIS);
+	spin_lock_irqsave(&rndis_lock, flags);
+	kfree(opts->rndis);
+	_rndis_qc = NULL;
+	kfree(opts);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * Configfs instance-name hook: validates the name length, allocates
+ * the f_rndis_qc instance, registers the control misc device and sets
+ * up the IPA data path.
+ *
+ * Fixes over the original: a misc_register() failure is now
+ * propagated instead of being logged and ignored, and an
+ * ipa_data_setup() failure now deregisters the misc device; both
+ * error paths also clear opts->rndis before freeing it so no dangling
+ * pointer is left behind.
+ */
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc *rndis;
+	int name_len;
+	int ret;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	pr_debug("initialize rndis QC instance\n");
+	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+	if (!rndis) {
+		pr_err("%s: fail allocate and initialize new instance\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&rndis_lock);
+	opts->rndis = rndis;
+	ret = misc_register(&rndis_qc_device);
+	if (ret) {
+		pr_err("rndis QC driver failed to register\n");
+		goto err_free;
+	}
+
+	ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		goto err_deregister;
+	}
+
+	return 0;
+
+err_deregister:
+	misc_deregister(&rndis_qc_device);
+err_free:
+	opts->rndis = NULL;
+	kfree(rndis);
+	return ret;
+}
+
+/* map a configfs item back to its enclosing f_rndis_qc_opts */
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+	struct config_group *group = to_config_group(item);
+
+	return container_of(group, struct f_rndis_qc_opts,
+			    func_inst.group);
+}
+
+/* configfs item release: drop the reference on the function instance */
+static void qcrndis_attr_release(struct config_item *item)
+{
+	struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qcrndis_item_ops = {
+	.release = qcrndis_attr_release,
+};
+
+
+/* configfs "wceis" attribute: read the current WCEIS flag */
+static ssize_t qcrndis_wceis_show(struct config_item *item, char *page)
+{
+	struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+
+	return snprintf(page, PAGE_SIZE, "%d\n", rndis->use_wceis);
+}
+
+/* configfs "wceis" attribute: accept any kstrtobool-parsable value */
+static ssize_t qcrndis_wceis_store(struct config_item *item,
+			const char *page, size_t len)
+{
+	struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+	bool val;
+
+	if (kstrtobool(page, &val))
+		return -EINVAL;
+
+	rndis->use_wceis = val;
+
+	return len;
+}
+
+CONFIGFS_ATTR(qcrndis_, wceis);
+
+static struct configfs_attribute *qcrndis_attrs[] = {
+	&qcrndis_attr_wceis,
+	NULL,
+};
+
+/* configfs item type tying together release and attributes */
+static struct config_item_type qcrndis_func_type = {
+	.ct_item_ops	= &qcrndis_item_ops,
+	.ct_attrs	= qcrndis_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+/*
+ * Allocate a usb_function_instance for the qcrndis function and wire
+ * up its configfs group; freed via qcrndis_free_inst().
+ */
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+	opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &qcrndis_func_type);
+
+	return &opts->func_inst;
+}
+
+/* Accessors exporting fields of the shared rndis_ipa_params block. */
+
+void *rndis_qc_get_ipa_rx_cb(void)
+{
+	void *rx_cb = rndis_ipa_params.ipa_rx_notify;
+
+	return rx_cb;
+}
+
+void *rndis_qc_get_ipa_tx_cb(void)
+{
+	void *tx_cb = rndis_ipa_params.ipa_tx_notify;
+
+	return tx_cb;
+}
+
+void *rndis_qc_get_ipa_priv(void)
+{
+	void *priv = rndis_ipa_params.private;
+
+	return priv;
+}
+
+bool rndis_qc_get_skip_ep_config(void)
+{
+	bool skip = rndis_ipa_params.skip_ep_cfg;
+
+	return skip;
+}
+
+DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
+
+/*
+ * Module init: register the "rndis_bam" usb_function.
+ * Fix: the failure message wrongly said "diag" (copy/paste from the
+ * diag function driver); it now names rndis.
+ */
+static int __init usb_qcrndis_init(void)
+{
+	int ret;
+
+	ret = usb_function_register(&rndis_bamusb_func);
+	if (ret) {
+		pr_err("%s: failed to register rndis %d\n", __func__, ret);
+		return ret;
+	}
+	return ret;
+}
+
+/* Module exit: unregister the "rndis_bam" usb_function. */
+static void __exit usb_qcrndis_exit(void)
+{
+	usb_function_unregister(&rndis_bamusb_func);
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+/* Fix: this is the RNDIS function driver, not RMNET */
+MODULE_DESCRIPTION("USB RNDIS Function Driver");
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
new file mode 100644
index 000000000000..2c416472e279
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -0,0 +1,1187 @@
+/*
+ * f_qdss.c -- QDSS function Driver
+ *
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/usb_qdss.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/cdc.h>
+
+#include "f_qdss.h"
+
+/* protects usb_qdss_ch_list and channel lookup/registration */
+static DEFINE_SPINLOCK(qdss_lock);
+/* all known QDSS channels (MSM and MDM) */
+static LIST_HEAD(usb_qdss_ch_list);
+
+/* Data interface: one bulk-IN endpoint, vendor-specific class */
+static struct usb_interface_descriptor qdss_data_intf_desc = {
+	.bLength            =	sizeof qdss_data_intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bAlternateSetting  =   0,
+	.bNumEndpoints      =	1,
+	.bInterfaceClass    =	0xff,
+	.bInterfaceSubClass =	0xff,
+	.bInterfaceProtocol =	0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_hs_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_ss_data_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =  USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_data_ep_comp_desc = {
+	.bLength              =	 sizeof qdss_data_ep_comp_desc,
+	.bDescriptorType      =	 USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst            =	 1,
+	.bmAttributes         =	 0,
+	.wBytesPerInterval    =	 0,
+};
+
+/* Optional control interface: bulk IN + bulk OUT endpoint pair */
+static struct usb_interface_descriptor qdss_ctrl_intf_desc = {
+	.bLength            =	sizeof qdss_ctrl_intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bAlternateSetting  =   0,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xff,
+	.bInterfaceSubClass =	0xff,
+	.bInterfaceProtocol =	0xff,
+};
+
+static struct usb_endpoint_descriptor qdss_hs_ctrl_in_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor qdss_ss_ctrl_in_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_IN,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor qdss_hs_ctrl_out_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_OUT,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(512),
+};
+
+/* NOTE(review): 0x400 == 1024, but the sibling SS descriptors spell it
+ * decimal — same value, inconsistent notation only.
+ */
+static struct usb_endpoint_descriptor qdss_ss_ctrl_out_desc = {
+	.bLength              =	 USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType      =	 USB_DT_ENDPOINT,
+	.bEndpointAddress     =	 USB_DIR_OUT,
+	.bmAttributes         =	 USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize       =	 __constant_cpu_to_le16(0x400),
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_ctrl_in_ep_comp_desc = {
+	.bLength              =	 sizeof qdss_ctrl_in_ep_comp_desc,
+	.bDescriptorType      =	 USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst            =	 0,
+	.bmAttributes         =	 0,
+	.wBytesPerInterval    =	 0,
+};
+
+static struct usb_ss_ep_comp_descriptor qdss_ctrl_out_ep_comp_desc = {
+	.bLength              =	 sizeof qdss_ctrl_out_ep_comp_desc,
+	.bDescriptorType      =	 USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst            =	 0,
+	.bmAttributes         =	 0,
+	.wBytesPerInterval    =	 0,
+};
+
+/* full config: data + control interfaces (debug interface enabled) */
+static struct usb_descriptor_header *qdss_hs_desc[] = {
+	(struct usb_descriptor_header *) &qdss_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_hs_data_desc,
+	(struct usb_descriptor_header *) &qdss_ctrl_intf_desc,
+	(struct usb_descriptor_header *) &qdss_hs_ctrl_in_desc,
+	(struct usb_descriptor_header *) &qdss_hs_ctrl_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *qdss_ss_desc[] = {
+	(struct usb_descriptor_header *) &qdss_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_ss_data_desc,
+	(struct usb_descriptor_header *) &qdss_data_ep_comp_desc,
+	(struct usb_descriptor_header *) &qdss_ctrl_intf_desc,
+	(struct usb_descriptor_header *) &qdss_ss_ctrl_in_desc,
+	(struct usb_descriptor_header *) &qdss_ctrl_in_ep_comp_desc,
+	(struct usb_descriptor_header *) &qdss_ss_ctrl_out_desc,
+	(struct usb_descriptor_header *) &qdss_ctrl_out_ep_comp_desc,
+	NULL,
+};
+
+/* data-only config: used when the debug (control) interface is off */
+static struct usb_descriptor_header *qdss_hs_data_only_desc[] = {
+	(struct usb_descriptor_header *) &qdss_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_hs_data_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *qdss_ss_data_only_desc[] = {
+	(struct usb_descriptor_header *) &qdss_data_intf_desc,
+	(struct usb_descriptor_header *) &qdss_ss_data_desc,
+	(struct usb_descriptor_header *) &qdss_data_ep_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+#define MSM_QDSS_DATA_IDX	0
+#define MSM_QDSS_CTRL_IDX	1
+#define MDM_QDSS_DATA_IDX	2
+#define MDM_QDSS_CTRL_IDX	3
+
+static struct usb_string qdss_string_defs[] = {
+	[MSM_QDSS_DATA_IDX].s = "MSM QDSS Data",
+	[MSM_QDSS_CTRL_IDX].s = "MSM QDSS Control",
+	[MDM_QDSS_DATA_IDX].s = "MDM QDSS Data",
+	[MDM_QDSS_CTRL_IDX].s = "MDM QDSS Control",
+	{}, /* end of list */
+};
+
+static struct usb_gadget_strings qdss_string_table = {
+	.language =		0x0409,
+	.strings =		qdss_string_defs,
+};
+
+static struct usb_gadget_strings *qdss_strings[] = {
+	&qdss_string_table,
+	NULL,
+};
+
+/* recover the f_qdss wrapper from its embedded usb_function */
+static inline struct f_qdss *func_to_qdss(struct usb_function *f)
+{
+	return container_of(f, struct f_qdss, port.function);
+}
+
+/* recover the usb_qdss_opts wrapper from its function instance */
+static struct usb_qdss_opts *
+to_fi_usb_qdss_opts(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct usb_qdss_opts, func_inst);
+}
+/*----------------------------------------------------------------------*/
+
+/*
+ * Completion handler for write requests on either the ctrl-IN or the
+ * data endpoint (selected by debug_inface_enabled).
+ *
+ * On a successful transfer whose length is a non-zero multiple of the
+ * endpoint's max packet size, the same request is re-queued with
+ * length 0 to send a ZLP; this handler then runs again for the ZLP.
+ * Otherwise the request is returned to its free pool and the channel
+ * owner is notified with the saved actual/status.
+ */
+static void qdss_write_complete(struct usb_ep *ep,
+	struct usb_request *req)
+{
+	struct f_qdss *qdss = ep->driver_data;
+	struct qdss_request *d_req = req->context;
+	struct usb_ep *in;
+	struct list_head *list_pool;
+	enum qdss_state state;
+	unsigned long flags;
+
+	pr_debug("qdss_ctrl_write_complete\n");
+
+	if (qdss->debug_inface_enabled) {
+		in = qdss->port.ctrl_in;
+		list_pool = &qdss->ctrl_write_pool;
+		state = USB_QDSS_CTRL_WRITE_DONE;
+	} else {
+		in = qdss->port.data;
+		list_pool = &qdss->data_write_pool;
+		state = USB_QDSS_DATA_WRITE_DONE;
+	}
+
+	if (!req->status) {
+		/* send zlp */
+		if ((req->length >= ep->maxpacket) &&
+				((req->length % ep->maxpacket) == 0)) {
+			req->length = 0;
+			/* save results before the request is reused */
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			if (!usb_ep_queue(in, req, GFP_ATOMIC))
+				return;
+		}
+	}
+
+	spin_lock_irqsave(&qdss->lock, flags);
+	list_add_tail(&req->list, list_pool);
+	/* length == 0 means this was the ZLP; results already saved */
+	if (req->length != 0) {
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+	}
+	spin_unlock_irqrestore(&qdss->lock, flags);
+
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv, state, d_req, NULL);
+}
+
+/*
+ * Completion handler for ctrl-OUT read requests: propagate the
+ * transfer result to the caller's qdss_request, return the usb_request
+ * to the read pool, and notify the channel owner.
+ */
+static void qdss_ctrl_read_complete(struct usb_ep *ep,
+	struct usb_request *req)
+{
+	struct f_qdss *qdss = ep->driver_data;
+	struct qdss_request *d_req = req->context;
+	unsigned long flags;
+
+	pr_debug("qdss_ctrl_read_complete\n");
+
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+
+	spin_lock_irqsave(&qdss->lock, flags);
+	list_add_tail(&req->list, &qdss->ctrl_read_pool);
+	spin_unlock_irqrestore(&qdss->lock, flags);
+
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_READ_DONE, d_req,
+			NULL);
+}
+
+/*
+ * usb_qdss_free_req - free all pooled usb_requests of a channel.
+ * @ch: the QDSS channel whose data-write, ctrl-write and ctrl-read
+ *      pools should be drained.
+ *
+ * Idiom cleanup: uses list_for_each_entry_safe() (same <linux/list.h>
+ * API family the original already used via list_for_each_safe +
+ * list_entry), which is the conventional kernel form for
+ * delete-while-iterating.
+ */
+void usb_qdss_free_req(struct usb_qdss_ch *ch)
+{
+	struct f_qdss *qdss;
+	struct usb_request *req, *tmp;
+
+	pr_debug("usb_qdss_free_req\n");
+
+	qdss = ch->priv_usb;
+	if (!qdss) {
+		pr_err("usb_qdss_free_req: qdss ctx is NULL\n");
+		return;
+	}
+
+	list_for_each_entry_safe(req, tmp, &qdss->data_write_pool, list) {
+		list_del(&req->list);
+		usb_ep_free_request(qdss->port.data, req);
+	}
+
+	list_for_each_entry_safe(req, tmp, &qdss->ctrl_write_pool, list) {
+		list_del(&req->list);
+		usb_ep_free_request(qdss->port.ctrl_in, req);
+	}
+
+	list_for_each_entry_safe(req, tmp, &qdss->ctrl_read_pool, list) {
+		list_del(&req->list);
+		usb_ep_free_request(qdss->port.ctrl_out, req);
+	}
+}
+EXPORT_SYMBOL(usb_qdss_free_req);
+
+/*
+ * usb_qdss_alloc_req - pre-allocate write and read requests for @ch.
+ * @ch: the QDSS channel
+ * @no_write_buf: number of write requests (> 0 required)
+ * @no_read_buf: number of ctrl-read requests; must be > 0 when the
+ *               debug interface is enabled and 0 otherwise
+ *
+ * Write requests target ctrl-IN when the debug interface is enabled,
+ * the data endpoint otherwise.  Returns 0 on success; on any
+ * allocation failure all partially allocated requests are released
+ * via usb_qdss_free_req() and -ENOMEM is returned.
+ */
+int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf,
+	int no_read_buf)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	struct usb_request *req;
+	struct usb_ep *in;
+	struct list_head *list_pool;
+	int i;
+
+	pr_debug("usb_qdss_alloc_req\n");
+
+	if (!qdss) {
+		pr_err("usb_qdss_alloc_req: channel %s closed\n", ch->name);
+		return -ENODEV;
+	}
+
+	/* read buffers are only meaningful with the ctrl interface */
+	if ((qdss->debug_inface_enabled &&
+		(no_write_buf <= 0 || no_read_buf <= 0)) ||
+		(!qdss->debug_inface_enabled &&
+		(no_write_buf <= 0 || no_read_buf))) {
+		pr_err("usb_qdss_alloc_req: missing params\n");
+		return -ENODEV;
+	}
+
+	if (qdss->debug_inface_enabled) {
+		in = qdss->port.ctrl_in;
+		list_pool = &qdss->ctrl_write_pool;
+	} else {
+		in = qdss->port.data;
+		list_pool = &qdss->data_write_pool;
+	}
+
+	for (i = 0; i < no_write_buf; i++) {
+		req = usb_ep_alloc_request(in, GFP_ATOMIC);
+		if (!req) {
+			pr_err("usb_qdss_alloc_req: ctrl_in allocation err\n");
+			goto fail;
+		}
+		req->complete = qdss_write_complete;
+		list_add_tail(&req->list, list_pool);
+	}
+
+	for (i = 0; i < no_read_buf; i++) {
+		req = usb_ep_alloc_request(qdss->port.ctrl_out, GFP_ATOMIC);
+		if (!req) {
+			pr_err("usb_qdss_alloc_req:ctrl_out allocation err\n");
+			goto fail;
+		}
+		req->complete = qdss_ctrl_read_complete;
+		list_add_tail(&req->list, &qdss->ctrl_read_pool);
+	}
+
+	return 0;
+
+fail:
+	/* releases everything allocated above */
+	usb_qdss_free_req(ch);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(usb_qdss_alloc_req);
+
+/* release our claim (driver_data) on every endpoint we autoconfigured */
+static void clear_eps(struct usb_function *f)
+{
+	struct f_qdss *qdss = func_to_qdss(f);
+	struct usb_ep *ep;
+
+	pr_debug("clear_eps\n");
+
+	ep = qdss->port.ctrl_in;
+	if (ep)
+		ep->driver_data = NULL;
+
+	ep = qdss->port.ctrl_out;
+	if (ep)
+		ep->driver_data = NULL;
+
+	ep = qdss->port.data;
+	if (ep)
+		ep->driver_data = NULL;
+}
+
+/*
+ * Free the per-speed descriptor copies made in qdss_bind().
+ *
+ * Fix: NULL the pointers after freeing.  clear_desc() is reachable
+ * from both the bind failure path and qdss_unbind(); the original left
+ * f->hs_descriptors/f->ss_descriptors dangling, risking a double free
+ * if it ran twice.
+ */
+static void clear_desc(struct usb_gadget *gadget, struct usb_function *f)
+{
+	pr_debug("clear_desc\n");
+
+	if (gadget_is_superspeed(gadget) && f->ss_descriptors) {
+		usb_free_descriptors(f->ss_descriptors);
+		f->ss_descriptors = NULL;
+	}
+
+	if (gadget_is_dualspeed(gadget) && f->hs_descriptors) {
+		usb_free_descriptors(f->hs_descriptors);
+		f->hs_descriptors = NULL;
+	}
+}
+
+/*
+ * qdss_bind - allocate interfaces, strings, endpoints and descriptor
+ * copies for the QDSS function.
+ *
+ * QDSS only supports HS/SS gadgets.  Endpoints are autoconfigured
+ * from the super-speed descriptors; the HS descriptors then inherit
+ * the assigned endpoint addresses.  The control interface and its two
+ * endpoints exist only when debug_inface_enabled is set.
+ *
+ * Returns 0 on success.  NOTE(review): every failure after interface
+ * allocation is collapsed to -ENOTSUPP, discarding the real error —
+ * confirm callers don't need the distinction.
+ */
+static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_gadget *gadget = c->cdev->gadget;
+	struct f_qdss *qdss = func_to_qdss(f);
+	struct usb_ep *ep;
+	int iface, id, str_data_id, str_ctrl_id;
+
+	pr_debug("qdss_bind\n");
+
+	if (!gadget_is_dualspeed(gadget) && !gadget_is_superspeed(gadget)) {
+		pr_err("qdss_bind: full-speed is not supported\n");
+		return -ENOTSUPP;
+	}
+
+	/* Allocate data I/F */
+	iface = usb_interface_id(c, f);
+	if (iface < 0) {
+		pr_err("interface allocation error\n");
+		return iface;
+	}
+	qdss_data_intf_desc.bInterfaceNumber = iface;
+	qdss->data_iface_id = iface;
+
+	id = usb_string_id(c->cdev);
+	if (id < 0)
+		return id;
+
+	/* pick MSM or MDM string set based on the channel name */
+	str_data_id = MSM_QDSS_DATA_IDX;
+	str_ctrl_id = MSM_QDSS_CTRL_IDX;
+	if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
+		str_data_id = MDM_QDSS_DATA_IDX;
+		str_ctrl_id = MDM_QDSS_CTRL_IDX;
+	}
+
+	qdss_string_defs[str_data_id].id = id;
+	qdss_data_intf_desc.iInterface = id;
+
+	if (qdss->debug_inface_enabled) {
+		/* Allocate ctrl I/F */
+		iface = usb_interface_id(c, f);
+		if (iface < 0) {
+			pr_err("interface allocation error\n");
+			return iface;
+		}
+		qdss_ctrl_intf_desc.bInterfaceNumber = iface;
+		qdss->ctrl_iface_id = iface;
+		id = usb_string_id(c->cdev);
+		if (id < 0)
+			return id;
+		qdss_string_defs[str_ctrl_id].id = id;
+		qdss_ctrl_intf_desc.iInterface = id;
+	}
+
+	/* claim endpoints using the SS descriptors as templates */
+	ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
+		&qdss_data_ep_comp_desc);
+	if (!ep) {
+		pr_err("ep_autoconfig error\n");
+		goto fail;
+	}
+	qdss->port.data = ep;
+	ep->driver_data = qdss;
+
+	if (qdss->debug_inface_enabled) {
+		ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_ctrl_in_desc,
+			&qdss_ctrl_in_ep_comp_desc);
+		if (!ep) {
+			pr_err("ep_autoconfig error\n");
+			goto fail;
+		}
+		qdss->port.ctrl_in = ep;
+		ep->driver_data = qdss;
+
+		ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_ctrl_out_desc,
+			&qdss_ctrl_out_ep_comp_desc);
+		if (!ep) {
+			pr_err("ep_autoconfig error\n");
+			goto fail;
+		}
+		qdss->port.ctrl_out = ep;
+		ep->driver_data = qdss;
+	}
+
+	/*update descriptors*/
+	qdss_hs_data_desc.bEndpointAddress =
+		qdss_ss_data_desc.bEndpointAddress;
+	if (qdss->debug_inface_enabled) {
+		qdss_hs_ctrl_in_desc.bEndpointAddress =
+		qdss_ss_ctrl_in_desc.bEndpointAddress;
+		qdss_hs_ctrl_out_desc.bEndpointAddress =
+		qdss_ss_ctrl_out_desc.bEndpointAddress;
+		f->hs_descriptors = usb_copy_descriptors(qdss_hs_desc);
+	} else
+		f->hs_descriptors = usb_copy_descriptors(
+							qdss_hs_data_only_desc);
+	if (!f->hs_descriptors) {
+		pr_err("usb_copy_descriptors error\n");
+		goto fail;
+	}
+
+	/* update ss descriptors */
+	if (gadget_is_superspeed(gadget)) {
+		if (qdss->debug_inface_enabled)
+			f->ss_descriptors =
+			usb_copy_descriptors(qdss_ss_desc);
+		else
+			f->ss_descriptors =
+			usb_copy_descriptors(qdss_ss_data_only_desc);
+		if (!f->ss_descriptors) {
+			pr_err("usb_copy_descriptors error\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+fail:
+	/* undo endpoint claims and free any descriptor copies made */
+	clear_eps(f);
+	clear_desc(gadget, f);
+	return -ENOTSUPP;
+}
+
+
+/* undo qdss_bind(): wait out pending work, then release endpoints and
+ * descriptor copies
+ */
+static void qdss_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_qdss *qdss = func_to_qdss(f);
+
+	pr_debug("qdss_unbind\n");
+
+	/* make sure connect/disconnect work items have finished */
+	flush_workqueue(qdss->wq);
+
+	clear_eps(f);
+	clear_desc(c->cdev->gadget, f);
+}
+
+/*
+ * Disable whichever of the three endpoints are currently enabled,
+ * clearing the corresponding *_enabled flags so this is idempotent.
+ */
+static void qdss_eps_disable(struct usb_function *f)
+{
+	struct f_qdss *qdss = func_to_qdss(f);
+
+	pr_debug("qdss_eps_disable\n");
+
+	if (qdss->ctrl_in_enabled) {
+		usb_ep_disable(qdss->port.ctrl_in);
+		qdss->ctrl_in_enabled = 0;
+	}
+
+	if (qdss->ctrl_out_enabled) {
+		usb_ep_disable(qdss->port.ctrl_out);
+		qdss->ctrl_out_enabled = 0;
+	}
+
+	if (qdss->data_enabled) {
+		usb_ep_disable(qdss->port.data);
+		qdss->data_enabled = 0;
+	}
+}
+
+/*
+ * Deferred disconnect handling (queued from qdss_disable()).
+ *
+ * Notifies the channel owner, and for a connected MSM channel also
+ * tears down the endpoint-specific data connection and frees the
+ * endless request.  Finally drops the runtime-PM reference taken in
+ * qdss_set_alt().
+ */
+static void usb_qdss_disconnect_work(struct work_struct *work)
+{
+	struct f_qdss *qdss;
+	int status;
+	unsigned long flags;
+
+	qdss = container_of(work, struct f_qdss, disconnect_w);
+	pr_debug("usb_qdss_disconnect_work\n");
+
+
+	/* Notify qdss to cancel all active transfers */
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv,
+			USB_QDSS_DISCONNECT,
+			NULL,
+			NULL);
+
+	/* Uninitialized init data i.e. ep specific operation */
+	if (qdss->ch.app_conn && !strcmp(qdss->ch.name, USB_QDSS_CH_MSM)) {
+		status = uninit_data(qdss->port.data);
+		if (status)
+			pr_err("%s: uninit_data error\n", __func__);
+
+		status = set_qdss_data_connection(qdss, 0);
+		if (status)
+			pr_err("qdss_disconnect error");
+
+		/* endless_req is shared with the connect path; take the
+		 * lock to free-and-clear it exactly once
+		 */
+		spin_lock_irqsave(&qdss->lock, flags);
+		if (qdss->endless_req) {
+			usb_ep_free_request(qdss->port.data,
+					qdss->endless_req);
+			qdss->endless_req = NULL;
+		}
+		spin_unlock_irqrestore(&qdss->lock, flags);
+	}
+
+	/*
+	 * Decrement usage count which was incremented
+	 * before calling connect work
+	 */
+	usb_gadget_autopm_put_async(qdss->gadget);
+}
+
+/*
+ * disable callback: mark the function disconnected (once), disable the
+ * endpoints, and defer the heavier teardown to disconnect_w.
+ */
+static void qdss_disable(struct usb_function *f)
+{
+	struct f_qdss *qdss = func_to_qdss(f);
+	unsigned long flags;
+
+	pr_debug("qdss_disable\n");
+	spin_lock_irqsave(&qdss->lock, flags);
+	/* already disconnected: nothing to do */
+	if (!qdss->usb_connected) {
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return;
+	}
+
+	qdss->usb_connected = 0;
+	spin_unlock_irqrestore(&qdss->lock, flags);
+	/*cancell all active xfers*/
+	qdss_eps_disable(f);
+	queue_work(qdss->wq, &qdss->disconnect_w);
+}
+
+/*
+ * Deferred connect handling (queued from qdss_set_alt()).
+ *
+ * For an MDM channel only the owner is notified.  For an MSM channel
+ * the data connection is established and the pre-allocated endless
+ * request is queued on the data endpoint before notifying.
+ * NOTE(review): the error returns here skip the USB_QDSS_CONNECT
+ * notification and leave the autopm reference held until disconnect —
+ * confirm that is the intended behavior.
+ */
+static void usb_qdss_connect_work(struct work_struct *work)
+{
+	struct f_qdss *qdss;
+	int status;
+	struct usb_request *req = NULL;
+	unsigned long flags;
+
+	qdss = container_of(work, struct f_qdss, connect_w);
+
+	/* If cable is already removed, discard connect_work */
+	if (qdss->usb_connected == 0) {
+		pr_debug("%s: discard connect_work\n", __func__);
+		cancel_work_sync(&qdss->disconnect_w);
+		return;
+	}
+
+	pr_debug("usb_qdss_connect_work\n");
+
+	if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+		goto notify;
+
+	status = set_qdss_data_connection(qdss, 1);
+	if (status) {
+		pr_err("set_qdss_data_connection error(%d)", status);
+		return;
+	}
+
+	/* snapshot endless_req under the lock; disconnect may free it */
+	spin_lock_irqsave(&qdss->lock, flags);
+	req = qdss->endless_req;
+	spin_unlock_irqrestore(&qdss->lock, flags);
+	if (!req)
+		return;
+
+	status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
+	if (status) {
+		pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
+		return;
+	}
+
+notify:
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
+						NULL, &qdss->ch);
+}
+
+/*
+ * set_alt callback: enable the endpoints of whichever interface the
+ * host selected (data, or ctrl when the debug interface exists).
+ *
+ * Only alt setting 0 and HS/SS speeds are accepted.  Selecting the
+ * data interface takes an async runtime-PM reference (released in the
+ * disconnect work, or immediately on the "fail" path).  Once all
+ * required endpoints are enabled, usb_connected is set and — if the
+ * application side is already attached — connect_w is queued.
+ */
+static int qdss_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_qdss  *qdss = func_to_qdss(f);
+	struct usb_gadget *gadget = f->config->cdev->gadget;
+	struct usb_qdss_ch *ch = &qdss->ch;
+	int ret = 0;
+
+	pr_debug("qdss_set_alt qdss pointer = %pK\n", qdss);
+	qdss->gadget = gadget;
+
+	if (alt != 0)
+		goto fail1;
+
+	if (gadget->speed != USB_SPEED_SUPER &&
+		gadget->speed != USB_SPEED_HIGH) {
+		pr_err("qdss_st_alt: qdss supportes HS or SS only\n");
+		ret = -EINVAL;
+		goto fail1;
+	}
+
+	if (intf == qdss->data_iface_id) {
+		/* Increment usage count on connect */
+		usb_gadget_autopm_get_async(qdss->gadget);
+
+		if (config_ep_by_speed(gadget, f, qdss->port.data)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		ret = usb_ep_enable(qdss->port.data);
+		if (ret)
+			goto fail;
+
+		qdss->port.data->driver_data = qdss;
+		qdss->data_enabled = 1;
+
+
+	} else if ((intf == qdss->ctrl_iface_id) &&
+	(qdss->debug_inface_enabled)) {
+
+		if (config_ep_by_speed(gadget, f, qdss->port.ctrl_in)) {
+			ret = -EINVAL;
+			goto fail1;
+		}
+
+		ret = usb_ep_enable(qdss->port.ctrl_in);
+		if (ret)
+			goto fail1;
+
+		qdss->port.ctrl_in->driver_data = qdss;
+		qdss->ctrl_in_enabled = 1;
+
+		if (config_ep_by_speed(gadget, f, qdss->port.ctrl_out)) {
+			ret = -EINVAL;
+			goto fail1;
+		}
+
+
+		ret = usb_ep_enable(qdss->port.ctrl_out);
+		if (ret)
+			goto fail1;
+
+		qdss->port.ctrl_out->driver_data = qdss;
+		qdss->ctrl_out_enabled = 1;
+	}
+
+	/* connected only once every required endpoint is enabled */
+	if (qdss->debug_inface_enabled) {
+		if (qdss->ctrl_out_enabled && qdss->ctrl_in_enabled &&
+			qdss->data_enabled) {
+			qdss->usb_connected = 1;
+			pr_debug("qdss_set_alt usb_connected INTF enabled\n");
+		}
+	} else {
+		if (qdss->data_enabled) {
+			qdss->usb_connected = 1;
+			pr_debug("qdss_set_alt usb_connected INTF disabled\n");
+		}
+	}
+
+	if (qdss->usb_connected && ch->app_conn)
+		queue_work(qdss->wq, &qdss->connect_w);
+
+	return 0;
+fail:
+	/* Decrement usage count in case of failure */
+	usb_gadget_autopm_put_async(qdss->gadget);
+fail1:
+	pr_err("qdss_set_alt failed\n");
+	qdss_eps_disable(f);
+	return ret;
+}
+
+/*
+ * alloc_usb_qdss() - allocate an f_qdss instance and register its channel.
+ *
+ * Returns the new instance, or ERR_PTR(-EEXIST) if a channel with
+ * @channel_name is already registered, ERR_PTR(-ENOMEM) on allocation
+ * failure.  @channel_name is referenced (not copied); the caller keeps
+ * ownership of the string for the lifetime of the instance.
+ *
+ * The allocation is done up front so that the duplicate check and the
+ * list insertion happen under one hold of qdss_lock: the previous code
+ * dropped the lock between checking and inserting, so two concurrent
+ * callers could both register the same channel name.
+ */
+static struct f_qdss *alloc_usb_qdss(char *channel_name)
+{
+	struct f_qdss *qdss;
+	struct usb_qdss_ch *ch;
+	unsigned long flags;
+
+	qdss = kzalloc(sizeof(struct f_qdss), GFP_KERNEL);
+	if (!qdss)
+		return ERR_PTR(-ENOMEM);
+
+	/* Dedicated ordered workqueue for connect/disconnect work. */
+	qdss->wq = create_singlethread_workqueue(channel_name);
+	if (!qdss->wq) {
+		kfree(qdss);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&qdss->lock);
+	INIT_LIST_HEAD(&qdss->ctrl_read_pool);
+	INIT_LIST_HEAD(&qdss->ctrl_write_pool);
+	INIT_LIST_HEAD(&qdss->data_write_pool);
+	INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
+	INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
+
+	spin_lock_irqsave(&qdss_lock, flags);
+	list_for_each_entry(ch, &usb_qdss_ch_list, list) {
+		if (!strcmp(channel_name, ch->name)) {
+			spin_unlock_irqrestore(&qdss_lock, flags);
+			pr_err("%s: (%s) is already available.\n",
+				__func__, channel_name);
+			destroy_workqueue(qdss->wq);
+			kfree(qdss);
+			return ERR_PTR(-EEXIST);
+		}
+	}
+
+	ch = &qdss->ch;
+	ch->name = channel_name;
+	list_add_tail(&ch->list, &usb_qdss_ch_list);
+	spin_unlock_irqrestore(&qdss_lock, flags);
+
+	return qdss;
+}
+
+/*
+ * usb_qdss_ctrl_read() - queue a control OUT (host-to-device) transfer.
+ *
+ * Pops a pre-allocated request from ctrl_read_pool, points it at the
+ * client's buffer described by @d_req, and queues it on the ctrl_out
+ * endpoint.  Returns 0 on success, -ENODEV if the channel has no USB
+ * side, -EIO if the cable is not connected or the queue fails, -EAGAIN
+ * if no request is currently free in the pool.
+ */
+int usb_qdss_ctrl_read(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+
+	pr_debug("usb_qdss_ctrl_read\n");
+
+	if (!qdss)
+		return -ENODEV;
+
+	spin_lock_irqsave(&qdss->lock, flags);
+
+	if (qdss->usb_connected == 0) {
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&qdss->ctrl_read_pool)) {
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		pr_err("error: usb_qdss_ctrl_read list is empty\n");
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&qdss->ctrl_read_pool, struct usb_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&qdss->lock, flags);
+
+	/* req->context carries d_req back to the completion handler. */
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+
+	if (usb_ep_queue(qdss->port.ctrl_out, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&qdss->lock, flags);
+		list_add_tail(&req->list, &qdss->ctrl_read_pool);
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		pr_err("qdss usb_ep_queue failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_qdss_ctrl_read);
+
+/*
+ * usb_qdss_ctrl_write() - queue a control IN (device-to-host) transfer.
+ *
+ * Takes a free request from ctrl_write_pool, attaches the client buffer
+ * from @d_req and queues it on the ctrl_in endpoint.  Returns 0 on
+ * success, -ENODEV without a USB side, -EIO when disconnected or the
+ * queue fails, -EAGAIN when the pool is exhausted.
+ */
+int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	struct usb_request *in_req;
+	unsigned long lock_flags;
+
+	pr_debug("usb_qdss_ctrl_write\n");
+
+	if (!qdss)
+		return -ENODEV;
+
+	spin_lock_irqsave(&qdss->lock, lock_flags);
+	if (!qdss->usb_connected) {
+		spin_unlock_irqrestore(&qdss->lock, lock_flags);
+		return -EIO;
+	}
+	if (list_empty(&qdss->ctrl_write_pool)) {
+		pr_err("error: usb_qdss_ctrl_write list is empty\n");
+		spin_unlock_irqrestore(&qdss->lock, lock_flags);
+		return -EAGAIN;
+	}
+	in_req = list_first_entry(&qdss->ctrl_write_pool, struct usb_request,
+		list);
+	list_del(&in_req->list);
+	spin_unlock_irqrestore(&qdss->lock, lock_flags);
+
+	/* Completion handler recovers d_req through req->context. */
+	in_req->buf = d_req->buf;
+	in_req->length = d_req->length;
+	in_req->context = d_req;
+	if (usb_ep_queue(qdss->port.ctrl_in, in_req, GFP_ATOMIC)) {
+		/* Queueing failed: return the request to the free pool. */
+		spin_lock_irqsave(&qdss->lock, lock_flags);
+		list_add_tail(&in_req->list, &qdss->ctrl_write_pool);
+		spin_unlock_irqrestore(&qdss->lock, lock_flags);
+		pr_err("qdss usb_ep_queue failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_qdss_ctrl_write);
+
+/*
+ * usb_qdss_write() - queue a bulk data (SW path) transfer toward the host.
+ *
+ * Pops a free request from data_write_pool, attaches the client buffer
+ * from @d_req and queues it on the data endpoint.  Returns 0 on success,
+ * -ENODEV without a USB side, -EIO when disconnected or the queue fails,
+ * -EAGAIN when the pool is exhausted.
+ */
+int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+
+	/* was "usb_qdss_ctrl_write" - copy/paste error made traces from
+	 * this path indistinguishable from the ctrl path */
+	pr_debug("usb_qdss_write\n");
+
+	if (!qdss)
+		return -ENODEV;
+
+	spin_lock_irqsave(&qdss->lock, flags);
+
+	if (qdss->usb_connected == 0) {
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&qdss->data_write_pool)) {
+		pr_err("error: usb_qdss_data_write list is empty\n");
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&qdss->data_write_pool, struct usb_request,
+		list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&qdss->lock, flags);
+
+	/* req->context carries d_req back to the completion handler. */
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
+		/* Queueing failed: return the request to the free pool. */
+		spin_lock_irqsave(&qdss->lock, flags);
+		list_add_tail(&req->list, &qdss->data_write_pool);
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		pr_err("qdss usb_ep_queue failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_qdss_write);
+
+/*
+ * usb_qdss_open() - attach a client (e.g. the QDSS driver) to a channel.
+ *
+ * Looks up a previously registered channel by @name, records the
+ * client's @priv cookie and @notify callback, and marks the app side
+ * connected.  If the USB cable is already up, the connect worker is
+ * scheduled immediately so the client gets its USB_QDSS_CONNECT event.
+ * Returns the channel, or NULL if @notify is missing or no channel with
+ * that name exists.
+ */
+struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
+	void (*notify)(void *, unsigned, struct qdss_request *,
+		struct usb_qdss_ch *))
+{
+	struct usb_qdss_ch *ch;
+	struct f_qdss *qdss;
+	unsigned long flags;
+	int found = 0;
+
+	pr_debug("usb_qdss_open\n");
+
+	if (!notify) {
+		pr_err("usb_qdss_open: notification func is missing\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&qdss_lock, flags);
+	/* Check if we already have a channel with this name */
+	list_for_each_entry(ch, &usb_qdss_ch_list, list) {
+		if (!strcmp(name, ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_irqrestore(&qdss_lock, flags);
+		pr_debug("usb_qdss_open failed as %s not found\n", name);
+		return NULL;
+	} else {
+		pr_debug("usb_qdss_open: qdss ctx found\n");
+		qdss = container_of(ch, struct f_qdss, ch);
+		/* priv_usb links the channel back to its USB function. */
+		ch->priv_usb = qdss;
+	}
+
+	ch->priv = priv;
+	ch->notify = notify;
+	ch->app_conn = 1;
+	spin_unlock_irqrestore(&qdss_lock, flags);
+
+	/* Handle the case where the USB cable was already connected
+	 * before the client called usb_qdss_open(). */
+	if (qdss->usb_connected == 1)
+		queue_work(qdss->wq, &qdss->connect_w);
+
+	return ch;
+}
+EXPORT_SYMBOL(usb_qdss_open);
+
+/*
+ * usb_qdss_close() - detach the client from a channel.
+ *
+ * Clears the channel's USB backlink and app-connected flag.  For the
+ * HW-accelerated (non-MDM) path on a connected cable it also dequeues
+ * and frees the endless data request and tears down the BAM/data
+ * connection.  The MDM channel skips the teardown since its data path
+ * is the SW request pool, not the endless request.
+ */
+void usb_qdss_close(struct usb_qdss_ch *ch)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	struct usb_gadget *gadget;
+	unsigned long flags;
+	int status;
+
+	pr_debug("usb_qdss_close\n");
+
+	spin_lock_irqsave(&qdss_lock, flags);
+	ch->priv_usb = NULL;
+	if (!qdss || !qdss->usb_connected ||
+			!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
+		ch->app_conn = 0;
+		spin_unlock_irqrestore(&qdss_lock, flags);
+		return;
+	}
+
+	if (qdss->endless_req) {
+		usb_ep_dequeue(qdss->port.data, qdss->endless_req);
+		usb_ep_free_request(qdss->port.data, qdss->endless_req);
+		qdss->endless_req = NULL;
+	}
+	gadget = qdss->gadget;
+	ch->app_conn = 0;
+	spin_unlock_irqrestore(&qdss_lock, flags);
+
+	/* These may sleep, so they run after the lock is dropped. */
+	status = uninit_data(qdss->port.data);
+	if (status)
+		pr_err("%s: uninit_data error\n", __func__);
+
+	status = set_qdss_data_connection(qdss, 0);
+	if (status)
+		pr_err("%s:qdss_disconnect error\n", __func__);
+}
+EXPORT_SYMBOL(usb_qdss_close);
+
+/*
+ * qdss_cleanup() - module-exit teardown of all registered channels.
+ *
+ * Destroys each channel's workqueue and frees instances with no client
+ * still attached (priv == NULL).  destroy_workqueue() can sleep, so it
+ * must not run under qdss_lock (the previous code called it inside
+ * spin_lock_irqsave, i.e. in atomic context); only the list bookkeeping
+ * is done with the lock held.
+ */
+static void qdss_cleanup(void)
+{
+	struct f_qdss *qdss;
+	struct list_head *act, *tmp;
+	struct usb_qdss_ch *_ch;
+	unsigned long flags;
+	bool do_free;
+
+	pr_debug("qdss_cleanup\n");
+
+	list_for_each_safe(act, tmp, &usb_qdss_ch_list) {
+		_ch = list_entry(act, struct usb_qdss_ch, list);
+		qdss = container_of(_ch, struct f_qdss, ch);
+		spin_lock_irqsave(&qdss_lock, flags);
+		do_free = !_ch->priv;
+		if (do_free)
+			list_del(&_ch->list);
+		spin_unlock_irqrestore(&qdss_lock, flags);
+		/* may sleep - must be outside the spinlock */
+		destroy_workqueue(qdss->wq);
+		if (do_free)
+			kfree(qdss);
+	}
+}
+
+static void qdss_free_func(struct usb_function *f)
+{
+ /* Do nothing as usb_qdss_alloc() doesn't alloc anything. */
+}
+
+static inline struct usb_qdss_opts *to_f_qdss_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct usb_qdss_opts,
+ func_inst.group);
+}
+
+static void qdss_attr_release(struct config_item *item)
+{
+ struct usb_qdss_opts *opts = to_f_qdss_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations qdss_item_ops = {
+ .release = qdss_attr_release,
+};
+
+/* configfs read: report whether the optional debug interface is on. */
+static ssize_t qdss_enable_debug_inface_show(struct config_item *item,
+			char *page)
+{
+	struct f_qdss *qdss = to_f_qdss_opts(item)->usb_qdss;
+
+	return snprintf(page, PAGE_SIZE, "%s\n",
+		qdss->debug_inface_enabled == 1 ? "Enabled" : "Disabled");
+}
+
+/*
+ * configfs write: accept "0" or "1" to toggle the debug interface.
+ *
+ * Two fixes versus the original:
+ *  - the validation read "kstrtou8(...) != 0 && (stats != 0 || stats != 1)";
+ *    the OR clause is a tautology and the AND required a parse failure
+ *    too, so every numeric input was accepted.  It must reject a parse
+ *    error OR an out-of-range value.
+ *  - debug_inface_enabled is a bool, but it was assigned the string
+ *    literal "true"/"false" - a non-NULL pointer either way, so the
+ *    flag became unconditionally true.  Store the numeric value.
+ */
+static ssize_t qdss_enable_debug_inface_store(struct config_item *item,
+			const char *page, size_t len)
+{
+	struct f_qdss *qdss = to_f_qdss_opts(item)->usb_qdss;
+	unsigned long flags;
+	u8 stats = 0;
+
+	if (page == NULL) {
+		pr_err("Invalid buffer");
+		return len;
+	}
+
+	if (kstrtou8(page, 0, &stats) != 0 || (stats != 0 && stats != 1)) {
+		pr_err("(%u)Wrong value. enter 0 to disable or 1 to enable.\n",
+			stats);
+		return len;
+	}
+
+	spin_lock_irqsave(&qdss->lock, flags);
+	qdss->debug_inface_enabled = (stats == 1);
+	spin_unlock_irqrestore(&qdss->lock, flags);
+	return len;
+}
+
+CONFIGFS_ATTR(qdss_, enable_debug_inface);
+static struct configfs_attribute *qdss_attrs[] = {
+ &qdss_attr_enable_debug_inface,
+ NULL,
+};
+
+static struct config_item_type qdss_func_type = {
+ .ct_item_ops = &qdss_item_ops,
+ .ct_attrs = qdss_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * usb_qdss_free_inst() - release a configfs function instance.
+ *
+ * Frees the f_qdss object, the channel-name string duplicated in
+ * usb_qdss_set_inst_name() (the original leaked it), and the opts
+ * container itself.
+ */
+static void usb_qdss_free_inst(struct usb_function_instance *fi)
+{
+	struct usb_qdss_opts *opts;
+
+	opts = container_of(fi, struct usb_qdss_opts, func_inst);
+	kfree(opts->usb_qdss);
+	kfree(opts->channel_name);
+	kfree(opts);
+}
+
+/*
+ * usb_qdss_set_inst_name() - configfs hook for "qdss.<channel_name>".
+ *
+ * Duplicates the channel name and creates the backing f_qdss instance.
+ * Returns 0 on success, -ENAMETOOLONG for names over 14 characters,
+ * -ENOMEM on allocation failure, or the error from alloc_usb_qdss()
+ * (e.g. -EEXIST for a duplicate channel).  The duplicated name is
+ * freed on failure (the original leaked it and always reported
+ * -ENOMEM regardless of the real cause).
+ */
+static int usb_qdss_set_inst_name(struct usb_function_instance *f, const char *name)
+{
+	struct usb_qdss_opts *opts =
+		container_of(f, struct usb_qdss_opts, func_inst);
+	char *ptr;
+	size_t name_len;
+	struct f_qdss *usb_qdss;
+
+	/* get channel_name as expected input qdss.<channel_name> */
+	name_len = strlen(name) + 1;
+	if (name_len > 15)
+		return -ENAMETOOLONG;
+
+	/* get channel name */
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	opts->channel_name = ptr;
+	pr_debug("qdss: channel_name:%s\n", opts->channel_name);
+
+	usb_qdss = alloc_usb_qdss(opts->channel_name);
+	if (IS_ERR(usb_qdss)) {
+		pr_err("Failed to create usb_qdss port(%s)\n", opts->channel_name);
+		kfree(ptr);
+		opts->channel_name = NULL;
+		return PTR_ERR(usb_qdss);
+	}
+
+	opts->usb_qdss = usb_qdss;
+	return 0;
+}
+
+static struct usb_function_instance *qdss_alloc_inst(void)
+{
+ struct usb_qdss_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.free_func_inst = usb_qdss_free_inst;
+ opts->func_inst.set_inst_name = usb_qdss_set_inst_name;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &qdss_func_type);
+ return &opts->func_inst;
+}
+
+/*
+ * qdss_alloc() - configfs hook: hand out the usb_function for this
+ * instance.  Wires the function ops onto the f_qdss created earlier by
+ * usb_qdss_set_inst_name().
+ */
+static struct usb_function *qdss_alloc(struct usb_function_instance *fi)
+{
+	struct usb_qdss_opts *opts = to_fi_usb_qdss_opts(fi);
+	struct f_qdss *usb_qdss = opts->usb_qdss;
+
+	usb_qdss->port.function.name = "usb_qdss";
+	/* NOTE(review): fs_descriptors is assigned the HS descriptor set;
+	 * qdss_set_alt() rejects full-speed anyway, but confirm this is
+	 * intentional rather than a missing qdss_fs_desc table. */
+	usb_qdss->port.function.fs_descriptors = qdss_hs_desc;
+	usb_qdss->port.function.hs_descriptors = qdss_hs_desc;
+	usb_qdss->port.function.strings = qdss_strings;
+	usb_qdss->port.function.bind = qdss_bind;
+	usb_qdss->port.function.unbind = qdss_unbind;
+	usb_qdss->port.function.set_alt = qdss_set_alt;
+	usb_qdss->port.function.disable = qdss_disable;
+	usb_qdss->port.function.setup = NULL;
+	usb_qdss->port.function.free_func = qdss_free_func;
+
+	return &usb_qdss->port.function;
+}
+
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
+/* Module init: register the qdss function with the gadget framework. */
+static int __init usb_qdss_init(void)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&usb_qdss_ch_list);
+	ret = usb_function_register(&qdssusb_func);
+	if (ret)
+		/* was "failed to register diag" - copy/paste from the
+		 * diag function driver */
+		pr_err("%s: failed to register qdss %d\n", __func__, ret);
+	return ret;
+}
+
+static void __exit usb_qdss_exit(void)
+{
+ usb_function_unregister(&qdssusb_func);
+ qdss_cleanup();
+}
+
+module_init(usb_qdss_init);
+module_exit(usb_qdss_exit);
+MODULE_DESCRIPTION("USB QDSS Function Driver");
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
new file mode 100644
index 000000000000..fb7c01c0f939
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef _F_QDSS_H
+#define _F_QDSS_H
+
+#include <linux/kernel.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/usb_qdss.h>
+
+#include "u_rmnet.h"
+
+struct usb_qdss_bam_connect_info {
+ u32 usb_bam_pipe_idx;
+ u32 peer_pipe_idx;
+ unsigned long usb_bam_handle;
+ struct sps_mem_buffer *data_fifo;
+};
+
+struct gqdss {
+ struct usb_function function;
+ struct usb_ep *ctrl_out;
+ struct usb_ep *ctrl_in;
+ struct usb_ep *data;
+ int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+ void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+};
+
+/* struct f_qdss - USB qdss function driver private structure */
+struct f_qdss {
+ struct gqdss port;
+ struct usb_qdss_bam_connect_info bam_info;
+ struct usb_gadget *gadget;
+ short int port_num;
+ u8 ctrl_iface_id;
+ u8 data_iface_id;
+ int usb_connected;
+ bool debug_inface_enabled;
+ struct usb_request *endless_req;
+ struct usb_qdss_ch ch;
+ struct list_head ctrl_read_pool;
+ struct list_head ctrl_write_pool;
+
+ /* for mdm channel SW path */
+ struct list_head data_write_pool;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ spinlock_t lock;
+ unsigned int data_enabled:1;
+ unsigned int ctrl_in_enabled:1;
+ unsigned int ctrl_out_enabled:1;
+ struct workqueue_struct *wq;
+};
+
+struct usb_qdss_opts {
+ struct usb_function_instance func_inst;
+ struct f_qdss *usb_qdss;
+ char *channel_name;
+};
+
+int uninit_data(struct usb_ep *ep);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable);
+#endif
diff --git a/drivers/usb/gadget/function/f_rmnet.c b/drivers/usb/gadget/function/f_rmnet.c
new file mode 100644
index 000000000000..6b54e8d4fe8b
--- /dev/null
+++ b/drivers/usb/gadget/function/f_rmnet.c
@@ -0,0 +1,1271 @@
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/usb_bam.h>
+#include <linux/module.h>
+
+#include "u_rmnet.h"
+#include "u_data_ipa.h"
+#include "configfs.h"
+
+#define RMNET_NOTIFY_INTERVAL 5
+#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+
+#define ACM_CTRL_DTR (1 << 0)
+
+struct f_rmnet {
+ struct usb_function func;
+ enum qti_port_type qti_port_type;
+ enum ipa_func_type func_type;
+ struct grmnet port;
+ int ifc_id;
+ atomic_t online;
+ atomic_t ctrl_online;
+ struct usb_composite_dev *cdev;
+ struct gadget_ipa_port ipa_port;
+ spinlock_t lock;
+
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+
+ /* control info */
+ struct list_head cpkt_resp_q;
+ unsigned long notify_count;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_fs_out_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_in_desc,
+ (struct usb_descriptor_header *) &rmnet_hs_out_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+ .bInterval = RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
+ .bLength = sizeof(rmnet_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
+ .bLength = sizeof(rmnet_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
+ .bLength = sizeof(rmnet_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *rmnet_ss_function[] = {
+ (struct usb_descriptor_header *) &rmnet_interface_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_notify_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_in_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_out_desc,
+ (struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+ [0].s = "RmNet",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+ &rmnet_string_table,
+ NULL,
+};
+
+static struct usb_interface_descriptor dpl_data_intf_desc = {
+ .bLength = sizeof(dpl_data_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+};
+
+static struct usb_endpoint_descriptor dpl_hs_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dpl_ss_data_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dpl_data_ep_comp_desc = {
+ .bLength = sizeof(dpl_data_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *dpl_hs_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_hs_data_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *dpl_ss_data_only_desc[] = {
+ (struct usb_descriptor_header *) &dpl_data_intf_desc,
+ (struct usb_descriptor_header *) &dpl_ss_data_desc,
+ (struct usb_descriptor_header *) &dpl_data_ep_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string dpl_string_defs[] = {
+ [0].s = "QDSS DATA",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings dpl_string_table = {
+ .language = 0x0409,
+ .strings = dpl_string_defs,
+};
+
+static struct usb_gadget_strings *dpl_strings[] = {
+ &dpl_string_table,
+ NULL,
+};
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev);
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+ return container_of(f, struct f_rmnet, func);
+}
+
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+ return container_of(r, struct f_rmnet, port);
+}
+
+/*
+ * name_to_prot() - map a configfs instance name to port/function type.
+ *
+ * "rmnet" selects QTI_PORT_RMNET/USB_IPA_FUNC_RMNET, "dpl" selects
+ * QTI_PORT_DPL/USB_IPA_FUNC_DPL.  Returns 0 on success, -EINVAL for a
+ * NULL or unrecognized name (the original silently returned success on
+ * an unknown name, leaving dev->qti_port_type and dev->func_type unset).
+ */
+int name_to_prot(struct f_rmnet *dev, const char *name)
+{
+	if (!name)
+		goto error;
+
+	if (!strncasecmp("rmnet", name, MAX_INST_NAME_LEN)) {
+		dev->qti_port_type = QTI_PORT_RMNET;
+		dev->func_type = USB_IPA_FUNC_RMNET;
+	} else if (!strncasecmp("dpl", name, MAX_INST_NAME_LEN)) {
+		dev->qti_port_type = QTI_PORT_DPL;
+		dev->func_type = USB_IPA_FUNC_DPL;
+	} else {
+		goto error;
+	}
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+/*
+ * frmnet_alloc_req() - allocate a usb_request with a @len-byte buffer.
+ * Returns the request or ERR_PTR(-ENOMEM); the caller owns both and
+ * releases them with frmnet_free_req().
+ */
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *new_req = usb_ep_alloc_request(ep, flags);
+
+	if (!new_req)
+		return ERR_PTR(-ENOMEM);
+
+	new_req->buf = kmalloc(len, flags);
+	if (!new_req->buf) {
+		/* buffer allocation failed - undo the request alloc */
+		usb_ep_free_request(ep, new_req);
+		return ERR_PTR(-ENOMEM);
+	}
+	new_req->length = len;
+
+	return new_req;
+}
+
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+
+/*
+ * rmnet_alloc_ctrl_pkt() - allocate a control packet with a @len-byte
+ * payload buffer.  Returns the packet or ERR_PTR(-ENOMEM); release
+ * with rmnet_free_ctrl_pkt().
+ */
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *new_pkt = kzalloc(sizeof(*new_pkt), flags);
+
+	if (!new_pkt)
+		return ERR_PTR(-ENOMEM);
+
+	new_pkt->buf = kmalloc(len, flags);
+	if (!new_pkt->buf) {
+		/* payload allocation failed - undo the header alloc */
+		kfree(new_pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	new_pkt->len = len;
+
+	return new_pkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
+/*
+ * gport_rmnet_connect() - bring up the control (QTI) and data (IPA/BAM)
+ * paths for this port.
+ *
+ * Connects the QTI control channel first, then resolves the BAM pipe
+ * indices for whichever data endpoints exist and hands them to
+ * ipa_data_connect().  DPL uses BAM pipe 1, RMNET pipe 0.  On any data
+ * path failure the control channel is disconnected again.  Returns 0
+ * or a negative errno.
+ */
+static int gport_rmnet_connect(struct f_rmnet *dev)
+{
+	int ret;
+	/* 0 so that an endpoint that doesn't exist (e.g. DPL has no OUT)
+	 * passes the "< 0" failure check below untouched. */
+	int src_connection_idx = 0, dst_connection_idx = 0;
+	struct usb_gadget *gadget = dev->cdev->gadget;
+	enum usb_ctrl usb_bam_type;
+	int bam_pipe_num = (dev->qti_port_type == QTI_PORT_DPL) ? 1 : 0;
+
+	ret = gqti_ctrl_connect(&dev->port, dev->qti_port_type, dev->ifc_id);
+	if (ret) {
+		pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
+			__func__, ret);
+		return ret;
+	}
+	/* DPL has no real encap channel; send an empty command up front. */
+	if (dev->qti_port_type == QTI_PORT_DPL)
+		dev->port.send_encap_cmd(QTI_PORT_DPL, NULL, 0);
+	dev->ipa_port.cdev = dev->cdev;
+	ipa_data_port_select(dev->func_type);
+	usb_bam_type = usb_bam_get_bam_type(gadget->name);
+
+	if (dev->ipa_port.in) {
+		dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+			USB_BAM_DEVICE, bam_pipe_num);
+	}
+	if (dev->ipa_port.out) {
+		src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+			USB_BAM_DEVICE, bam_pipe_num);
+	}
+	if (dst_connection_idx < 0 || src_connection_idx < 0) {
+		pr_err("%s: usb_bam_get_connection_idx failed\n",
+			__func__);
+		gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+		return -EINVAL;
+	}
+	ret = ipa_data_connect(&dev->ipa_port, dev->func_type,
+		src_connection_idx, dst_connection_idx);
+	if (ret) {
+		pr_err("%s: ipa_data_connect failed: err:%d\n",
+			__func__, ret);
+		gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+		return ret;
+	}
+	return 0;
+}
+
+static int gport_rmnet_disconnect(struct f_rmnet *dev)
+{
+ gqti_ctrl_disconnect(&dev->port, dev->qti_port_type);
+ ipa_data_disconnect(&dev->ipa_port, dev->func_type);
+ return 0;
+}
+
+static void frmnet_free(struct usb_function *f)
+{
+ struct f_rmnet_opts *opts;
+
+ opts = container_of(f->fi, struct f_rmnet_opts, func_inst);
+ opts->refcnt--;
+}
+
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
+
+ pr_debug("%s: start unbinding\nclear_desc\n", __func__);
+ if (gadget_is_superspeed(gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+
+ if (gadget_is_dualspeed(gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
+ if (f->fs_descriptors)
+ usb_free_descriptors(f->fs_descriptors);
+
+ if (dev->notify_req)
+ frmnet_free_req(dev->notify, dev->notify_req);
+}
+
+static void frmnet_purge_responses(struct f_rmnet *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: Purging responses\n", __func__);
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ dev->notify_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void frmnet_suspend(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+ remote_wakeup_allowed);
+
+ if (dev->notify) {
+ usb_ep_fifo_flush(dev->notify);
+ frmnet_purge_responses(dev);
+ }
+ ipa_data_suspend(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_resume(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ bool remote_wakeup_allowed;
+
+ if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+ remote_wakeup_allowed = f->func_wakeup_allowed;
+ else
+ remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+ pr_debug("%s: dev: %pK remote_wakeup: %d\n", __func__, dev,
+ remote_wakeup_allowed);
+
+ ipa_data_resume(&dev->ipa_port, dev->func_type, remote_wakeup_allowed);
+}
+
+static void frmnet_disable(struct usb_function *f)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+
+ pr_debug("%s: Disabling\n", __func__);
+ atomic_set(&dev->online, 0);
+ if (dev->notify) {
+ usb_ep_disable(dev->notify);
+ dev->notify->driver_data = NULL;
+ frmnet_purge_responses(dev);
+ }
+
+ gport_rmnet_disconnect(dev);
+}
+
+static int
+frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_rmnet *dev = func_to_rmnet(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret = 0;
+
+ pr_debug("%s:dev:%pK\n", __func__, dev);
+ dev->cdev = cdev;
+ if (dev->notify) {
+ if (dev->notify->driver_data) {
+ pr_debug("%s: reset port\n", __func__);
+ usb_ep_disable(dev->notify);
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
+ if (ret) {
+ dev->notify->desc = NULL;
+ ERROR(cdev,
+ "config_ep_by_speed failed for ep %s, result %d\n",
+ dev->notify->name, ret);
+ return ret;
+ }
+
+ ret = usb_ep_enable(dev->notify);
+ if (ret) {
+ pr_err("%s: usb ep#%s enable failed, err#%d\n",
+ __func__, dev->notify->name, ret);
+ dev->notify->desc = NULL;
+ return ret;
+ }
+
+ dev->notify->driver_data = dev;
+ }
+
+ if (dev->ipa_port.in && !dev->ipa_port.in->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.in)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.in->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ if (dev->ipa_port.out && !dev->ipa_port.out->desc
+ && config_ep_by_speed(cdev->gadget, f, dev->ipa_port.out)) {
+ pr_err("%s(): config_ep_by_speed failed.\n",
+ __func__);
+ dev->ipa_port.out->desc = NULL;
+ ret = -EINVAL;
+ goto err_disable_ep;
+ }
+
+ ret = gport_rmnet_connect(dev);
+ if (ret) {
+ pr_err("%s(): gport_rmnet_connect fail with err:%d\n",
+ __func__, ret);
+ goto err_disable_ep;
+ }
+
+ atomic_set(&dev->online, 1);
+ /*
+ * In case notifications were aborted, but there are
+ * pending control packets in the response queue,
+ * re-add the notifications.
+ */
+ if (dev->qti_port_type == QTI_PORT_RMNET) {
+ struct list_head *cpkt;
+
+ list_for_each(cpkt, &dev->cpkt_resp_q)
+ frmnet_ctrl_response_available(dev);
+ }
+
+ return ret;
+err_disable_ep:
+ if (dev->notify && dev->notify->driver_data)
+ usb_ep_disable(dev->notify);
+
+ return ret;
+}
+
+/*
+ * frmnet_ctrl_response_available() - tell the host a control response is
+ * pending by queueing a CDC RESPONSE_AVAILABLE interrupt notification.
+ *
+ * notify_count coalesces notifications: only the transition 0 -> 1
+ * actually queues the interrupt request; further responses piggyback on
+ * the one in flight.  If the queue fails, the count is rolled back and
+ * the oldest queued response packet is dropped.
+ */
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+	struct usb_request *req = dev->notify_req;
+	struct usb_cdc_notification *event;
+	unsigned long flags;
+	int ret;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	pr_debug("%s:dev:%pK\n", __func__, dev);
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!atomic_read(&dev->online) || !req || !req->buf) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	/* A notification is already in flight; don't queue another. */
+	if (++dev->notify_count != 1) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	event = req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (ret) {
+		spin_lock_irqsave(&dev->lock, flags);
+		if (!list_empty(&dev->cpkt_resp_q)) {
+			/* undo the increment above; guard against a
+			 * concurrent path having already zeroed it */
+			if (dev->notify_count > 0)
+				dev->notify_count--;
+			else {
+				pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+					__func__, dev->notify_count);
+				spin_unlock_irqrestore(&dev->lock, flags);
+				return;
+			}
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+				struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_debug("ep enqueue error %d\n", ret);
+	}
+}
+
+/*
+ * frmnet_connect - control-side connect callback from the QTI control port.
+ * Marks the control channel online; data path setup happens elsewhere.
+ */
+static void frmnet_connect(struct grmnet *gr)
+{
+	struct f_rmnet *dev;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	atomic_set(&dev->ctrl_online, 1);
+}
+
+/*
+ * frmnet_disconnect - control-side disconnect callback.
+ *
+ * Marks the control channel offline, flushes the interrupt endpoint, sends
+ * a NETWORK_CONNECTION(0) notification to the host (best effort) and frees
+ * any queued-but-undelivered control responses.
+ */
+static void frmnet_disconnect(struct grmnet *gr)
+{
+	struct f_rmnet *dev;
+	struct usb_cdc_notification *event;
+	int status;
+
+	if (!gr) {
+		pr_err("%s: Invalid grmnet:%pK\n", __func__, gr);
+		return;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	atomic_set(&dev->ctrl_online, 0);
+
+	/* USB cable already unplugged: nothing to notify */
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: nothing to do\n", __func__);
+		return;
+	}
+
+	usb_ep_fifo_flush(dev->notify);
+
+	/* Reuse the single notify request to report "network disconnected" */
+	event = dev->notify_req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		/* Failure is expected if the cable dropped meanwhile */
+		if (!atomic_read(&dev->online))
+			return;
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+			__func__, status);
+	}
+
+	frmnet_purge_responses(dev);
+}
+
+/*
+ * frmnet_send_cpkt_response - queue a control response for the host.
+ *
+ * Copies @buf/@len into a freshly allocated rmnet_ctrl_pkt, appends it to
+ * the response queue and arms a RESPONSE_AVAILABLE notification. Packets
+ * arriving while the function is offline are silently dropped (returns 0).
+ * Returns 0 on success, -ENODEV/-ENOMEM on bad arguments or allocation
+ * failure. Called in atomic context (GFP_ATOMIC allocation).
+ */
+static int
+frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
+{
+	struct f_rmnet *dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+
+	if (!gr || !buf) {
+		pr_err("%s: Invalid grmnet/buf, grmnet:%pK buf:%pK\n",
+				__func__, gr, buf);
+		return -ENODEV;
+	}
+	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	dev = port_to_rmnet(gr);
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	/* Drop (not an error) if either USB or the control channel is down */
+	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
+		rmnet_free_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	frmnet_ctrl_response_available(dev);
+
+	return 0;
+}
+
+/*
+ * frmnet_cmd_complete - ep0 completion for SEND_ENCAPSULATED_COMMAND.
+ * Forwards the command payload received from the host to the QTI control
+ * port, if a handler has been registered.
+ */
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+
+	if (dev->port.send_encap_cmd)
+		dev->port.send_encap_cmd(QTI_PORT_RMNET, req->buf, req->actual);
+}
+
+/*
+ * frmnet_notify_complete - completion handler for the interrupt IN request.
+ *
+ * On disconnect statuses the pending-notification counter is reset. On
+ * success (or any other error, which deliberately falls through) the
+ * counter is decremented and, if more responses are pending, the same
+ * request is requeued so the host keeps polling for them. If requeueing
+ * fails, the oldest queued response is discarded to bound the queue.
+ */
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+	int status = req->status;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->notify_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	default:
+		pr_err("rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (!atomic_read(&dev->ctrl_online))
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->notify_count > 0) {
+			dev->notify_count--;
+			/* Nothing left pending: do not requeue */
+			if (dev->notify_count == 0) {
+				spin_unlock_irqrestore(&dev->lock, flags);
+				break;
+			}
+		} else {
+			pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+				 __func__, dev->notify_count);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/* More responses pending: re-arm the notification */
+		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+		if (status) {
+			spin_lock_irqsave(&dev->lock, flags);
+			if (!list_empty(&dev->cpkt_resp_q)) {
+				if (dev->notify_count > 0)
+					dev->notify_count--;
+				else {
+					pr_err("%s: Invalid notify_count=%lu to decrement\n",
+						__func__, dev->notify_count);
+					spin_unlock_irqrestore(&dev->lock,
+								flags);
+					break;
+				}
+				cpkt = list_first_entry(&dev->cpkt_resp_q,
+						struct rmnet_ctrl_pkt, list);
+				list_del(&cpkt->list);
+				rmnet_free_ctrl_pkt(cpkt);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("ep enqueue error %d\n", status);
+		}
+		break;
+	}
+}
+
+/*
+ * frmnet_setup - class-specific ep0 request handler for the rmnet function.
+ *
+ * Handles SEND_ENCAPSULATED_COMMAND (host -> modem command, data phase
+ * completed in frmnet_cmd_complete), GET_ENCAPSULATED_RESPONSE (returns
+ * the oldest packet from cpkt_resp_q) and SET_CONTROL_LINE_STATE (DTR
+ * forwarded to the modem). Returns the data-phase length (>= 0, the
+ * request is then queued on ep0) or a negative error to stall.
+ */
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = cdev->req;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	int ret = -EOPNOTSUPP;
+
+	pr_debug("%s: dev: %pK\n", __func__, dev);
+	if (!atomic_read(&dev->online)) {
+		pr_warn("%s: usb cable is not connected\n", __func__);
+		return -ENOTCONN;
+	}
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n"
+				 , __func__);
+		/* accept the OUT data phase; payload handled on completion */
+		ret = w_length;
+		req->complete = frmnet_cmd_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__);
+		if (w_value) {
+			pr_err("%s: invalid w_value = %04x\n",
+				   __func__, w_value);
+			goto invalid;
+		} else {
+			unsigned len;
+			struct rmnet_ctrl_pkt *cpkt;
+
+			spin_lock(&dev->lock);
+			/* host polled but no response queued: zero-length reply */
+			if (list_empty(&dev->cpkt_resp_q)) {
+				pr_err("ctrl resp queue empty: ");
+				pr_err("req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				ret = 0;
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			spin_unlock(&dev->lock);
+
+			/* truncate to what the host asked for */
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d\n",
+				__func__, w_value & ACM_CTRL_DTR ? 1 : 0);
+		if (dev->port.notify_modem) {
+			dev->port.notify_modem(&dev->port,
+					QTI_PORT_RMNET, w_value);
+		}
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * ipa_update_function_bind_params - allocate string IDs, endpoints and
+ * descriptor sets for the rmnet/DPL function from @info.
+ *
+ * For DPL only the IN endpoint exists (info->fs_out_desc and
+ * info->fs_notify_desc are NULL), so the error-unwind path must not
+ * unconditionally dereference dev->notify / dev->ipa_port.out /
+ * dev->ipa_port.in: each label now guards its pointer before clearing it.
+ * Returns 0 on success or a negative errno with everything unwound.
+ */
+static int ipa_update_function_bind_params(struct f_rmnet *dev,
+	struct usb_composite_dev *cdev, struct ipa_function_bind_info *info)
+{
+	struct usb_ep *ep;
+	struct usb_function *f = &dev->func;
+	int status;
+
+	/* maybe allocate device-global string IDs */
+	if (info->string_defs[0].id != 0)
+		goto skip_string_id_alloc;
+
+	if (info->data_str_idx >= 0 && info->data_desc) {
+		/* data interface label */
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		info->string_defs[info->data_str_idx].id = status;
+		info->data_desc->iInterface = status;
+	}
+
+skip_string_id_alloc:
+	if (info->data_desc)
+		info->data_desc->bInterfaceNumber = dev->ifc_id;
+
+	if (info->fs_in_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+		if (!ep) {
+			pr_err("%s: usb epin autoconfig failed\n",
+					__func__);
+			return -ENODEV;
+		}
+		dev->ipa_port.in = ep;
+		ep->driver_data = cdev;
+	}
+
+	if (info->fs_out_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+		if (!ep) {
+			pr_err("%s: usb epout autoconfig failed\n",
+					__func__);
+			status = -ENODEV;
+			goto ep_auto_out_fail;
+		}
+		dev->ipa_port.out = ep;
+		ep->driver_data = cdev;
+	}
+
+	if (info->fs_notify_desc) {
+		ep = usb_ep_autoconfig(cdev->gadget, info->fs_notify_desc);
+		if (!ep) {
+			pr_err("%s: usb epnotify autoconfig failed\n",
+					__func__);
+			status = -ENODEV;
+			goto ep_auto_notify_fail;
+		}
+		dev->notify = ep;
+		ep->driver_data = cdev;
+		dev->notify_req = frmnet_alloc_req(ep,
+				sizeof(struct usb_cdc_notification),
+				GFP_KERNEL);
+		if (IS_ERR(dev->notify_req)) {
+			pr_err("%s: unable to allocate memory for notify req\n",
+					__func__);
+			status = -ENOMEM;
+			goto ep_notify_alloc_fail;
+		}
+
+		dev->notify_req->complete = frmnet_notify_complete;
+		dev->notify_req->context = dev;
+	}
+
+	status = -ENOMEM;
+	f->fs_descriptors = usb_copy_descriptors(info->fs_desc_hdr);
+	if (!f->fs_descriptors) {
+		pr_err("%s: no descriptors, usb_copy descriptors(fs)failed\n",
+				__func__);
+		goto fail;
+	}
+
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		/* hs endpoints reuse the addresses assigned at fs */
+		if (info->fs_in_desc && info->hs_in_desc)
+			info->hs_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+		if (info->fs_out_desc && info->hs_out_desc)
+			info->hs_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc && info->hs_notify_desc)
+			info->hs_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(info->hs_desc_hdr);
+		if (!f->hs_descriptors) {
+			pr_err("%s: no hs_descriptors, usb_copy descriptors(hs)failed\n",
+					__func__);
+			goto fail;
+		}
+	}
+
+	if (gadget_is_superspeed(cdev->gadget)) {
+		if (info->fs_in_desc && info->ss_in_desc)
+			info->ss_in_desc->bEndpointAddress =
+					info->fs_in_desc->bEndpointAddress;
+
+		if (info->fs_out_desc && info->ss_out_desc)
+			info->ss_out_desc->bEndpointAddress =
+					info->fs_out_desc->bEndpointAddress;
+		if (info->fs_notify_desc && info->ss_notify_desc)
+			info->ss_notify_desc->bEndpointAddress =
+					info->fs_notify_desc->bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(info->ss_desc_hdr);
+		if (!f->ss_descriptors) {
+			pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
+					__func__);
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (dev->notify_req)
+		frmnet_free_req(dev->notify, dev->notify_req);
+ep_notify_alloc_fail:
+	/* notify ep exists only for QTI_PORT_RMNET; guard before clearing */
+	if (dev->notify) {
+		dev->notify->driver_data = NULL;
+		dev->notify = NULL;
+	}
+ep_auto_notify_fail:
+	/* DPL has no OUT endpoint; guard before clearing */
+	if (dev->ipa_port.out) {
+		dev->ipa_port.out->driver_data = NULL;
+		dev->ipa_port.out = NULL;
+	}
+ep_auto_out_fail:
+	if (dev->ipa_port.in) {
+		dev->ipa_port.in->driver_data = NULL;
+		dev->ipa_port.in = NULL;
+	}
+
+	return status;
+}
+
+/*
+ * frmnet_bind - bind the rmnet/DPL function to a configuration.
+ *
+ * Allocates the interface ID and fills an ipa_function_bind_info with the
+ * descriptor set matching the port type, then delegates endpoint/descriptor
+ * setup to ipa_update_function_bind_params().
+ */
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct usb_composite_dev *cdev = c->cdev;
+	int ret = -ENODEV;
+	struct ipa_function_bind_info info = {0};
+
+	pr_debug("%s: start binding\n", __func__);
+	dev->ifc_id = usb_interface_id(c, f);
+	if (dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d\n",
+			__func__, dev->ifc_id);
+		return dev->ifc_id;
+	}
+
+	info.data_str_idx = 0;
+	if (dev->qti_port_type == QTI_PORT_RMNET) {
+		info.string_defs = rmnet_string_defs;
+		info.data_desc = &rmnet_interface_desc;
+		info.fs_in_desc = &rmnet_fs_in_desc;
+		info.fs_out_desc = &rmnet_fs_out_desc;
+		info.fs_notify_desc = &rmnet_fs_notify_desc;
+		info.hs_in_desc = &rmnet_hs_in_desc;
+		info.hs_out_desc = &rmnet_hs_out_desc;
+		info.hs_notify_desc = &rmnet_hs_notify_desc;
+		info.ss_in_desc = &rmnet_ss_in_desc;
+		info.ss_out_desc = &rmnet_ss_out_desc;
+		info.ss_notify_desc = &rmnet_ss_notify_desc;
+		info.fs_desc_hdr = rmnet_fs_function;
+		info.hs_desc_hdr = rmnet_hs_function;
+		info.ss_desc_hdr = rmnet_ss_function;
+	} else {
+		/* DPL: IN-only data path, no notify/OUT endpoints.
+		 * NOTE(review): fs_in_desc/fs_desc_hdr reuse the *hs*
+		 * descriptors here — presumably DPL is hs+ only; confirm.
+		 */
+		info.string_defs = dpl_string_defs;
+		info.data_desc = &dpl_data_intf_desc;
+		info.fs_in_desc = &dpl_hs_data_desc;
+		info.hs_in_desc = &dpl_hs_data_desc;
+		info.ss_in_desc = &dpl_ss_data_desc;
+		info.fs_desc_hdr = dpl_hs_data_only_desc;
+		info.hs_desc_hdr = dpl_hs_data_only_desc;
+		info.ss_desc_hdr = dpl_ss_data_only_desc;
+	}
+
+	ret = ipa_update_function_bind_params(dev, cdev, &info);
+
+	return ret;
+}
+
+/*
+ * frmnet_bind_config - populate the usb_function callbacks for the
+ * instance's port type ("rmnet" or "dpl") and wire the grmnet control
+ * callbacks. Takes a reference on the instance (opts->refcnt).
+ */
+static struct usb_function *frmnet_bind_config(struct usb_function_instance *fi)
+{
+	struct f_rmnet_opts *opts;
+	struct f_rmnet *dev;
+	struct usb_function *f;
+
+	opts = container_of(fi, struct f_rmnet_opts, func_inst);
+	opts->refcnt++;
+	dev = opts->dev;
+	f = &dev->func;
+	if (dev->qti_port_type == QTI_PORT_RMNET) {
+		f->name = "rmnet";
+		f->strings = rmnet_strings;
+	} else {
+		f->name = "dpl";
+		f->strings = dpl_strings;
+	}
+
+	f->bind = frmnet_bind;
+	f->unbind = frmnet_unbind;
+	f->disable = frmnet_disable;
+	f->set_alt = frmnet_set_alt;
+	f->setup = frmnet_setup;
+	f->suspend = frmnet_suspend;
+	f->resume = frmnet_resume;
+	f->free_func = frmnet_free;
+	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+	dev->port.disconnect = frmnet_disconnect;
+	dev->port.connect = frmnet_connect;
+
+	pr_debug("%s: complete\n", __func__);
+
+	return f;
+}
+
+/* One-time module init: bring up the QTI control port layer. */
+static int rmnet_init(void)
+{
+	return gqti_ctrl_init();
+}
+
+/* Module-level teardown counterpart of rmnet_init(). */
+static void frmnet_cleanup(void)
+{
+	gqti_ctrl_cleanup();
+}
+
+/*
+ * rmnet_free_inst - free_func_inst callback: release the IPA data channel
+ * for this function type and free the device and options structures.
+ */
+static void rmnet_free_inst(struct usb_function_instance *f)
+{
+	struct f_rmnet_opts *opts = container_of(f, struct f_rmnet_opts,
+						func_inst);
+	ipa_data_free(opts->dev->func_type);
+	kfree(opts->dev);
+	kfree(opts);
+}
+
+/*
+ * rmnet_set_inst_name - configfs set_inst_name callback.
+ *
+ * Allocates the per-instance f_rmnet, derives the port/function type from
+ * the instance @name (via name_to_prot) and sets up the matching IPA data
+ * channel. Returns 0 on success or a negative errno (the allocation is
+ * freed on every failure path).
+ */
+static int rmnet_set_inst_name(struct usb_function_instance *fi,
+		const char *name)
+{
+	int name_len, ret = 0;
+	struct f_rmnet *dev;
+	struct f_rmnet_opts *opts = container_of(fi,
+					struct f_rmnet_opts, func_inst);
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	/* Update qti->qti_port_type */
+	ret = name_to_prot(dev, name);
+	if (ret < 0) {
+		pr_err("%s: failed to find prot for %s instance\n",
+						__func__, name);
+		goto fail;
+	}
+
+	/* defensive range check on what name_to_prot produced */
+	if (dev->qti_port_type >= QTI_NUM_PORTS ||
+		dev->func_type >= USB_IPA_NUM_FUNCS) {
+		pr_err("%s: invalid prot\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&dev->cpkt_resp_q);
+	ret = ipa_data_setup(dev->func_type);
+	if (ret)
+		goto fail;
+
+	opts->dev = dev;
+	return 0;
+
+fail:
+	kfree(dev);
+	return ret;
+}
+
+/* Map a configfs item back to its enclosing f_rmnet_opts. */
+static inline struct f_rmnet_opts *to_f_rmnet_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_rmnet_opts,
+				func_inst.group);
+}
+
+/* configfs release: drop the reference on the function instance. */
+static void rmnet_opts_release(struct config_item *item)
+{
+	struct f_rmnet_opts *opts = to_f_rmnet_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+/* configfs hooks: drop the instance reference when the item is released */
+static struct configfs_item_operations rmnet_item_ops = {
+	.release = rmnet_opts_release,
+};
+
+/* configfs item type for the rmnet function's config group */
+static struct config_item_type rmnet_func_type = {
+	.ct_item_ops	= &rmnet_item_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+/*
+ * rmnet_alloc_inst - alloc_inst callback: create the options container and
+ * register its configfs group. The per-instance f_rmnet is allocated later
+ * in rmnet_set_inst_name() once the instance name (port type) is known.
+ */
+static struct usb_function_instance *rmnet_alloc_inst(void)
+{
+	struct f_rmnet_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = rmnet_set_inst_name;
+	opts->func_inst.free_func_inst = rmnet_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+					&rmnet_func_type);
+	return &opts->func_inst;
+}
+
+/* alloc callback: hand out the usb_function for this instance. */
+static struct usb_function *rmnet_alloc(struct usb_function_instance *fi)
+{
+	return frmnet_bind_config(fi);
+}
+
+/* Declares rmnet_bamusb_func, registered as function "rmnet_bam". */
+DECLARE_USB_FUNCTION(rmnet_bam, rmnet_alloc_inst, rmnet_alloc);
+
+/*
+ * usb_rmnet_init - module init: bring up the QTI control layer, then
+ * register the rmnet_bam usb function. If registration fails, undo the
+ * gqti_ctrl_init() performed by rmnet_init() so the module does not leak
+ * the control-port state.
+ */
+static int __init usb_rmnet_init(void)
+{
+	int ret;
+
+	ret = rmnet_init();
+	if (ret)
+		return ret;
+
+	ret = usb_function_register(&rmnet_bamusb_func);
+	if (ret) {
+		pr_err("%s: failed to register rmnet %d\n",
+				__func__, ret);
+		frmnet_cleanup();
+	}
+	return ret;
+}
+
+/* Module exit: unregister the function, then tear down the control layer. */
+static void __exit usb_rmnet_exit(void)
+{
+	usb_function_unregister(&rmnet_bamusb_func);
+	frmnet_cleanup();
+}
+
+module_init(usb_rmnet_init);
+module_exit(usb_rmnet_exit);
+MODULE_DESCRIPTION("USB RMNET Function Driver");
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 829204f7971d..9e1b838ce86f 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -465,6 +465,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
int status;
rndis_init_msg_type *buf;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error:%d\n",
+ __func__, req->status);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
@@ -1021,7 +1027,7 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
rndis->port.func.disable = rndis_disable;
rndis->port.func.free_func = rndis_free;
- params = rndis_register(rndis_response_available, rndis);
+ params = rndis_register(rndis_response_available, rndis, NULL);
if (IS_ERR(params)) {
kfree(rndis);
return ERR_CAST(params);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 6bb44d613bab..8f98c1089e12 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
@@ -14,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/usb/cdc.h>
#include "u_serial.h"
@@ -31,8 +33,35 @@ struct f_gser {
struct gserial port;
u8 data_id;
u8 port_num;
+ u8 pending;
+ spinlock_t lock;
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ struct usb_cdc_line_coding port_line_coding;
+ u8 online;
+ /* SetControlLineState request */
+ u16 port_handshake_bits;
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+ /* SerialState notification */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
+/* Map a gserial port back to its enclosing f_gser. */
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+	return container_of(p, struct f_gser, port);
+}
+
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
+
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
return container_of(f, struct f_gser, port.func);
@@ -46,15 +75,55 @@ static struct usb_interface_descriptor gser_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
- .bNumEndpoints = 2,
+ .bNumEndpoints = 3,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
+static struct usb_cdc_header_desc gser_header_desc = {
+ .bLength = sizeof(gser_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+ gser_call_mgmt_descriptor = {
+ .bLength = sizeof(gser_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+ .bmCapabilities = 0,
+ /* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor = {
+ .bLength = sizeof(gser_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+ .bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc = {
+ .bLength = sizeof(gser_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
/* full speed support: */
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
static struct usb_endpoint_descriptor gser_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -71,12 +140,25 @@ static struct usb_endpoint_descriptor gser_fs_out_desc = {
static struct usb_descriptor_header *gser_fs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_fs_notify_desc,
(struct usb_descriptor_header *) &gser_fs_in_desc,
(struct usb_descriptor_header *) &gser_fs_out_desc,
NULL,
};
/* high speed support: */
+static struct usb_endpoint_descriptor gser_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
static struct usb_endpoint_descriptor gser_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -94,6 +176,11 @@ static struct usb_endpoint_descriptor gser_hs_out_desc = {
static struct usb_descriptor_header *gser_hs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_hs_notify_desc,
(struct usb_descriptor_header *) &gser_hs_in_desc,
(struct usb_descriptor_header *) &gser_hs_out_desc,
NULL,
@@ -114,12 +201,36 @@ static struct usb_endpoint_descriptor gser_ss_out_desc = {
};
static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = {
- .bLength = sizeof gser_ss_bulk_comp_desc,
+ .bLength = sizeof(gser_ss_bulk_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
+static struct usb_endpoint_descriptor gser_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_ss_ep_comp_descriptor gser_ss_notify_comp_desc = {
+ .bLength = sizeof(gser_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+
static struct usb_descriptor_header *gser_ss_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_comp_desc,
(struct usb_descriptor_header *) &gser_ss_in_desc,
(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &gser_ss_out_desc,
@@ -145,13 +256,131 @@ static struct usb_gadget_strings *gser_strings[] = {
};
/*-------------------------------------------------------------------------*/
+/*
+ * gser_complete_set_line_coding - ep0 completion for SET_LINE_CODING.
+ * On a full-length transfer the new coding is cached in port_line_coding;
+ * a short transfer halts ep0 to signal a protocol error to the host.
+ */
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gser *gser = ep->driver_data;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	if (req->status != 0) {
+		dev_dbg(&cdev->gadget->dev, "gser ttyGS%d completion, err %d\n",
+			gser->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(gser->port_line_coding)) {
+		dev_dbg(&cdev->gadget->dev, "gser ttyGS%d short resp, len %d\n",
+			gser->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding *value = req->buf;
+
+		gser->port_line_coding = *value;
+	}
+}
+
+/*
+ * gser_setup - class-specific ep0 handler for the generic serial function.
+ *
+ * Implements the ACM-style trio: SET_LINE_CODING (data phase cached by
+ * gser_complete_set_line_coding), GET_LINE_CODING (returns the cached
+ * coding) and SET_CONTROL_LINE_STATE (DTR/RTS saved and forwarded to the
+ * modem callback). Returns the data-phase length or a negative error
+ * (which stalls ep0).
+ */
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_gser *gser = func_to_gser(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding))
+			goto invalid;
+
+		value = w_length;
+		/* stash gser so the completion handler can find it */
+		cdev->gadget->ep0->driver_data = gser;
+		req->complete = gser_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &gser->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+		value = 0;
+		gser->port_handshake_bits = w_value;
+		pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d RST:%d\n",
+			__func__, w_value & ACM_CTRL_DTR ? 1 : 0,
+			w_value & ACM_CTRL_RTS ? 1 : 0);
+
+		if (gser->port.notify_modem)
+			gser->port.notify_modem(&gser->port, 0, w_value);
+
+		break;
+
+	default:
+invalid:
+		dev_dbg(&cdev->gadget->dev,
+			"invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		dev_dbg(&cdev->gadget->dev,
+			"gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+					gser->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
/* we know alt == 0, so this is an activation or a reset */
+ if (gser->notify->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset generic ctl ttyGS%d\n", gser->port_num);
+ usb_ep_disable(gser->notify);
+ }
+
+ if (!gser->notify->desc) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->notify)) {
+ gser->notify->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ rc = usb_ep_enable(gser->notify);
+ if (rc) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ gser->notify->name, rc);
+ return rc;
+ }
+ gser->notify->driver_data = gser;
if (gser->port.in->enabled) {
dev_dbg(&cdev->gadget->dev,
@@ -169,7 +398,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
}
gserial_connect(&gser->port, gser->port_num);
- return 0;
+ gser->online = 1;
+ return rc;
}
static void gser_disable(struct usb_function *f)
@@ -180,6 +410,176 @@ static void gser_disable(struct usb_function *f)
dev_dbg(&cdev->gadget->dev,
"generic ttyGS%d deactivated\n", gser->port_num);
gserial_disconnect(&gser->port);
+ usb_ep_fifo_flush(gser->notify);
+ usb_ep_disable(gser->notify);
+ gser->online = 0;
+}
+
+/*
+ * gser_notify - send one CDC notification of @type with @length bytes of
+ * @data on the interrupt endpoint.
+ *
+ * Caller must hold gser->lock (see gser_notify_serial_state): the single
+ * notify_req is taken out of the struct while in flight and restored by
+ * the completion handler, or immediately here if the queue fails.
+ */
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep *ep = gser->notify;
+	struct usb_request *req;
+	struct usb_cdc_notification *notify;
+	const unsigned len = sizeof(*notify) + length;
+	void *buf;
+	int status;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	/* claim the single request; completion hands it back */
+	req = gser->notify_req;
+	gser->notify_req = NULL;
+	gser->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;	/* payload follows the notification header */
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(gser->data_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status < 0) {
+		ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+				gser->port_num, status);
+		gser->notify_req = req;
+	}
+
+	return status;
+}
+
+/*
+ * gser_notify_serial_state - push the current serial_state bitmap to the
+ * host, or mark it pending if a notification is already in flight (the
+ * completion handler will resend it).
+ */
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+	int status;
+	unsigned long flags;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	spin_lock_irqsave(&gser->lock, flags);
+	if (gser->notify_req) {
+		DBG(cdev, "gser ttyGS%d serial state %04x\n",
+				gser->port_num, gser->serial_state);
+		status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &gser->serial_state,
+				sizeof(gser->serial_state));
+	} else {
+		/* request busy: remember to send once it completes */
+		gser->pending = true;
+		status = 0;
+	}
+
+	spin_unlock_irqrestore(&gser->lock, flags);
+	return status;
+}
+
+/*
+ * gser_notify_complete - interrupt-request completion: return the request
+ * to the pool and, if a state change was deferred while it was in flight,
+ * send the latest serial_state now (unless the endpoint shut down).
+ */
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_gser *gser = req->context;
+	u8 doit = false;
+	unsigned long flags;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+
+	spin_lock_irqsave(&gser->lock, flags);
+	if (req->status != -ESHUTDOWN)
+		doit = gser->pending;
+
+	gser->notify_req = req;
+	spin_unlock_irqrestore(&gser->lock, flags);
+
+	if (doit && gser->online)
+		gser_notify_serial_state(gser);
+}
+
+/* Port opened: raise DSR/DCD toward the host and notify. */
+static void gser_connect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	gser_notify_serial_state(gser);
+}
+
+/* Return 1 if the host last asserted DTR via SET_CONTROL_LINE_STATE. */
+unsigned int gser_get_dtr(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_DTR)
+		return 1;
+	else
+		return 0;
+}
+
+/* Return 1 if the host last asserted RTS via SET_CONTROL_LINE_STATE. */
+unsigned int gser_get_rts(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_RTS)
+		return 1;
+	else
+		return 0;
+}
+
+/* Set or clear DCD in serial_state and notify the host. */
+unsigned int gser_send_carrier_detect(struct gserial *port, unsigned int yes)
+{
+	u16 state;
+	struct f_gser *gser = port_to_gser(port);
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_DCD;
+	if (yes)
+		state |= ACM_CTRL_DCD;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+}
+
+/* Set or clear RI (ring indicator) in serial_state and notify the host. */
+unsigned int gser_send_ring_indicator(struct gserial *port, unsigned int yes)
+{
+	u16 state;
+	struct f_gser *gser = port_to_gser(port);
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_RI;
+	if (yes)
+		state |= ACM_CTRL_RI;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+}
+
+/* Port closed: drop DSR/DCD toward the host and notify. */
+static void gser_disconnect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	gser_notify_serial_state(gser);
+}
+
+/* Set or clear the BREAK bit and notify (duration is treated as on/off). */
+static int gser_send_break(struct gserial *port, int duration)
+{
+	u16 state;
+	struct f_gser *gser = port_to_gser(port);
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+}
+
+/* Replace the whole serial_state bitmap with @ctrl_bits and notify. */
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state = ctrl_bits;
+
+	return gser_notify_serial_state(gser);
+}
/*-------------------------------------------------------------------------*/
@@ -225,6 +625,21 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
gser->port.out = ep;
+ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gser->notify = ep;
+
+ /* allocate notification */
+ gser->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!gser->notify_req)
+ goto fail;
+
+ gser->notify_req->complete = gser_notify_complete;
+ gser->notify_req->context = gser;
+
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -235,6 +650,11 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress;
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
+ gser_hs_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+ gser_ss_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
gser_ss_function);
if (status)
@@ -247,6 +667,9 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ if (gser->notify_req)
+ gs_free_req(gser->notify, gser->notify_req);
+
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
@@ -327,7 +750,10 @@ static void gser_free(struct usb_function *f)
static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_gser *gser = func_to_gser(f);
+
usb_free_all_descriptors(f);
+ gs_free_req(gser->notify, gser->notify_req);
}
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
@@ -342,6 +768,7 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
opts = container_of(fi, struct f_serial_opts, func_inst);
+ spin_lock_init(&gser->lock);
gser->port_num = opts->port_num;
gser->port.func.name = "gser";
@@ -352,6 +779,24 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
+ /* We support only three ports for now */
+ if (opts->port_num == 0)
+ gser->port.func.name = "modem";
+ else if (opts->port_num == 1)
+ gser->port.func.name = "nmea";
+ else
+ gser->port.func.name = "modem2";
+
+ gser->port.func.setup = gser_setup;
+ gser->port.connect = gser_connect;
+ gser->port.get_dtr = gser_get_dtr;
+ gser->port.get_rts = gser_get_rts;
+ gser->port.send_carrier_detect = gser_send_carrier_detect;
+ gser->port.send_ring_indicator = gser_send_ring_indicator;
+ gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+ gser->port.disconnect = gser_disconnect;
+ gser->port.send_break = gser_send_break;
+
return &gser->port.func;
}
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index d7d095781be1..77681c43318d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -298,9 +298,7 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_sourcesink *ss = ep->driver_data;
-
- return alloc_ep_req(ep, len, ss->buflen);
+ return alloc_ep_req(ep, len);
}
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
@@ -611,7 +609,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
req = ss_alloc_ep_req(ep, size);
} else {
ep = is_in ? ss->in_ep : ss->out_ep;
- req = ss_alloc_ep_req(ep, 0);
+ req = ss_alloc_ep_req(ep, ss->buflen);
}
if (!req)
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index a7782ce673d6..0445b2e1d8b5 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1,24 +1,38 @@
/*
- * f_audio.c -- USB Audio class function driver
- *
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
*
- * Enter bugs at http://blackfin.uclinux.org/
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
*
- * Licensed under the GPL-2 or later.
+ * This driver doesn't expect any real Audio codec to be present
+ * on the device - the audio streams are simply sinked to and
+ * sourced from a virtual ALSA sound card created.
+ *
+ * This file is based on f_uac1.c which is
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*/
-#include <linux/slab.h>
-#include <linux/kernel.h>
+#include <linux/usb/audio.h>
#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/atomic.h>
+#include "u_audio.h"
#include "u_uac1.h"
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+struct f_uac1 {
+ struct g_audio g_audio;
+ u8 ac_intf, as_in_intf, as_out_intf;
+ u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
+};
+
+static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
+{
+ return container_of(f, struct f_uac1, g_audio.func);
+}
/*
* DESCRIPTORS ... most are static, but strings and full
@@ -26,12 +40,17 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
*/
/*
- * We have two interfaces- AudioControl and AudioStreaming
- * TODO: only supcard playback currently
+ * We have three interfaces - one AudioControl and two AudioStreaming
+ *
+ * The driver implements a simple UAC_1 topology.
+ * USB-OUT -> IT_1 -> OT_2 -> ALSA_Capture
+ * ALSA_Playback -> IT_3 -> OT_4 -> USB-IN
*/
-#define F_AUDIO_AC_INTERFACE 0
-#define F_AUDIO_AS_INTERFACE 1
-#define F_AUDIO_NUM_INTERFACES 1
+#define F_AUDIO_AC_INTERFACE 0
+#define F_AUDIO_AS_OUT_INTERFACE 1
+#define F_AUDIO_AS_IN_INTERFACE 2
+/* Number of streaming interfaces */
+#define F_AUDIO_NUM_INTERFACES 2
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
@@ -46,89 +65,86 @@ static struct usb_interface_descriptor ac_interface_desc = {
* The number of AudioStreaming and MIDIStreaming interfaces
* in the Audio Interface Collection
*/
-DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
-/* 1 input terminal, 1 output terminal and 1 feature unit */
-#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
- + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* 2 input terminals and 2 output terminals */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+ + 2*UAC_DT_INPUT_TERMINAL_SIZE + 2*UAC_DT_OUTPUT_TERMINAL_SIZE)
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
- .bcdADC = __constant_cpu_to_le16(0x0100),
- .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bcdADC = cpu_to_le16(0x0100),
+ .wTotalLength = cpu_to_le16(UAC_DT_TOTAL_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
- .baInterfaceNr = {
- /* Interface number of the first AudioStream interface */
- [0] = 1,
- }
};
-#define INPUT_TERMINAL_ID 1
-static struct uac_input_terminal_descriptor input_terminal_desc = {
+#define USB_OUT_IT_ID 1
+static struct uac_input_terminal_descriptor usb_out_it_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = INPUT_TERMINAL_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bTerminalID = USB_OUT_IT_ID,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
-DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
-
-#define FEATURE_UNIT_ID 2
-static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
- .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+#define IO_OUT_OT_ID 2
+static struct uac1_output_terminal_descriptor io_out_ot_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = UAC_FEATURE_UNIT,
- .bUnitID = FEATURE_UNIT_ID,
- .bSourceID = INPUT_TERMINAL_ID,
- .bControlSize = 2,
- .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = IO_OUT_OT_ID,
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
+ .bAssocTerminal = 0,
+ .bSourceID = USB_OUT_IT_ID,
};
-static struct usb_audio_control mute_control = {
- .list = LIST_HEAD_INIT(mute_control.list),
- .name = "Mute Control",
- .type = UAC_FU_MUTE,
- /* Todo: add real Mute control code */
- .set = generic_set_cmd,
- .get = generic_get_cmd,
+#define IO_IN_IT_ID 3
+static struct uac_input_terminal_descriptor io_in_it_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = IO_IN_IT_ID,
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
+ .bAssocTerminal = 0,
+ .wChannelConfig = cpu_to_le16(0x3),
};
-static struct usb_audio_control volume_control = {
- .list = LIST_HEAD_INIT(volume_control.list),
- .name = "Volume Control",
- .type = UAC_FU_VOLUME,
- /* Todo: add real Volume control code */
- .set = generic_set_cmd,
- .get = generic_get_cmd,
+#define USB_IN_OT_ID 4
+static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = USB_IN_OT_ID,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
+ .bAssocTerminal = 0,
+ .bSourceID = IO_IN_IT_ID,
};
-static struct usb_audio_control_selector feature_unit = {
- .list = LIST_HEAD_INIT(feature_unit.list),
- .id = FEATURE_UNIT_ID,
- .name = "Mute & Volume Control",
- .type = UAC_FEATURE_UNIT,
- .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_out_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-#define OUTPUT_TERMINAL_ID 3
-static struct uac1_output_terminal_descriptor output_terminal_desc = {
- .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = OUTPUT_TERMINAL_ID,
- .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
- .bAssocTerminal = FEATURE_UNIT_ID,
- .bSourceID = FEATURE_UNIT_ID,
+static struct usb_interface_descriptor as_out_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-/* B.4.1 Standard AS Interface Descriptor */
-static struct usb_interface_descriptor as_interface_alt_0_desc = {
+static struct usb_interface_descriptor as_in_interface_alt_0_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
@@ -137,7 +153,7 @@ static struct usb_interface_descriptor as_interface_alt_0_desc = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-static struct usb_interface_descriptor as_interface_alt_1_desc = {
+static struct usb_interface_descriptor as_in_interface_alt_1_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
@@ -147,18 +163,27 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
-static struct uac1_as_header_descriptor as_header_desc = {
+static struct uac1_as_header_descriptor as_out_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = USB_OUT_IT_ID,
+ .bDelay = 1,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
+};
+
+static struct uac1_as_header_descriptor as_in_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = INPUT_TERMINAL_ID,
+ .bTerminalLink = USB_IN_OT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
-static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+static struct uac_format_type_i_discrete_descriptor_1 as_out_type_i_desc = {
.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -179,53 +204,147 @@ static struct usb_endpoint_descriptor as_out_ep_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor as_out_ep_comp_desc = {
+ .bLength = sizeof(as_out_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(1024),
+};
+
/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
- .bmAttributes = 1,
+ .bmAttributes = 1,
.bLockDelayUnits = 1,
- .wLockDelay = __constant_cpu_to_le16(1),
+ .wLockDelay = cpu_to_le16(1),
+};
+
+static struct uac_format_type_i_discrete_descriptor_1 as_in_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_ASYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+static struct usb_ss_ep_comp_descriptor as_in_ep_comp_desc = {
+ .bLength = sizeof(as_in_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(1024),
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 0,
+ .wLockDelay = 0,
};
static struct usb_descriptor_header *f_audio_desc[] = {
(struct usb_descriptor_header *)&ac_interface_desc,
(struct usb_descriptor_header *)&ac_header_desc,
- (struct usb_descriptor_header *)&input_terminal_desc,
- (struct usb_descriptor_header *)&output_terminal_desc,
- (struct usb_descriptor_header *)&feature_unit_desc,
+ (struct usb_descriptor_header *)&usb_out_it_desc,
+ (struct usb_descriptor_header *)&io_out_ot_desc,
+ (struct usb_descriptor_header *)&io_in_it_desc,
+ (struct usb_descriptor_header *)&usb_in_ot_desc,
+
+ (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_out_header_desc,
+
+ (struct usb_descriptor_header *)&as_out_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+
+ (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_in_header_desc,
+
+ (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *f_audio_ss_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&usb_out_it_desc,
+ (struct usb_descriptor_header *)&io_out_ot_desc,
+ (struct usb_descriptor_header *)&io_in_it_desc,
+ (struct usb_descriptor_header *)&usb_in_ot_desc,
- (struct usb_descriptor_header *)&as_interface_alt_0_desc,
- (struct usb_descriptor_header *)&as_interface_alt_1_desc,
- (struct usb_descriptor_header *)&as_header_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_out_header_desc,
- (struct usb_descriptor_header *)&as_type_i_desc,
+ (struct usb_descriptor_header *)&as_out_type_i_desc,
(struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_out_ep_comp_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
+
+ (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_in_header_desc,
+
+ (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_in_ep_comp_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
NULL,
};
enum {
STR_AC_IF,
- STR_INPUT_TERMINAL,
- STR_INPUT_TERMINAL_CH_NAMES,
- STR_FEAT_DESC_0,
- STR_OUTPUT_TERMINAL,
- STR_AS_IF_ALT0,
- STR_AS_IF_ALT1,
+ STR_USB_OUT_IT,
+ STR_USB_OUT_IT_CH_NAMES,
+ STR_IO_OUT_OT,
+ STR_IO_IN_IT,
+ STR_IO_IN_IT_CH_NAMES,
+ STR_USB_IN_OT,
+ STR_AS_OUT_IF_ALT0,
+ STR_AS_OUT_IF_ALT1,
+ STR_AS_IN_IF_ALT0,
+ STR_AS_IN_IF_ALT1,
};
static struct usb_string strings_uac1[] = {
[STR_AC_IF].s = "AC Interface",
- [STR_INPUT_TERMINAL].s = "Input terminal",
- [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
- [STR_FEAT_DESC_0].s = "Volume control & mute",
- [STR_OUTPUT_TERMINAL].s = "Output terminal",
- [STR_AS_IF_ALT0].s = "AS Interface",
- [STR_AS_IF_ALT1].s = "AS Interface",
+ [STR_USB_OUT_IT].s = "Playback Input terminal",
+ [STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
+ [STR_IO_OUT_OT].s = "Playback Output terminal",
+ [STR_IO_IN_IT].s = "Capture Input terminal",
+ [STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
+ [STR_USB_IN_OT].s = "Capture Output terminal",
+ [STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
+ [STR_AS_OUT_IF_ALT1].s = "Playback Active",
+ [STR_AS_IN_IF_ALT0].s = "Capture Inactive",
+ [STR_AS_IN_IF_ALT1].s = "Capture Active",
{ },
};
@@ -243,218 +362,6 @@ static struct usb_gadget_strings *uac1_strings[] = {
* This function is an ALSA sound card following USB Audio Class Spec 1.0.
*/
-/*-------------------------------------------------------------------------*/
-struct f_audio_buf {
- u8 *buf;
- int actual;
- struct list_head list;
-};
-
-static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
-{
- struct f_audio_buf *copy_buf;
-
- copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
- if (!copy_buf)
- return ERR_PTR(-ENOMEM);
-
- copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
- if (!copy_buf->buf) {
- kfree(copy_buf);
- return ERR_PTR(-ENOMEM);
- }
-
- return copy_buf;
-}
-
-static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
-{
- kfree(audio_buf->buf);
- kfree(audio_buf);
-}
-/*-------------------------------------------------------------------------*/
-
-struct f_audio {
- struct gaudio card;
-
- /* endpoints handle full and/or high speeds */
- struct usb_ep *out_ep;
-
- spinlock_t lock;
- struct f_audio_buf *copy_buf;
- struct work_struct playback_work;
- struct list_head play_queue;
-
- /* Control Set command */
- struct list_head cs;
- u8 set_cmd;
- struct usb_audio_control *set_con;
-};
-
-static inline struct f_audio *func_to_audio(struct usb_function *f)
-{
- return container_of(f, struct f_audio, card.func);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void f_audio_playback_work(struct work_struct *data)
-{
- struct f_audio *audio = container_of(data, struct f_audio,
- playback_work);
- struct f_audio_buf *play_buf;
-
- spin_lock_irq(&audio->lock);
- if (list_empty(&audio->play_queue)) {
- spin_unlock_irq(&audio->lock);
- return;
- }
- play_buf = list_first_entry(&audio->play_queue,
- struct f_audio_buf, list);
- list_del(&play_buf->list);
- spin_unlock_irq(&audio->lock);
-
- u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
- f_audio_buffer_free(play_buf);
-}
-
-static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_audio *audio = req->context;
- struct usb_composite_dev *cdev = audio->card.func.config->cdev;
- struct f_audio_buf *copy_buf = audio->copy_buf;
- struct f_uac1_opts *opts;
- int audio_buf_size;
- int err;
-
- opts = container_of(audio->card.func.fi, struct f_uac1_opts,
- func_inst);
- audio_buf_size = opts->audio_buf_size;
-
- if (!copy_buf)
- return -EINVAL;
-
- /* Copy buffer is full, add it to the play_queue */
- if (audio_buf_size - copy_buf->actual < req->actual) {
- spin_lock_irq(&audio->lock);
- list_add_tail(&copy_buf->list, &audio->play_queue);
- spin_unlock_irq(&audio->lock);
- schedule_work(&audio->playback_work);
- copy_buf = f_audio_buffer_alloc(audio_buf_size);
- if (IS_ERR(copy_buf))
- return -ENOMEM;
- }
-
- memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
- copy_buf->actual += req->actual;
- audio->copy_buf = copy_buf;
-
- err = usb_ep_queue(ep, req, GFP_ATOMIC);
- if (err)
- ERROR(cdev, "%s queue req: %d\n", ep->name, err);
-
- return 0;
-
-}
-
-static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_audio *audio = req->context;
- int status = req->status;
- u32 data = 0;
- struct usb_ep *out_ep = audio->out_ep;
-
- switch (status) {
-
- case 0: /* normal completion? */
- if (ep == out_ep)
- f_audio_out_ep_complete(ep, req);
- else if (audio->set_con) {
- memcpy(&data, req->buf, req->length);
- audio->set_con->set(audio->set_con, audio->set_cmd,
- le16_to_cpu(data));
- audio->set_con = NULL;
- }
- break;
- default:
- break;
- }
-}
-
-static int audio_set_intf_req(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_audio *audio = func_to_audio(f);
- struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_request *req = cdev->req;
- u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u8 con_sel = (w_value >> 8) & 0xFF;
- u8 cmd = (ctrl->bRequest & 0x0F);
- struct usb_audio_control_selector *cs;
- struct usb_audio_control *con;
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
- ctrl->bRequest, w_value, len, id);
-
- list_for_each_entry(cs, &audio->cs, list) {
- if (cs->id == id) {
- list_for_each_entry(con, &cs->control, list) {
- if (con->type == con_sel) {
- audio->set_con = con;
- break;
- }
- }
- break;
- }
- }
-
- audio->set_cmd = cmd;
- req->context = audio;
- req->complete = f_audio_complete;
-
- return len;
-}
-
-static int audio_get_intf_req(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_audio *audio = func_to_audio(f);
- struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_request *req = cdev->req;
- int value = -EOPNOTSUPP;
- u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u8 con_sel = (w_value >> 8) & 0xFF;
- u8 cmd = (ctrl->bRequest & 0x0F);
- struct usb_audio_control_selector *cs;
- struct usb_audio_control *con;
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
- ctrl->bRequest, w_value, len, id);
-
- list_for_each_entry(cs, &audio->cs, list) {
- if (cs->id == id) {
- list_for_each_entry(con, &cs->control, list) {
- if (con->type == con_sel && con->get) {
- value = con->get(con, cmd);
- break;
- }
- }
- break;
- }
- }
-
- req->context = audio;
- req->complete = f_audio_complete;
- len = min_t(size_t, sizeof(value), len);
- memcpy(req->buf, &value, len);
-
- return len;
-}
-
static int audio_set_endpoint_req(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -533,14 +440,6 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
* activation uses set_alt().
*/
switch (ctrl->bRequestType) {
- case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
- value = audio_set_intf_req(f, ctrl);
- break;
-
- case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
- value = audio_get_intf_req(f, ctrl);
- break;
-
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_set_endpoint_req(f, ctrl);
break;
@@ -573,143 +472,161 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
- struct f_audio *audio = func_to_audio(f);
struct usb_composite_dev *cdev = f->config->cdev;
- struct usb_ep *out_ep = audio->out_ep;
- struct usb_request *req;
- struct f_uac1_opts *opts;
- int req_buf_size, req_count, audio_buf_size;
- int i = 0, err = 0;
-
- DBG(cdev, "intf %d, alt %d\n", intf, alt);
+ struct usb_gadget *gadget = cdev->gadget;
+ struct device *dev = &gadget->dev;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+ int ret = 0;
+
+ /* No i/f has more than 2 alt settings */
+ if (alt > 1) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
- opts = container_of(f->fi, struct f_uac1_opts, func_inst);
- req_buf_size = opts->req_buf_size;
- req_count = opts->req_count;
- audio_buf_size = opts->audio_buf_size;
-
- if (intf == 1) {
- if (alt == 1) {
- err = config_ep_by_speed(cdev->gadget, f, out_ep);
- if (err)
- return err;
-
- usb_ep_enable(out_ep);
- audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
- if (IS_ERR(audio->copy_buf))
- return -ENOMEM;
-
- /*
- * allocate a bunch of read buffers
- * and queue them all at once.
- */
- for (i = 0; i < req_count && err == 0; i++) {
- req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
- if (req) {
- req->buf = kzalloc(req_buf_size,
- GFP_ATOMIC);
- if (req->buf) {
- req->length = req_buf_size;
- req->context = audio;
- req->complete =
- f_audio_complete;
- err = usb_ep_queue(out_ep,
- req, GFP_ATOMIC);
- if (err)
- ERROR(cdev,
- "%s queue req: %d\n",
- out_ep->name, err);
- } else
- err = -ENOMEM;
- } else
- err = -ENOMEM;
- }
-
- } else {
- struct f_audio_buf *copy_buf = audio->copy_buf;
- if (copy_buf) {
- list_add_tail(&copy_buf->list,
- &audio->play_queue);
- schedule_work(&audio->playback_work);
- }
+ if (intf == uac1->ac_intf) {
+ /* Control I/f has only 1 AltSetting - 0 */
+ if (alt) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
}
+ return 0;
+ }
+
+ if (intf == uac1->as_out_intf) {
+ uac1->as_out_alt = alt;
+
+ if (alt)
+ ret = u_audio_start_capture(&uac1->g_audio);
+ else
+ u_audio_stop_capture(&uac1->g_audio);
+ } else if (intf == uac1->as_in_intf) {
+ uac1->as_in_alt = alt;
+
+ if (alt)
+ ret = u_audio_start_playback(&uac1->g_audio);
+ else
+ u_audio_stop_playback(&uac1->g_audio);
+ } else {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
}
- return err;
+ return ret;
}
-static void f_audio_disable(struct usb_function *f)
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
{
- return;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_gadget *gadget = cdev->gadget;
+ struct device *dev = &gadget->dev;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+
+ if (intf == uac1->ac_intf)
+ return uac1->ac_alt;
+ else if (intf == uac1->as_out_intf)
+ return uac1->as_out_alt;
+ else if (intf == uac1->as_in_intf)
+ return uac1->as_in_alt;
+ else
+ dev_err(dev, "%s:%d Invalid Interface %d!\n",
+ __func__, __LINE__, intf);
+
+ return -EINVAL;
}
-/*-------------------------------------------------------------------------*/
-static void f_audio_build_desc(struct f_audio *audio)
+static void f_audio_disable(struct usb_function *f)
{
- struct gaudio *card = &audio->card;
- u8 *sam_freq;
- int rate;
-
- /* Set channel numbers */
- input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
- as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
-
- /* Set sample rates */
- rate = u_audio_get_playback_rate(card);
- sam_freq = as_type_i_desc.tSamFreq[0];
- memcpy(sam_freq, &rate, 3);
+ struct f_uac1 *uac1 = func_to_uac1(f);
- /* Todo: Set Sample bits and other parameters */
+ uac1->as_out_alt = 0;
+ uac1->as_in_alt = 0;
- return;
+ u_audio_stop_capture(&uac1->g_audio);
+ u_audio_stop_playback(&uac1->g_audio);
}
+/*-------------------------------------------------------------------------*/
+
/* audio function driver setup/binding */
-static int
-f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
- struct usb_composite_dev *cdev = c->cdev;
- struct f_audio *audio = func_to_audio(f);
- struct usb_string *us;
- int status;
- struct usb_ep *ep = NULL;
- struct f_uac1_opts *audio_opts;
+ struct usb_composite_dev *cdev = c->cdev;
+ struct usb_gadget *gadget = cdev->gadget;
+ struct f_uac1 *uac1 = func_to_uac1(f);
+ struct g_audio *audio = func_to_g_audio(f);
+ struct f_uac1_opts *audio_opts;
+ struct usb_ep *ep = NULL;
+ struct usb_string *us;
+ u8 *sam_freq;
+ int rate;
+ int status;
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
- audio->card.gadget = c->cdev->gadget;
- /* set up ASLA audio devices */
- if (!audio_opts->bound) {
- status = gaudio_setup(&audio->card);
- if (status < 0)
- return status;
- audio_opts->bound = true;
- }
+
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
if (IS_ERR(us))
return PTR_ERR(us);
ac_interface_desc.iInterface = us[STR_AC_IF].id;
- input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
- input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
- feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
- output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
- as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
- as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+ usb_out_it_desc.iTerminal = us[STR_USB_OUT_IT].id;
+ usb_out_it_desc.iChannelNames = us[STR_USB_OUT_IT_CH_NAMES].id;
+ io_out_ot_desc.iTerminal = us[STR_IO_OUT_OT].id;
+ as_out_interface_alt_0_desc.iInterface = us[STR_AS_OUT_IF_ALT0].id;
+ as_out_interface_alt_1_desc.iInterface = us[STR_AS_OUT_IF_ALT1].id;
+ io_in_it_desc.iTerminal = us[STR_IO_IN_IT].id;
+ io_in_it_desc.iChannelNames = us[STR_IO_IN_IT_CH_NAMES].id;
+ usb_in_ot_desc.iTerminal = us[STR_USB_IN_OT].id;
+ as_in_interface_alt_0_desc.iInterface = us[STR_AS_IN_IF_ALT0].id;
+ as_in_interface_alt_1_desc.iInterface = us[STR_AS_IN_IF_ALT1].id;
+ /* Set channel numbers */
+ usb_out_it_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+ usb_out_it_desc.wChannelConfig = cpu_to_le16(audio_opts->c_chmask);
+ as_out_type_i_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+ as_out_type_i_desc.bSubframeSize = audio_opts->c_ssize;
+ as_out_type_i_desc.bBitResolution = audio_opts->c_ssize * 8;
+ io_in_it_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+ io_in_it_desc.wChannelConfig = cpu_to_le16(audio_opts->p_chmask);
+ as_in_type_i_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+ as_in_type_i_desc.bSubframeSize = audio_opts->p_ssize;
+ as_in_type_i_desc.bBitResolution = audio_opts->p_ssize * 8;
- f_audio_build_desc(audio);
+ /* Set sample rates */
+ rate = audio_opts->c_srate;
+ sam_freq = as_out_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+ rate = audio_opts->p_srate;
+ sam_freq = as_in_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
ac_interface_desc.bInterfaceNumber = status;
+ uac1->ac_intf = status;
+ uac1->ac_alt = 0;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_header_desc.baInterfaceNr[0] = status;
+ as_out_interface_alt_0_desc.bInterfaceNumber = status;
+ as_out_interface_alt_1_desc.bInterfaceNumber = status;
+ uac1->as_out_intf = status;
+ uac1->as_out_alt = 0;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
- as_interface_alt_0_desc.bInterfaceNumber = status;
- as_interface_alt_1_desc.bInterfaceNumber = status;
+ ac_header_desc.baInterfaceNr[1] = status;
+ as_in_interface_alt_0_desc.bInterfaceNumber = status;
+ as_in_interface_alt_1_desc.bInterfaceNumber = status;
+ uac1->as_in_intf = status;
+ uac1->as_in_alt = 0;
+
+ audio->gadget = gadget;
status = -ENODEV;
@@ -720,51 +637,42 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
audio->out_ep = ep;
audio->out_ep->desc = &as_out_ep_desc;
- status = -ENOMEM;
+ ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->in_ep = ep;
+ audio->in_ep->desc = &as_in_ep_desc;
/* copy descriptors, and track endpoint copies */
- status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL);
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc,
+ f_audio_ss_desc);
if (status)
goto fail;
+
+ audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
+ audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
+ audio->params.c_chmask = audio_opts->c_chmask;
+ audio->params.c_srate = audio_opts->c_srate;
+ audio->params.c_ssize = audio_opts->c_ssize;
+ audio->params.p_chmask = audio_opts->p_chmask;
+ audio->params.p_srate = audio_opts->p_srate;
+ audio->params.p_ssize = audio_opts->p_ssize;
+ audio->params.req_number = audio_opts->req_number;
+
+ status = g_audio_setup(audio, "UAC1_PCM", "UAC1_Gadget");
+ if (status)
+ goto err_card_register;
+
return 0;
+err_card_register:
+ usb_free_all_descriptors(f);
fail:
- gaudio_cleanup(&audio->card);
return status;
}
/*-------------------------------------------------------------------------*/
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
-{
- con->data[cmd] = value;
-
- return 0;
-}
-
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
-{
- return con->data[cmd];
-}
-
-/* Todo: add more control selecotor dynamically */
-static int control_selector_init(struct f_audio *audio)
-{
- INIT_LIST_HEAD(&audio->cs);
- list_add(&feature_unit.list, &audio->cs);
-
- INIT_LIST_HEAD(&feature_unit.control);
- list_add(&mute_control.list, &feature_unit.control);
- list_add(&volume_control.list, &feature_unit.control);
-
- volume_control.data[UAC__CUR] = 0xffc0;
- volume_control.data[UAC__MIN] = 0xe3a0;
- volume_control.data[UAC__MAX] = 0xfff0;
- volume_control.data[UAC__RES] = 0x0030;
-
- return 0;
-}
-
static inline struct f_uac1_opts *to_f_uac1_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uac1_opts,
@@ -782,9 +690,10 @@ static struct configfs_item_operations f_uac1_item_ops = {
.release = f_uac1_attr_release,
};
-#define UAC1_INT_ATTRIBUTE(name) \
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
- char *page) \
+#define UAC1_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show( \
+ struct config_item *item, \
+ char *page) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
int result; \
@@ -796,7 +705,8 @@ static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
return result; \
} \
\
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+static ssize_t f_uac1_opts_##name##_store( \
+ struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uac1_opts *opts = to_f_uac1_opts(item); \
@@ -823,64 +733,22 @@ end: \
\
CONFIGFS_ATTR(f_uac1_opts_, name)
-UAC1_INT_ATTRIBUTE(req_buf_size);
-UAC1_INT_ATTRIBUTE(req_count);
-UAC1_INT_ATTRIBUTE(audio_buf_size);
-
-#define UAC1_STR_ATTRIBUTE(name) \
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
- char *page) \
-{ \
- struct f_uac1_opts *opts = to_f_uac1_opts(item); \
- int result; \
- \
- mutex_lock(&opts->lock); \
- result = sprintf(page, "%s\n", opts->name); \
- mutex_unlock(&opts->lock); \
- \
- return result; \
-} \
- \
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
- const char *page, size_t len) \
-{ \
- struct f_uac1_opts *opts = to_f_uac1_opts(item); \
- int ret = -EBUSY; \
- char *tmp; \
- \
- mutex_lock(&opts->lock); \
- if (opts->refcnt) \
- goto end; \
- \
- tmp = kstrndup(page, len, GFP_KERNEL); \
- if (tmp) { \
- ret = -ENOMEM; \
- goto end; \
- } \
- if (opts->name##_alloc) \
- kfree(opts->name); \
- opts->name##_alloc = true; \
- opts->name = tmp; \
- ret = len; \
- \
-end: \
- mutex_unlock(&opts->lock); \
- return ret; \
-} \
- \
-CONFIGFS_ATTR(f_uac1_opts_, name)
-
-UAC1_STR_ATTRIBUTE(fn_play);
-UAC1_STR_ATTRIBUTE(fn_cap);
-UAC1_STR_ATTRIBUTE(fn_cntl);
+UAC1_ATTRIBUTE(c_chmask);
+UAC1_ATTRIBUTE(c_srate);
+UAC1_ATTRIBUTE(c_ssize);
+UAC1_ATTRIBUTE(p_chmask);
+UAC1_ATTRIBUTE(p_srate);
+UAC1_ATTRIBUTE(p_ssize);
+UAC1_ATTRIBUTE(req_number);
static struct configfs_attribute *f_uac1_attrs[] = {
- &f_uac1_opts_attr_req_buf_size,
- &f_uac1_opts_attr_req_count,
- &f_uac1_opts_attr_audio_buf_size,
- &f_uac1_opts_attr_fn_play,
- &f_uac1_opts_attr_fn_cap,
- &f_uac1_opts_attr_fn_cntl,
+ &f_uac1_opts_attr_c_chmask,
+ &f_uac1_opts_attr_c_srate,
+ &f_uac1_opts_attr_c_ssize,
+ &f_uac1_opts_attr_p_chmask,
+ &f_uac1_opts_attr_p_srate,
+ &f_uac1_opts_attr_p_ssize,
+ &f_uac1_opts_attr_req_number,
NULL,
};
@@ -895,12 +763,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
- if (opts->fn_play_alloc)
- kfree(opts->fn_play);
- if (opts->fn_cap_alloc)
- kfree(opts->fn_cap);
- if (opts->fn_cntl_alloc)
- kfree(opts->fn_cntl);
kfree(opts);
}
@@ -918,21 +780,22 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
config_group_init_type_name(&opts->func_inst.group, "",
&f_uac1_func_type);
- opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
- opts->req_count = UAC1_REQ_COUNT;
- opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
- opts->fn_play = FILE_PCM_PLAYBACK;
- opts->fn_cap = FILE_PCM_CAPTURE;
- opts->fn_cntl = FILE_CONTROL;
+ opts->c_chmask = UAC1_DEF_CCHMASK;
+ opts->c_srate = UAC1_DEF_CSRATE;
+ opts->c_ssize = UAC1_DEF_CSSIZE;
+ opts->p_chmask = UAC1_DEF_PCHMASK;
+ opts->p_srate = UAC1_DEF_PSRATE;
+ opts->p_ssize = UAC1_DEF_PSSIZE;
+ opts->req_number = UAC1_DEF_REQ_NUM;
return &opts->func_inst;
}
static void f_audio_free(struct usb_function *f)
{
- struct f_audio *audio = func_to_audio(f);
+ struct g_audio *audio;
struct f_uac1_opts *opts;
- gaudio_cleanup(&audio->card);
+ audio = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
@@ -942,42 +805,54 @@ static void f_audio_free(struct usb_function *f)
static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct g_audio *audio = func_to_g_audio(f);
+
+ g_audio_cleanup(audio);
usb_free_all_descriptors(f);
+
+ audio->gadget = NULL;
}
static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
{
- struct f_audio *audio;
+ struct f_uac1 *uac1;
struct f_uac1_opts *opts;
/* allocate and initialize one new instance */
- audio = kzalloc(sizeof(*audio), GFP_KERNEL);
- if (!audio)
+ uac1 = kzalloc(sizeof(*uac1), GFP_KERNEL);
+ if (!uac1)
return ERR_PTR(-ENOMEM);
- audio->card.func.name = "g_audio";
-
opts = container_of(fi, struct f_uac1_opts, func_inst);
mutex_lock(&opts->lock);
++opts->refcnt;
mutex_unlock(&opts->lock);
- INIT_LIST_HEAD(&audio->play_queue);
- spin_lock_init(&audio->lock);
- audio->card.func.bind = f_audio_bind;
- audio->card.func.unbind = f_audio_unbind;
- audio->card.func.set_alt = f_audio_set_alt;
- audio->card.func.setup = f_audio_setup;
- audio->card.func.disable = f_audio_disable;
- audio->card.func.free_func = f_audio_free;
+ uac1->g_audio.func.name = "uac1_func";
+ uac1->g_audio.func.bind = f_audio_bind;
+ uac1->g_audio.func.unbind = f_audio_unbind;
+ uac1->g_audio.func.set_alt = f_audio_set_alt;
+ uac1->g_audio.func.get_alt = f_audio_get_alt;
+ uac1->g_audio.func.setup = f_audio_setup;
+ uac1->g_audio.func.disable = f_audio_disable;
+ uac1->g_audio.func.free_func = f_audio_free;
+
+ return &uac1->g_audio.func;
+}
- control_selector_init(audio);
+DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
- INIT_WORK(&audio->playback_work, f_audio_playback_work);
+static int __init afunc_init(void)
+{
+ return usb_function_register(&uac1usb_func);
+}
- return &audio->card.func;
+static void __exit afunc_exit(void)
+{
+ usb_function_unregister(&uac1usb_func);
}
-DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
+module_init(afunc_init);
+module_exit(afunc_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bryan Wu");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
new file mode 100644
index 000000000000..d6c60c08d511
--- /dev/null
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -0,0 +1,1022 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+
+#include "u_uac1_legacy.h"
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces- AudioControl and AudioStreaming
+ * TODO: only supports playback currently
+ */
+#define F_AUDIO_AC_INTERFACE 0
+#define F_AUDIO_AS_INTERFACE 1
+#define F_AUDIO_NUM_INTERFACES 1
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+/*
+ * The number of AudioStreaming and MIDIStreaming interfaces
+ * in the Audio Interface Collection
+ */
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+ + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bInCollection = F_AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ /* Interface number of the first AudioStream interface */
+ [0] = 1,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+ .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+ .list = LIST_HEAD_INIT(mute_control.list),
+ .name = "Mute Control",
+ .type = UAC_FU_MUTE,
+ /* Todo: add real Mute control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+ .list = LIST_HEAD_INIT(volume_control.list),
+ .name = "Volume Control",
+ .type = UAC_FU_VOLUME,
+ /* Todo: add real Volume control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+ .list = LIST_HEAD_INIT(feature_unit.list),
+ .id = FEATURE_UNIT_ID,
+ .name = "Mute & Volume Control",
+ .type = UAC_FEATURE_UNIT,
+ .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+ NULL,
+};
+
+enum {
+ STR_AC_IF,
+ STR_INPUT_TERMINAL,
+ STR_INPUT_TERMINAL_CH_NAMES,
+ STR_FEAT_DESC_0,
+ STR_OUTPUT_TERMINAL,
+ STR_AS_IF_ALT0,
+ STR_AS_IF_ALT1,
+};
+
+static struct usb_string strings_uac1[] = {
+ [STR_AC_IF].s = "AC Interface",
+ [STR_INPUT_TERMINAL].s = "Input terminal",
+ [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
+ [STR_FEAT_DESC_0].s = "Volume control & mute",
+ [STR_OUTPUT_TERMINAL].s = "Output terminal",
+ [STR_AS_IF_ALT0].s = "AS Interface",
+ [STR_AS_IF_ALT1].s = "AS Interface",
+ { },
+};
+
+static struct usb_gadget_strings str_uac1 = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_uac1,
+};
+
+static struct usb_gadget_strings *uac1_strings[] = {
+ &str_uac1,
+ NULL,
+};
+
+/*
+ * This function is an ALSA sound card following USB Audio Class Spec 1.0.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+ u8 *buf;
+ int actual;
+ struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+ struct f_audio_buf *copy_buf;
+
+ copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+ if (!copy_buf)
+ return ERR_PTR(-ENOMEM);
+
+ copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+ if (!copy_buf->buf) {
+ kfree(copy_buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+ kfree(audio_buf->buf);
+ kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+ struct gaudio card;
+
+ u8 ac_intf, ac_alt;
+ u8 as_intf, as_alt;
+
+ /* endpoints handle full and/or high speeds */
+ struct usb_ep *out_ep;
+
+ spinlock_t lock;
+ struct f_audio_buf *copy_buf;
+ struct work_struct playback_work;
+ struct list_head play_queue;
+
+ /* Control Set command */
+ struct list_head cs;
+ u8 set_cmd;
+ struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data)
+{
+ struct f_audio *audio = container_of(data, struct f_audio,
+ playback_work);
+ struct f_audio_buf *play_buf;
+
+ spin_lock_irq(&audio->lock);
+ if (list_empty(&audio->play_queue)) {
+ spin_unlock_irq(&audio->lock);
+ return;
+ }
+ play_buf = list_first_entry(&audio->play_queue,
+ struct f_audio_buf, list);
+ list_del(&play_buf->list);
+ spin_unlock_irq(&audio->lock);
+
+ u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+ f_audio_buffer_free(play_buf);
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ struct f_uac1_legacy_opts *opts;
+ int audio_buf_size;
+ int err;
+
+ opts = container_of(audio->card.func.fi, struct f_uac1_legacy_opts,
+ func_inst);
+ audio_buf_size = opts->audio_buf_size;
+
+ if (!copy_buf)
+ return -EINVAL;
+
+ /* Copy buffer is full, add it to the play_queue */
+ if (audio_buf_size - copy_buf->actual < req->actual) {
+ spin_lock_irq(&audio->lock);
+ list_add_tail(&copy_buf->list, &audio->play_queue);
+ spin_unlock_irq(&audio->lock);
+ schedule_work(&audio->playback_work);
+ copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (IS_ERR(copy_buf))
+ return -ENOMEM;
+ }
+
+ memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+ copy_buf->actual += req->actual;
+ audio->copy_buf = copy_buf;
+
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+ return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ int status = req->status;
+ u32 data = 0;
+ struct usb_ep *out_ep = audio->out_ep;
+
+ switch (status) {
+
+ case 0: /* normal completion? */
+ if (ep == out_ep)
+ f_audio_out_ep_complete(ep, req);
+ else if (audio->set_con) {
+ memcpy(&data, req->buf, req->length);
+ audio->set_con->set(audio->set_con, audio->set_cmd,
+ le16_to_cpu(data));
+ audio->set_con = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel) {
+ audio->set_con = con;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ audio->set_cmd = cmd;
+ req->context = audio;
+ req->complete = f_audio_complete;
+
+ return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel && con->get) {
+ value = con->get(con, cmd);
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ req->context = audio;
+ req->complete = f_audio_complete;
+ len = min_t(size_t, sizeof(value), len);
+ memcpy(req->buf, &value, len);
+
+ return len;
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ value = len;
+ break;
+
+ case UAC_SET_MIN:
+ break;
+
+ case UAC_SET_MAX:
+ break;
+
+ case UAC_SET_RES:
+ break;
+
+ case UAC_SET_MEM:
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ value = len;
+ break;
+ case UAC_GET_MEM:
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+ value = audio_set_intf_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+ value = audio_get_intf_req(f, ctrl);
+ break;
+
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+
+ default:
+ ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_ep *out_ep = audio->out_ep;
+ struct usb_request *req;
+ struct f_uac1_legacy_opts *opts;
+ int req_buf_size, req_count, audio_buf_size;
+ int i = 0, err = 0;
+
+ DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+ opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ req_buf_size = opts->req_buf_size;
+ req_count = opts->req_count;
+ audio_buf_size = opts->audio_buf_size;
+
+ /* No i/f has more than 2 alt settings */
+ if (alt > 1) {
+ ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (intf == audio->ac_intf) {
+ /* Control I/f has only 1 AltSetting - 0 */
+ if (alt) {
+ ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+ } else if (intf == audio->as_intf) {
+ if (alt == 1) {
+ err = config_ep_by_speed(cdev->gadget, f, out_ep);
+ if (err)
+ return err;
+
+ usb_ep_enable(out_ep);
+ audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (IS_ERR(audio->copy_buf))
+ return -ENOMEM;
+
+ /*
+ * allocate a bunch of read buffers
+ * and queue them all at once.
+ */
+ for (i = 0; i < req_count && err == 0; i++) {
+ req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+ if (req) {
+ req->buf = kzalloc(req_buf_size,
+ GFP_ATOMIC);
+ if (req->buf) {
+ req->length = req_buf_size;
+ req->context = audio;
+ req->complete =
+ f_audio_complete;
+ err = usb_ep_queue(out_ep,
+ req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev,
+ "%s queue req: %d\n",
+ out_ep->name, err);
+ } else
+ err = -ENOMEM;
+ } else
+ err = -ENOMEM;
+ }
+
+ } else {
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ if (copy_buf) {
+ list_add_tail(&copy_buf->list,
+ &audio->play_queue);
+ schedule_work(&audio->playback_work);
+ }
+ }
+ audio->as_alt = alt;
+ }
+
+ return err;
+}
+
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ if (intf == audio->ac_intf)
+ return audio->ac_alt;
+ else if (intf == audio->as_intf)
+ return audio->as_alt;
+ else
+ ERROR(cdev, "%s:%d Invalid Interface %d!\n",
+ __func__, __LINE__, intf);
+
+ return -EINVAL;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+ return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+ struct gaudio *card = &audio->card;
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+ as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+ /* Set sample rates */
+ rate = u_audio_get_playback_rate(card);
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+
+ /* Todo: Set Sample bits and other parameters */
+
+ return;
+}
+
+/* audio function driver setup/binding */
+static int
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_string *us;
+ int status;
+ struct usb_ep *ep = NULL;
+ struct f_uac1_legacy_opts *audio_opts;
+
+ audio_opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ audio->card.gadget = c->cdev->gadget;
+ /* set up ALSA audio devices */
+ if (!audio_opts->bound) {
+ status = gaudio_setup(&audio->card);
+ if (status < 0)
+ return status;
+ audio_opts->bound = true;
+ }
+ us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+ ac_interface_desc.iInterface = us[STR_AC_IF].id;
+ input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
+ input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
+ feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
+ output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
+ as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
+ as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+
+
+ f_audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+ audio->ac_intf = status;
+ audio->ac_alt = 0;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+ audio->as_intf = status;
+ audio->as_alt = 0;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->out_ep = ep;
+ audio->out_ep->desc = &as_out_ep_desc;
+
+ status = -ENOMEM;
+
+ /* copy descriptors, and track endpoint copies */
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL);
+ if (status)
+ goto fail;
+ return 0;
+
+fail:
+ gaudio_cleanup(&audio->card);
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+ con->data[cmd] = value;
+
+ return 0;
+}
+
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+ return con->data[cmd];
+}
+
+/* Todo: add more control selectors dynamically */
+static int control_selector_init(struct f_audio *audio)
+{
+ INIT_LIST_HEAD(&audio->cs);
+ list_add(&feature_unit.list, &audio->cs);
+
+ INIT_LIST_HEAD(&feature_unit.control);
+ list_add(&mute_control.list, &feature_unit.control);
+ list_add(&volume_control.list, &feature_unit.control);
+
+ volume_control.data[UAC__CUR] = 0xffc0;
+ volume_control.data[UAC__MIN] = 0xe3a0;
+ volume_control.data[UAC__MAX] = 0xfff0;
+ volume_control.data[UAC__RES] = 0x0030;
+
+ return 0;
+}
+
+static inline
+struct f_uac1_legacy_opts *to_f_uac1_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uac1_legacy_opts,
+ func_inst.group);
+}
+
+static void f_uac1_attr_release(struct config_item *item)
+{
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations f_uac1_item_ops = {
+ .release = f_uac1_attr_release,
+};
+
+#define UAC1_INT_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%u\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int ret; \
+ u32 num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou32(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_INT_ATTRIBUTE(req_buf_size);
+UAC1_INT_ATTRIBUTE(req_count);
+UAC1_INT_ATTRIBUTE(audio_buf_size);
+
+#define UAC1_STR_ATTRIBUTE(name) \
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%s\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item); \
+ int ret = -EBUSY; \
+ char *tmp; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) \
+ goto end; \
+ \
+ tmp = kstrndup(page, len, GFP_KERNEL); \
+ if (!tmp) { /* kstrndup returns NULL on alloc failure */ \
+ ret = -ENOMEM; \
+ goto end; \
+ } \
+ if (opts->name##_alloc) \
+ kfree(opts->name); \
+ opts->name##_alloc = true; \
+ opts->name = tmp; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_STR_ATTRIBUTE(fn_play);
+UAC1_STR_ATTRIBUTE(fn_cap);
+UAC1_STR_ATTRIBUTE(fn_cntl);
+
+static struct configfs_attribute *f_uac1_attrs[] = {
+ &f_uac1_opts_attr_req_buf_size,
+ &f_uac1_opts_attr_req_count,
+ &f_uac1_opts_attr_audio_buf_size,
+ &f_uac1_opts_attr_fn_play,
+ &f_uac1_opts_attr_fn_cap,
+ &f_uac1_opts_attr_fn_cntl,
+ NULL,
+};
+
+static struct config_item_type f_uac1_func_type = {
+ .ct_item_ops = &f_uac1_item_ops,
+ .ct_attrs = f_uac1_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void f_audio_free_inst(struct usb_function_instance *f)
+{
+ struct f_uac1_legacy_opts *opts;
+
+ opts = container_of(f, struct f_uac1_legacy_opts, func_inst);
+ if (opts->fn_play_alloc)
+ kfree(opts->fn_play);
+ if (opts->fn_cap_alloc)
+ kfree(opts->fn_cap);
+ if (opts->fn_cntl_alloc)
+ kfree(opts->fn_cntl);
+ kfree(opts);
+}
+
+static struct usb_function_instance *f_audio_alloc_inst(void)
+{
+ struct f_uac1_legacy_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = f_audio_free_inst;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &f_uac1_func_type);
+
+ opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
+ opts->req_count = UAC1_REQ_COUNT;
+ opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
+ opts->fn_play = FILE_PCM_PLAYBACK;
+ opts->fn_cap = FILE_PCM_CAPTURE;
+ opts->fn_cntl = FILE_CONTROL;
+ return &opts->func_inst;
+}
+
+static void f_audio_free(struct usb_function *f)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct f_uac1_legacy_opts *opts;
+
+ gaudio_cleanup(&audio->card);
+ opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+ kfree(audio);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+}
+
+static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ usb_free_all_descriptors(f);
+}
+
+static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
+{
+ struct f_audio *audio;
+ struct f_uac1_legacy_opts *opts;
+
+ /* allocate and initialize one new instance */
+ audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ return ERR_PTR(-ENOMEM);
+
+ audio->card.func.name = "g_audio";
+
+ opts = container_of(fi, struct f_uac1_legacy_opts, func_inst);
+ mutex_lock(&opts->lock);
+ ++opts->refcnt;
+ mutex_unlock(&opts->lock);
+ INIT_LIST_HEAD(&audio->play_queue);
+ spin_lock_init(&audio->lock);
+
+ audio->card.func.bind = f_audio_bind;
+ audio->card.func.unbind = f_audio_unbind;
+ audio->card.func.set_alt = f_audio_set_alt;
+ audio->card.func.get_alt = f_audio_get_alt;
+ audio->card.func.setup = f_audio_setup;
+ audio->card.func.disable = f_audio_disable;
+ audio->card.func.free_func = f_audio_free;
+
+ control_selector_init(audio);
+
+ INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+ return &audio->card.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(uac1_legacy, f_audio_alloc_inst, f_audio_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bryan Wu");
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6903d02a933f..59de3f246f42 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -13,18 +13,11 @@
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
+#include "u_audio.h"
#include "u_uac2.h"
-/* Keep everyone on toes */
-#define USB_XFERS 2
-
/*
* The driver implements a simple UAC_2 topology.
* USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
@@ -54,504 +47,23 @@
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
-static const char *uac2_name = "snd_uac2";
-
-struct uac2_req {
- struct uac2_rtd_params *pp; /* parent param */
- struct usb_request *req;
-};
-
-struct uac2_rtd_params {
- struct snd_uac2_chip *uac2; /* parent chip */
- bool ep_enabled; /* if the ep is enabled */
- /* Size of the ring buffer */
- size_t dma_bytes;
- unsigned char *dma_area;
-
- struct snd_pcm_substream *ss;
-
- /* Ring buffer */
- ssize_t hw_ptr;
-
- void *rbuf;
-
- size_t period_size;
-
- unsigned max_psize;
- struct uac2_req ureq[USB_XFERS];
-
- spinlock_t lock;
-};
-
-struct snd_uac2_chip {
- struct platform_device pdev;
- struct platform_driver pdrv;
-
- struct uac2_rtd_params p_prm;
- struct uac2_rtd_params c_prm;
-
- struct snd_card *card;
- struct snd_pcm *pcm;
-
- /* timekeeping for the playback endpoint */
- unsigned int p_interval;
- unsigned int p_residue;
-
- /* pre-calculated values for playback iso completion */
- unsigned int p_pktsize;
- unsigned int p_pktsize_residue;
- unsigned int p_framesize;
-};
-
-#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
-#define PRD_SIZE_MAX PAGE_SIZE
-#define MIN_PERIODS 4
-
-static struct snd_pcm_hardware uac2_pcm_hardware = {
- .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
- | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
- | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
- .buffer_bytes_max = BUFF_SIZE_MAX,
- .period_bytes_max = PRD_SIZE_MAX,
- .periods_min = MIN_PERIODS,
-};
-
-struct audio_dev {
- u8 ac_intf, ac_alt;
- u8 as_out_intf, as_out_alt;
- u8 as_in_intf, as_in_alt;
-
- struct usb_ep *in_ep, *out_ep;
- struct usb_function func;
-
- /* The ALSA Sound Card it represents on the USB-Client side */
- struct snd_uac2_chip uac2;
+struct f_uac2 {
+ struct g_audio g_audio;
+ u8 ac_intf, as_in_intf, as_out_intf;
+ u8 ac_alt, as_in_alt, as_out_alt; /* needed for get_alt() */
};
-static inline
-struct audio_dev *func_to_agdev(struct usb_function *f)
-{
- return container_of(f, struct audio_dev, func);
-}
-
-static inline
-struct audio_dev *uac2_to_agdev(struct snd_uac2_chip *u)
+static inline struct f_uac2 *func_to_uac2(struct usb_function *f)
{
- return container_of(u, struct audio_dev, uac2);
+ return container_of(f, struct f_uac2, g_audio.func);
}
static inline
-struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p)
-{
- return container_of(p, struct snd_uac2_chip, pdev);
-}
-
-static inline
-struct f_uac2_opts *agdev_to_uac2_opts(struct audio_dev *agdev)
+struct f_uac2_opts *g_audio_to_uac2_opts(struct g_audio *agdev)
{
return container_of(agdev->func.fi, struct f_uac2_opts, func_inst);
}
-static inline
-uint num_channels(uint chanmask)
-{
- uint num = 0;
-
- while (chanmask) {
- num += (chanmask & 1);
- chanmask >>= 1;
- }
-
- return num;
-}
-
-static void
-agdev_iso_complete(struct usb_ep *ep, struct usb_request *req)
-{
- unsigned pending;
- unsigned long flags;
- unsigned int hw_ptr;
- bool update_alsa = false;
- int status = req->status;
- struct uac2_req *ur = req->context;
- struct snd_pcm_substream *substream;
- struct uac2_rtd_params *prm = ur->pp;
- struct snd_uac2_chip *uac2 = prm->uac2;
-
- /* i/f shutting down */
- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
- return;
-
- /*
- * We can't really do much about bad xfers.
- * Afterall, the ISOCH xfers could fail legitimately.
- */
- if (status)
- pr_debug("%s: iso_complete status(%d) %d/%d\n",
- __func__, status, req->actual, req->length);
-
- substream = prm->ss;
-
- /* Do nothing if ALSA isn't active */
- if (!substream)
- goto exit;
-
- spin_lock_irqsave(&prm->lock, flags);
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /*
- * For each IN packet, take the quotient of the current data
- * rate and the endpoint's interval as the base packet size.
- * If there is a residue from this division, add it to the
- * residue accumulator.
- */
- req->length = uac2->p_pktsize;
- uac2->p_residue += uac2->p_pktsize_residue;
-
- /*
- * Whenever there are more bytes in the accumulator than we
- * need to add one more sample frame, increase this packet's
- * size and decrease the accumulator.
- */
- if (uac2->p_residue / uac2->p_interval >= uac2->p_framesize) {
- req->length += uac2->p_framesize;
- uac2->p_residue -= uac2->p_framesize *
- uac2->p_interval;
- }
-
- req->actual = req->length;
- }
-
- pending = prm->hw_ptr % prm->period_size;
- pending += req->actual;
- if (pending >= prm->period_size)
- update_alsa = true;
-
- hw_ptr = prm->hw_ptr;
- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
-
- spin_unlock_irqrestore(&prm->lock, flags);
-
- /* Pack USB load in ALSA ring buffer */
- pending = prm->dma_bytes - hw_ptr;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (unlikely(pending < req->actual)) {
- memcpy(req->buf, prm->dma_area + hw_ptr, pending);
- memcpy(req->buf + pending, prm->dma_area,
- req->actual - pending);
- } else {
- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
- }
- } else {
- if (unlikely(pending < req->actual)) {
- memcpy(prm->dma_area + hw_ptr, req->buf, pending);
- memcpy(prm->dma_area, req->buf + pending,
- req->actual - pending);
- } else {
- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
- }
- }
-
-exit:
- if (usb_ep_queue(ep, req, GFP_ATOMIC))
- dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__);
-
- if (update_alsa)
- snd_pcm_period_elapsed(substream);
-
- return;
-}
-
-static int
-uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
- unsigned long flags;
- int err = 0;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- spin_lock_irqsave(&prm->lock, flags);
-
- /* Reset */
- prm->hw_ptr = 0;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- prm->ss = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- prm->ss = NULL;
- break;
- default:
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&prm->lock, flags);
-
- /* Clear buffer after Play stops */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
- memset(prm->rbuf, 0, prm->max_psize * USB_XFERS);
-
- return err;
-}
-
-static snd_pcm_uframes_t uac2_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- return bytes_to_frames(substream->runtime, prm->hw_ptr);
-}
-
-static int uac2_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
- int err;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- err = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (err >= 0) {
- prm->dma_bytes = substream->runtime->dma_bytes;
- prm->dma_area = substream->runtime->dma_area;
- prm->period_size = params_period_bytes(hw_params);
- }
-
- return err;
-}
-
-static int uac2_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct uac2_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac2->p_prm;
- else
- prm = &uac2->c_prm;
-
- prm->dma_area = NULL;
- prm->dma_bytes = 0;
- prm->period_size = 0;
-
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int uac2_pcm_open(struct snd_pcm_substream *substream)
-{
- struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct audio_dev *audio_dev;
- struct f_uac2_opts *opts;
- int p_ssize, c_ssize;
- int p_srate, c_srate;
- int p_chmask, c_chmask;
-
- audio_dev = uac2_to_agdev(uac2);
- opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
- p_ssize = opts->p_ssize;
- c_ssize = opts->c_ssize;
- p_srate = opts->p_srate;
- c_srate = opts->c_srate;
- p_chmask = opts->p_chmask;
- c_chmask = opts->c_chmask;
- uac2->p_residue = 0;
-
- runtime->hw = uac2_pcm_hardware;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- spin_lock_init(&uac2->p_prm.lock);
- runtime->hw.rate_min = p_srate;
- switch (p_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
- runtime->hw.channels_min = num_channels(p_chmask);
- runtime->hw.period_bytes_min = 2 * uac2->p_prm.max_psize
- / runtime->hw.periods_min;
- } else {
- spin_lock_init(&uac2->c_prm.lock);
- runtime->hw.rate_min = c_srate;
- switch (c_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
- runtime->hw.channels_min = num_channels(c_chmask);
- runtime->hw.period_bytes_min = 2 * uac2->c_prm.max_psize
- / runtime->hw.periods_min;
- }
-
- runtime->hw.rate_max = runtime->hw.rate_min;
- runtime->hw.channels_max = runtime->hw.channels_min;
-
- snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-
- return 0;
-}
-
-/* ALSA cries without these function pointers */
-static int uac2_pcm_null(struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
-static struct snd_pcm_ops uac2_pcm_ops = {
- .open = uac2_pcm_open,
- .close = uac2_pcm_null,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uac2_pcm_hw_params,
- .hw_free = uac2_pcm_hw_free,
- .trigger = uac2_pcm_trigger,
- .pointer = uac2_pcm_pointer,
- .prepare = uac2_pcm_null,
-};
-
-static int snd_uac2_probe(struct platform_device *pdev)
-{
- struct snd_uac2_chip *uac2 = pdev_to_uac2(pdev);
- struct snd_card *card;
- struct snd_pcm *pcm;
- struct audio_dev *audio_dev;
- struct f_uac2_opts *opts;
- int err;
- int p_chmask, c_chmask;
-
- audio_dev = uac2_to_agdev(uac2);
- opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
- p_chmask = opts->p_chmask;
- c_chmask = opts->c_chmask;
-
- /* Choose any slot, with no id */
- err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
- if (err < 0)
- return err;
-
- uac2->card = card;
-
- /*
- * Create first PCM device
- * Create a substream only for non-zero channel streams
- */
- err = snd_pcm_new(uac2->card, "UAC2 PCM", 0,
- p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
- if (err < 0)
- goto snd_fail;
-
- strcpy(pcm->name, "UAC2 PCM");
- pcm->private_data = uac2;
-
- uac2->pcm = pcm;
-
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac2_pcm_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac2_pcm_ops);
-
- strcpy(card->driver, "UAC2_Gadget");
- strcpy(card->shortname, "UAC2_Gadget");
- sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
-
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
-
- err = snd_card_register(card);
- if (!err) {
- platform_set_drvdata(pdev, card);
- return 0;
- }
-
-snd_fail:
- snd_card_free(card);
-
- uac2->pcm = NULL;
- uac2->card = NULL;
-
- return err;
-}
-
-static int snd_uac2_remove(struct platform_device *pdev)
-{
- struct snd_card *card = platform_get_drvdata(pdev);
-
- if (card)
- return snd_card_free(card);
-
- return 0;
-}
-
-static void snd_uac2_release(struct device *dev)
-{
- dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
-}
-
-static int alsa_uac2_init(struct audio_dev *agdev)
-{
- struct snd_uac2_chip *uac2 = &agdev->uac2;
- int err;
-
- uac2->pdrv.probe = snd_uac2_probe;
- uac2->pdrv.remove = snd_uac2_remove;
- uac2->pdrv.driver.name = uac2_name;
-
- uac2->pdev.id = 0;
- uac2->pdev.name = uac2_name;
- uac2->pdev.dev.release = snd_uac2_release;
-
- /* Register snd_uac2 driver */
- err = platform_driver_register(&uac2->pdrv);
- if (err)
- return err;
-
- /* Register snd_uac2 device */
- err = platform_device_register(&uac2->pdev);
- if (err)
- platform_driver_unregister(&uac2->pdrv);
-
- return err;
-}
-
-static void alsa_uac2_exit(struct audio_dev *agdev)
-{
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- platform_driver_unregister(&uac2->pdrv);
- platform_device_unregister(&uac2->pdev);
-}
-
-
/* --------- USB Function Interface ------------- */
enum {
@@ -939,30 +451,6 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static inline void
-free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
-{
- struct snd_uac2_chip *uac2 = prm->uac2;
- int i;
-
- if (!prm->ep_enabled)
- return;
-
- prm->ep_enabled = false;
-
- for (i = 0; i < USB_XFERS; i++) {
- if (prm->ureq[i].req) {
- usb_ep_dequeue(ep, prm->ureq[i].req);
- usb_ep_free_request(ep, prm->ureq[i].req);
- prm->ureq[i].req = NULL;
- }
- }
-
- if (usb_ep_disable(ep))
- dev_err(&uac2->pdev.dev,
- "%s:%d Error!\n", __func__, __LINE__);
-}
-
static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
enum usb_device_speed speed, bool is_playback)
@@ -1007,12 +495,11 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
struct usb_composite_dev *cdev = cfg->cdev;
struct usb_gadget *gadget = cdev->gadget;
- struct device *dev = &uac2->pdev.dev;
- struct uac2_rtd_params *prm;
+ struct device *dev = &gadget->dev;
struct f_uac2_opts *uac2_opts;
struct usb_string *us;
int ret;
@@ -1061,8 +548,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
iad_desc.bFirstInterface = ret;
std_ac_if_desc.bInterfaceNumber = ret;
- agdev->ac_intf = ret;
- agdev->ac_alt = 0;
+ uac2->ac_intf = ret;
+ uac2->ac_alt = 0;
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
@@ -1071,8 +558,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
- agdev->as_out_intf = ret;
- agdev->as_out_alt = 0;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
ret = usb_interface_id(cfg, fn);
if (ret < 0) {
@@ -1081,23 +568,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_in_if0_desc.bInterfaceNumber = ret;
std_as_in_if1_desc.bInterfaceNumber = ret;
- agdev->as_in_intf = ret;
- agdev->as_in_alt = 0;
-
- agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
- if (!agdev->out_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
- }
-
- agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
- if (!agdev->in_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
- }
-
- uac2->p_prm.uac2 = uac2;
- uac2->c_prm.uac2 = uac2;
+ uac2->as_in_intf = ret;
+ uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
@@ -1128,6 +600,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
}
+ agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+ if (!agdev->out_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+ if (!agdev->in_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
+ hs_epin_desc.wMaxPacketSize);
+ agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
+ hs_epout_desc.wMaxPacketSize);
+
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
@@ -1135,47 +624,34 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
if (ret)
return ret;
- prm = &agdev->uac2.c_prm;
- prm->max_psize = hs_epout_desc.wMaxPacketSize;
- prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
- if (!prm->rbuf) {
- prm->max_psize = 0;
- goto err_free_descs;
- }
+ agdev->gadget = gadget;
- prm = &agdev->uac2.p_prm;
- prm->max_psize = hs_epin_desc.wMaxPacketSize;
- prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
- if (!prm->rbuf) {
- prm->max_psize = 0;
- goto err;
- }
-
- ret = alsa_uac2_init(agdev);
+ agdev->params.p_chmask = uac2_opts->p_chmask;
+ agdev->params.p_srate = uac2_opts->p_srate;
+ agdev->params.p_ssize = uac2_opts->p_ssize;
+ agdev->params.c_chmask = uac2_opts->c_chmask;
+ agdev->params.c_srate = uac2_opts->c_srate;
+ agdev->params.c_ssize = uac2_opts->c_ssize;
+ agdev->params.req_number = uac2_opts->req_number;
+ ret = g_audio_setup(agdev, "UAC2 PCM", "UAC2_Gadget");
if (ret)
- goto err;
+ goto err_free_descs;
return 0;
-err:
- kfree(agdev->uac2.p_prm.rbuf);
- kfree(agdev->uac2.c_prm.rbuf);
err_free_descs:
usb_free_all_descriptors(fn);
- return -EINVAL;
+ agdev->gadget = NULL;
+ return ret;
}
static int
afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = fn->config->cdev;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
struct usb_gadget *gadget = cdev->gadget;
- struct device *dev = &uac2->pdev.dev;
- struct usb_request *req;
- struct usb_ep *ep;
- struct uac2_rtd_params *prm;
- int req_len, i;
+ struct device *dev = &gadget->dev;
+ int ret = 0;
/* No i/f has more than 2 alt settings */
if (alt > 1) {
@@ -1183,7 +659,7 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
return -EINVAL;
}
- if (intf == agdev->ac_intf) {
+ if (intf == uac2->ac_intf) {
/* Control I/f has only 1 AltSetting - 0 */
if (alt) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
@@ -1192,96 +668,42 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
return 0;
}
- if (intf == agdev->as_out_intf) {
- ep = agdev->out_ep;
- prm = &uac2->c_prm;
- config_ep_by_speed(gadget, fn, ep);
- agdev->as_out_alt = alt;
- req_len = prm->max_psize;
- } else if (intf == agdev->as_in_intf) {
- struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev);
- unsigned int factor, rate;
- struct usb_endpoint_descriptor *ep_desc;
-
- ep = agdev->in_ep;
- prm = &uac2->p_prm;
- config_ep_by_speed(gadget, fn, ep);
- agdev->as_in_alt = alt;
-
- /* pre-calculate the playback endpoint's interval */
- if (gadget->speed == USB_SPEED_FULL) {
- ep_desc = &fs_epin_desc;
- factor = 1000;
- } else {
- ep_desc = &hs_epin_desc;
- factor = 8000;
- }
-
- /* pre-compute some values for iso_complete() */
- uac2->p_framesize = opts->p_ssize *
- num_channels(opts->p_chmask);
- rate = opts->p_srate * uac2->p_framesize;
- uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
- prm->max_psize);
+ if (intf == uac2->as_out_intf) {
+ uac2->as_out_alt = alt;
- if (uac2->p_pktsize < prm->max_psize)
- uac2->p_pktsize_residue = rate % uac2->p_interval;
+ if (alt)
+ ret = u_audio_start_capture(&uac2->g_audio);
else
- uac2->p_pktsize_residue = 0;
+ u_audio_stop_capture(&uac2->g_audio);
+ } else if (intf == uac2->as_in_intf) {
+ uac2->as_in_alt = alt;
- req_len = uac2->p_pktsize;
- uac2->p_residue = 0;
+ if (alt)
+ ret = u_audio_start_playback(&uac2->g_audio);
+ else
+ u_audio_stop_playback(&uac2->g_audio);
} else {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
}
- if (alt == 0) {
- free_ep(prm, ep);
- return 0;
- }
-
- prm->ep_enabled = true;
- usb_ep_enable(ep);
-
- for (i = 0; i < USB_XFERS; i++) {
- if (!prm->ureq[i].req) {
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req == NULL)
- return -ENOMEM;
-
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
-
- req->zero = 0;
- req->context = &prm->ureq[i];
- req->length = req_len;
- req->complete = agdev_iso_complete;
- req->buf = prm->rbuf + i * prm->max_psize;
- }
-
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- }
-
- return 0;
+ return ret;
}
static int
afunc_get_alt(struct usb_function *fn, unsigned intf)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- if (intf == agdev->ac_intf)
- return agdev->ac_alt;
- else if (intf == agdev->as_out_intf)
- return agdev->as_out_alt;
- else if (intf == agdev->as_in_intf)
- return agdev->as_in_alt;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
+
+ if (intf == uac2->ac_intf)
+ return uac2->ac_alt;
+ else if (intf == uac2->as_out_intf)
+ return uac2->as_out_alt;
+ else if (intf == uac2->as_in_intf)
+ return uac2->as_in_alt;
else
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d Invalid Interface %d!\n",
__func__, __LINE__, intf);
@@ -1291,22 +713,19 @@ afunc_get_alt(struct usb_function *fn, unsigned intf)
static void
afunc_disable(struct usb_function *fn)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
-
- free_ep(&uac2->p_prm, agdev->in_ep);
- agdev->as_in_alt = 0;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
- free_ep(&uac2->c_prm, agdev->out_ep);
- agdev->as_out_alt = 0;
+ uac2->as_in_alt = 0;
+ uac2->as_out_alt = 0;
+ u_audio_stop_capture(&uac2->g_audio);
+ u_audio_stop_playback(&uac2->g_audio);
}
static int
in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts;
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1316,7 +735,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
int value = -EOPNOTSUPP;
int p_srate, c_srate;
- opts = agdev_to_uac2_opts(agdev);
+ opts = g_audio_to_uac2_opts(agdev);
p_srate = opts->p_srate;
c_srate = opts->c_srate;
@@ -1335,7 +754,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
*(u8 *)req->buf = 1;
value = min_t(unsigned, w_length, 1);
} else {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
@@ -1347,8 +766,7 @@ static int
in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_request *req = fn->config->cdev->req;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct f_uac2_opts *opts;
u16 w_length = le16_to_cpu(cr->wLength);
u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1359,7 +777,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
int value = -EOPNOTSUPP;
int p_srate, c_srate;
- opts = agdev_to_uac2_opts(agdev);
+ opts = g_audio_to_uac2_opts(agdev);
p_srate = opts->p_srate;
c_srate = opts->c_srate;
@@ -1378,7 +796,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
} else {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d control_selector=%d TODO!\n",
__func__, __LINE__, control_selector);
}
@@ -1413,13 +831,13 @@ out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
static int
setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct f_uac2 *uac2 = func_to_uac2(fn);
+ struct g_audio *agdev = func_to_g_audio(fn);
u16 w_index = le16_to_cpu(cr->wIndex);
u8 intf = w_index & 0xff;
- if (intf != agdev->ac_intf) {
- dev_err(&uac2->pdev.dev,
+ if (intf != uac2->ac_intf) {
+ dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
return -EOPNOTSUPP;
}
@@ -1436,8 +854,7 @@ static int
afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
{
struct usb_composite_dev *cdev = fn->config->cdev;
- struct audio_dev *agdev = func_to_agdev(fn);
- struct snd_uac2_chip *uac2 = &agdev->uac2;
+ struct g_audio *agdev = func_to_g_audio(fn);
struct usb_request *req = cdev->req;
u16 w_length = le16_to_cpu(cr->wLength);
int value = -EOPNOTSUPP;
@@ -1449,14 +866,15 @@ afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE)
value = setup_rq_inf(fn, cr);
else
- dev_err(&uac2->pdev.dev, "%s:%d Error!\n", __func__, __LINE__);
+ dev_err(&agdev->gadget->dev, "%s:%d Error!\n",
+ __func__, __LINE__);
if (value >= 0) {
req->length = value;
req->zero = value < w_length;
value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
- dev_err(&uac2->pdev.dev,
+ dev_err(&agdev->gadget->dev,
"%s:%d Error!\n", __func__, __LINE__);
req->status = 0;
}
@@ -1529,6 +947,7 @@ UAC2_ATTRIBUTE(p_ssize);
UAC2_ATTRIBUTE(c_chmask);
UAC2_ATTRIBUTE(c_srate);
UAC2_ATTRIBUTE(c_ssize);
+UAC2_ATTRIBUTE(req_number);
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
@@ -1537,6 +956,7 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_c_chmask,
&f_uac2_opts_attr_c_srate,
&f_uac2_opts_attr_c_ssize,
+ &f_uac2_opts_attr_req_number,
NULL,
};
@@ -1574,15 +994,16 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->c_chmask = UAC2_DEF_CCHMASK;
opts->c_srate = UAC2_DEF_CSRATE;
opts->c_ssize = UAC2_DEF_CSSIZE;
+ opts->req_number = UAC2_DEF_REQ_NUM;
return &opts->func_inst;
}
static void afunc_free(struct usb_function *f)
{
- struct audio_dev *agdev;
+ struct g_audio *agdev;
struct f_uac2_opts *opts;
- agdev = func_to_agdev(f);
+ agdev = func_to_g_audio(f);
opts = container_of(f->fi, struct f_uac2_opts, func_inst);
kfree(agdev);
mutex_lock(&opts->lock);
@@ -1592,26 +1013,21 @@ static void afunc_free(struct usb_function *f)
static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
{
- struct audio_dev *agdev = func_to_agdev(f);
- struct uac2_rtd_params *prm;
+ struct g_audio *agdev = func_to_g_audio(f);
- alsa_uac2_exit(agdev);
-
- prm = &agdev->uac2.p_prm;
- kfree(prm->rbuf);
-
- prm = &agdev->uac2.c_prm;
- kfree(prm->rbuf);
+ g_audio_cleanup(agdev);
usb_free_all_descriptors(f);
+
+ agdev->gadget = NULL;
}
static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
{
- struct audio_dev *agdev;
+ struct f_uac2 *uac2;
struct f_uac2_opts *opts;
- agdev = kzalloc(sizeof(*agdev), GFP_KERNEL);
- if (agdev == NULL)
+ uac2 = kzalloc(sizeof(*uac2), GFP_KERNEL);
+ if (uac2 == NULL)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_uac2_opts, func_inst);
@@ -1619,16 +1035,16 @@ static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
++opts->refcnt;
mutex_unlock(&opts->lock);
- agdev->func.name = "uac2_func";
- agdev->func.bind = afunc_bind;
- agdev->func.unbind = afunc_unbind;
- agdev->func.set_alt = afunc_set_alt;
- agdev->func.get_alt = afunc_get_alt;
- agdev->func.disable = afunc_disable;
- agdev->func.setup = afunc_setup;
- agdev->func.free_func = afunc_free;
+ uac2->g_audio.func.name = "uac2_func";
+ uac2->g_audio.func.bind = afunc_bind;
+ uac2->g_audio.func.unbind = afunc_unbind;
+ uac2->g_audio.func.set_alt = afunc_set_alt;
+ uac2->g_audio.func.get_alt = afunc_get_alt;
+ uac2->g_audio.func.disable = afunc_disable;
+ uac2->g_audio.func.setup = afunc_setup;
+ uac2->g_audio.func.free_func = afunc_free;
- return &agdev->func;
+ return &uac2->g_audio.func;
}
DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 4dba794a6ad5..1d13d79d5070 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -538,14 +538,11 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
*/
retval = 0;
if (*params->filter) {
- params->state = RNDIS_DATA_INITIALIZED;
- netif_carrier_on(params->dev);
- if (netif_running(params->dev))
- netif_wake_queue(params->dev);
+ pr_debug("%s(): disable flow control\n", __func__);
+ rndis_flow_control(params, false);
} else {
- params->state = RNDIS_INITIALIZED;
- netif_carrier_off(params->dev);
- netif_stop_queue(params->dev);
+ pr_err("%s(): enable flow control\n", __func__);
+ rndis_flow_control(params, true);
}
break;
@@ -595,10 +592,11 @@ static int rndis_init_response(struct rndis_params *params,
+ sizeof(struct ethhdr)
+ sizeof(struct rndis_packet_msg_type)
+ 22));
- resp->PacketAlignmentFactor = cpu_to_le32(0);
+ resp->PacketAlignmentFactor = cpu_to_le32(params->pkt_alignment_factor);
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
+ params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
params->resp_avail(params->v);
return 0;
}
@@ -801,7 +799,7 @@ EXPORT_SYMBOL_GPL(rndis_set_host_mac);
*/
int rndis_msg_parser(struct rndis_params *params, u8 *buf)
{
- u32 MsgType, MsgLength;
+ u32 MsgType, MsgLength, major, minor, max_transfer_size;
__le32 *tmp;
if (!buf)
@@ -824,16 +822,36 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
case RNDIS_MSG_INIT:
pr_debug("%s: RNDIS_MSG_INIT\n",
__func__);
+ tmp++; /* to get RequestID */
+ major = get_unaligned_le32(tmp++);
+ minor = get_unaligned_le32(tmp++);
+ max_transfer_size = get_unaligned_le32(tmp++);
+
+ params->host_rndis_major_ver = major;
+ params->host_rndis_minor_ver = minor;
+ params->dl_max_xfer_size = max_transfer_size;
+
+ pr_debug("%s(): RNDIS Host Major:%d Minor:%d version\n",
+ __func__, major, minor);
+ pr_debug("%s(): UL Max Transfer size:%x\n", __func__,
+ max_transfer_size);
+
params->state = RNDIS_INITIALIZED;
return rndis_init_response(params, (rndis_init_msg_type *)buf);
case RNDIS_MSG_HALT:
pr_debug("%s: RNDIS_MSG_HALT\n",
__func__);
- params->state = RNDIS_UNINITIALIZED;
- if (params->dev) {
- netif_carrier_off(params->dev);
- netif_stop_queue(params->dev);
+ if (params->state == RNDIS_DATA_INITIALIZED) {
+ if (params->flow_ctrl_enable) {
+ params->flow_ctrl_enable(true, params);
+ } else {
+ if (params->dev) {
+ netif_carrier_off(params->dev);
+ netif_stop_queue(params->dev);
+ }
+ }
+ params->state = RNDIS_UNINITIALIZED;
}
return 0;
@@ -885,7 +903,8 @@ static inline void rndis_put_nr(int nr)
ida_simple_remove(&rndis_ida, nr);
}
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+ void (*flow_ctrl_enable)(bool enable, struct rndis_params *params))
{
struct rndis_params *params;
int i;
@@ -929,6 +948,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->state = RNDIS_UNINITIALIZED;
params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
params->resp_avail = resp_avail;
+ params->flow_ctrl_enable = flow_ctrl_enable;
params->v = v;
INIT_LIST_HEAD(&(params->resp_queue));
pr_debug("%s: configNr = %d\n", __func__, i);
@@ -1007,6 +1027,18 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
}
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->dl_max_xfer_size;
+}
+
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->ul_max_xfer_size;
+}
+
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
{
pr_debug("%s:\n", __func__);
@@ -1014,6 +1046,47 @@ void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
params->max_pkt_per_xfer = max_pkt_per_xfer;
}
+/**
+ * rndis_flow_control: enable/disable flow control with USB RNDIS interface
+ * params - RNDIS network parameter
+ * enable_flow_control - true: perform flow control, false: disable flow control
+ *
+ * In hw accelerated mode, this function triggers functionality to start/stop
+ * endless transfers, otherwise it enables/disables RNDIS network interface.
+ *
+ * On enable: the data path is throttled only if it was actually up
+ * (RNDIS_DATA_INITIALIZED) and the state drops back to RNDIS_INITIALIZED.
+ * On disable: the data path is (re)started and the state is advanced to
+ * RNDIS_DATA_INITIALIZED.
+ */
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control)
+{
+	if (!params) {
+		pr_err("%s: failed, params NULL\n", __func__);
+		return;
+	}
+
+	pr_debug("%s(): params->state:%x\n", __func__, params->state);
+
+	if (enable_flow_control) {
+		if (params->state == RNDIS_DATA_INITIALIZED) {
+			if (params->flow_ctrl_enable) {
+				/* hw accelerated path: stop endless transfers */
+				params->flow_ctrl_enable(enable_flow_control, params);
+			} else {
+				/* sw path: stop the network interface */
+				netif_carrier_off(params->dev);
+				netif_stop_queue(params->dev);
+			}
+		}
+		params->state = RNDIS_INITIALIZED;
+	} else {
+		if (params->state != RNDIS_DATA_INITIALIZED) {
+			if (params->flow_ctrl_enable) {
+				/* hw accelerated path: restart endless transfers */
+				params->flow_ctrl_enable(enable_flow_control, params);
+			} else {
+				/* sw path: bring the network interface back up */
+				netif_carrier_on(params->dev);
+				if (netif_running(params->dev))
+					netif_wake_queue(params->dev);
+			}
+		}
+		params->state = RNDIS_DATA_INITIALIZED;
+	}
+}
+
void rndis_add_hdr(struct sk_buff *skb)
{
struct rndis_packet_msg_type *header;
@@ -1159,6 +1232,19 @@ int rndis_rm_hdr(struct gether *port,
}
EXPORT_SYMBOL_GPL(rndis_rm_hdr);
+/* Record the packet alignment factor used when packing RNDIS packets
+ * into an aggregate USB transfer. NULL params is rejected with an error log.
+ */
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+				u8 pkt_alignment_factor)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (!params) {
+		pr_err("%s: failed, params NULL\n", __func__);
+		return;
+	}
+
+	params->pkt_alignment_factor = pkt_alignment_factor;
+}
+
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static int rndis_proc_show(struct seq_file *m, void *v)
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 310cac3f088e..3d130b0576fc 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -192,14 +192,23 @@ typedef struct rndis_params
u32 vendorID;
u8 max_pkt_per_xfer;
const char *vendorDescr;
+ u8 pkt_alignment_factor;
void (*resp_avail)(void *v);
+ void (*flow_ctrl_enable)(bool enable,
+ struct rndis_params *params);
+
void *v;
struct list_head resp_queue;
+ u32 host_rndis_major_ver;
+ u32 host_rndis_minor_ver;
+ u32 ul_max_xfer_size;
+ u32 dl_max_xfer_size;
} rndis_params;
/* RNDIS Message parser and other useless functions */
int rndis_msg_parser(struct rndis_params *params, u8 *buf);
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v);
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+ void (*flow_ctrl_enable)(bool enable, struct rndis_params *params));
void rndis_deregister(struct rndis_params *params);
int rndis_set_param_dev(struct rndis_params *params, struct net_device *dev,
u16 *cdc_filter);
@@ -208,6 +217,8 @@ int rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
int rndis_set_param_medium(struct rndis_params *params, u32 medium,
u32 speed);
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params);
void rndis_add_hdr(struct sk_buff *skb);
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
struct sk_buff_head *list);
@@ -219,5 +230,8 @@ int rndis_signal_connect(struct rndis_params *params);
int rndis_signal_disconnect(struct rndis_params *params);
int rndis_state(struct rndis_params *params);
extern void rndis_set_host_mac(struct rndis_params *params, const u8 *addr);
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control);
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+ u8 pkt_alignment_factor);
#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
new file mode 100644
index 000000000000..435f0614d572
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -0,0 +1,645 @@
+/*
+ * u_audio.c -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * Sound card implementation was cut-and-pasted with changes
+ * from f_uac2.c and has:
+ * Copyright (C) 2011
+ * Yadwinder Singh (yadi.brar01@gmail.com)
+ * Jaswinder Singh (jaswinder.singh@linaro.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "u_audio.h"
+
+#define BUFF_SIZE_MAX (PAGE_SIZE * 16)
+#define PRD_SIZE_MAX PAGE_SIZE
+#define MIN_PERIODS 4
+
+struct uac_req {
+ struct uac_rtd_params *pp; /* parent param */
+ struct usb_request *req;
+};
+
+/* Runtime data params for one stream */
+struct uac_rtd_params {
+ struct snd_uac_chip *uac; /* parent chip */
+ bool ep_enabled; /* if the ep is enabled */
+
+ struct snd_pcm_substream *ss;
+
+ /* Ring buffer */
+ ssize_t hw_ptr;
+
+ void *rbuf;
+
+ unsigned max_psize; /* MaxPacketSize of endpoint */
+ struct uac_req *ureq;
+
+ spinlock_t lock;
+};
+
+struct snd_uac_chip {
+ struct g_audio *audio_dev;
+
+ struct uac_rtd_params p_prm;
+ struct uac_rtd_params c_prm;
+
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+
+ /* timekeeping for the playback endpoint */
+ unsigned int p_interval;
+ unsigned int p_residue;
+
+ /* pre-calculated values for playback iso completion */
+ unsigned int p_pktsize;
+ unsigned int p_pktsize_residue;
+ unsigned int p_framesize;
+};
+
+static const struct snd_pcm_hardware uac_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
+ | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
+ | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
+ .buffer_bytes_max = BUFF_SIZE_MAX,
+ .period_bytes_max = PRD_SIZE_MAX,
+ .periods_min = MIN_PERIODS,
+};
+
+/*
+ * Isochronous request completion handler shared by playback (IN) and
+ * capture (OUT) endpoints.
+ *
+ * For playback it computes this packet's size from the pre-calculated
+ * rate/interval values, copies audio out of the ALSA ring buffer into the
+ * USB request; for capture it copies received data into the ring buffer.
+ * The request is always re-queued at the end to keep the iso stream going,
+ * even while ALSA is inactive.
+ */
+static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned pending;
+	unsigned long flags, flags2;
+	unsigned int hw_ptr;
+	int status = req->status;
+	struct uac_req *ur = req->context;
+	struct snd_pcm_substream *substream;
+	struct snd_pcm_runtime *runtime;
+	struct uac_rtd_params *prm = ur->pp;
+	struct snd_uac_chip *uac = prm->uac;
+
+	/* i/f shutting down */
+	if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+		return;
+
+	/*
+	 * We can't really do much about bad xfers.
+	 * Afterall, the ISOCH xfers could fail legitimately.
+	 */
+	if (status)
+		pr_debug("%s: iso_complete status(%d) %d/%d\n",
+			__func__, status, req->actual, req->length);
+
+	substream = prm->ss;
+
+	/* Do nothing if ALSA isn't active */
+	if (!substream)
+		goto exit;
+
+	snd_pcm_stream_lock_irqsave(substream, flags2);
+
+	runtime = substream->runtime;
+	if (!runtime || !snd_pcm_running(substream)) {
+		snd_pcm_stream_unlock_irqrestore(substream, flags2);
+		goto exit;
+	}
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		/*
+		 * For each IN packet, take the quotient of the current data
+		 * rate and the endpoint's interval as the base packet size.
+		 * If there is a residue from this division, add it to the
+		 * residue accumulator.
+		 */
+		req->length = uac->p_pktsize;
+		uac->p_residue += uac->p_pktsize_residue;
+
+		/*
+		 * Whenever there are more bytes in the accumulator than we
+		 * need to add one more sample frame, increase this packet's
+		 * size and decrease the accumulator.
+		 */
+		if (uac->p_residue / uac->p_interval >= uac->p_framesize) {
+			req->length += uac->p_framesize;
+			uac->p_residue -= uac->p_framesize *
+					   uac->p_interval;
+		}
+
+		req->actual = req->length;
+	}
+
+	hw_ptr = prm->hw_ptr;
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Pack USB load in ALSA ring buffer */
+	pending = runtime->dma_bytes - hw_ptr;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		/* ring buffer -> USB request; split copy on wrap-around */
+		if (unlikely(pending < req->actual)) {
+			memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+			memcpy(req->buf + pending, runtime->dma_area,
+			       req->actual - pending);
+		} else {
+			memcpy(req->buf, runtime->dma_area + hw_ptr,
+			       req->actual);
+		}
+	} else {
+		/* USB request -> ring buffer; split copy on wrap-around */
+		if (unlikely(pending < req->actual)) {
+			memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+			memcpy(runtime->dma_area, req->buf + pending,
+			       req->actual - pending);
+		} else {
+			memcpy(runtime->dma_area + hw_ptr, req->buf,
+			       req->actual);
+		}
+	}
+
+	spin_lock_irqsave(&prm->lock, flags);
+	/* update hw_ptr after data is copied to memory */
+	prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+	hw_ptr = prm->hw_ptr;
+	spin_unlock_irqrestore(&prm->lock, flags);
+	snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+	/* Tell ALSA when a full period has been consumed/produced */
+	if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+		snd_pcm_period_elapsed(substream);
+
+exit:
+	/* always re-queue to keep the isochronous stream alive */
+	if (usb_ep_queue(ep, req, GFP_ATOMIC))
+		dev_err(uac->card->dev, "%d Error!\n", __LINE__);
+}
+
+/*
+ * ALSA trigger callback: latch (START/RESUME) or clear (STOP/SUSPEND)
+ * the active substream pointer under prm->lock and reset the ring
+ * buffer position. Returns -EINVAL on unsupported trigger commands.
+ */
+static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	unsigned long flags;
+	int err = 0;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	/* Reset */
+	prm->hw_ptr = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		prm->ss = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		prm->ss = NULL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Clear buffer after Play stops */
+	/* NOTE(review): prm->ss is re-read here outside prm->lock; looks
+	 * racy against a concurrent trigger — confirm whether ALSA core
+	 * serializes trigger callbacks for this substream.
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
+		memset(prm->rbuf, 0, prm->max_psize * params->req_number);
+
+	return err;
+}
+
+/* ALSA pointer callback: current ring-buffer position, in frames,
+ * derived from the byte-based hw_ptr of the relevant stream.
+ */
+static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	return bytes_to_frames(substream->runtime, prm->hw_ptr);
+}
+
+/* ALSA hw_params callback: allocate the (preallocated-pages) DMA buffer
+ * for the requested buffer size.
+ */
+static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
+			       struct snd_pcm_hw_params *hw_params)
+{
+	return snd_pcm_lib_malloc_pages(substream,
+					params_buffer_bytes(hw_params));
+}
+
+/* ALSA hw_free callback: release the buffer allocated in hw_params. */
+static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+/*
+ * ALSA open callback: publish the hardware capabilities for the stream
+ * being opened. Rate, channel count and sample format come from the
+ * gadget's uac_params; min == max because the gadget exposes exactly
+ * one configuration per direction.
+ */
+static int uac_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	int p_ssize, c_ssize;
+	int p_srate, c_srate;
+	int p_chmask, c_chmask;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+	p_ssize = params->p_ssize;
+	c_ssize = params->c_ssize;
+	p_srate = params->p_srate;
+	c_srate = params->c_srate;
+	p_chmask = params->p_chmask;
+	c_chmask = params->c_chmask;
+	uac->p_residue = 0;
+
+	runtime->hw = uac_pcm_hardware;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		spin_lock_init(&uac->p_prm.lock);
+		runtime->hw.rate_min = p_srate;
+		/* sample size (bytes) -> ALSA format bit */
+		switch (p_ssize) {
+		case 3:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+			break;
+		case 4:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+			break;
+		default:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+			break;
+		}
+		runtime->hw.channels_min = num_channels(p_chmask);
+		runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize
+						/ runtime->hw.periods_min;
+	} else {
+		spin_lock_init(&uac->c_prm.lock);
+		runtime->hw.rate_min = c_srate;
+		/* sample size (bytes) -> ALSA format bit */
+		switch (c_ssize) {
+		case 3:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+			break;
+		case 4:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+			break;
+		default:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+			break;
+		}
+		runtime->hw.channels_min = num_channels(c_chmask);
+		runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize
+						/ runtime->hw.periods_min;
+	}
+
+	/* exactly one supported rate / channel count */
+	runtime->hw.rate_max = runtime->hw.rate_min;
+	runtime->hw.channels_max = runtime->hw.channels_min;
+
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	return 0;
+}
+
+/* ALSA cries without these function pointers */
+/* No-op callback used for .close and .prepare; always succeeds. */
+static int uac_pcm_null(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+static const struct snd_pcm_ops uac_pcm_ops = {
+ .open = uac_pcm_open,
+ .close = uac_pcm_null,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = uac_pcm_hw_params,
+ .hw_free = uac_pcm_hw_free,
+ .trigger = uac_pcm_trigger,
+ .pointer = uac_pcm_pointer,
+ .prepare = uac_pcm_null,
+};
+
+/*
+ * Tear down an enabled endpoint: dequeue and free all preallocated
+ * requests, then disable the endpoint. Safe to call when the endpoint
+ * was never enabled (early return on !ep_enabled).
+ */
+static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+{
+	struct snd_uac_chip *uac = prm->uac;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	int i;
+
+	if (!prm->ep_enabled)
+		return;
+
+	/* mark disabled first so the iso completion handler stops requeueing */
+	prm->ep_enabled = false;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+
+	for (i = 0; i < params->req_number; i++) {
+		if (prm->ureq[i].req) {
+			usb_ep_dequeue(ep, prm->ureq[i].req);
+			usb_ep_free_request(ep, prm->ureq[i].req);
+			prm->ureq[i].req = NULL;
+		}
+	}
+
+	if (usb_ep_disable(ep))
+		dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+}
+
+
+/*
+ * Start the capture (OUT) stream: configure and enable the endpoint,
+ * lazily allocate the per-stream usb_requests, and queue them all.
+ * Each request is max_psize long and points into the shared rbuf.
+ *
+ * Returns 0 on success, a config_ep_by_speed() error, or -ENOMEM if a
+ * request allocation fails.
+ */
+int u_audio_start_capture(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+	struct usb_gadget *gadget = audio_dev->gadget;
+	struct device *dev = &gadget->dev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	struct uac_rtd_params *prm;
+	struct uac_params *params = &audio_dev->params;
+	int req_len, i, ret;
+
+	ep = audio_dev->out_ep;
+	prm = &uac->c_prm;
+	ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+	if (ret)
+		return ret;
+
+	req_len = prm->max_psize;
+
+	prm->ep_enabled = true;
+	usb_ep_enable(ep);
+
+	for (i = 0; i < params->req_number; i++) {
+		if (!prm->ureq[i].req) {
+			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+			if (req == NULL)
+				return -ENOMEM;
+
+			prm->ureq[i].req = req;
+			prm->ureq[i].pp = prm;
+
+			req->zero = 0;
+			req->context = &prm->ureq[i];
+			req->length = req_len;
+			req->complete = u_audio_iso_complete;
+			/* i-th slice of the preallocated ring buffer */
+			req->buf = prm->rbuf + i * prm->max_psize;
+		}
+
+		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_capture);
+EXPORT_SYMBOL_GPL(u_audio_start_capture);
+
+/* Stop the capture (OUT) stream: free requests and disable the endpoint. */
+void u_audio_stop_capture(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+
+	free_ep(&uac->c_prm, audio_dev->out_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_capture);
+EXPORT_SYMBOL_GPL(u_audio_stop_capture);
+
+/*
+ * Start the playback (IN) stream: configure/enable the endpoint,
+ * pre-compute the per-packet size and residue values consumed by
+ * u_audio_iso_complete(), then allocate (lazily) and queue all requests.
+ *
+ * Returns 0 on success, a config_ep_by_speed() error, or -ENOMEM if a
+ * request allocation fails.
+ */
+int u_audio_start_playback(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+	struct usb_gadget *gadget = audio_dev->gadget;
+	struct device *dev = &gadget->dev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	struct uac_rtd_params *prm;
+	struct uac_params *params = &audio_dev->params;
+	unsigned int factor, rate;
+	const struct usb_endpoint_descriptor *ep_desc;
+	int req_len, i, ret;
+
+	ep = audio_dev->in_ep;
+	prm = &uac->p_prm;
+	ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+	if (ret)
+		return ret;
+
+	ep_desc = ep->desc;
+
+	/* pre-calculate the playback endpoint's interval */
+	/* full speed: 1 ms frames; high speed: 125 us microframes */
+	if (gadget->speed == USB_SPEED_FULL)
+		factor = 1000;
+	else
+		factor = 8000;
+
+	/* pre-compute some values for iso_complete() */
+	uac->p_framesize = params->p_ssize *
+			    num_channels(params->p_chmask);
+	rate = params->p_srate * uac->p_framesize;
+	uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
+	uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+				prm->max_psize);
+
+	/* residue only matters when the packet size is not ep-limited */
+	if (uac->p_pktsize < prm->max_psize)
+		uac->p_pktsize_residue = rate % uac->p_interval;
+	else
+		uac->p_pktsize_residue = 0;
+
+	req_len = uac->p_pktsize;
+	uac->p_residue = 0;
+
+	prm->ep_enabled = true;
+	usb_ep_enable(ep);
+
+	for (i = 0; i < params->req_number; i++) {
+		if (!prm->ureq[i].req) {
+			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+			if (req == NULL)
+				return -ENOMEM;
+
+			prm->ureq[i].req = req;
+			prm->ureq[i].pp = prm;
+
+			req->zero = 0;
+			req->context = &prm->ureq[i];
+			req->length = req_len;
+			req->complete = u_audio_iso_complete;
+			/* i-th slice of the preallocated ring buffer */
+			req->buf = prm->rbuf + i * prm->max_psize;
+		}
+
+		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_playback);
+EXPORT_SYMBOL_GPL(u_audio_start_playback);
+
+/* Stop the playback (IN) stream: free requests and disable the endpoint. */
+void u_audio_stop_playback(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+
+	free_ep(&uac->p_prm, audio_dev->in_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_playback);
+EXPORT_SYMBOL_GPL(u_audio_stop_playback);
+
+/*
+ * Create the virtual ALSA sound card for this gadget: allocate the
+ * snd_uac_chip, the per-direction request tables and ring buffers
+ * (only for directions with a non-zero channel mask), then register the
+ * card with one PCM device. On any failure all partial allocations are
+ * released via the goto cleanup chain and the error code is returned.
+ * See u_audio.h for the full contract.
+ */
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+					const char *card_name)
+{
+	struct snd_uac_chip *uac;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	struct uac_params *params;
+	int p_chmask, c_chmask;
+	int err;
+
+	if (!g_audio)
+		return -EINVAL;
+
+	uac = kzalloc(sizeof(*uac), GFP_KERNEL);
+	if (!uac)
+		return -ENOMEM;
+	g_audio->uac = uac;
+	uac->audio_dev = g_audio;
+
+	params = &g_audio->params;
+	p_chmask = params->p_chmask;
+	c_chmask = params->c_chmask;
+
+	/* capture direction: only when at least one channel is configured */
+	if (c_chmask) {
+		struct uac_rtd_params *prm = &uac->c_prm;
+
+		uac->c_prm.uac = uac;
+		prm->max_psize = g_audio->out_ep_maxpsize;
+
+		prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+				GFP_KERNEL);
+		if (!prm->ureq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+				GFP_KERNEL);
+		if (!prm->rbuf) {
+			prm->max_psize = 0;
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	/* playback direction: only when at least one channel is configured */
+	if (p_chmask) {
+		struct uac_rtd_params *prm = &uac->p_prm;
+
+		uac->p_prm.uac = uac;
+		prm->max_psize = g_audio->in_ep_maxpsize;
+
+		prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+				GFP_KERNEL);
+		if (!prm->ureq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+				GFP_KERNEL);
+		if (!prm->rbuf) {
+			prm->max_psize = 0;
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	/* Choose any slot, with no id */
+	err = snd_card_new(&g_audio->gadget->dev,
+			-1, NULL, THIS_MODULE, 0, &card);
+	if (err < 0)
+		goto fail;
+
+	uac->card = card;
+
+	/*
+	 * Create first PCM device
+	 * Create a substream only for non-zero channel streams
+	 */
+	err = snd_pcm_new(uac->card, pcm_name, 0,
+			       p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
+	if (err < 0)
+		goto snd_fail;
+
+	strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
+	pcm->private_data = uac;
+	uac->pcm = pcm;
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
+
+	strlcpy(card->driver, card_name, sizeof(card->driver));
+	strlcpy(card->shortname, card_name, sizeof(card->shortname));
+	sprintf(card->longname, "%s %i", card_name, card->dev->id);
+
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+		snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+
+	err = snd_card_register(card);
+
+	if (!err)
+		return 0;
+
+snd_fail:
+	snd_card_free(card);
+fail:
+	/* kfree(NULL) is a no-op, so partial allocation states are fine */
+	kfree(uac->p_prm.ureq);
+	kfree(uac->c_prm.ureq);
+	kfree(uac->p_prm.rbuf);
+	kfree(uac->c_prm.rbuf);
+	kfree(uac);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(g_audio_setup);
+EXPORT_SYMBOL_GPL(g_audio_setup);
+
+/*
+ * Mirror of g_audio_setup(): free the ALSA card (if created), the
+ * per-direction request tables and ring buffers, and the chip struct.
+ * Tolerates a NULL g_audio or one that was never set up.
+ */
+void g_audio_cleanup(struct g_audio *g_audio)
+{
+	struct snd_uac_chip *uac;
+	struct snd_card *card;
+
+	if (!g_audio || !g_audio->uac)
+		return;
+
+	uac = g_audio->uac;
+	card = uac->card;
+	if (card)
+		snd_card_free(card);
+
+	kfree(uac->p_prm.ureq);
+	kfree(uac->c_prm.ureq);
+	kfree(uac->p_prm.rbuf);
+	kfree(uac->c_prm.rbuf);
+	kfree(uac);
+}
+EXPORT_SYMBOL_GPL(g_audio_cleanup);
+EXPORT_SYMBOL_GPL(g_audio_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget \"ALSA sound card\" utilities");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
new file mode 100644
index 000000000000..07e13784cbb8
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -0,0 +1,95 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/usb/composite.h>
+
+struct uac_params {
+ /* playback */
+ int p_chmask; /* channel mask */
+ int p_srate; /* rate in Hz */
+ int p_ssize; /* sample size */
+
+ /* capture */
+ int c_chmask; /* channel mask */
+ int c_srate; /* rate in Hz */
+ int c_ssize; /* sample size */
+
+ int req_number; /* number of preallocated requests */
+};
+
+struct g_audio {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ /* Max packet size for all in_ep possible speeds */
+ unsigned int in_ep_maxpsize;
+ /* Max packet size for all out_ep possible speeds */
+ unsigned int out_ep_maxpsize;
+
+ /* The ALSA Sound Card it represents on the USB-Client side */
+ struct snd_uac_chip *uac;
+
+ struct uac_params params;
+};
+
+/* Convert an embedded usb_function back to its enclosing g_audio. */
+static inline struct g_audio *func_to_g_audio(struct usb_function *f)
+{
+	return container_of(f, struct g_audio, func);
+}
+
+/* Count the set bits in a channel mask, i.e. the number of channels. */
+static inline uint num_channels(uint chanmask)
+{
+	uint num = 0;
+
+	while (chanmask) {
+		num += (chanmask & 1);
+		chanmask >>= 1;
+	}
+
+	return num;
+}
+
+/*
+ * g_audio_setup - initialize one virtual ALSA sound card
+ * @g_audio: struct with filled params, in_ep_maxpsize, out_ep_maxpsize
+ * @pcm_name: the id string for a PCM instance of this sound card
+ * @card_name: name of this soundcard
+ *
+ * This sets up the single virtual ALSA sound card that may be exported by a
+ * gadget driver using this framework.
+ *
+ * Context: may sleep
+ *
+ * Returns zero on success, or a negative error on failure.
+ */
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ const char *card_name);
+void g_audio_cleanup(struct g_audio *g_audio);
+
+int u_audio_start_capture(struct g_audio *g_audio);
+void u_audio_stop_capture(struct g_audio *g_audio);
+int u_audio_start_playback(struct g_audio *g_audio);
+void u_audio_stop_playback(struct g_audio *g_audio);
+
+#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/gadget/function/u_bam.c b/drivers/usb/gadget/function/u_bam.c
new file mode 100644
index 000000000000..7947bb76f512
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam.c
@@ -0,0 +1,2521 @@
+/* Copyright (c) 2011-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <soc/qcom/smd.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include <soc/qcom/bam_dmux.h>
+
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/usb_bam.h>
+
+#include "usb_gadget_xport.h"
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS 2
+#define BAM2BAM_N_PORTS 4
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static int n_bam2bam_ports;
+static unsigned n_tx_req_queued;
+
+static unsigned bam_ch_ids[BAM_N_PORTS] = {
+ BAM_DMUX_USB_RMNET_0,
+ BAM_DMUX_USB_DPL
+};
+
+static char bam_ch_names[BAM_N_PORTS][BAM_DMUX_CH_NAME_MAX_LEN];
+
+static const enum ipa_client_type usb_prod[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_PROD, IPA_CLIENT_USB2_PROD,
+ IPA_CLIENT_USB3_PROD, IPA_CLIENT_USB4_PROD
+};
+static const enum ipa_client_type usb_cons[BAM2BAM_N_PORTS] = {
+ IPA_CLIENT_USB_CONS, IPA_CLIENT_USB2_CONS,
+ IPA_CLIENT_USB3_CONS, IPA_CLIENT_USB4_CONS
+};
+
+#define BAM_PENDING_PKTS_LIMIT 220
+#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
+#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
+#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
+#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
+
+#define BAM_MUX_HDR 8
+
+#define BAM_MUX_RX_Q_SIZE 128
+#define BAM_MUX_TX_Q_SIZE 200
+#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+
+#define DL_INTR_THRESHOLD 20
+#define BAM_PENDING_BYTES_LIMIT (50 * BAM_MUX_RX_REQ_SIZE)
+#define BAM_PENDING_BYTES_FCTRL_EN_TSHOLD (BAM_PENDING_BYTES_LIMIT / 3)
+
+/* Extra buffer size to allocate for tx */
+#define EXTRA_ALLOCATION_SIZE_U_BAM 128
+
+static unsigned int bam_pending_pkts_limit = BAM_PENDING_PKTS_LIMIT;
+module_param(bam_pending_pkts_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_limit = BAM_PENDING_BYTES_LIMIT;
+module_param(bam_pending_bytes_limit, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_pending_bytes_fctrl_en_thold =
+ BAM_PENDING_BYTES_FCTRL_EN_TSHOLD;
+module_param(bam_pending_bytes_fctrl_en_thold, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
+module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
+module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
+module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
+module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
+module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned long bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
+module_param(bam_mux_rx_req_size, ulong, S_IRUGO);
+
+static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
+module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
+
+#define BAM_CH_OPENED BIT(0)
+#define BAM_CH_READY BIT(1)
+#define BAM_CH_WRITE_INPROGRESS BIT(2)
+
+enum u_bam_event_type {
+ U_BAM_DISCONNECT_E = 0,
+ U_BAM_CONNECT_E,
+ U_BAM_SUSPEND_E,
+ U_BAM_RESUME_E
+};
+
+struct sys2ipa_sw {
+ void *teth_priv;
+ ipa_notify_cb teth_cb;
+};
+
+struct bam_ch_info {
+ unsigned long flags;
+ unsigned id;
+
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_q;
+
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+ struct sk_buff_head rx_skb_idle;
+
+ struct gbam_port *port;
+ struct work_struct write_tobam_w;
+ struct work_struct write_tohost_w;
+
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params ipa_params;
+
+ /* added to support sys to ipa sw UL path */
+ struct sys2ipa_sw ul_params;
+ enum usb_bam_pipe_type src_pipe_type;
+ enum usb_bam_pipe_type dst_pipe_type;
+
+ /* stats */
+ unsigned int pending_pkts_with_bam;
+ unsigned int pending_bytes_with_bam;
+ unsigned int tohost_drp_cnt;
+ unsigned int tomodem_drp_cnt;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ unsigned long to_modem;
+ unsigned long to_host;
+ unsigned int rx_flow_control_disable;
+ unsigned int rx_flow_control_enable;
+ unsigned int rx_flow_control_triggered;
+ unsigned int max_num_pkts_pending_with_bam;
+ unsigned int max_bytes_pending_with_bam;
+ unsigned int delayed_bam_mux_write_done;
+ unsigned long skb_expand_cnt;
+};
+
+struct gbam_port {
+ bool is_connected;
+ enum u_bam_event_type last_event;
+ unsigned port_num;
+ spinlock_t port_lock_ul;
+ spinlock_t port_lock_dl;
+ spinlock_t port_lock;
+
+ struct grmnet *port_usb;
+ struct usb_gadget *gadget;
+
+ struct bam_ch_info data_ch;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+};
+
+static struct bam_portmaster {
+ struct gbam_port *port;
+ struct platform_driver pdrv;
+} bam_ports[BAM_N_PORTS];
+
+struct u_bam_data_connect_info {
+ u32 usb_bam_pipe_idx;
+ u32 peer_pipe_idx;
+ unsigned long usb_bam_handle;
+};
+
+struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
+static void gbam_start_rx(struct gbam_port *port);
+static void gbam_start_endless_rx(struct gbam_port *port);
+static void gbam_start_endless_tx(struct gbam_port *port);
+static void gbam_notify(void *p, int event, unsigned long data);
+static void gbam_data_write_tobam(struct work_struct *w);
+
+/*---------------misc functions---------------- */
+/* Free every usb_request linked on @head, unlinking each as we go. */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request *req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/*
+ * Allocate @num usb_requests on @ep, set @cb as the completion handler,
+ * and link them onto @head. If allocation fails part-way, the partial
+ * list is kept and 0 is returned (only an entirely empty list yields
+ * -ENOMEM), so callers can proceed with fewer requests.
+ */
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK", __func__,
+			ep, head, num, cb);
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+/* Read back the DMA address stashed in skb->cb by the skb-pool allocator. */
+static inline dma_addr_t gbam_get_dma_from_skb(struct sk_buff *skb)
+{
+	return *((dma_addr_t *)(skb->cb));
+}
+
+/* This function should be called with port_lock_ul lock held */
+/*
+ * Get an RX skb: reuse one from the idle pool when available, otherwise
+ * allocate a fresh one (growing the pool organically). For the BAM2BAM
+ * IPA transport the skb data buffer is DMA-mapped and the resulting
+ * address is stashed in skb->cb (DMA_ERROR_CODE on failure or for other
+ * transports). Returns NULL if @port is invalid or allocation fails.
+ */
+static struct sk_buff *gbam_alloc_skb_from_pool(struct gbam_port *port)
+{
+	struct bam_ch_info *d;
+	struct sk_buff *skb;
+	dma_addr_t skb_buf_dma_addr;
+	struct usb_gadget *gadget;
+
+	if (!port)
+		return NULL;
+
+	d = &port->data_ch;
+	if (!d)
+		return NULL;
+
+	if (d->rx_skb_idle.qlen == 0) {
+		/*
+		 * In case skb idle pool is empty, we allow to allocate more
+		 * skbs so we dynamically enlarge the pool size when needed.
+		 * Therefore, in steady state this dynamic allocation will
+		 * stop when the pool will arrive to its optimal size.
+		 */
+		pr_debug("%s: allocate skb\n", __func__);
+		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+
+		if (!skb)
+			goto alloc_exit;
+
+		/* keep headroom for the BAM mux header */
+		skb_reserve(skb, BAM_MUX_HDR);
+
+		if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+
+			gadget = port->port_usb->gadget;
+
+			skb_buf_dma_addr =
+				dma_map_single(&gadget->dev, skb->data,
+					bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+			if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+				pr_err("%s: Could not DMA map SKB buffer\n",
+					__func__);
+				skb_buf_dma_addr = DMA_ERROR_CODE;
+			}
+		} else {
+			skb_buf_dma_addr = DMA_ERROR_CODE;
+		}
+
+
+		/* stash the DMA address so it can be unmapped later */
+		memcpy(skb->cb, &skb_buf_dma_addr,
+			sizeof(skb_buf_dma_addr));
+
+	} else {
+		pr_debug("%s: pull skb from pool\n", __func__);
+		skb = __skb_dequeue(&d->rx_skb_idle);
+		/* recycled skbs may have lost headroom; restore it */
+		if (skb_headroom(skb) < BAM_MUX_HDR)
+			skb_reserve(skb, BAM_MUX_HDR);
+	}
+
+alloc_exit:
+	return skb;
+}
+
+/* This function should be called with port_lock_ul lock held */
+static void gbam_free_skb_to_pool(struct gbam_port *port, struct sk_buff *skb)
+{
+ if (!port)
+ return;
+
+ /* Reset the payload bookkeeping and park the skb on the idle pool. */
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&port->data_ch.rx_skb_idle, skb);
+}
+
+/* Drop every skb from the rx idle pool, undoing any DMA mapping first. */
+static void gbam_free_rx_skb_idle_list(struct gbam_port *port)
+{
+ struct usb_gadget *gadget = NULL;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ if (!port)
+ return;
+
+ d = &port->data_ch;
+ gadget = port->port_usb->gadget;
+
+ while ((skb = __skb_dequeue(&d->rx_skb_idle)) != NULL) {
+ mapping = gbam_get_dma_from_skb(skb);
+
+ if (gadget && mapping != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, mapping,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+ /* Mark the skb unmapped before it is freed. */
+ mapping = DMA_ERROR_CODE;
+ memcpy(skb->cb, &mapping,
+ sizeof(mapping));
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/*----- sys2bam towards the IPA --------------- */
+static void gbam_ipa_sys2bam_notify_cb(void *priv, enum ipa_dp_evt_type event,
+ unsigned long data)
+{
+ struct sys2ipa_sw *ul = priv;
+ struct bam_ch_info *d;
+ struct gbam_port *port;
+
+ switch (event) {
+ case IPA_WRITE_DONE:
+ /*
+ * Recover the owning port from the embedded ul_params and let
+ * the bam_demux path recycle the transmitted buffer.
+ */
+ d = container_of(ul, struct bam_ch_info, ul_params);
+ port = container_of(d, struct gbam_port, data_ch);
+ gbam_notify(port, BAM_DMUX_WRITE_DONE, data);
+ break;
+ case IPA_RECEIVE:
+ /*
+ * Forward to the callback the tethering driver registered at
+ * init time (and was given to ipa_connect).
+ */
+ if (ul->teth_cb)
+ ul->teth_cb(ul->teth_priv, event, data);
+ break;
+ default:
+ /* unexpected event */
+ pr_err("%s: unexpected event %d\n", __func__, event);
+ break;
+ }
+}
+
+
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
+/*
+ * Drain tx_skb_q into the host-bound (IN) endpoint using requests parked
+ * on tx_idle. Takes port_lock_dl; drops only the spinlock (IRQs stay
+ * disabled) across usb_ep_queue().
+ */
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+ unsigned long flags;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ int ret;
+ int tail_room = 0;
+ int extra_alloc = 0;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ ep = port->port_usb->in;
+
+ while (!list_empty(&d->tx_idle)) {
+ skb = __skb_dequeue(&d->tx_skb_q);
+ if (!skb)
+ break;
+
+ /*
+ * Some UDC requires allocation of some extra bytes for
+ * TX buffer due to hardware requirement. Check if extra
+ * bytes are already there, otherwise allocate new buffer
+ * with extra bytes and do memcpy.
+ */
+ if (port->gadget->extra_buf_alloc)
+ extra_alloc = EXTRA_ALLOCATION_SIZE_U_BAM;
+ tail_room = skb_tailroom(skb);
+ if (tail_room < extra_alloc) {
+ pr_debug("%s: tail_room %d less than %d\n", __func__,
+ tail_room, extra_alloc);
+ new_skb = skb_copy_expand(skb, 0, extra_alloc -
+ tail_room, GFP_ATOMIC);
+ if (!new_skb) {
+ pr_err("skb_copy_expand failed\n");
+ /*
+ * Requeue at the head so the packet is not
+ * leaked (old code dropped the reference);
+ * it will be retried on the next pass.
+ */
+ __skb_queue_head(&d->tx_skb_q, skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ d->skb_expand_cnt++;
+ }
+
+ req = list_first_entry(&d->tx_idle,
+ struct usb_request,
+ list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+ n_tx_req_queued++;
+ /* Throttle completion IRQs to one per dl_intr_threshold reqs */
+ if (n_tx_req_queued == dl_intr_threshold) {
+ req->no_interrupt = 0;
+ n_tx_req_queued = 0;
+ } else {
+ req->no_interrupt = 1;
+ }
+
+ /* Send ZLP in case packet length is multiple of maxpacksize */
+ req->zero = 1;
+
+ list_del(&req->list);
+
+ spin_unlock(&port->port_lock_dl);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock_dl);
+ if (ret) {
+ pr_err("%s: usb epIn failed with %d\n", __func__, ret);
+ list_add(&req->list, &d->tx_idle);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ d->to_host++;
+ }
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+/* Workqueue shim: push pending downlink data towards the host. */
+static void gbam_write_data_tohost_w(struct work_struct *w)
+{
+ struct bam_ch_info *ch = container_of(w, struct bam_ch_info,
+ write_tohost_w);
+
+ gbam_write_data_tohost(ch->port);
+}
+
+/*
+ * BAM DMUX receive callback: queue the downlink skb towards the host,
+ * dropping it when the cable is gone or the tx queue is over threshold.
+ */
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ pr_debug("%s: p:%pK#%d d:%pK skb_len:%d\n", __func__,
+ port, port->port_num, d, skb->len);
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
+ d->tohost_drp_cnt++;
+ /*
+ * printk_ratelimit() (no args) is the test helper;
+ * printk_ratelimited() takes a format string and would
+ * not build when called bare as the old code did.
+ */
+ if (printk_ratelimit())
+ pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+ __func__, d->tohost_drp_cnt);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ __skb_queue_tail(&d->tx_skb_q, skb);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+ gbam_write_data_tohost(port);
+}
+
+/*
+ * BAM DMUX write-done callback: return the skb to the pool, update the
+ * pending counters, and decide (inline vs workqueue) how to push more
+ * uplink data.
+ */
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned int pending_bytes;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ /*
+ * Snapshot under the lock: the old code re-read
+ * d->pending_bytes_with_bam after dropping port_lock_ul, racing
+ * with concurrent updates.
+ */
+ pending_bytes = d->pending_bytes_with_bam;
+ gbam_free_skb_to_pool(port, skb);
+
+ pr_debug("%s:port:%pK d:%pK tom:%lu ppkt:%u pbytes:%u pno:%d\n",
+ __func__,
+ port, d, d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ /*
+ * If BAM doesn't have much pending data then push new data from here:
+ * write_complete notify only to avoid any underruns due to wq latency
+ */
+ if (pending_bytes <= bam_pending_bytes_fctrl_en_thold) {
+ gbam_data_write_tobam(&d->write_tobam_w);
+ } else {
+ d->delayed_bam_mux_write_done++;
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+}
+
+/* This function should be called with port_lock_ul spinlock acquired */
+static bool gbam_ul_bam_limit_reached(struct bam_ch_info *data_ch)
+{
+ struct sk_buff *next_skb;
+
+ /* Hard cap on packets outstanding with BAM. */
+ if (data_ch->pending_pkts_with_bam >= bam_pending_pkts_limit)
+ return true;
+
+ /* Would submitting the next queued skb blow the byte budget? */
+ next_skb = skb_peek(&data_ch->rx_skb_q);
+ if (!next_skb)
+ return false;
+
+ return data_ch->pending_bytes_with_bam + next_skb->len >
+ bam_pending_bytes_limit;
+}
+
+/*
+ * Uplink worker: drain rx_skb_q towards BAM DMUX or IPA until the pending
+ * packet/byte limits (or, for IPA, loss of the producer grant) stop us.
+ * Re-entrancy is prevented with the BAM_CH_WRITE_INPROGRESS flag; the UL
+ * lock is dropped around each write call and re-taken afterwards.
+ */
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+ int qlen;
+
+ d = container_of(w, struct bam_ch_info, write_tobam_w);
+ port = d->port;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+ /* Bail out if already in progress */
+ if (test_bit(BAM_CH_WRITE_INPROGRESS, &d->flags)) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ set_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+
+ while (!gbam_ul_bam_limit_reached(d) &&
+ (d->trans != USB_GADGET_XPORT_BAM2BAM_IPA ||
+ usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx))) {
+ skb = __skb_dequeue(&d->rx_skb_q);
+ if (!skb)
+ break;
+
+ /* Account optimistically; rolled back below on failure. */
+ d->pending_pkts_with_bam++;
+ d->pending_bytes_with_bam += skb->len;
+ d->to_modem++;
+
+ pr_debug("%s: port:%pK d:%pK tom:%lu ppkts:%u pbytes:%u pno:%d\n",
+ __func__, port, d,
+ d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ /* Drop the lock for the (potentially slow) write path. */
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ dma_addr_t skb_dma_addr;
+ struct ipa_tx_meta ipa_meta = {0x0};
+
+ /* Reuse the pre-mapped DMA address when available. */
+ skb_dma_addr = gbam_get_dma_from_skb(skb);
+ if (skb_dma_addr != DMA_ERROR_CODE) {
+ ipa_meta.dma_address = skb_dma_addr;
+ ipa_meta.dma_address_valid = true;
+ }
+
+ ret = ipa_tx_dp(usb_prod[port->port_num],
+ skb,
+ &ipa_meta);
+ } else {
+ ret = msm_bam_dmux_write(d->id, skb);
+ }
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ /* Undo the optimistic accounting and recycle the skb */
+ pr_debug("%s: write error:%d\n", __func__, ret);
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ d->to_modem--;
+ d->tomodem_drp_cnt++;
+ gbam_free_skb_to_pool(port, skb);
+ break;
+ }
+ /* Track high-water marks for statistics. */
+ if (d->pending_pkts_with_bam > d->max_num_pkts_pending_with_bam)
+ d->max_num_pkts_pending_with_bam =
+ d->pending_pkts_with_bam;
+ if (d->pending_bytes_with_bam > d->max_bytes_pending_with_bam)
+ d->max_bytes_pending_with_bam =
+ d->pending_bytes_with_bam;
+ }
+
+ qlen = d->rx_skb_q.qlen;
+
+ clear_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ /* Re-enable USB RX flow once the backlog drains below threshold. */
+ if (qlen < bam_mux_rx_fctrl_dis_thld) {
+ if (d->rx_flow_control_triggered) {
+ d->rx_flow_control_disable++;
+ d->rx_flow_control_triggered = 0;
+ }
+ gbam_start_rx(port);
+ }
+}
+/*-------------------------------------------------------------*/
+
+/*
+ * IN (host-bound) transfer completion. On cable loss both the skb and
+ * the request are released; otherwise the request is returned to tx_idle
+ * and the downlink writer worker is kicked.
+ */
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct sk_buff *skb = req->context;
+ struct bam_ch_info *d;
+ int status = req->status;
+
+ if (status == -ECONNRESET || status == -ESHUTDOWN) {
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ usb_ep_free_request(ep, req);
+ return;
+ }
+
+ if (status)
+ pr_err("%s: data tx ep error %d\n",
+ __func__, status);
+
+ dev_kfree_skb_any(skb);
+
+ if (!port)
+ return;
+
+ spin_lock(&port->port_lock_dl);
+ d = &port->data_ch;
+ list_add_tail(&req->list, &d->tx_idle);
+ spin_unlock(&port->port_lock_dl);
+
+ queue_work(gbam_wq, &d->write_tohost_w);
+}
+
+/*
+ * OUT (uplink) transfer completion: hand the filled skb to the BAM/IPA
+ * writer, apply RX flow control, and resubmit the request with a fresh
+ * skb from the pool.
+ */
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ req->buf = NULL;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ /*
+ * printk_ratelimit() (no args) matches the helper used
+ * elsewhere in this file; fixed 'req->ac_tual' typo too.
+ */
+ if (printk_ratelimit())
+ pr_err("%s: %s response error %d, %d/%d\n",
+ __func__, ep->name, status,
+ req->actual, req->length);
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ break;
+ }
+
+ spin_lock(&port->port_lock_ul);
+
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ /* Defer the BAM push while the IPA producer is not granted */
+ if ((d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
+ !usb_bam_get_prod_granted(d->usb_bam_type,
+ d->dst_connection_idx)) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+
+ /* TODO: Handle flow control gracefully by having
+ * call back mechanism from bam driver
+ */
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
+ if (!d->rx_flow_control_triggered) {
+ d->rx_flow_control_triggered = 1;
+ d->rx_flow_control_enable++;
+ }
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ spin_unlock(&port->port_lock_ul);
+
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ /* Tell the UDC whether the buffer is already DMA mapped */
+ req->dma_pre_mapped = (req->dma != DMA_ERROR_CODE);
+
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ /*
+ * Return skb and request to their pools in one critical
+ * section (old code locked/unlocked twice).
+ */
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+
+ if (printk_ratelimit())
+ pr_err("%s: data rx enqueue err %d\n",
+ __func__, status);
+ }
+}
+
+/* Completion hook for the endless (infinite) RX request: trace only. */
+static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ pr_debug("%s status: %d\n", __func__, req->status);
+}
+
+/* Completion hook for the endless (infinite) TX request: trace only. */
+static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ pr_debug("%s status: %d\n", __func__, req->status);
+}
+
+/*
+ * Submit rx_idle requests on the OUT endpoint, each backed by a pooled
+ * skb, until the idle list, the skb pool, or flow control stops us.
+ * Takes port_lock_ul; the lock is dropped across usb_ep_queue() and the
+ * port state is re-checked afterwards.
+ */
+static void gbam_start_rx(struct gbam_port *port)
+{
+ struct usb_request *req;
+ struct bam_ch_info *d;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb || !port->port_usb->out) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+
+ while (port->port_usb && !list_empty(&d->rx_idle)) {
+
+ /* Stop submitting when uplink flow control is active. */
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
+ break;
+
+ req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb)
+ break;
+
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ /* Tell the UDC whether the buffer is already DMA mapped. */
+ if (req->dma != DMA_ERROR_CODE)
+ req->dma_pre_mapped = true;
+ else
+ req->dma_pre_mapped = false;
+
+ req->context = skb;
+
+ /* Drop the lock across the (sleepless) enqueue call. */
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ gbam_free_skb_to_pool(port, skb);
+
+ if (printk_ratelimit())
+ pr_err("%s: rx queue failed %d\n",
+ __func__, ret);
+
+ /* Port may have vanished while the lock was dropped. */
+ if (port->port_usb)
+ list_add(&req->list, &d->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+/* Queue the endless (infinite) RX request on the OUT endpoint. */
+static void gbam_start_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ struct usb_ep *out_ep;
+ unsigned long flags;
+ int ret;
+
+ /* Snapshot the endpoint under the UL lock. */
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+ out_ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ ret = usb_ep_queue(out_ep, d->rx_req, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, ret);
+}
+
+/* Queue the endless (infinite) TX request on the IN endpoint. */
+static void gbam_start_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ struct usb_ep *in_ep;
+ unsigned long flags;
+ int ret;
+
+ /* Snapshot the endpoint under the DL lock. */
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+ in_ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ ret = usb_ep_queue(in_ep, d->tx_req, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s: error enqueuing transfer, %d\n", __func__, ret);
+}
+
+/* Cancel the endless RX request on the OUT endpoint. */
+static void gbam_stop_endless_rx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ struct usb_ep *out_ep;
+ unsigned long flags;
+ int ret;
+
+ /* Snapshot the endpoint under the UL lock. */
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+ out_ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ pr_debug("%s: dequeue\n", __func__);
+ ret = usb_ep_dequeue(out_ep, d->rx_req);
+ if (ret)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, ret);
+}
+
+/* Cancel the endless TX request on the IN endpoint. */
+static void gbam_stop_endless_tx(struct gbam_port *port)
+{
+ struct bam_ch_info *d = &port->data_ch;
+ struct usb_ep *in_ep;
+ unsigned long flags;
+ int ret;
+
+ /* Snapshot the endpoint under the DL lock. */
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ pr_err("%s: port->port_usb is NULL", __func__);
+ return;
+ }
+ in_ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+ pr_debug("%s: dequeue\n", __func__);
+ ret = usb_ep_dequeue(in_ep, d->tx_req);
+ if (ret)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, ret);
+}
+
+
+/*
+ * This function configured data fifo based on index passed to get bam2bam
+ * configuration.
+ */
+static void configure_data_fifo(enum usb_ctrl bam_type, u8 idx,
+ struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ /* Only BAM2BAM pipes carry a hardware data FIFO to program. */
+ if (pipe_type != USB_BAM_PIPE_BAM2BAM)
+ return;
+
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+}
+
+
+/* usb_bam start callback: (re)start traffic in the given pipe direction. */
+static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct gbam_port *port = param;
+ struct usb_gadget *gadget = NULL;
+ struct bam_ch_info *d;
+ unsigned long flags;
+
+ if (!port) {
+ pr_err("%s: port is NULL\n", __func__);
+ return;
+ }
+
+ /* Snapshot gadget/channel pointers under the main port lock. */
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ pr_err("%s: port_usb is NULL, disconnected\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ gadget = port->port_usb->gadget;
+ d = &port->data_ch;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (!gadget) {
+ pr_err("%s: gadget is NULL\n", __func__);
+ return;
+ }
+
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ /* Uplink: restart RX on the OUT side. */
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gbam_start_endless_rx(port);
+ else {
+ gbam_start_rx(port);
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+ } else {
+ /* Downlink: reprogram the data FIFO after LPM if required. */
+ if (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_data_fifo(d->usb_bam_type,
+ d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ }
+ gbam_start_endless_tx(port);
+ }
+}
+
+/* usb_bam stop callback: halt traffic in the given pipe direction. */
+static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct gbam_port *port = param;
+
+ if (dir != USB_TO_PEER_PERIPHERAL) {
+ gbam_stop_endless_tx(port);
+ return;
+ }
+
+ /*
+ * Only handling BAM2BAM, as there is no equivalent to
+ * gbam_stop_endless_rx() for the SYS2BAM use case
+ */
+ if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ gbam_stop_endless_rx(port);
+}
+
+/*
+ * Allocate the request pool for one direction ('in' selects IN/tx,
+ * otherwise OUT/rx). Returns 0 on success, -EBUSY when the cable is
+ * already gone, or the gbam_alloc_requests() error.
+ */
+static int _gbam_start_io(struct gbam_port *port, bool in)
+{
+ void (*done_cb)(struct usb_ep *, struct usb_request *);
+ struct list_head *idle_list;
+ spinlock_t *lock;
+ struct usb_ep *ep;
+ unsigned n_reqs;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Pick the lock protecting the direction being set up. */
+ lock = in ? &port->port_lock_dl : &port->port_lock_ul;
+
+ spin_lock_irqsave(lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(lock, flags);
+ return -EBUSY;
+ }
+
+ if (in) {
+ ep = port->port_usb->in;
+ idle_list = &port->data_ch.tx_idle;
+ n_reqs = bam_mux_tx_q_size;
+ done_cb = gbam_epin_complete;
+ } else {
+ ep = port->port_usb->out;
+ if (!ep)
+ goto done;
+ idle_list = &port->data_ch.rx_idle;
+ n_reqs = bam_mux_rx_q_size;
+ done_cb = gbam_epout_complete;
+ }
+
+ ret = gbam_alloc_requests(ep, idle_list, n_reqs, done_cb,
+ GFP_ATOMIC);
+done:
+ spin_unlock_irqrestore(lock, flags);
+ if (ret)
+ pr_err("%s: allocation failed\n", __func__);
+
+ return ret;
+}
+
+/* Allocate request pools for both directions, then kick off RX. */
+static void gbam_start_io(struct gbam_port *port)
+{
+ unsigned long flags;
+
+ pr_debug("%s: port:%pK\n", __func__, port);
+
+ /* IN side first; nothing to unwind if it fails. */
+ if (_gbam_start_io(port, true))
+ return;
+
+ /* OUT-side failure must undo the IN-side allocation. */
+ if (_gbam_start_io(port, false)) {
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (port->port_usb)
+ gbam_free_requests(port->port_usb->in,
+ &port->data_ch.tx_idle);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ /* queue out requests */
+ gbam_start_rx(port);
+}
+
+/*
+ * BAM DMUX event callback. 'p' (the port) may legitimately be NULL when
+ * the event arrives after the channel was closed, so every port access
+ * must be guarded.
+ */
+static void gbam_notify(void *p, int event, unsigned long data)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+
+ if (port == NULL)
+ pr_err("BAM DMUX notifying after channel close\n");
+
+ switch (event) {
+ case BAM_DMUX_RECEIVE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_recv_cb(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+ case BAM_DMUX_WRITE_DONE:
+ skb = (struct sk_buff *)data;
+ if (port)
+ gbam_data_write_done(p, skb);
+ else
+ dev_kfree_skb_any(skb);
+ break;
+ case BAM_DMUX_TRANSMIT_SIZE:
+ /*
+ * The old code dereferenced 'port' unconditionally here even
+ * though NULL is tolerated above — guard it. The global
+ * request size can still be updated either way.
+ */
+ if (port) {
+ d = &port->data_ch;
+ if (test_bit(BAM_CH_OPENED, &d->flags))
+ pr_warn("%s, BAM channel opened already",
+ __func__);
+ }
+ bam_mux_rx_req_size = data;
+ pr_debug("%s rx_req_size: %lu", __func__, bam_mux_rx_req_size);
+ break;
+ }
+}
+
+/* Release all uplink (OUT) resources: requests, queued skbs, idle pool. */
+static void gbam_free_rx_buffers(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ /* Nothing to release when the cable is gone or OUT ep is missing. */
+ if (!port->port_usb || !port->port_usb->out)
+ goto free_rx_buf_out;
+
+ d = &port->data_ch;
+ gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+ while ((skb = __skb_dequeue(&d->rx_skb_q)) != NULL)
+ dev_kfree_skb_any(skb);
+
+ gbam_free_rx_skb_idle_list(port);
+
+free_rx_buf_out:
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+/* Release all downlink (IN) resources: requests and queued skbs. */
+static void gbam_free_tx_buffers(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+
+ if (!port->port_usb)
+ goto free_tx_buf_out;
+
+ d = &port->data_ch;
+ gbam_free_requests(port->port_usb->in, &d->tx_idle);
+
+ while ((skb = __skb_dequeue(&d->tx_skb_q)) != NULL)
+ dev_kfree_skb_any(skb);
+
+free_tx_buf_out:
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+/* Tear down both directions' request pools and queued skbs. */
+static void gbam_free_buffers(struct gbam_port *port)
+{
+ gbam_free_rx_buffers(port);
+ gbam_free_tx_buffers(port);
+}
+
+/* Workqueue handler: close the BAM DMUX channel if it was opened. */
+static void gbam_disconnect_work(struct work_struct *w)
+{
+ struct gbam_port *port =
+ container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ if (!test_bit(BAM_CH_OPENED, &d->flags)) {
+ pr_err("%s: Bam channel is not opened\n", __func__);
+ return;
+ }
+
+ msm_bam_dmux_close(d->id);
+ clear_bit(BAM_CH_OPENED, &d->flags);
+}
+
+/*
+ * Workqueue handler: tear down the BAM2BAM/IPA pipes for this port.
+ * Marks the port disconnected under port_lock, then performs the
+ * (sleepable) IPA/teth-bridge teardown with the lock released.
+ */
+static void gbam2bam_disconnect_work(struct work_struct *w)
+{
+ struct gbam_port *port =
+ container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->is_connected) {
+ pr_debug("%s: Port already disconnected. Bailing out.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->is_connected = false;
+ d = &port->data_ch;
+
+ /*
+ * Unlock the port here and not at the end of this work,
+ * because we do not want to activate usb_bam, ipa and
+ * tether bridge logic in atomic context and wait unneeded time.
+ * Either way other works will not fire until end of this work
+ * and event functions (as bam_data_connect) will not interfere
+ * while lower layers connect pipes, etc.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret)
+ pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
+ __func__, ret);
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+ teth_bridge_disconnect(d->ipa_params.src_client);
+ /*
+ * Decrement usage count which was incremented upon cable
+ * connect or cable disconnect in suspended state
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+ }
+}
+
+/* Workqueue handler: open the BAM DMUX channel and start I/O. */
+static void gbam_connect_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+ int ret;
+
+ /* Verify the cable is still attached before opening the channel. */
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ spin_lock(&port->port_lock_dl);
+ if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (!test_bit(BAM_CH_READY, &d->flags)) {
+ pr_err("%s: Bam channel is not ready\n", __func__);
+ return;
+ }
+
+ ret = msm_bam_dmux_open(d->id, port, gbam_notify);
+ if (ret) {
+ pr_err("%s: unable open bam ch:%d err:%d\n",
+ __func__, d->id, ret);
+ return;
+ }
+
+ set_bit(BAM_CH_OPENED, &d->flags);
+
+ /* Allocate request pools and kick off RX. */
+ gbam_start_io(port);
+
+ pr_debug("%s: done\n", __func__);
+}
+
+/*
+ * Workqueue handler: bring up the BAM2BAM/IPA data path for this port.
+ * Sequence: validate port state under locks, allocate BAM FIFOs,
+ * program the endpoints (dwc3 vs legacy), init/connect the tethering
+ * bridge and both IPA pipe directions, then start the data transfers.
+ * The heavy lifting is done with the port locks released; state is
+ * re-checked where they are re-taken.
+ */
+static void gbam2bam_connect_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+ struct usb_gadget *gadget = NULL;
+ struct teth_bridge_connect_params connect_params;
+ struct teth_bridge_init_params teth_bridge_params;
+ struct bam_ch_info *d;
+ u32 sps_params;
+ int ret;
+ unsigned long flags, flags_ul;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (port->last_event == U_BAM_DISCONNECT_E) {
+ pr_debug("%s: Port is about to disconnected. Bailing out.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ port->is_connected = true;
+
+ /* All three locks held while the usb-side pointers are sampled. */
+ spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+ spin_lock(&port->port_lock_dl);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ gadget = port->port_usb->gadget;
+ if (!gadget) {
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb.gadget is NULL, exiting\n", __func__);
+ return;
+ }
+ d = &port->data_ch;
+
+ /*
+ * Unlock the port here and not at the end of this work,
+ * because we do not want to activate usb_bam, ipa and
+ * tether bridge logic in atomic context and wait unneeded time.
+ * Either way other works will not fire until end of this work
+ * and event functions (as bam_data_connect) will not interfere
+ * while lower layers connect pipes, etc.
+ */
+ spin_unlock(&port->port_lock_dl);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ d->ipa_params.usb_connection_speed = gadget->speed;
+
+ /*
+ * Invalidate prod and cons client handles from previous
+ * disconnect.
+ */
+ d->ipa_params.cons_clnt_hdl = -1;
+ d->ipa_params.prod_clnt_hdl = -1;
+
+ if (usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.src_idx,
+ &d->src_pipe_type) ||
+ usb_bam_get_pipe_type(d->usb_bam_type, d->ipa_params.dst_idx,
+ &d->dst_pipe_type)) {
+ pr_err("%s:usb_bam_get_pipe_type() failed\n", __func__);
+ return;
+ }
+ /* Downlink must be BAM2BAM; there is no SYS2BAM DL path here. */
+ if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
+ pr_err("%s: no software preparation for DL not using bam2bam\n",
+ __func__);
+ return;
+ }
+
+ usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);
+ gadget->bam2bam_func_enabled = true;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ /*
+ * NOTE(review): 'port' was already dereferenced above, so the
+ * '!port' half of this test is dead code; only '!port->port_usb'
+ * can trigger.
+ */
+ if (!port || !port->port_usb) {
+ pr_debug("%s: cable is disconnected.\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock,
+ flags);
+ goto free_fifos;
+ }
+ if (gadget_is_dwc3(gadget)) {
+ /* Configure for RX */
+ configure_data_fifo(d->usb_bam_type, d->src_connection_idx,
+ port->port_usb->out, d->src_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | MSM_PRODUCER |
+ d->src_pipe_idx;
+ d->rx_req->length = 32*1024;
+ d->rx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->out, d->rx_req);
+
+ /* Configure for TX */
+ configure_data_fifo(d->usb_bam_type, d->dst_connection_idx,
+ port->port_usb->in, d->dst_pipe_type);
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | d->dst_pipe_idx;
+ d->tx_req->length = 32*1024;
+ d->tx_req->udc_priv = sps_params;
+ msm_ep_config(port->port_usb->in, d->tx_req);
+
+ } else {
+ /* Configure for RX */
+ sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ d->rx_req->udc_priv = sps_params;
+
+ /* Configure for TX */
+ sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ d->tx_req->length = 32*1024;
+ d->tx_req->udc_priv = sps_params;
+
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ teth_bridge_params.client = d->ipa_params.src_client;
+ ret = teth_bridge_init(&teth_bridge_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_init() failed\n", __func__);
+ goto ep_unconfig;
+ }
+
+ /* Support for UL using system-to-IPA */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ul_params.teth_priv =
+ teth_bridge_params.private_data;
+ d->ul_params.teth_cb =
+ teth_bridge_params.usb_notify_cb;
+ d->ipa_params.notify = gbam_ipa_sys2bam_notify_cb;
+ d->ipa_params.priv = &d->ul_params;
+ d->ipa_params.reset_pipe_after_lpm = false;
+
+ } else {
+ d->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ d->ipa_params.priv =
+ teth_bridge_params.private_data;
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ }
+ d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ d->ipa_params.skip_ep_cfg = teth_bridge_params.skip_ep_cfg;
+ /* Connect the uplink (USB -> peer) IPA pipe first. */
+ d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto ep_unconfig;
+ }
+
+ /* Remove support for UL using system-to-IPA towards DL */
+ if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+ d->ipa_params.notify = d->ul_params.teth_cb;
+ d->ipa_params.priv = d->ul_params.teth_priv;
+ }
+ if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ d->ipa_params.reset_pipe_after_lpm =
+ (gadget_is_dwc3(gadget) &&
+ msm_dwc3_reset_ep_after_lpm(gadget));
+ else
+ d->ipa_params.reset_pipe_after_lpm = false;
+ /* Then the downlink (peer -> USB) pipe. */
+ d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+ ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ goto ep_unconfig;
+ }
+
+ /* Publish the IPA endpoint indices to the QTI control channel. */
+ gqti_ctrl_update_ipa_pipes(port->port_usb, port->port_num,
+ d->ipa_params.ipa_prod_ep_idx,
+ d->ipa_params.ipa_cons_ep_idx);
+
+ connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
+ connect_params.client_type = d->ipa_params.src_client;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n", __func__);
+ goto ep_unconfig;
+ }
+
+ /* queue in & out requests */
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ gbam_start_endless_rx(port);
+ } else {
+ /* The use-case of UL (OUT) ports using sys2bam is based on
+ * partial reuse of the system-to-bam_demux code. The following
+ * lines perform the branching out of the standard bam2bam flow
+ * on the USB side of the UL channel
+ */
+ if (_gbam_start_io(port, false)) {
+ pr_err("%s: _gbam_start_io failed\n", __func__);
+ return;
+ }
+ gbam_start_rx(port);
+ }
+ gbam_start_endless_tx(port);
+
+ pr_debug("%s: done\n", __func__);
+ return;
+
+ep_unconfig:
+ /*
+ * NOTE(review): port->port_usb is read here without re-taking the
+ * port lock — presumably safe because works are serialized, but
+ * worth confirming against the disconnect path.
+ */
+ if (gadget_is_dwc3(gadget)) {
+ msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->out);
+ }
+free_fifos:
+ usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
+ usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
+
+}
+
+/* BAM wake callback: peer has data — issue a USB remote wakeup. */
+static int gbam_wake_cb(void *param)
+{
+ struct gbam_port *port = param;
+ struct usb_gadget *gadget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ pr_debug("%s: usb cable is disconnected, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return -ENODEV;
+ }
+ gadget = port->port_usb->gadget;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ return usb_gadget_wakeup(gadget);
+}
+
+/*
+ * Workqueue handler for bus suspend on a BAM2BAM port: arm the peer-side
+ * remote-wakeup callback, hand the pipes over to the BAM/IPA side, then
+ * drop the autopm usage count so the gadget parent may enter LPM.
+ */
+static void gbam2bam_suspend_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
+	struct bam_ch_info *d;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("%s: suspend work started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* a later disconnect/resume event supersedes this suspend */
+	if ((port->last_event == U_BAM_DISCONNECT_E) ||
+	    (port->last_event == U_BAM_RESUME_E)) {
+		pr_debug("%s: Port is about to disconnect/resume. Bail out\n",
+			__func__);
+		goto exit;
+	}
+
+	d = &port->data_ch;
+
+	/* let the peer wake us while suspended */
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					gbam_wake_cb, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		usb_bam_register_start_stop_cbs(d->usb_bam_type,
+			d->dst_connection_idx, gbam_start, gbam_stop, port);
+
+		/*
+		 * release lock here because gbam_start() or
+		 * gbam_stop() called from usb_bam_suspend()
+		 * re-acquires port lock.
+		 */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
+		spin_lock_irqsave(&port->port_lock, flags);
+	}
+
+exit:
+	/*
+	 * Decrement usage count after IPA handshake is done to allow gadget
+	 * parent to go to lpm. This counter was incremented upon cable connect
+	 */
+	/*
+	 * NOTE(review): called with port_lock held; the _async variant is
+	 * presumably safe in atomic context — confirm against the autopm API.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Workqueue handler for bus resume on a BAM2BAM port: disarm the wake
+ * callback and resume the IPA pipes. On DWC3 controllers that reset
+ * endpoints across LPM, the data FIFOs are reconfigured first.
+ */
+static void gbam2bam_resume_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
+	struct bam_ch_info *d;
+	struct usb_gadget *gadget = NULL;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("%s: resume work started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->last_event == U_BAM_DISCONNECT_E || !port->port_usb) {
+		pr_debug("%s: usb cable is disconnected, exiting\n",
+			__func__);
+		goto exit;
+	}
+
+	d = &port->data_ch;
+	gadget = port->port_usb->gadget;
+
+	/* passing NULL callbacks de-registers the wake callback */
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					NULL, NULL);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		if (gadget_is_dwc3(gadget) &&
+		    msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_data_fifo(d->usb_bam_type,
+				d->src_connection_idx,
+				port->port_usb->out, d->src_pipe_type);
+			configure_data_fifo(d->usb_bam_type,
+				d->dst_connection_idx,
+				port->port_usb->in, d->dst_pipe_type);
+			/* DBM reset may sleep/relock; drop port_lock around it */
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			msm_dwc3_reset_dbm_ep(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+		}
+		usb_bam_resume(d->usb_bam_type, &d->ipa_params);
+	}
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* BAM data channel ready, allow attempt to open */
+/*
+ * Platform-driver probe for a bam_dmux channel device: marks the matching
+ * port's channel READY and, if a USB cable is already connected, schedules
+ * the connect work to open the channel.
+ */
+static int gbam_data_ch_probe(struct platform_device *pdev)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int i;
+	unsigned long flags;
+	bool do_work = false;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		d = &port->data_ch;
+
+		/* match the pdev against the per-port channel name */
+		if (!strncmp(bam_ch_names[i], pdev->name,
+					BAM_DMUX_CH_NAME_MAX_LEN)) {
+			set_bit(BAM_CH_READY, &d->flags);
+
+			/* if usb is online, try opening bam_ch */
+			spin_lock_irqsave(&port->port_lock_ul, flags);
+			spin_lock(&port->port_lock_dl);
+			if (port->port_usb)
+				do_work = true;
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+			if (do_work)
+				queue_work(gbam_wq, &port->connect_w);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/* BAM data channel went inactive, so close it */
+/*
+ * Platform-driver remove for a bam_dmux channel device: flushes the USB
+ * endpoints, frees queued buffers, closes the dmux channel and clears the
+ * READY/OPENED state for the matching port.
+ */
+static int gbam_data_ch_remove(struct platform_device *pdev)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	struct usb_ep *ep_in = NULL;
+	struct usb_ep *ep_out = NULL;
+	unsigned long flags;
+	int i;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_bam_ports; i++) {
+		if (!strncmp(bam_ch_names[i], pdev->name,
+					BAM_DMUX_CH_NAME_MAX_LEN)) {
+			port = bam_ports[i].port;
+			d = &port->data_ch;
+
+			/* snapshot endpoint pointers under both data locks */
+			spin_lock_irqsave(&port->port_lock_ul, flags);
+			spin_lock(&port->port_lock_dl);
+			if (port->port_usb) {
+				ep_in = port->port_usb->in;
+				ep_out = port->port_usb->out;
+			}
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+			if (ep_in)
+				usb_ep_fifo_flush(ep_in);
+			if (ep_out)
+				usb_ep_fifo_flush(ep_out);
+
+			gbam_free_buffers(port);
+
+			msm_bam_dmux_close(d->id);
+
+			/* bam dmux will free all pending skbs */
+			d->pending_pkts_with_bam = 0;
+			d->pending_bytes_with_bam = 0;
+
+			clear_bit(BAM_CH_READY, &d->flags);
+			clear_bit(BAM_CH_OPENED, &d->flags);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Free a BAM-DMUX port and unregister its platform driver.
+ *
+ * NOTE(review): the port is kfree()d before platform_driver_unregister();
+ * unregister can invoke gbam_data_ch_remove(), which dereferences
+ * bam_ports[portno].port — potential use-after-free. Confirm ordering.
+ */
+static void gbam_port_free(int portno)
+{
+	struct gbam_port *port = bam_ports[portno].port;
+	struct platform_driver *pdrv = &bam_ports[portno].pdrv;
+
+	kfree(port);
+	platform_driver_unregister(pdrv);
+}
+
+/* Free a BAM2BAM port; no platform driver is associated with these. */
+static void gbam2bam_port_free(int portno)
+{
+	struct gbam_port *port = bam2bam_ports[portno];
+
+	kfree(port);
+}
+
+/*
+ * Allocate and initialize a BAM-DMUX port: locks, work items, skb queues,
+ * and a per-channel platform driver named "bam_dmux_ch_<id>" whose
+ * probe/remove track channel availability.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int gbam_port_alloc(int portno)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	struct platform_driver *pdrv;
+
+	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	/* port initialization */
+	port->is_connected = false;
+	spin_lock_init(&port->port_lock_ul);
+	spin_lock_init(&port->port_lock_dl);
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, gbam_connect_work);
+	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
+
+	/* data ch */
+	d = &port->data_ch;
+	d->port = port;
+	INIT_LIST_HEAD(&d->tx_idle);
+	INIT_LIST_HEAD(&d->rx_idle);
+	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+	INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
+	skb_queue_head_init(&d->tx_skb_q);
+	skb_queue_head_init(&d->rx_skb_q);
+	skb_queue_head_init(&d->rx_skb_idle);
+	d->id = bam_ch_ids[portno];
+
+	/* publish the port before registering the driver: probe may run now */
+	bam_ports[portno].port = port;
+
+	scnprintf(bam_ch_names[portno], BAM_DMUX_CH_NAME_MAX_LEN,
+			"bam_dmux_ch_%d", bam_ch_ids[portno]);
+	pdrv = &bam_ports[portno].pdrv;
+	pdrv->probe = gbam_data_ch_probe;
+	pdrv->remove = gbam_data_ch_remove;
+	pdrv->driver.name = bam_ch_names[portno];
+	pdrv->driver.owner = THIS_MODULE;
+
+	platform_driver_register(pdrv);
+	pr_debug("%s: port:%pK portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+/*
+ * Allocate and initialize a BAM2BAM (IPA) port: locks, the four work
+ * items (connect/disconnect/suspend/resume), IPA client ids, and the
+ * queues used by the sys2bam UL workaround.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int gbam2bam_port_alloc(int portno)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+
+	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	/* port initialization */
+	port->is_connected = false;
+	spin_lock_init(&port->port_lock_ul);
+	spin_lock_init(&port->port_lock_dl);
+	spin_lock_init(&port->port_lock);
+
+	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
+	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+	INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+	INIT_WORK(&port->resume_w, gbam2bam_resume_work);
+
+	/* data ch */
+	d = &port->data_ch;
+	d->port = port;
+	d->ipa_params.src_client = usb_prod[portno];
+	d->ipa_params.dst_client = usb_cons[portno];
+	bam2bam_ports[portno] = port;
+
+	/* UL workaround requirements */
+	skb_queue_head_init(&d->rx_skb_q);
+	skb_queue_head_init(&d->rx_skb_idle);
+	INIT_LIST_HEAD(&d->rx_idle);
+	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+
+	pr_debug("%s: port:%pK portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+/*
+ * debugfs read handler for /sys/kernel/debug/usb_rmnet/status: dumps the
+ * per-port BAM-DMUX counters. Output is bounded by DEBUG_BUF_SIZE via
+ * scnprintf, so later ports may be truncated if many are active.
+ */
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	char *buf;
+	unsigned long flags;
+	int ret;
+	int i;
+	int temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock_ul, flags);
+		spin_lock(&port->port_lock_dl);
+
+		d = &port->data_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%pK data_ch:%pK#\n"
+				"dpkts_to_usbhost: %lu\n"
+				"dpkts_to_modem:  %lu\n"
+				"dpkts_pwith_bam: %u\n"
+				"dbytes_pwith_bam: %u\n"
+				"to_usbhost_dcnt:  %u\n"
+				"tomodem__dcnt:  %u\n"
+				"rx_flow_control_disable_count: %u\n"
+				"rx_flow_control_enable_count: %u\n"
+				"rx_flow_control_triggered: %u\n"
+				"max_num_pkts_pending_with_bam: %u\n"
+				"max_bytes_pending_with_bam: %u\n"
+				"delayed_bam_mux_write_done: %u\n"
+				"tx_buf_len:	 %u\n"
+				"rx_buf_len:	 %u\n"
+				"data_ch_open:   %d\n"
+				"data_ch_ready:  %d\n"
+				"skb_expand_cnt: %lu\n",
+				i, port, &port->data_ch,
+				d->to_host, d->to_modem,
+				d->pending_pkts_with_bam,
+				d->pending_bytes_with_bam,
+				d->tohost_drp_cnt, d->tomodem_drp_cnt,
+				d->rx_flow_control_disable,
+				d->rx_flow_control_enable,
+				d->rx_flow_control_triggered,
+				d->max_num_pkts_pending_with_bam,
+				d->max_bytes_pending_with_bam,
+				d->delayed_bam_mux_write_done,
+				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
+				test_bit(BAM_CH_OPENED, &d->flags),
+				test_bit(BAM_CH_READY, &d->flags),
+				d->skb_expand_cnt);
+
+		spin_unlock(&port->port_lock_dl);
+		spin_unlock_irqrestore(&port->port_lock_ul, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/* debugfs write handler: any write resets all per-port counters to zero. */
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int i;
+	unsigned long flags;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->port_lock_ul, flags);
+		spin_lock(&port->port_lock_dl);
+
+		d = &port->data_ch;
+
+		d->to_host = 0;
+		d->to_modem = 0;
+		d->pending_pkts_with_bam = 0;
+		d->pending_bytes_with_bam = 0;
+		d->tohost_drp_cnt = 0;
+		d->tomodem_drp_cnt = 0;
+		d->rx_flow_control_disable = 0;
+		d->rx_flow_control_enable = 0;
+		d->rx_flow_control_triggered = 0;
+		d->max_num_pkts_pending_with_bam = 0;
+		d->max_bytes_pending_with_bam = 0;
+		d->delayed_bam_mux_write_done = 0;
+		d->skb_expand_cnt = 0;
+
+		spin_unlock(&port->port_lock_dl);
+		spin_unlock_irqrestore(&port->port_lock_ul, flags);
+	}
+	return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+	.read = gbam_read_stats,
+	.write = gbam_reset_stats,
+};
+
+/* root dentry of the usb_rmnet debugfs dir; NULL until first init */
+struct dentry *gbam_dent;
+static void gbam_debugfs_init(void)
+{
+	struct dentry *dfile;
+
+	/* idempotent: both setup paths may call this */
+	if (gbam_dent)
+		return;
+
+	gbam_dent = debugfs_create_dir("usb_rmnet", 0);
+	if (!gbam_dent || IS_ERR(gbam_dent))
+		return;
+
+	dfile = debugfs_create_file("status", 0444, gbam_dent, 0,
+			&gbam_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		debugfs_remove(gbam_dent);
+		gbam_dent = NULL;
+		return;
+	}
+}
+static void gbam_debugfs_remove(void)
+{
+	debugfs_remove_recursive(gbam_dent);
+}
+#else
+static inline void gbam_debugfs_init(void) {}
+static inline void gbam_debugfs_remove(void) {}
+#endif
+
+/*
+ * Tear down a rmnet data connection on cable disconnect: free queued
+ * buffers, disable the endpoints, release the endless tx/rx requests
+ * (IPA transport only) and schedule the disconnect work to finish the
+ * peer-side teardown.
+ *
+ * NOTE(review): gr->in is dereferenced without a NULL check (unlike
+ * gr->out, which is optional for DPL) — presumably the IN ep is always
+ * present; confirm against callers.
+ */
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+	struct gbam_port *port;
+	unsigned long flags, flags_ul, flags_dl;
+	struct bam_ch_info *d;
+
+	pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, port_num);
+
+	if (trans == USB_GADGET_XPORT_BAM2BAM) {
+		pr_err("%s: invalid xport#%d\n", __func__, trans);
+		return;
+	}
+	if (trans == USB_GADGET_XPORT_BAM_DMUX &&
+		port_num >= n_bam_ports) {
+		pr_err("%s: invalid bam portno#%d\n",
+			   __func__, port_num);
+		return;
+	}
+
+	if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
+		port_num >= n_bam2bam_ports) {
+		pr_err("%s: invalid bam2bam portno#%d\n",
+			   __func__, port_num);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+	if (trans == USB_GADGET_XPORT_BAM_DMUX)
+		port = bam_ports[port_num].port;
+	else
+		port = bam2bam_ports[port_num];
+
+	if (!port) {
+		pr_err("%s: NULL port", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+	/* Already disconnected due to suspend with remote wake disabled */
+	if (port->last_event == U_BAM_DISCONNECT_E) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	/*
+	 * Suspend with remote wakeup enabled. Increment usage
+	 * count when disconnect happens in suspended state.
+	 * Corresponding decrement happens in the end of this
+	 * function if IPA handshake is already done or it is done
+	 * in disconnect work after finishing IPA handshake.
+	 */
+	if (port->last_event == U_BAM_SUSPEND_E)
+		usb_gadget_autopm_get_noresume(port->gadget);
+
+	/*
+	 * Re-point port_usb at the caller's grmnet so the free-buffer
+	 * helpers below see valid endpoint pointers — presumably covering
+	 * the suspended-disconnect case where it was cleared; verify.
+	 */
+	port->port_usb = gr;
+
+	if (trans == USB_GADGET_XPORT_BAM_DMUX)
+		gbam_free_buffers(port);
+	else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		gbam_free_rx_buffers(port);
+
+	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+	spin_lock(&port->port_lock_dl);
+	port->port_usb = 0;
+	n_tx_req_queued = 0;
+	spin_unlock(&port->port_lock_dl);
+	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+	/* disable endpoints */
+	if (gr->out) {
+		usb_ep_disable(gr->out);
+		if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+			spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+			if (d->rx_req) {
+				usb_ep_free_request(gr->out, d->rx_req);
+				d->rx_req = NULL;
+			}
+			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+		}
+	}
+	usb_ep_disable(gr->in);
+	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		spin_lock_irqsave(&port->port_lock_dl, flags_dl);
+		if (d->tx_req) {
+			usb_ep_free_request(gr->in, d->tx_req);
+			d->tx_req = NULL;
+		}
+		spin_unlock_irqrestore(&port->port_lock_dl, flags_dl);
+	}
+
+	/*
+	 * Clear the endless flag, since the USB endpoints are already
+	 * disabled.
+	 */
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+
+		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			gr->in->endless = false;
+
+		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM && gr->out)
+			gr->out->endless = false;
+	}
+
+	gr->in->driver_data = NULL;
+	if (gr->out)
+		gr->out->driver_data = NULL;
+
+	port->last_event = U_BAM_DISCONNECT_E;
+	/* Disable usb irq for CI gadget. It will be enabled in
+	 * usb_bam_disconnect_pipe() after disconnecting all pipes
+	 * and USB BAM reset is done.
+	 */
+	if (!gadget_is_dwc3(port->gadget) &&
+		(trans == USB_GADGET_XPORT_BAM2BAM_IPA))
+		msm_usb_irq_disable(true);
+
+	queue_work(gbam_wq, &port->disconnect_w);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Set up a rmnet data connection on cable connect: validate the transport
+ * and port number, allocate the endless tx/rx requests (IPA transport),
+ * enable the endpoints and schedule the connect work that performs the
+ * actual BAM/IPA pipe bring-up.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): for the IPA transport, d->rx_req is allocated from
+ * port_usb->out with no NULL check, yet gr->out is treated as optional
+ * (DPL) further down — confirm IPA callers always provide an OUT ep.
+ * NOTE(review): the usb_ep_enable error paths free d->rx_req/d->tx_req
+ * unconditionally, even for BAM_DMUX transport where they were never
+ * allocated here — relies on them being NULL; verify.
+ */
+int gbam_connect(struct grmnet *gr, u8 port_num,
+		enum transport_type trans, u8 src_connection_idx,
+		u8 dst_connection_idx)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int ret;
+	unsigned long flags, flags_ul;
+
+	pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, port_num);
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!gr->gadget) {
+		pr_err("%s: gadget handle not passed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (trans == USB_GADGET_XPORT_BAM2BAM) {
+		pr_err("%s: invalid xport#%d\n", __func__, trans);
+		return -EINVAL;
+	}
+
+	if (trans == USB_GADGET_XPORT_BAM_DMUX && port_num >= n_bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if ((trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		&& port_num >= n_bam2bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if (trans == USB_GADGET_XPORT_BAM_DMUX)
+		port = bam_ports[port_num].port;
+	else
+		port = bam2bam_ports[port_num];
+
+	if (!port) {
+		pr_err("%s: NULL port", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+	d->trans = trans;
+
+	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+	spin_lock(&port->port_lock_dl);
+	port->port_usb = gr;
+	port->gadget = port->port_usb->gadget;
+
+	/* pre-allocate the "endless" requests used by the BAM2BAM pipes */
+	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->rx_req = usb_ep_alloc_request(port->port_usb->out,
+							GFP_ATOMIC);
+		if (!d->rx_req) {
+			pr_err("%s: RX request allocation failed\n", __func__);
+			d->rx_req = NULL;
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return -ENOMEM;
+		}
+
+		d->rx_req->context = port;
+		d->rx_req->complete = gbam_endless_rx_complete;
+		d->rx_req->length = 0;
+		d->rx_req->no_interrupt = 1;
+
+		d->tx_req = usb_ep_alloc_request(port->port_usb->in,
+							GFP_ATOMIC);
+		if (!d->tx_req) {
+			pr_err("%s: TX request allocation failed\n", __func__);
+			d->tx_req = NULL;
+			usb_ep_free_request(port->port_usb->out, d->rx_req);
+			d->rx_req = NULL;
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return -ENOMEM;
+		}
+
+		d->tx_req->context = port;
+		d->tx_req->complete = gbam_endless_tx_complete;
+		d->tx_req->length = 0;
+		d->tx_req->no_interrupt = 1;
+	}
+
+	/* fresh connection: zero the BAM-DMUX statistics */
+	if (d->trans == USB_GADGET_XPORT_BAM_DMUX) {
+		d->to_host = 0;
+		d->to_modem = 0;
+		d->pending_pkts_with_bam = 0;
+		d->pending_bytes_with_bam = 0;
+		d->tohost_drp_cnt = 0;
+		d->tomodem_drp_cnt = 0;
+		d->rx_flow_control_disable = 0;
+		d->rx_flow_control_enable = 0;
+		d->rx_flow_control_triggered = 0;
+		d->max_num_pkts_pending_with_bam = 0;
+		d->max_bytes_pending_with_bam = 0;
+		d->delayed_bam_mux_write_done = 0;
+	}
+
+	spin_unlock(&port->port_lock_dl);
+	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->src_connection_idx = src_connection_idx;
+		d->dst_connection_idx = dst_connection_idx;
+		d->usb_bam_type = usb_bam_get_bam_type(gr->gadget->name);
+		d->ipa_params.src_pipe = &(d->src_pipe_idx);
+		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+		d->ipa_params.src_idx = src_connection_idx;
+		d->ipa_params.dst_idx = dst_connection_idx;
+
+		/*
+		 * Query pipe type using IPA src/dst index with
+		 * usbbam driver. It is being set either as
+		 * BAM2BAM or SYS2BAM.
+		 */
+		if (usb_bam_get_pipe_type(d->usb_bam_type,
+				d->ipa_params.src_idx, &d->src_pipe_type) ||
+		    usb_bam_get_pipe_type(d->usb_bam_type,
+				d->ipa_params.dst_idx, &d->dst_pipe_type)) {
+			pr_err("%s:usb_bam_get_pipe_type() failed\n",
+			       __func__);
+			ret = -EINVAL;
+			usb_ep_free_request(port->port_usb->out, d->rx_req);
+			d->rx_req = NULL;
+			usb_ep_free_request(port->port_usb->in, d->tx_req);
+			d->tx_req = NULL;
+			goto exit;
+		}
+		/*
+		 * Check for pipe_type. If it is BAM2BAM, then it is required
+		 * to disable Xfer complete and Xfer not ready interrupts for
+		 * that particular endpoint. Hence it set endless flag based
+		 * it which is considered into UDC driver while enabling
+		 * USB Endpoint.
+		 */
+		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			port->port_usb->in->endless = true;
+
+		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			port->port_usb->out->endless = true;
+	}
+
+	ret = usb_ep_enable(gr->in);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%pK",
+			__func__, gr->in);
+		usb_ep_free_request(port->port_usb->out, d->rx_req);
+		d->rx_req = NULL;
+		usb_ep_free_request(port->port_usb->in, d->tx_req);
+		d->tx_req = NULL;
+		if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			port->port_usb->in->endless = false;
+
+		if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			port->port_usb->out->endless = false;
+		goto exit;
+	}
+	gr->in->driver_data = port;
+
+	/*
+	 * DPL traffic is routed through BAM-DMUX on some targets.
+	 * DPL function has only 1 IN endpoint. Add out endpoint
+	 * checks for BAM-DMUX transport.
+	 */
+	if (gr->out) {
+		ret = usb_ep_enable(gr->out);
+		if (ret) {
+			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%pK",
+					__func__, gr->out);
+			gr->in->driver_data = 0;
+			usb_ep_disable(gr->in);
+			usb_ep_free_request(port->port_usb->out, d->rx_req);
+			d->rx_req = NULL;
+			usb_ep_free_request(port->port_usb->in, d->tx_req);
+			d->tx_req = NULL;
+			if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+				port->port_usb->in->endless = false;
+
+			if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+				port->port_usb->out->endless = false;
+			goto exit;
+		}
+		gr->out->driver_data = port;
+	}
+
+	port->last_event = U_BAM_CONNECT_E;
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work (due to cable disconnect)
+	 * or in suspend work.
+	 */
+	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(gbam_wq, &port->connect_w);
+
+	ret = 0;
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * Allocate @no_bam_port additional BAM-DMUX ports (shared workqueue is
+ * created on first use). Returns the index of the first newly allocated
+ * port, or a negative errno.
+ *
+ * NOTE(review): the error path frees ports from index 0, which includes
+ * ports allocated by earlier successful gbam_setup() calls, and destroys
+ * the workqueue shared with the bam2bam ports; n_bam_ports is also left
+ * stale afterwards — confirm intended cleanup scope.
+ */
+int gbam_setup(unsigned int no_bam_port)
+{
+	int	i;
+	int	ret;
+	int	bam_port_start = n_bam_ports;
+	int	total_bam_ports = bam_port_start + no_bam_port;
+
+	pr_debug("%s: requested BAM ports:%d\n", __func__, no_bam_port);
+
+	if (!no_bam_port || total_bam_ports > BAM_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, no_bam_port);
+		return -EINVAL;
+	}
+
+	if (!gbam_wq) {
+		gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+					WQ_MEM_RECLAIM, 1);
+		if (!gbam_wq) {
+			pr_err("%s: Unable to create workqueue gbam_wq\n",
+					__func__);
+			return -ENOMEM;
+		}
+	}
+
+	for (i = bam_port_start; i < (bam_port_start + no_bam_port); i++) {
+		n_bam_ports++;
+		pr_debug("gbam_port_alloc called for %d\n", i);
+		ret = gbam_port_alloc(i);
+		if (ret) {
+			n_bam_ports--;
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_bam_ports;
+		}
+	}
+
+	gbam_debugfs_init();
+
+	return bam_port_start;
+
+free_bam_ports:
+	for (i = 0; i < n_bam_ports; i++)
+		gbam_port_free(i);
+	destroy_workqueue(gbam_wq);
+
+	return ret;
+}
+
+/*
+ * Allocate @no_bam2bam_port additional BAM2BAM (IPA) ports. Returns the
+ * index of the first newly allocated port, or a negative errno.
+ *
+ * NOTE(review): same error-path concerns as gbam_setup() above — frees
+ * from index 0 and destroys the shared workqueue.
+ */
+int gbam2bam_setup(unsigned int no_bam2bam_port)
+{
+	int	i;
+	int	ret;
+	int	bam2bam_port_start = n_bam2bam_ports;
+	int	total_bam2bam_ports = bam2bam_port_start + no_bam2bam_port;
+
+	pr_debug("%s: requested BAM2BAM ports:%d\n", __func__, no_bam2bam_port);
+
+	if (!no_bam2bam_port || total_bam2bam_ports > BAM2BAM_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, no_bam2bam_port);
+		return -EINVAL;
+	}
+
+	if (!gbam_wq) {
+		gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+					WQ_MEM_RECLAIM, 1);
+		if (!gbam_wq) {
+			pr_err("%s: Unable to create workqueue gbam_wq\n",
+					__func__);
+			return -ENOMEM;
+		}
+	}
+
+	for (i = bam2bam_port_start; i < (bam2bam_port_start +
+				no_bam2bam_port); i++) {
+		n_bam2bam_ports++;
+		ret = gbam2bam_port_alloc(i);
+		if (ret) {
+			n_bam2bam_ports--;
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_bam2bam_ports;
+		}
+	}
+
+	gbam_debugfs_init();
+
+	return bam2bam_port_start;
+
+free_bam2bam_ports:
+	for (i = 0; i < n_bam2bam_ports; i++)
+		gbam2bam_port_free(i);
+	destroy_workqueue(gbam_wq);
+
+	return ret;
+}
+
+/* Module-level teardown: removes the debugfs directory only. */
+void gbam_cleanup(void)
+{
+	gbam_debugfs_remove();
+}
+
+/*
+ * Bus-suspend hook for a BAM2BAM IPA port: records the event and defers
+ * the actual pipe handover to gbam2bam_suspend_work(). No-op for other
+ * transports. (Local `d` is only assigned, not otherwise used here.)
+ */
+void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info *d;
+	unsigned long flags;
+
+	if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+		return;
+
+	port = bam2bam_ports[port_num];
+
+	if (!port) {
+		pr_err("%s: NULL port", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+
+	pr_debug("%s: suspended port %d\n", __func__, port_num);
+
+	port->last_event = U_BAM_SUSPEND_E;
+	queue_work(gbam_wq, &port->suspend_w);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Bus-resume hook for a BAM2BAM IPA port: records the event, pins the
+ * gadget via autopm and defers the pipe resume to gbam2bam_resume_work().
+ */
+void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info *d;
+	unsigned long flags;
+
+	if (trans != USB_GADGET_XPORT_BAM2BAM_IPA)
+		return;
+
+	port = bam2bam_ports[port_num];
+
+	if (!port) {
+		pr_err("%s: NULL port", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+
+	pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+	port->last_event = U_BAM_RESUME_E;
+	/*
+	 * Increment usage count here to disallow gadget parent suspend.
+	 * This counter will decrement after IPA handshake is done in
+	 * disconnect work (due to cable disconnect) or in bam_disconnect
+	 * in suspended state.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(gbam_wq, &port->resume_w);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * MBIM wrapper around gbam_connect(): wraps the endpoints in a heap
+ * allocated grmnet (freed in gbam_mbim_disconnect()) and connects on
+ * the shared BAM-DMUX port 0.
+ */
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+			struct usb_ep *out)
+{
+	struct grmnet *gr;
+
+	gr = kzalloc(sizeof(*gr), GFP_ATOMIC);
+	if (!gr)
+		return -ENOMEM;
+	gr->in = in;
+	gr->out = out;
+	gr->gadget = g;
+
+	return gbam_connect(gr, 0, USB_GADGET_XPORT_BAM_DMUX, 0, 0);
+}
+
+/*
+ * MBIM wrapper around gbam_disconnect() for port 0; frees the grmnet
+ * allocated in gbam_mbim_connect().
+ */
+void gbam_mbim_disconnect(void)
+{
+	struct gbam_port *port = bam_ports[0].port;
+	struct grmnet *gr = port->port_usb;
+
+	if (!gr) {
+		pr_err("%s: port_usb is NULL\n", __func__);
+		return;
+	}
+
+	gbam_disconnect(gr, 0, USB_GADGET_XPORT_BAM_DMUX);
+	kfree(gr);
+}
+
+/* Lazily allocate the single shared BAM-DMUX port used by MBIM/RMNET. */
+int gbam_mbim_setup(void)
+{
+	int ret = 0;
+
+	/*
+	 * MBIM requires only 1 USB_GADGET_XPORT_BAM_DMUX
+	 * port. The port is always 0 and is shared
+	 * between RMNET and MBIM.
+	 */
+	if (!n_bam_ports)
+		ret = gbam_setup(1);
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/function/u_bam_data.c b/drivers/usb/gadget/function/u_bam_data.c
new file mode 100644
index 000000000000..56bb5724ea52
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.c
@@ -0,0 +1,2109 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/usb/gadget.h>
+
+#include <linux/usb_bam.h>
+
+#include "u_bam_data.h"
+
+#define BAM_DATA_RX_Q_SIZE 128
+#define BAM_DATA_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+#define BAM_DATA_PENDING_LIMIT 220
+
+#define SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT 1
+#define SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD 500
+#define SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD 300
+
+static unsigned int bam_ipa_rx_fctrl_support = SYS_BAM_RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(bam_ipa_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_ipa_rx_fctrl_en_thld = SYS_BAM_RX_PKT_FCTRL_EN_TSHOLD;
+module_param(bam_ipa_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_ipa_rx_fctrl_dis_thld = SYS_BAM_RX_PKT_FCTRL_DIS_TSHOLD;
+module_param(bam_ipa_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+static struct workqueue_struct *bam_data_wq;
+static int n_bam2bam_data_ports;
+
+unsigned int bam_data_rx_q_size = BAM_DATA_RX_Q_SIZE;
+module_param(bam_data_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int bam_data_mux_rx_req_size = BAM_DATA_MUX_RX_REQ_SIZE;
+module_param(bam_data_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+#define SPS_PARAMS_SPS_MODE BIT(5)
+#define SPS_PARAMS_TBE BIT(6)
+#define MSM_VENDOR_ID BIT(16)
+
+/* Per-connection RNDIS/IPA data-channel parameters cached for the IPA driver. */
+struct rndis_data_ch_info {
+	/* this provides downlink (device->host i.e. host) side configuration*/
+	u32 dl_max_transfer_size;
+	/* this provides uplink (host->device i.e. device) side configuration */
+	u32 ul_max_transfer_size;
+	u32 ul_max_packets_number;
+	bool ul_aggregation_enable;
+	u32 prod_clnt_hdl;
+	u32 cons_clnt_hdl;
+	void *priv;
+};
+
+/* Tethering driver callback and context used by the sys2ipa UL workaround. */
+struct sys2ipa_sw_data {
+	void		*teth_priv;
+	ipa_notify_cb	teth_cb;
+};
+
+/* Per-port data-channel state: pipe indices, IPA params, UL workaround
+ * queues and flow-control statistics.
+ */
+struct bam_data_ch_info {
+	unsigned long		flags;
+	unsigned		id;
+
+	struct bam_data_port	*port;
+	struct work_struct	write_tobam_w;
+
+	struct usb_request	*rx_req;
+	struct usb_request	*tx_req;
+
+	u32			src_pipe_idx;
+	u32			dst_pipe_idx;
+	u8			src_connection_idx;
+	u8			dst_connection_idx;
+	enum usb_ctrl		usb_bam_type;
+
+	enum function_type			func_type;
+	enum transport_type			trans;
+	struct usb_bam_connect_ipa_params	ipa_params;
+
+	/* UL workaround parameters */
+	struct sys2ipa_sw_data	ul_params;
+	struct list_head	rx_idle;
+	struct sk_buff_head	rx_skb_q;
+	int			total_skb;
+	int			freed_skb;
+	int			freed_rx_reqs;
+	int			alloc_rx_reqs;
+	struct sk_buff_head	rx_skb_idle;
+	enum usb_bam_pipe_type	src_pipe_type;
+	enum usb_bam_pipe_type	dst_pipe_type;
+	unsigned int		pending_with_bam;
+	int			rx_buffer_size;
+
+	unsigned int		rx_flow_control_disable;
+	unsigned int		rx_flow_control_enable;
+	unsigned int		rx_flow_control_triggered;
+	/*
+	 * used for RNDIS/ECM network interface based design
+	 * to indicate ecm/rndis pipe connect notification is sent
+	 * to ecm_ipa/rndis_ipa.
+	 */
+	atomic_t		pipe_connect_notified;
+	bool			tx_req_dequeued;
+	bool			rx_req_dequeued;
+};
+
+/* Last port-level event; drives the connect/disconnect/suspend/resume works. */
+enum u_bam_data_event_type {
+	U_BAM_DATA_DISCONNECT_E = 0,
+	U_BAM_DATA_CONNECT_E,
+	U_BAM_DATA_SUSPEND_E,
+	U_BAM_DATA_RESUME_E
+};
+
+/* One BAM2BAM data port: connection state, locks and deferred work items. */
+struct bam_data_port {
+	bool                            is_ipa_connected;
+	enum u_bam_data_event_type	last_event;
+	unsigned			port_num;
+	spinlock_t			port_lock;
+	unsigned int                    ref_count;
+	struct data_port		*port_usb;
+	struct usb_gadget		*gadget;
+	struct bam_data_ch_info		data_ch;
+
+	struct work_struct		connect_w;
+	struct work_struct		disconnect_w;
+	struct work_struct		suspend_w;
+	struct work_struct		resume_w;
+};
+/* Pipe/handle triple reported to userspace or peer drivers. */
+struct  usb_bam_data_connect_info {
+	u32 usb_bam_pipe_idx;
+	u32 peer_pipe_idx;
+	u32 usb_bam_handle;
+};
+
+struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
+static struct rndis_data_ch_info rndis_data;
+
+static void bam2bam_data_suspend_work(struct work_struct *w);
+static void bam2bam_data_resume_work(struct work_struct *w);
+static void bam_data_free_reqs(struct bam_data_port *port);
+
+/*----- sys2bam towards the IPA (UL workaround) --------------- */
+
+/*
+ * Allocate @num usb_requests on @ep, set their completion to @cb and
+ * queue them on @head; d->alloc_rx_reqs tracks how many exist.
+ *
+ * Returns 0 on (possibly partial) success — if allocation fails midway
+ * but at least one request was obtained, the partial list is kept and 0
+ * is returned; -ENOMEM only if none could be allocated; -EINVAL (with a
+ * WARN) if requests were already allocated.
+ */
+static int bam_data_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int i;
+	struct bam_data_port	*port = ep->driver_data;
+	struct bam_data_ch_info	*d = &port->data_ch;
+	struct usb_request	*req;
+
+	pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK", __func__,
+			ep, head, num, cb);
+
+	if (d->alloc_rx_reqs) {
+		pr_err("%s(): reqs are already allocated.\n", __func__);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_err("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		d->alloc_rx_reqs++;
+		req->complete = cb;
+		list_add_tail(&req->list, head);
+	}
+
+	return 0;
+}
+
+/* Retrieve the DMA address stashed in skb->cb by the skb-pool allocator. */
+static inline dma_addr_t bam_data_get_dma_from_skb(struct sk_buff *skb)
+{
+	return *((dma_addr_t *)(skb->cb));
+}
+
+/* This function should be called with port_lock lock taken */
+/*
+ * Get an RX skb: reuse one from the idle pool when available, otherwise
+ * allocate a new one (growing the pool dynamically), DMA-map its buffer
+ * and stash the mapping in skb->cb. On mapping failure the sentinel
+ * DMA_ERROR_CODE is stored instead and the skb is still returned.
+ * Returns NULL on allocation failure or bad arguments.
+ */
+static struct sk_buff *bam_data_alloc_skb_from_pool(
+	struct bam_data_port *port)
+{
+	struct bam_data_ch_info *d;
+	struct sk_buff *skb = NULL;
+	dma_addr_t      skb_buf_dma_addr;
+	struct data_port  *data_port;
+	struct usb_gadget *gadget;
+
+	if (!port)
+		return NULL;
+	d = &port->data_ch;
+	if (!d)
+		return NULL;
+
+	if (d->rx_skb_idle.qlen == 0) {
+		/*
+		 * In case skb idle pool is empty, we allow to allocate more
+		 * skbs so we dynamically enlarge the pool size when needed.
+		 * Therefore, in steady state this dynamic allocation will
+		 * stop when the pool will arrive to its optimal size.
+		 */
+		pr_debug("%s: allocate skb\n", __func__);
+		skb = alloc_skb(d->rx_buffer_size + BAM_MUX_HDR, GFP_ATOMIC);
+		if (!skb) {
+			pr_err("%s: alloc skb failed\n", __func__);
+			goto alloc_exit;
+		}
+
+		d->total_skb++;
+		skb_reserve(skb, BAM_MUX_HDR);
+
+		data_port = port->port_usb;
+		if (data_port && data_port->cdev && data_port->cdev->gadget) {
+			gadget = data_port->cdev->gadget;
+
+			skb_buf_dma_addr =
+				dma_map_single(&gadget->dev, skb->data,
+					d->rx_buffer_size, DMA_BIDIRECTIONAL);
+
+			if (dma_mapping_error(&gadget->dev, skb_buf_dma_addr)) {
+				pr_err("%s: Could not DMA map SKB buffer\n",
+					__func__);
+				skb_buf_dma_addr = DMA_ERROR_CODE;
+			}
+		} else {
+			pr_err("%s: Could not DMA map SKB buffer\n", __func__);
+			skb_buf_dma_addr = DMA_ERROR_CODE;
+		}
+
+		/* stash the mapping for bam_data_get_dma_from_skb() */
+		memcpy(skb->cb, &skb_buf_dma_addr,
+			sizeof(skb_buf_dma_addr));
+
+	} else {
+		pr_debug("%s: pull skb from pool\n", __func__);
+		skb = __skb_dequeue(&d->rx_skb_idle);
+	}
+
+alloc_exit:
+	return skb;
+}
+
+static void bam_data_free_skb_to_pool(
+ struct bam_data_port *port,
+ struct sk_buff *skb)
+{
+ struct bam_data_ch_info *d;
+
+ if (!port) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ d = &port->data_ch;
+ if (!d) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
/*
 * IPA UL write-done callback: IPA has consumed @skb, so recycle it to
 * the idle pool, decrement the in-flight counter, and reschedule the
 * UL worker to push more queued skbs toward IPA.
 * Takes port_lock itself; safe from IPA callback context.
 */
static void bam_data_write_done(void *p, struct sk_buff *skb)
{
	struct bam_data_port *port = p;
	struct bam_data_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	spin_lock_irqsave(&port->port_lock, flags);
	/* Recycle rather than free: pool is reused by bam_data_start_rx(). */
	bam_data_free_skb_to_pool(port, skb);

	d->pending_with_bam--;

	pr_debug("%s: port:%pK d:%pK pbam:%u, pno:%d\n", __func__,
			port, d, d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(bam_data_wq, &d->write_tobam_w);
}
+
+static void bam_data_ipa_sys2bam_notify_cb(void *priv,
+ enum ipa_dp_evt_type event, unsigned long data)
+{
+ struct sys2ipa_sw_data *ul = (struct sys2ipa_sw_data *)priv;
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ switch (event) {
+ case IPA_WRITE_DONE:
+ d = container_of(ul, struct bam_data_ch_info, ul_params);
+ port = container_of(d, struct bam_data_port, data_ch);
+ /* call into bam_demux functionality that'll recycle the data */
+ bam_data_write_done(port, (struct sk_buff *)(data));
+ break;
+ case IPA_RECEIVE:
+ /* call the callback given by tethering driver init function
+ * (and was given to ipa_connect)
+ */
+ if (ul->teth_cb)
+ ul->teth_cb(ul->teth_priv, event, data);
+ break;
+ default:
+ /* unexpected event */
+ pr_err("%s: unexpected event %d\n", __func__, event);
+ break;
+ }
+}
+
+
/*
 * Queue RX (OUT) requests for the sys2bam UL path: pair each idle
 * request with a pooled skb and submit it to the controller.
 *
 * port_lock is dropped around usb_ep_queue() because the completion
 * handler may run synchronously and takes the same lock; port_usb is
 * therefore re-checked each loop iteration.
 */
static void bam_data_start_rx(struct bam_data_port *port)
{
	struct usb_request *req;
	struct bam_data_ch_info *d;
	struct usb_ep *ep;
	int ret;
	struct sk_buff *skb;
	unsigned long flags;

	if (!port->port_usb)
		return;

	d = &port->data_ch;
	ep = port->port_usb->out;

	spin_lock_irqsave(&port->port_lock, flags);
	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* stop refilling once the backlog crosses the FC threshold */
		if (bam_ipa_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);
		skb = bam_data_alloc_skb_from_pool(port);
		if (!skb)
			break;
		list_del(&req->list);
		req->buf = skb->data;
		req->dma = bam_data_get_dma_from_skb(skb);
		req->length = d->rx_buffer_size;

		/* skip UDC-side mapping when the skb is already DMA mapped */
		if (req->dma != DMA_ERROR_CODE)
			req->dma_pre_mapped = true;
		else
			req->dma_pre_mapped = false;

		req->context = skb;
		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			bam_data_free_skb_to_pool(port, skb);

			pr_err("%s: rx queue failed %d\n", __func__, ret);

			/* port may have disconnected while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
/*
 * Completion handler for sys2bam RX (OUT) requests.
 *
 * On success, the filled skb is queued for the UL worker; the request
 * is then re-armed with a fresh skb unless flow control kicks in or the
 * USB BAM producer is not yet granted.  On cable disconnect the request
 * is freed for good.
 *
 * NOTE(review): the tail of this function uses plain spin_lock() while
 * the error paths above use spin_lock_irqsave() — presumably relying on
 * completion context having IRQs disabled already; confirm against the
 * UDC driver before changing.
 */
static void bam_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct bam_data_port *port = ep->driver_data;
	struct bam_data_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;
	unsigned long flags;

	switch (status) {
	case 0:
		/* received data: account the bytes and hand off to the UL q */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		spin_lock_irqsave(&port->port_lock, flags);
		bam_data_free_skb_to_pool(port, skb);
		d->freed_rx_reqs++;
		spin_unlock_irqrestore(&port->port_lock, flags);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		pr_err("%s: %s response error %d, %d/%d\n", __func__,
			ep->name, status, req->actual, req->length);
		spin_lock_irqsave(&port->port_lock, flags);
		bam_data_free_skb_to_pool(port, skb);
		spin_unlock_irqrestore(&port->port_lock, flags);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		/* park the request until the BAM producer pipe is granted */
		if (!usb_bam_get_prod_granted(d->usb_bam_type,
					d->dst_connection_idx)) {
			list_add_tail(&req->list, &d->rx_idle);
			spin_unlock(&port->port_lock);
			pr_err_ratelimited("usb bam prod is not granted.\n");
			return;
		}
		queue_work(bam_data_wq, &d->write_tobam_w);
	}

	/* backlog too deep: park the request until the worker drains it */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld) {
		if (!d->rx_flow_control_triggered) {
			d->rx_flow_control_triggered = 1;
			d->rx_flow_control_enable++;
		}
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}

	skb = bam_data_alloc_skb_from_pool(port);
	if (!skb) {
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	/* re-arm the request with the fresh skb */
	req->buf = skb->data;
	req->dma = bam_data_get_dma_from_skb(skb);
	req->length = d->rx_buffer_size;

	if (req->dma != DMA_ERROR_CODE)
		req->dma_pre_mapped = true;
	else
		req->dma_pre_mapped = false;

	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		pr_err_ratelimited("%s: data rx enqueue err %d\n",
				__func__, status);
		spin_lock(&port->port_lock);
		bam_data_free_skb_to_pool(port, skb);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}
+/* It should be called with port_lock acquire. */
+static int bam_data_sys2bam_alloc_req(struct bam_data_port *port, bool in)
+{
+ int ret;
+ struct usb_ep *ep;
+ struct list_head *idle;
+ unsigned queue_size;
+ void (*ep_complete)(struct usb_ep *, struct usb_request *);
+
+ if (!port->port_usb)
+ return -EBUSY;
+ if (in)
+ return -ENODEV;
+
+ ep = port->port_usb->out;
+ idle = &port->data_ch.rx_idle;
+ queue_size = bam_data_rx_q_size;
+ ep_complete = bam_data_epout_complete;
+
+ ret = bam_data_alloc_requests(ep, idle, queue_size, ep_complete,
+ GFP_ATOMIC);
+ if (ret)
+ pr_err("%s: allocation failed\n", __func__);
+
+ return ret;
+}
+
/*
 * UL worker: drain rx_skb_q into IPA via ipa_tx_dp(), bounded by
 * BAM_PENDING_PKTS_LIMIT and by the BAM producer grant.  port_lock is
 * dropped around the ipa_tx_dp() call itself.  Once drained below the
 * disable threshold, flow control is lifted and RX is restarted.
 *
 * NOTE(review): the rx_flow_control_* fields and the final
 * bam_data_start_rx() call are touched after port_lock is released —
 * presumably tolerated as statistics/benign races; confirm before
 * relying on their exact values.
 */
static void bam_data_write_toipa(struct work_struct *w)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	struct sk_buff *skb;
	int ret;
	int qlen;
	unsigned long flags;
	dma_addr_t skb_dma_addr;
	struct ipa_tx_meta ipa_meta = {0x0};

	d = container_of(w, struct bam_data_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_PKTS_LIMIT &&
		usb_bam_get_prod_granted(d->usb_bam_type,
					d->dst_connection_idx)) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;

		pr_debug("%s: port:%pK d:%pK pbam:%u pno:%d\n", __func__,
				port, d, d->pending_with_bam, port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);

		/* pass the pre-mapped DMA address along when valid */
		skb_dma_addr = bam_data_get_dma_from_skb(skb);
		if (skb_dma_addr != DMA_ERROR_CODE) {
			ipa_meta.dma_address = skb_dma_addr;
			ipa_meta.dma_address_valid = true;
		}

		ret = ipa_tx_dp(IPA_CLIENT_USB_PROD, skb, &ipa_meta);

		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug_ratelimited("%s: write error:%d\n",
					__func__, ret);
			d->pending_with_bam--;
			bam_data_free_skb_to_pool(port, skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* backlog drained enough: lift flow control and restart RX */
	if (qlen < bam_ipa_rx_fctrl_dis_thld) {
		if (d->rx_flow_control_triggered) {
			d->rx_flow_control_disable++;
			d->rx_flow_control_triggered = 0;
		}
		bam_data_start_rx(port);
	}

}
+
+/*------------data_path----------------------------*/
+
+static void bam_data_endless_rx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_endless_tx_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ int status = req->status;
+
+ pr_debug("%s: status: %d\n", __func__, status);
+}
+
+static void bam_data_start_endless_rx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->rx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->out;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
+static void bam_data_start_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !d->tx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: enqueue\n", __func__);
+ status = usb_ep_queue(ep, d->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing transfer, %d\n", status);
+}
+
/*
 * Dequeue the endless OUT (RX) request and mark it dequeued so the
 * resume path knows to re-queue it.
 *
 * NOTE(review): unlike bam_data_stop_endless_tx(), the dequeue here is
 * issued while still holding port_lock with IRQs off — presumably
 * intentional (the endless request's completion is a no-op), but worth
 * confirming since usb_ep_dequeue() may call completions synchronously.
 */
static void bam_data_stop_endless_rx(struct bam_data_port *port)
{
	struct bam_data_ch_info *d = &port->data_ch;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d->rx_req_dequeued = true;

	pr_debug("%s: dequeue\n", __func__);
	status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
	if (status)
		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+static void bam_data_stop_endless_tx(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d = &port->data_ch;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ep = port->port_usb->in;
+ d->tx_req_dequeued = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ pr_debug("%s: dequeue\n", __func__);
+ status = usb_ep_dequeue(ep, d->tx_req);
+ if (status)
+ pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+}
+
+static void bam2bam_free_rx_skb_idle_list(struct bam_data_port *port)
+{
+ struct bam_data_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ d = &port->data_ch;
+ if (!d) {
+ pr_err("%s(): port->data_ch is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb) {
+ pr_err("%s(): port->port_usb is NULL.\n", __func__);
+ return;
+ }
+
+ if (!port->port_usb->cdev) {
+ pr_err("port->port_usb->cdev is NULL");
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (!gadget) {
+ pr_err("%s(): gadget is NULL.\n", __func__);
+ return;
+ }
+
+ while (d->rx_skb_idle.qlen > 0) {
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ dma_addr = gbam_get_dma_from_skb(skb);
+
+ if (gadget && dma_addr != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, dma_addr,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+ dma_addr = DMA_ERROR_CODE;
+ memcpy(skb->cb, &dma_addr, sizeof(dma_addr));
+ }
+ dev_kfree_skb_any(skb);
+ d->freed_skb++;
+ }
+
+ pr_debug("%s(): Freed %d SKBs from rx_skb_idle queue\n", __func__,
+ d->freed_skb);
+}
+
+/*
+ * bam_data_ipa_disconnect()- Perform USB IPA function level disconnect
+ * struct bam_data_ch_info - Per USB IPA port data structure
+ *
+ * Make sure to call IPA rndis/ecm/mbim related disconnect APIs() only
+ * if those APIs init counterpart is already performed.
+ * MBIM: teth_bridge_connect() is NO_OPS and teth_bridge_init() is
+ * being called with atomic context on cable connect, hence there is no
+ * need to consider for this check. pipe_connect_notified is being used
+ * for RNDIS/ECM driver due to its different design with usage of
+ * network interface created by IPA driver.
+ */
+static void bam_data_ipa_disconnect(struct bam_data_ch_info *d)
+{
+ pr_debug("%s(): pipe_connect_notified:%d\n",
+ __func__, atomic_read(&d->pipe_connect_notified));
+ /*
+ * Check if pipe_connect_notified is set to 1, then perform disconnect
+ * part and set pipe_connect_notified to zero.
+ */
+ if (atomic_xchg(&d->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ if (d->func_type == USB_FUNC_ECM) {
+ priv = ecm_qc_get_ipa_priv();
+ ecm_ipa_disconnect(priv);
+ } else if (d->func_type == USB_FUNC_RNDIS) {
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ pr_debug("%s(): net interface is disconnected.\n", __func__);
+ }
+
+ if (d->func_type == USB_FUNC_MBIM) {
+ pr_debug("%s(): teth_bridge() disconnected\n", __func__);
+ teth_bridge_disconnect(d->ipa_params.src_client);
+ }
+}
+
/*
 * Workqueue handler for cable disconnect: tears down the USB<->IPA BAM
 * pipes, frees their fifos, then performs the function-level IPA
 * disconnect, and finally drops the PM usage count taken at connect.
 */
static void bam2bam_data_disconnect_work(struct work_struct *w)
{
	struct bam_data_port *port =
		container_of(w, struct bam_data_port, disconnect_w);
	struct bam_data_ch_info *d;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->is_ipa_connected) {
		pr_debug("%s: Already disconnected. Bailing out.\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;

	/*
	 * Unlock the port here and not at the end of this work,
	 * because we do not want to activate usb_bam, ipa and
	 * teth bridge logic in atomic context and wait unneeded time.
	 * Either way other works will not fire until end of this work
	 * and event functions (such as bam_data_connect) will not
	 * interfere while lower layers connect pipes, etc.
	 */
	spin_unlock_irqrestore(&port->port_lock, flags);

	ret = usb_bam_disconnect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret)
		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
	usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);

	/*
	 * NOTE: it is required to disconnect USB and IPA BAM related pipes
	 * before calling IPA tethered function related disconnect API. IPA
	 * tethered function related disconnect API deletes the dependency
	 * graph with IPA RM which would result in IPA not pulling data
	 * although there is pending data on the USB BAM producer pipe.
	 */
	bam_data_ipa_disconnect(d);
	spin_lock_irqsave(&port->port_lock, flags);
	port->is_ipa_connected = false;

	/*
	 * Decrement usage count which was incremented
	 * upon cable connect or cable disconnect in suspended state.
	 */
	usb_gadget_autopm_put_async(port->gadget);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_debug("Disconnect workqueue done (port %pK)\n", port);
}
+/*
+ * This function configured data fifo based on index passed to get bam2bam
+ * configuration.
+ */
+static void configure_usb_data_fifo(enum usb_ctrl bam_type,
+ u8 idx, struct usb_ep *ep, enum usb_bam_pipe_type pipe_type)
+{
+ struct u_bam_data_connect_info bam_info;
+ struct sps_mem_buffer data_fifo = {0};
+
+ if (pipe_type == USB_BAM_PIPE_BAM2BAM) {
+ get_bam2bam_connection_info(bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+
+ msm_data_fifo_config(ep,
+ data_fifo.phys_base,
+ data_fifo.size,
+ bam_info.usb_bam_pipe_idx);
+ }
+}
+
+/* Start RX transfers according to pipe_type */
+static inline void bam_data_start_rx_transfers(struct bam_data_ch_info *d,
+ struct bam_data_port *port)
+{
+ if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+ bam_data_start_endless_rx(port);
+ else
+ bam_data_start_rx(port);
+}
+
/*
 * Workqueue handler for cable connect: allocates BAM fifos, programs
 * the endpoints' SPS parameters, connects the UL (USB->IPA) and DL
 * (IPA->USB) pipes via usb_bam_connect_ipa(), performs the
 * function-level IPA connect (MBIM/ECM/RNDIS), and finally starts the
 * endless transfers (except for ECM/RNDIS, which wait for the network
 * notify).
 *
 * Locking: port_lock is taken and released around each sleeping call;
 * port->last_event and port->port_usb are re-checked after every
 * unlocked section since a disconnect may race in.
 *
 * Error-path labels: free_fifos releases the BAM fifos;
 * disconnect_ipa instead marks is_ipa_connected so the disconnect work
 * performs the full IPA teardown.
 *
 * NOTE(review): failures of teth_bridge_connect(), ecm_ipa_connect()
 * and rndis_ipa_pipe_connect_notify() return without any cleanup —
 * presumably relying on a later disconnect event; confirm.
 */
static void bam2bam_data_connect_work(struct work_struct *w)
{
	struct bam_data_port *port = container_of(w, struct bam_data_port,
			connect_w);
	struct teth_bridge_connect_params connect_params;
	struct teth_bridge_init_params teth_bridge_params;
	struct bam_data_ch_info *d;
	struct data_port *d_port;
	struct usb_gadget *gadget = NULL;
	u32 sps_params;
	int ret;
	unsigned long flags;

	pr_debug("%s: Connect workqueue started", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;
	d_port = port->port_usb;

	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		pr_debug("%s: Port is about to disconnect. Bail out.\n",
			__func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (d_port && d_port->cdev)
		gadget = d_port->cdev->gadget;

	if (!gadget) {
		pr_err("%s: NULL gadget\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (!port->port_usb) {
		pr_err("port_usb is NULL");
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	if (!port->port_usb->out) {
		pr_err("port_usb->out (bulk out ep) is NULL");
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	/*
	 * check if connect_w got called two times during RNDIS resume as
	 * explicit flow control is called to start data transfers after
	 * bam_data_connect()
	 */
	if (port->is_ipa_connected) {
		pr_debug("IPA connect is already done & Transfers started\n");
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_gadget_autopm_put_async(port->gadget);
		return;
	}

	d->ipa_params.usb_connection_speed = gadget->speed;
	/* -1 handles mark the pipes as not yet connected */
	d->ipa_params.cons_clnt_hdl = -1;
	d->ipa_params.prod_clnt_hdl = -1;

	if (d->dst_pipe_type != USB_BAM_PIPE_BAM2BAM) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s: no software preparation for DL not using bam2bam\n",
				__func__);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	usb_bam_alloc_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_alloc_fifos(d->usb_bam_type, d->dst_connection_idx);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		pr_err("Disconnected.port_usb is NULL\n");
		spin_unlock_irqrestore(&port->port_lock, flags);
		goto free_fifos;
	}

	if (gadget_is_dwc3(gadget)) {
		/* Configure RX */
		configure_usb_data_fifo(d->usb_bam_type,
				d->src_connection_idx,
				port->port_usb->out, d->src_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
				| MSM_PRODUCER | d->src_pipe_idx;
		d->rx_req->length = 32*1024;
		d->rx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->out, d->rx_req);

		/* Configure TX */
		configure_usb_data_fifo(d->usb_bam_type,
				d->dst_connection_idx,
				port->port_usb->in, d->dst_pipe_type);
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
				| d->dst_pipe_idx;
		d->tx_req->length = 32*1024;
		d->tx_req->udc_priv = sps_params;
		msm_ep_config(port->port_usb->in, d->tx_req);

	} else {
		/* Configure RX */
		sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
		d->rx_req->udc_priv = sps_params;

		/* Configure TX */
		sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
		d->tx_req->udc_priv = sps_params;
	}

	if (d->func_type == USB_FUNC_MBIM) {
		teth_bridge_params.client = d->ipa_params.src_client;
		ret = teth_bridge_init(&teth_bridge_params);
		if (ret) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			pr_err("%s:teth_bridge_init() failed\n",
					__func__);
			goto free_fifos;
		}
		d->ipa_params.notify =
			teth_bridge_params.usb_notify_cb;
		d->ipa_params.priv =
			teth_bridge_params.private_data;
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
		d->ipa_params.skip_ep_cfg =
			teth_bridge_params.skip_ep_cfg;
	}
	/* first pass: connect the UL (USB -> IPA) pipe */
	d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
	if (d->func_type == USB_FUNC_ECM) {
		d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
		d->ipa_params.priv = ecm_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
	}

	if (d->func_type == USB_FUNC_RNDIS) {
		d->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
		d->ipa_params.priv = rndis_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg =
			rndis_qc_get_skip_ep_config();
	}

	/* Support for UL using system-to-IPA */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		/* interpose our callback; stash the tethering one */
		d->ul_params.teth_cb = d->ipa_params.notify;
		d->ipa_params.notify =
			bam_data_ipa_sys2bam_notify_cb;
		d->ul_params.teth_priv = d->ipa_params.priv;
		d->ipa_params.priv = &d->ul_params;
		d->ipa_params.reset_pipe_after_lpm = false;
	} else {
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			 msm_dwc3_reset_ep_after_lpm(gadget));
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto free_fifos;
	}
	gadget->bam2bam_func_enabled = true;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s:%d: Port is being disconnected.\n",
			__func__, __LINE__);
		goto disconnect_ipa;
	}

	d_port->ipa_consumer_ep = d->ipa_params.ipa_cons_ep_idx;

	/* Remove support for UL using system-to-IPA towards DL */
	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
		d->ipa_params.notify = d->ul_params.teth_cb;
		d->ipa_params.priv = d->ul_params.teth_priv;
	}

	/* second pass: connect the DL (IPA -> USB) pipe */
	d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
	if (d->func_type == USB_FUNC_ECM) {
		d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
		d->ipa_params.priv = ecm_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg = ecm_qc_get_skip_ep_config();
	}
	if (d->func_type == USB_FUNC_RNDIS) {
		d->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
		d->ipa_params.priv = rndis_qc_get_ipa_priv();
		d->ipa_params.skip_ep_cfg =
			rndis_qc_get_skip_ep_config();
	}

	if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM) {
		d->ipa_params.reset_pipe_after_lpm =
			(gadget_is_dwc3(gadget) &&
			 msm_dwc3_reset_ep_after_lpm(gadget));
	} else {
		d->ipa_params.reset_pipe_after_lpm = false;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
	ret = usb_bam_connect_ipa(d->usb_bam_type, &d->ipa_params);
	if (ret) {
		pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
			__func__, ret);
		goto disconnect_ipa;
	}

	/*
	 * Cable might have been disconnected after releasing the
	 * spinlock and re-enabling IRQs. Hence check again.
	 */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s:%d: port is beind disconnected.\n",
			__func__, __LINE__);
		goto disconnect_ipa;
	}

	port->is_ipa_connected = true;

	d_port->ipa_producer_ep = d->ipa_params.ipa_prod_ep_idx;
	pr_debug("%s(): ipa_producer_ep:%d ipa_consumer_ep:%d\n",
			__func__, d_port->ipa_producer_ep,
			d_port->ipa_consumer_ep);
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (d->func_type == USB_FUNC_MBIM) {
		connect_params.ipa_usb_pipe_hdl =
			d->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl =
			d->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode =
			TETH_TETHERING_MODE_MBIM;
		connect_params.client_type = d->ipa_params.src_client;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n",
				__func__);
			return;
		}
	}

	if (d->func_type == USB_FUNC_ECM) {
		ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
			d->ipa_params.prod_clnt_hdl,
			d->ipa_params.priv);
		if (ret) {
			pr_err("%s: failed to connect IPA: err:%d\n",
				__func__, ret);
			return;
		}
	}

	if (d->func_type == USB_FUNC_RNDIS) {
		rndis_data.prod_clnt_hdl =
			d->ipa_params.prod_clnt_hdl;
		rndis_data.cons_clnt_hdl =
			d->ipa_params.cons_clnt_hdl;
		rndis_data.priv = d->ipa_params.priv;

		pr_debug("ul_max_transfer_size:%d\n",
				rndis_data.ul_max_transfer_size);
		pr_debug("ul_max_packets_number:%d\n",
				rndis_data.ul_max_packets_number);
		pr_debug("dl_max_transfer_size:%d\n",
				rndis_data.dl_max_transfer_size);

		ret = rndis_ipa_pipe_connect_notify(
				rndis_data.cons_clnt_hdl,
				rndis_data.prod_clnt_hdl,
				rndis_data.ul_max_transfer_size,
				rndis_data.ul_max_packets_number,
				rndis_data.dl_max_transfer_size,
				rndis_data.priv);
		if (ret) {
			pr_err("%s: failed to connect IPA: err:%d\n",
				__func__, ret);
			return;
		}
	}
	atomic_set(&d->pipe_connect_notified, 1);

	/* Don't queue the transfers yet, only after network stack is up */
	if (d->func_type == USB_FUNC_RNDIS || d->func_type == USB_FUNC_ECM) {
		pr_debug("%s: Not starting now, waiting for network notify",
			__func__);
		return;
	}

	/* queue in & out requests */
	bam_data_start_rx_transfers(d, port);
	bam_data_start_endless_tx(port);

	pr_debug("Connect workqueue done (port %pK)", port);
	return;

disconnect_ipa:
	/* let disconnect work take care of ipa disconnect */
	port->is_ipa_connected = true;
	return;

free_fifos:
	usb_bam_free_fifos(d->usb_bam_type, d->src_connection_idx);
	usb_bam_free_fifos(d->usb_bam_type, d->dst_connection_idx);
}
+
/*
 * Called when IPA signals that the network interface is up.
 * Starts the transfers on the bulk endpoints (the pipes and BAM with
 * IPA are already connected — the split is an optimization).
 * @port_num: flat port index as returned by u_bam_data_func_to_port().
 */
void bam_data_start_rx_tx(u8 port_num)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	unsigned long flags;

	pr_debug("%s: Triggered: starting tx, rx", __func__);

	/* queue in & out requests */
	port = bam2bam_data_ports[port_num];
	if (!port) {
		pr_err("%s: port is NULL, can't start tx, rx", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);
	d = &port->data_ch;

	/* both endpoints must be enabled (driver_data set on ep enable) */
	if (!port->port_usb || !port->port_usb->in->driver_data
		|| !port->port_usb->out->driver_data) {
		pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
		goto out;
	}

	if (!d->rx_req || !d->tx_req) {
		pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
			d->rx_req, d->tx_req);
		goto out;
	}
	if (!port->is_ipa_connected) {
		pr_debug("%s: pipes are disconnected", __func__);
		goto out;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue in & out requests */
	pr_debug("%s: Starting rx", __func__);
	bam_data_start_rx_transfers(d, port);

	pr_debug("%s: Starting tx", __func__);
	bam_data_start_endless_tx(port);

	return;
out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+inline int u_bam_data_func_to_port(enum function_type func, u8 func_port)
+{
+ if (func >= USB_NUM_FUNCS || func_port >= PORTS_PER_FUNC) {
+ pr_err("func=%d and func_port=%d are an illegal combination\n",
+ func, func_port);
+ return -EINVAL;
+ }
+ return (PORTS_PER_FUNC * func) + func_port;
+}
+
+static int bam2bam_data_port_alloc(int portno)
+{
+ struct bam_data_port *port;
+ struct bam_data_ch_info *d;
+
+ if (bam2bam_data_ports[portno] != NULL) {
+ pr_debug("port %d already allocated.\n", portno);
+ return 0;
+ }
+
+ port = kzalloc(sizeof(struct bam_data_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ bam2bam_data_ports[portno] = port;
+ d = &port->data_ch;
+ d->port = port;
+
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+ INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+ INIT_WORK(&d->write_tobam_w, bam_data_write_toipa);
+ return 0;
+}
+
/*
 * RNDIS flow-control "resume" hook: bring the RNDIS IPA data path up by
 * scheduling the connect work, unless the pipes were already connected
 * (pipe_connect_notified set).
 * Note: port_num from u_bam_data_func_to_port() is not range-checked
 * here — valid for these compile-time arguments.
 */
void u_bam_data_start_rndis_ipa(void)
{
	int port_num;
	struct bam_data_port *port;
	struct bam_data_ch_info *d;

	pr_debug("%s\n", __func__);

	port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
			RNDIS_QC_ACTIVE_PORT);
	port = bam2bam_data_ports[port_num];
	if (!port) {
		pr_err("%s: port is NULL", __func__);
		return;
	}

	d = &port->data_ch;

	if (!atomic_read(&d->pipe_connect_notified)) {
		/*
		 * Increment usage count upon cable connect. Decrement after IPA
		 * handshake is done in disconnect work due to cable disconnect
		 * or in suspend work.
		 */
		usb_gadget_autopm_get_noresume(port->gadget);
		queue_work(bam_data_wq, &port->connect_w);
	} else {
		pr_debug("%s: Transfers already started?\n", __func__);
	}
}
+
/*
 * RNDIS flow-control "suspend" hook: if the pipes were connected, stop
 * the endless transfers and schedule the disconnect work to tear the
 * RNDIS IPA data path down.
 */
void u_bam_data_stop_rndis_ipa(void)
{
	int port_num;
	struct bam_data_port *port;
	struct bam_data_ch_info *d;

	pr_debug("%s\n", __func__);

	port_num = u_bam_data_func_to_port(USB_FUNC_RNDIS,
			RNDIS_QC_ACTIVE_PORT);
	port = bam2bam_data_ports[port_num];
	if (!port) {
		pr_err("%s: port is NULL", __func__);
		return;
	}

	d = &port->data_ch;

	if (atomic_read(&d->pipe_connect_notified)) {
		/* let the RNDIS IPA driver quiesce before stopping EPs */
		rndis_ipa_reset_trigger();
		bam_data_stop_endless_tx(port);
		bam_data_stop_endless_rx(port);
		queue_work(bam_data_wq, &port->disconnect_w);
	}
}
+
+void bam_data_flow_control_enable(bool enable)
+{
+ if (enable)
+ u_bam_data_stop_rndis_ipa();
+ else
+ u_bam_data_start_rndis_ipa();
+}
+
+static void bam_data_free_reqs(struct bam_data_port *port)
+{
+
+ struct list_head *head;
+ struct usb_request *req;
+
+ if (port->data_ch.src_pipe_type != USB_BAM_PIPE_SYS2BAM)
+ return;
+
+ head = &port->data_ch.rx_idle;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(port->port_usb->out, req);
+ port->data_ch.freed_rx_reqs++;
+ }
+}
+
/*
 * Function-driver disconnect entry point: disables the endpoints, frees
 * the endless requests, drains the SYS2BAM skb pools, records the
 * disconnect event and schedules the disconnect work for the IPA/BAM
 * teardown.
 * @gr: the function's data port (only its pointer is logged before the
 *      NULL check — it is not dereferenced early).
 */
void bam_data_disconnect(struct data_port *gr, enum function_type func,
		u8 dev_port_num)
{
	struct bam_data_port *port;
	struct bam_data_ch_info *d;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int port_num;

	port_num = u_bam_data_func_to_port(func, dev_port_num);
	if (port_num < 0) {
		pr_err("invalid bam2bam portno#%d\n", port_num);
		return;
	}

	pr_debug("dev:%pK port number:%d\n", gr, port_num);

	if (!gr) {
		pr_err("data port is null\n");
		return;
	}

	port = bam2bam_data_ports[port_num];

	if (!port) {
		pr_err("port %u is NULL", port_num);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);

	d = &port->data_ch;

	/* Already disconnected due to suspend with remote wake disabled */
	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	/*
	 * Suspend with remote wakeup enabled. Increment usage
	 * count when disconnect happens in suspended state.
	 * Corresponding decrement happens in the end of this
	 * function if IPA handshake is already done or it is done
	 * in disconnect work after finishing IPA handshake.
	 * In case of RNDIS, if connect_w by rndis_flow_control is not triggered
	 * yet then don't perform pm_runtime_get as suspend_w would have bailed
	 * w/o runtime_get.
	 * And restrict check to only RNDIS to handle cases where connect_w is
	 * already scheduled but execution is pending which must be rare though.
	 */
	if (port->last_event == U_BAM_DATA_SUSPEND_E &&
		(d->func_type != USB_FUNC_RNDIS || port->is_ipa_connected))
		usb_gadget_autopm_get_noresume(port->gadget);

	if (port->port_usb) {
		port->port_usb->ipa_consumer_ep = -1;
		port->port_usb->ipa_producer_ep = -1;

		if (port->port_usb->in && port->port_usb->in->driver_data) {

			/*
			 * Disable endpoints.
			 * Unlocking is needed since disabling the eps might
			 * stop active transfers and therefore the request
			 * complete function will be called, where we try
			 * to obtain the spinlock as well.
			 */
			spin_unlock_irqrestore(&port->port_lock, flags);
			usb_ep_disable(port->port_usb->out);
			if (d->rx_req) {
				usb_ep_free_request(port->port_usb->out,
						d->rx_req);
				d->rx_req = NULL;
			}

			usb_ep_disable(port->port_usb->in);
			if (d->tx_req) {
				usb_ep_free_request(port->port_usb->in,
						d->tx_req);
				d->tx_req = NULL;
			}
			spin_lock_irqsave(&port->port_lock, flags);

			/* Only for SYS2BAM mode related UL workaround */
			if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {

				pr_debug("SKBs_RX_Q: freed:%d\n",
						d->rx_skb_q.qlen);
				while ((skb = __skb_dequeue(&d->rx_skb_q)))
					dev_kfree_skb_any(skb);

				bam2bam_free_rx_skb_idle_list(port);
				pr_debug("SKBs: allocated:%d freed:%d\n",
						d->total_skb, d->freed_skb);
				pr_debug("rx_reqs: allocated:%d freed:%d\n",
						d->alloc_rx_reqs,
						d->freed_rx_reqs);

				/* reset all skb/reqs related statistics */
				d->total_skb = 0;
				d->freed_skb = 0;
				d->freed_rx_reqs = 0;
				d->alloc_rx_reqs = 0;
			}

			/*
			 * Set endless flag to false as USB Endpoint
			 * is already disabled.
			 */
			if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->in->endless = false;

			if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
				port->port_usb->out->endless = false;

			port->port_usb->in->driver_data = NULL;
			port->port_usb->out->driver_data = NULL;

			port->port_usb = NULL;
		}
	}

	port->last_event = U_BAM_DATA_DISCONNECT_E;
	/* Disable usb irq for CI gadget. It will be enabled in
	 * usb_bam_disconnect_pipe() after disconnecting all pipes
	 * and USB BAM reset is done.
	 */
	if (!gadget_is_dwc3(port->gadget))
		msm_usb_irq_disable(true);

	queue_work(bam_data_wq, &port->disconnect_w);

	spin_unlock_irqrestore(&port->port_lock, flags);
}
+
+/*
+ * bam_data_connect - set up the BAM2BAM-IPA data path on cable connect.
+ * Enables the IN/OUT endpoints, allocates the endless TX/RX requests and
+ * (for RNDIS) defers the IPA connect work until the host enables flow
+ * control. Returns 0 on success or a negative errno; on failure all
+ * resources acquired so far are released via the unwind labels below.
+ */
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+		u8 dev_port_num, enum function_type func)
+{
+	struct bam_data_port *port;
+	struct bam_data_ch_info *d;
+	int ret, port_num;
+	unsigned long flags;
+	/*
+	 * Fix: keep the connection indices in an int so a negative error
+	 * return from usb_bam_get_connection_idx() is not lost. The old
+	 * u8 locals made the "< 0" check below always false and then
+	 * returned an uninitialized 'ret'.
+	 */
+	int src_connection_idx, dst_connection_idx;
+	enum usb_ctrl usb_bam_type;
+
+	if (!gr) {
+		pr_err("data port is null\n");
+		return -ENODEV;
+	}
+
+	port_num = u_bam_data_func_to_port(func, dev_port_num);
+	if (port_num < 0) {
+		pr_err("invalid portno#%d\n", port_num);
+		return -EINVAL;
+	}
+
+	if (trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+		pr_err("invalid xport#%d\n", trans);
+		return -EINVAL;
+	}
+
+	pr_debug("dev:%pK port#%d\n", gr, port_num);
+
+	usb_bam_type = usb_bam_get_bam_type(gr->cdev->gadget->name);
+
+	src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+			dev_port_num);
+	dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+			dev_port_num);
+	if (src_connection_idx < 0 || dst_connection_idx < 0) {
+		pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
+		/* Fix: was "return ret" with 'ret' uninitialized */
+		return -ENODEV;
+	}
+
+	port = bam2bam_data_ports[port_num];
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	port->port_usb = gr;
+	port->gadget = gr->cdev->gadget;
+	d = &port->data_ch;
+	d->src_connection_idx = src_connection_idx;
+	d->dst_connection_idx = dst_connection_idx;
+	d->usb_bam_type = usb_bam_type;
+
+	d->trans = trans;
+	d->func_type = func;
+	d->rx_buffer_size = (gr->rx_buffer_size ? gr->rx_buffer_size :
+					bam_mux_rx_req_size);
+
+	if (usb_bam_type == HSIC_CTRL) {
+		d->ipa_params.src_client = IPA_CLIENT_HSIC1_PROD;
+		d->ipa_params.dst_client = IPA_CLIENT_HSIC1_CONS;
+	} else {
+		d->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+		d->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+	}
+
+	pr_debug("%s(): rx_buffer_size:%d\n", __func__, d->rx_buffer_size);
+	d->ipa_params.src_pipe = &(d->src_pipe_idx);
+	d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+	d->ipa_params.src_idx = src_connection_idx;
+	d->ipa_params.dst_idx = dst_connection_idx;
+	d->rx_flow_control_disable = 0;
+	d->rx_flow_control_enable = 0;
+	d->rx_flow_control_triggered = 0;
+
+	/*
+	 * Query pipe type using IPA src/dst index with
+	 * usbbam driver. It is being set either as
+	 * BAM2BAM or SYS2BAM.
+	 */
+	if (usb_bam_get_pipe_type(usb_bam_type, d->ipa_params.src_idx,
+			&d->src_pipe_type) ||
+			usb_bam_get_pipe_type(usb_bam_type,
+			d->ipa_params.dst_idx, &d->dst_pipe_type)) {
+		pr_err("usb_bam_get_pipe_type() failed\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/*
+	 * For BAM2BAM pipes the UDC must not generate Xfer complete /
+	 * Xfer not ready interrupts on this endpoint, so mark it
+	 * "endless" before the endpoint is enabled.
+	 */
+	if (d->dst_pipe_type == USB_BAM_PIPE_BAM2BAM)
+		port->port_usb->in->endless = true;
+
+	if (d->src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+		port->port_usb->out->endless = true;
+
+	ret = usb_ep_enable(gr->in);
+	if (ret) {
+		pr_err("usb_ep_enable failed eptype:IN ep:%pK", gr->in);
+		goto exit;
+	}
+
+	gr->in->driver_data = port;
+
+	ret = usb_ep_enable(gr->out);
+	if (ret) {
+		pr_err("usb_ep_enable failed eptype:OUT ep:%pK", gr->out);
+		goto disable_in_ep;
+	}
+
+	gr->out->driver_data = port;
+
+	if (d->src_pipe_type == USB_BAM_PIPE_SYS2BAM) {
+
+		/* UL workaround requirements */
+		skb_queue_head_init(&d->rx_skb_q);
+		skb_queue_head_init(&d->rx_skb_idle);
+		INIT_LIST_HEAD(&d->rx_idle);
+
+		ret = bam_data_sys2bam_alloc_req(port, false);
+		if (ret) {
+			pr_err("%s: sys2bam_alloc_req failed(%d)",
+					__func__, ret);
+			goto disable_out_ep;
+		}
+	}
+
+	d->rx_req = usb_ep_alloc_request(port->port_usb->out,
+			GFP_ATOMIC);
+	if (!d->rx_req) {
+		pr_err("%s: failed to allocate rx_req\n", __func__);
+		/* Fix: 'ret' still held 0 from usb_ep_enable() here */
+		ret = -ENOMEM;
+		goto bam_data_free;
+	}
+	d->rx_req->context = port;
+	d->rx_req->complete = bam_data_endless_rx_complete;
+	d->rx_req->length = 0;
+	d->rx_req->no_interrupt = 1;
+
+	d->tx_req = usb_ep_alloc_request(port->port_usb->in,
+			GFP_ATOMIC);
+	if (!d->tx_req) {
+		pr_err("%s: failed to allocate tx_req\n", __func__);
+		/* Fix: 'ret' still held 0 from usb_ep_enable() here */
+		ret = -ENOMEM;
+		goto ep_out_req_free;
+	}
+
+	d->tx_req->context = port;
+	d->tx_req->complete = bam_data_endless_tx_complete;
+	d->tx_req->length = 0;
+	d->tx_req->no_interrupt = 1;
+
+	port->last_event = U_BAM_DATA_CONNECT_E;
+
+	/* Wait for host to enable flow_control */
+	if (d->func_type == USB_FUNC_RNDIS) {
+		ret = 0;
+		goto exit;
+	}
+
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work (due to cable disconnect)
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+
+	queue_work(bam_data_wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return 0;
+
+ep_out_req_free:
+	usb_ep_free_request(port->port_usb->out, d->rx_req);
+bam_data_free:
+	bam_data_free_reqs(port);
+disable_out_ep:
+	gr->out->driver_data = 0;
+	usb_ep_disable(gr->out);
+disable_in_ep:
+	gr->in->driver_data = 0;
+	usb_ep_disable(gr->in);
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * bam_data_setup - allocate the BAM2BAM data port(s) for one function
+ * type and, on first use, create the shared workqueue that runs all
+ * u_bam_data work items. Returns 0 on success or a negative errno.
+ */
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port)
+{
+	int i;
+	int ret;
+
+	pr_debug("requested %d BAM2BAM ports", no_bam2bam_port);
+
+	if (!no_bam2bam_port || no_bam2bam_port > PORTS_PER_FUNC ||
+		func >= USB_NUM_FUNCS) {
+		pr_err("Invalid num of ports count:%d or function type:%d\n",
+			no_bam2bam_port, func);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < no_bam2bam_port; i++) {
+		n_bam2bam_data_ports++;
+		ret = bam2bam_data_port_alloc(u_bam_data_func_to_port(func, i));
+		if (ret) {
+			n_bam2bam_data_ports--;
+			pr_err("Failed to alloc port:%d\n", i);
+			goto free_bam_ports;
+		}
+	}
+
+	pr_debug("n_bam2bam_data_ports:%d\n", n_bam2bam_data_ports);
+
+	/* Workqueue is shared across all function types; create it once */
+	if (bam_data_wq) {
+		pr_debug("bam_data is already setup.");
+		return 0;
+	}
+
+	bam_data_wq = alloc_workqueue("k_bam_data",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!bam_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_bam_ports;
+	}
+
+	return 0;
+
+free_bam_ports:
+	/*
+	 * NOTE(review): this unwinds ALL ports allocated so far — including
+	 * ports created by earlier bam_data_setup() calls for other function
+	 * types — and destroys the shared workqueue from inside the loop
+	 * body. Confirm this whole-driver teardown is intended for a
+	 * partial-failure of a single function's setup.
+	 */
+	for (i = 0; i < n_bam2bam_data_ports; i++) {
+		kfree(bam2bam_data_ports[i]);
+		bam2bam_data_ports[i] = NULL;
+		if (bam_data_wq) {
+			destroy_workqueue(bam_data_wq);
+			bam_data_wq = NULL;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * bam_data_wake_cb - wake callback registered with the USB BAM driver
+ * while suspended; invoked when the IPA peer has data to send. Issues a
+ * function remote wakeup (Super-Speed, function suspended) or a plain
+ * gadget remote wakeup. Returns 0 or a negative errno from the wakeup.
+ */
+static int bam_data_wake_cb(void *param)
+{
+	int ret;
+	struct bam_data_port *port = (struct bam_data_port *)param;
+	struct data_port *d_port = port->port_usb;
+	struct usb_gadget *gadget;
+	struct usb_function *func;
+
+	pr_debug("%s: woken up by peer\n", __func__);
+
+	if (!d_port) {
+		pr_err("FAILED: d_port == NULL");
+		return -ENODEV;
+	}
+
+	if (!d_port->cdev) {
+		pr_err("FAILED: d_port->cdev == NULL");
+		return -ENODEV;
+	}
+
+	gadget = d_port->cdev->gadget;
+	if (!gadget) {
+		pr_err("FAILED: d_port->cdev->gadget == NULL");
+		return -ENODEV;
+	}
+
+	func = d_port->func;
+
+	/*
+	 * NOTE(review): 'func' is dereferenced below without a NULL check,
+	 * unlike the other pointers above — confirm d_port->func is always
+	 * populated before the wake callback can fire.
+	 */
+
+	/*
+	 * In Super-Speed mode, remote wakeup is not allowed for suspended
+	 * functions which have been disallowed by the host to issue Function
+	 * Remote Wakeup.
+	 * Note - We deviate here from the USB 3.0 spec and allow
+	 * non-suspended functions to issue remote-wakeup even if they were not
+	 * allowed to do so by the host. This is done in order to support non
+	 * fully USB 3.0 compatible hosts.
+	 */
+	if ((gadget->speed == USB_SPEED_SUPER) && (func->func_is_suspended))
+		ret = usb_func_wakeup(func);
+	else
+		ret = usb_gadget_wakeup(gadget);
+
+	if ((ret == -EBUSY) || (ret == -EAGAIN))
+		pr_debug("Remote wakeup is delayed due to LPM exit.\n");
+	else if (ret)
+		pr_err("Failed to wake up the USB core. ret=%d.\n", ret);
+
+	return ret;
+}
+
+/*
+ * bam_data_start - start callback invoked by the USB BAM driver when a
+ * suspended pipe must resume moving data. Restarts the RX path (BAM2BAM
+ * endless transfer, or SYS2BAM workaround read + write work) or the TX
+ * path, reconfiguring the data FIFO after LPM on DWC3 if needed.
+ */
+static void bam_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct bam_data_port *port = param;
+	struct data_port *d_port = port->port_usb;
+	struct bam_data_ch_info *d = &port->data_ch;
+	struct usb_gadget *gadget;
+
+	if (!d_port || !d_port->cdev || !d_port->cdev->gadget) {
+		pr_err("%s:d_port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+	/*
+	 * NOTE(review): last_event is read here without port_lock held —
+	 * confirm the caller serializes against state changes or that this
+	 * race is benign.
+	 */
+	if (port->last_event != U_BAM_DATA_RESUME_E) {
+		pr_err("%s: Port state changed since resume. Bail out.\n",
+			__func__);
+		return;
+	}
+
+	gadget = d_port->cdev->gadget;
+
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+			bam_data_start_endless_rx(port);
+		else {
+			bam_data_start_rx(port);
+			queue_work(bam_data_wq, &d->write_tobam_w);
+		}
+	} else {
+		/* DWC3 may need the DBM data FIFO reprogrammed after LPM */
+		if (gadget_is_dwc3(gadget) &&
+		    msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_data_fifo(d->usb_bam_type,
+				d->dst_connection_idx,
+				port->port_usb->in, d->dst_pipe_type);
+		}
+		bam_data_start_endless_tx(port);
+	}
+
+}
+
+/*
+ * bam_data_stop - stop callback invoked by the USB BAM driver when a
+ * pipe must stop moving data (e.g. on suspend). Stops the endless
+ * transfer for the requested direction.
+ */
+static void bam_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct bam_data_port *port = param;
+
+	if (dir != USB_TO_PEER_PERIPHERAL) {
+		bam_data_stop_endless_tx(port);
+		return;
+	}
+
+	/*
+	 * Only handling BAM2BAM, as there is no equivalent to
+	 * bam_data_stop_endless_rx() for the SYS2BAM use case
+	 */
+	if (port->data_ch.src_pipe_type == USB_BAM_PIPE_BAM2BAM)
+		bam_data_stop_endless_rx(port);
+}
+
+/*
+ * bam_data_suspend - handle USB bus suspend for one data port. With
+ * remote wakeup enabled the heavy lifting is deferred to suspend_w;
+ * without it the whole data path is torn down via bam_data_disconnect()
+ * after backing up the endpoint descriptors for later restore in
+ * bam_data_resume().
+ */
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled)
+{
+	struct bam_data_port *port;
+	unsigned long flags;
+	int port_num;
+
+	port_num = u_bam_data_func_to_port(func, dev_port_num);
+	if (port_num < 0) {
+		pr_err("invalid bam2bam portno#%d\n", port_num);
+		return;
+	}
+
+	pr_debug("%s: suspended port %d\n", __func__, port_num);
+
+	port = bam2bam_data_ports[port_num];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	/* suspend with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/*
+		 * When remote wakeup is disabled, IPA BAM is disconnected
+		 * because it cannot send new data until the USB bus is resumed.
+		 * Endpoint descriptors info is saved before it gets reset by
+		 * the BAM disconnect API. This lets us restore this info when
+		 * the USB bus is resumed.
+		 */
+		port_usb->in_ep_desc_backup = port_usb->in->desc;
+		port_usb->out_ep_desc_backup = port_usb->out->desc;
+
+		pr_debug("in_ep_desc_backup = %pK, out_ep_desc_backup = %pK",
+			port_usb->in_ep_desc_backup,
+			port_usb->out_ep_desc_backup);
+
+		bam_data_disconnect(port_usb, func, dev_port_num);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->last_event = U_BAM_DATA_SUSPEND_E;
+	queue_work(bam_data_wq, &port->suspend_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * bam_data_resume - handle USB bus resume for one data port, mirroring
+ * bam_data_suspend(): with remote wakeup the resume work is queued;
+ * without it the endpoint descriptors are restored and the full data
+ * path is rebuilt via bam_data_connect().
+ */
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+	enum function_type func, bool remote_wakeup_enabled)
+{
+	struct bam_data_port *port;
+	unsigned long flags;
+	int port_num;
+
+	port_num = u_bam_data_func_to_port(func, dev_port_num);
+	if (port_num < 0) {
+		pr_err("invalid bam2bam portno#%d\n", port_num);
+		return;
+	}
+
+	pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+	port = bam2bam_data_ports[port_num];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	/* resume with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/* Restore endpoint descriptors info. */
+		port_usb->in->desc = port_usb->in_ep_desc_backup;
+		port_usb->out->desc = port_usb->out_ep_desc_backup;
+
+		pr_debug("in_ep_desc_backup = %pK, out_ep_desc_backup = %pK",
+			port_usb->in_ep_desc_backup,
+			port_usb->out_ep_desc_backup);
+
+		/*
+		 * NOTE(review): bam_data_connect() can fail but its return
+		 * value is ignored here — confirm failure is recoverable or
+		 * at least worth logging.
+		 */
+		bam_data_connect(port_usb, port->data_ch.trans,
+			dev_port_num, func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->last_event = U_BAM_DATA_RESUME_E;
+
+	/*
+	 * Increment usage count here to disallow gadget
+	 * parent suspend. This counter will decrement
+	 * after IPA handshake is done in disconnect work
+	 * (due to cable disconnect) or in bam_data_disconnect
+	 * in suspended state.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(bam_data_wq, &port->resume_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Drain all pending connect/disconnect/suspend/resume work items. */
+void bam_data_flush_workqueue(void)
+{
+	pr_debug("%s(): Flushing workqueue\n", __func__);
+	flush_workqueue(bam_data_wq);
+}
+
+/*
+ * bam2bam_data_suspend_work - deferred half of bus suspend (remote
+ * wakeup enabled case). Registers the BAM wake/start/stop callbacks,
+ * performs the IPA suspend handshake, then drops the runtime-PM usage
+ * count taken on connect so the gadget parent may enter LPM.
+ */
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, suspend_w);
+	struct bam_data_ch_info *d;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("%s: suspend work started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+
+	/* In case of RNDIS, host enables flow_control invoking connect_w. If it
+	 * is delayed then we may end up having suspend_w run before connect_w.
+	 * In this scenario, connect_w may or may not at all start if cable gets
+	 * disconnected or if host changes configuration e.g. RNDIS --> MBIM
+	 * For these cases don't do runtime_put as there was no _get yet, and
+	 * detect this condition on disconnect to not do extra pm_runtime_get
+	 * for SUSPEND --> DISCONNECT scenario.
+	 */
+	if (!port->is_ipa_connected) {
+		pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	if ((port->last_event == U_BAM_DATA_DISCONNECT_E) ||
+	    (port->last_event == U_BAM_DATA_RESUME_E)) {
+		pr_debug("%s: Port is about to disconnect/resume. Bail out.\n",
+			__func__);
+		goto exit;
+	}
+
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					bam_data_wake_cb, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	usb_bam_register_start_stop_cbs(d->usb_bam_type, d->dst_connection_idx,
+					bam_data_start, bam_data_stop,
+					port);
+
+	/*
+	 * release lock here because bam_data_start() or
+	 * bam_data_stop() called from usb_bam_suspend()
+	 * re-acquires port lock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_bam_suspend(d->usb_bam_type, &d->ipa_params);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+exit:
+	/*
+	 * Decrement usage count after IPA handshake is done
+	 * to allow gadget parent to go to lpm. This counter was
+	 * incremented upon cable connect.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * bam2bam_data_resume_work - deferred half of bus resume (remote wakeup
+ * enabled case). Unregisters the BAM wake callback, re-initializes the
+ * DWC3 DBM endpoints whose requests were dequeued during suspend, and
+ * performs the IPA resume handshake.
+ */
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, resume_w);
+	struct bam_data_ch_info *d;
+	struct data_port *d_port;
+	struct usb_gadget *gadget;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		pr_err("port->port_usb is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev) {
+		pr_err("!port->port_usb->cdev is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev->gadget) {
+		pr_err("!port->port_usb->cdev->gadget is NULL");
+		goto exit;
+	}
+
+	d = &port->data_ch;
+	d_port = port->port_usb;
+	gadget = d_port->cdev->gadget;
+
+	pr_debug("%s: resume work started\n", __func__);
+
+	if (port->last_event == U_BAM_DATA_DISCONNECT_E) {
+		pr_debug("%s: Port is about to disconnect. Bail out.\n",
+			__func__);
+		goto exit;
+	}
+
+	ret = usb_bam_register_wake_cb(d->usb_bam_type, d->dst_connection_idx,
+					NULL, NULL);
+	if (ret) {
+		pr_err("%s(): Failed to un-register BAM wake callback.\n",
+			__func__);
+		goto exit;
+	}
+
+	/*
+	 * If usb_req was dequeued as part of bus suspend then
+	 * corresponding DBM IN and OUT EPs should also be reset.
+	 * There is a possbility that usb_bam may not have dequeued the
+	 * request in case of quick back to back usb bus suspend resume.
+	 */
+	if (gadget_is_dwc3(gadget) &&
+		msm_dwc3_reset_ep_after_lpm(gadget)) {
+		if (d->tx_req_dequeued) {
+			configure_usb_data_fifo(d->usb_bam_type,
+				d->dst_connection_idx,
+				port->port_usb->in, d->dst_pipe_type);
+			/* Drop lock: the DBM reset may sleep/re-enter */
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			msm_dwc3_reset_dbm_ep(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+		}
+		if (d->rx_req_dequeued) {
+			configure_usb_data_fifo(d->usb_bam_type,
+				d->src_connection_idx,
+				port->port_usb->out, d->src_pipe_type);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			msm_dwc3_reset_dbm_ep(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+		}
+	}
+	d->tx_req_dequeued = false;
+	d->rx_req_dequeued = false;
+	usb_bam_resume(d->usb_bam_type, &d->ipa_params);
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Record the RNDIS downlink max aggregated transfer size (bytes). */
+void u_bam_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+	if (max_transfer_size == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.dl_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/*
+ * Record the RNDIS uplink max packets-per-transfer and derive whether
+ * UL aggregation should be enabled (only meaningful for more than one
+ * packet per transfer).
+ */
+void u_bam_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+	if (max_packets_number == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.ul_max_packets_number = max_packets_number;
+	rndis_data.ul_aggregation_enable = (max_packets_number > 1);
+
+	pr_debug("%s(): ul_aggregation enable:%d\n", __func__,
+			rndis_data.ul_aggregation_enable);
+	pr_debug("%s(): ul_max_packets_number:%d\n", __func__,
+			max_packets_number);
+}
+
+/* Record the RNDIS uplink max aggregated transfer size (bytes). */
+void u_bam_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+	if (max_transfer_size == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data.ul_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
diff --git a/drivers/usb/gadget/function/u_bam_data.h b/drivers/usb/gadget/function/u_bam_data.h
new file mode 100644
index 000000000000..e3acbd0c56a0
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_data.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include "usb_gadget_xport.h"
+
+/*
+ * Function drivers whose data path is routed through u_bam_data; used
+ * with u_bam_data_func_to_port() to index the per-function port slots.
+ */
+enum function_type {
+	USB_FUNC_ECM,
+	USB_FUNC_MBIM,
+	USB_FUNC_RNDIS,
+	USB_NUM_FUNCS,
+};
+
+#define PORTS_PER_FUNC 1
+#define BAM2BAM_DATA_N_PORTS (USB_NUM_FUNCS * PORTS_PER_FUNC)
+
+/*
+ * Glue object a function driver hands to u_bam_data for one data
+ * channel: the composite device/function it belongs to, the IN/OUT
+ * endpoints, the negotiated IPA endpoint indices, and backups of the
+ * endpoint descriptors taken across suspend with remote wakeup
+ * disabled (see bam_data_suspend()/bam_data_resume()).
+ */
+struct data_port {
+	struct usb_composite_dev	*cdev;
+	struct usb_function		*func;
+	struct usb_ep			*in;
+	int				rx_buffer_size;
+	struct usb_ep			*out;
+	int				ipa_consumer_ep;
+	int				ipa_producer_ep;
+	const struct usb_endpoint_descriptor	*in_ep_desc_backup;
+	const struct usb_endpoint_descriptor	*out_ep_desc_backup;
+};
+
+void bam_data_disconnect(struct data_port *gr, enum function_type func,
+ u8 dev_port_num);
+
+int bam_data_connect(struct data_port *gr, enum transport_type trans,
+ u8 dev_port_num, enum function_type func);
+
+int bam_data_setup(enum function_type func, unsigned int no_bam2bam_port);
+
+void bam_data_flush_workqueue(void);
+
+void bam_data_suspend(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_resume(struct data_port *port_usb, u8 dev_port_num,
+ enum function_type func, bool remote_wakeup_enabled);
+
+void bam_data_flow_control_enable(bool enable);
+
+void u_bam_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void u_bam_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void u_bam_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void u_bam_data_start_rndis_ipa(void);
+
+void u_bam_data_stop_rndis_ipa(void);
+
+void bam_data_start_rx_tx(u8 port_num);
+
+int u_bam_data_func_to_port(enum function_type func, u8 func_port);
+#endif /* __U_BAM_DATA_H */
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
new file mode 100644
index 000000000000..013c54da0d0a
--- /dev/null
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+
+#include "u_rmnet.h"
+#include "f_qdss.h"
+
+#define RMNET_CTRL_QTI_NAME "rmnet_ctrl"
+#define DPL_CTRL_QTI_NAME "dpl_ctrl"
+/*
+ * Use the size of the gadget's QTI control name. Currently the RMNET and
+ * DPL gadgets use QTI as their control transport; the RMNET ctrl name is
+ * the longer of the two, so it sizes QTI_CTRL_NAME_LEN.
+ */
+#define QTI_CTRL_NAME_LEN (sizeof(RMNET_CTRL_QTI_NAME)+2)
+
+/*
+ * Per-port state for one QTI control misc device (/dev/rmnet_ctrl or
+ * /dev/dpl_ctrl): the bound gadget instance, the queue of pending
+ * host->modem control packets read by user space, open/read/write/ioctl
+ * single-owner guards, cable/line state flags, IPA pipe indices and
+ * debug counters. 'lock' protects port_usb and cpkt_req_q.
+ */
+struct qti_ctrl_port {
+	void *port_usb;
+	char name[QTI_CTRL_NAME_LEN];
+	struct miscdevice ctrl_device;
+
+	bool is_open;
+	int index;
+	unsigned intf;
+	int ipa_prod_idx;
+	int ipa_cons_idx;
+	enum peripheral_ep_type ep_type;
+
+	atomic_t connected;
+	atomic_t line_state;
+
+	atomic_t open_excl;
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t ioctl_excl;
+
+	wait_queue_head_t read_wq;
+
+	struct list_head cpkt_req_q;
+
+	spinlock_t lock;
+	enum qti_port_type	port_type;
+	unsigned host_to_modem;
+	unsigned copied_to_modem;
+	unsigned copied_from_modem;
+	unsigned modem_to_host;
+	unsigned drp_cpkt_cnt;
+};
+static struct qti_ctrl_port *ctrl_port[QTI_NUM_PORTS];
+
+/*
+ * Try to take single-owner access; returns 0 on success, -EBUSY if the
+ * resource is already claimed. Release with qti_ctrl_unlock().
+ */
+static inline int qti_ctrl_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Release single-owner access taken via qti_ctrl_lock(). */
+static inline void qti_ctrl_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*
+ * Allocate a control packet and its payload buffer of 'len' bytes.
+ * Returns the packet or ERR_PTR(-ENOMEM); free with free_rmnet_ctrl_pkt().
+ */
+static struct rmnet_ctrl_pkt *alloc_rmnet_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt = kzalloc(sizeof(*pkt), flags);
+
+	if (!pkt)
+		goto fail_pkt;
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf)
+		goto fail_buf;
+
+	pkt->len = len;
+	return pkt;
+
+fail_buf:
+	kfree(pkt);
+fail_pkt:
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Free a packet allocated by alloc_rmnet_ctrl_pkt() (buffer first). */
+static void free_rmnet_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+
+/*
+ * qti_ctrl_queue_notify - queue a zero-length packet on the port's read
+ * queue and wake any reader; a 0-byte read tells user space (QTI) that
+ * the connection/line state changed. No-op if the device node is closed.
+ */
+static void qti_ctrl_queue_notify(struct qti_ctrl_port *port)
+{
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt = NULL;
+
+	pr_debug("%s: Queue empty packet for QTI for port%d",
+		 __func__, port->index);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!port->is_open) {
+		pr_err("%s: rmnet ctrl file handler %pK is not open",
+		       __func__, port);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate reset function pkt\n", __func__);
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+}
+
+/*
+ * gqti_ctrl_send_cpkt_tomodem - copy a host->modem control packet onto
+ * the given QTI port's read queue so user space can fetch it via
+ * qti_ctrl_read(). Packets arriving while the device node is closed are
+ * counted and dropped (still returns 0). Returns a negative errno on
+ * invalid input or allocation failure.
+ */
+static int gqti_ctrl_send_cpkt_tomodem(enum qti_port_type qport,
+		void *buf, size_t len)
+{
+	unsigned long flags;
+	struct qti_ctrl_port *port;
+	struct rmnet_ctrl_pkt *cpkt;
+
+	if (len > MAX_QTI_PKT_SIZE) {
+		pr_err("given pkt size too big:%zu > max_pkt_size:%d\n",
+				len, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return -ENODEV;
+	}
+	port = ctrl_port[qport];
+	/* Fix: guard the slot like gqti_ctrl_connect()/disconnect() do */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(cpkt->buf, buf, len);
+	cpkt->len = len;
+
+	pr_debug("%s: port type:%d: Add to cpkt_req_q packet with len = %zu\n",
+			__func__, port->port_type, len);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* drop cpkt if port is not open */
+	if (!port->is_open) {
+		pr_debug("rmnet file handler %pK(index=%d) is not open",
+		       port, port->index);
+		port->drp_cpkt_cnt++;
+		spin_unlock_irqrestore(&port->lock, flags);
+		free_rmnet_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	list_add_tail(&cpkt->list, &port->cpkt_req_q);
+	port->host_to_modem++;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* wakeup read thread */
+	pr_debug("%s: Wake up read queue", __func__);
+	wake_up(&port->read_wq);
+
+	return 0;
+}
+
+/*
+ * gqti_ctrl_notify_modem - record a line-state change for the given QTI
+ * port and queue a zero-length packet so user space learns of it.
+ */
+static void
+gqti_ctrl_notify_modem(void *gptr, enum qti_port_type qport, int val)
+{
+	struct qti_ctrl_port *port;
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+	port = ctrl_port[qport];
+	/* Fix: avoid NULL dereference if the slot was never allocated */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+	atomic_set(&port->line_state, val);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+/*
+ * gqti_ctrl_connect - bind a gadget instance to a QTI control port on
+ * cable connect/alt-set: installs the rmnet encap/notify callbacks,
+ * resets the statistics counters, marks the port connected and invokes
+ * the gadget's connect callback. Returns 0 or a negative errno.
+ */
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf)
+{
+	struct qti_ctrl_port *port;
+	struct grmnet *g_rmnet = NULL;
+	unsigned long flags;
+
+	pr_debug("%s: port type:%d gadget:%pK\n", __func__, qport, gr);
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return -ENODEV;
+	}
+
+	port = ctrl_port[qport];
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->port_type = qport;
+	port->ep_type = DATA_EP_TYPE_HSUSB;
+	port->intf = intf;
+
+	if (gr) {
+		port->port_usb = gr;
+		g_rmnet = (struct grmnet *)gr;
+		g_rmnet->send_encap_cmd = gqti_ctrl_send_cpkt_tomodem;
+		g_rmnet->notify_modem = gqti_ctrl_notify_modem;
+		/* DPL has no separate line-state signalling; assert it now */
+		if (port->port_type == QTI_PORT_DPL)
+			atomic_set(&port->line_state, 1);
+	} else {
+		spin_unlock_irqrestore(&port->lock, flags);
+		pr_err("%s(): Port is used without port type.\n", __func__);
+		return -ENODEV;
+	}
+
+	port->host_to_modem = 0;
+	port->copied_to_modem = 0;
+	port->copied_from_modem = 0;
+	port->modem_to_host = 0;
+	port->drp_cpkt_cnt = 0;
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	atomic_set(&port->connected, 1);
+	wake_up(&port->read_wq);
+	if (port->port_usb && g_rmnet && g_rmnet->connect)
+		g_rmnet->connect(port->port_usb);
+
+	return 0;
+}
+
+/*
+ * gqti_ctrl_disconnect - unbind the gadget instance from a QTI control
+ * port on cable disconnect: clears connected/line state, resets the IPA
+ * pipe indices and callbacks, drops all queued control packets, and
+ * notifies user space via a zero-length packet.
+ */
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport)
+{
+	struct qti_ctrl_port *port;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+	struct grmnet *g_rmnet = NULL;
+
+	pr_debug("%s: gadget:%pK\n", __func__, gr);
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+
+	port = ctrl_port[qport];
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+
+	atomic_set(&port->connected, 0);
+	atomic_set(&port->line_state, 0);
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* reset ipa eps to -1 */
+	port->ipa_prod_idx = -1;
+	port->ipa_cons_idx = -1;
+	port->port_usb = NULL;
+
+	if (gr) {
+		g_rmnet = (struct grmnet *)gr;
+		g_rmnet->send_encap_cmd = NULL;
+		g_rmnet->notify_modem = NULL;
+	} else {
+		pr_err("%s(): unrecognized gadget type(%d).\n",
+					__func__, port->port_type);
+	}
+
+	/* Flush every pending host->modem control packet */
+	while (!list_empty(&port->cpkt_req_q)) {
+		cpkt = list_first_entry(&port->cpkt_req_q,
+					struct rmnet_ctrl_pkt, list);
+
+		list_del(&cpkt->list);
+		free_rmnet_ctrl_pkt(cpkt);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* send 0 len pkt to qti to notify state change */
+	qti_ctrl_queue_notify(port);
+}
+
+/*
+ * gqti_ctrl_update_ipa_pipes - record the IPA producer/consumer pipe
+ * indices negotiated for this port (reported to user space via ioctl).
+ */
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons)
+{
+	struct qti_ctrl_port *port;
+
+	if (qport >= QTI_NUM_PORTS) {
+		pr_err("%s: Invalid QTI port %d\n", __func__, qport);
+		return;
+	}
+
+	port = ctrl_port[qport];
+	/* Fix: avoid NULL dereference if the slot was never allocated */
+	if (!port) {
+		pr_err("%s: gadget port is null\n", __func__);
+		return;
+	}
+	port->ipa_prod_idx = ipa_prod;
+	port->ipa_cons_idx = ipa_cons;
+
+}
+
+
+/*
+ * qti_ctrl_open - misc-device open handler; enforces a single opener
+ * via open_excl and marks the port open so incoming control packets are
+ * queued rather than dropped.
+ */
+static int qti_ctrl_open(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+
+	pr_debug("Open rmnet_ctrl_qti device file name=%s(index=%d)\n",
+		port->name, port->index);
+
+	if (qti_ctrl_lock(&port->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = true;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+/*
+ * qti_ctrl_release - misc-device release handler; marks the port closed
+ * (new packets will be dropped) and allows the next open.
+ */
+static int qti_ctrl_release(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+
+	pr_debug("Close rmnet control file");
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->is_open = false;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	qti_ctrl_unlock(&port->open_excl);
+
+	return 0;
+}
+
+/*
+ * qti_ctrl_read - blocking read of one queued control packet. Waits
+ * until the queue is non-empty, then copies the packet to user space.
+ * Returns the packet length, 0 for a state-change notification packet,
+ * or a negative errno.
+ */
+static ssize_t
+qti_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	struct rmnet_ctrl_pkt *cpkt = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("%s: Enter(%zu)\n", __func__, count);
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_err("Buffer size is too big %zu, should be at most %d\n",
+			count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->read_excl)) {
+		pr_err("Previous reading is not finished yet\n");
+		return -EBUSY;
+	}
+
+	/* block until a new packet is available */
+	do {
+		spin_lock_irqsave(&port->lock, flags);
+		/* break exits the loop with port->lock still held */
+		if (!list_empty(&port->cpkt_req_q))
+			break;
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		pr_debug("%s: Requests list is empty. Wait.\n", __func__);
+		ret = wait_event_interruptible(port->read_wq,
+			!list_empty(&port->cpkt_req_q));
+		if (ret < 0) {
+			pr_debug("Waiting failed\n");
+			qti_ctrl_unlock(&port->read_excl);
+			return -ERESTARTSYS;
+		}
+	} while (1);
+
+	cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
+							list);
+	list_del(&cpkt->list);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/*
+	 * NOTE(review): the packet is already unlinked here, so returning
+	 * an error drops it permanently; -ENOMEM for a too-small user
+	 * buffer is also an unusual errno — confirm this is intended.
+	 */
+	if (cpkt->len > count) {
+		pr_err("cpkt size too big:%d > buf size:%zu\n",
+				cpkt->len, count);
+		qti_ctrl_unlock(&port->read_excl);
+		free_rmnet_ctrl_pkt(cpkt);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
+
+
+	qti_ctrl_unlock(&port->read_excl);
+
+	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
+	if (ret) {
+		pr_err("copy_to_user failed: err %d\n", ret);
+		ret = -EFAULT;
+	} else {
+		pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
+		ret = cpkt->len;
+		port->copied_to_modem++;
+	}
+
+	free_rmnet_ctrl_pkt(cpkt);
+
+	return ret;
+}
+
+/*
+ * qti_ctrl_write - forward a modem->host control packet from user space
+ * to the bound gadget instance via its send_cpkt_response callback.
+ * Returns the byte count on success or a negative errno.
+ */
+static ssize_t
+qti_ctrl_write(struct file *fp, const char __user *buf, size_t count,
+		loff_t *pos)
+{
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	void *kbuf;
+	unsigned long flags;
+	int ret = 0;
+	struct grmnet *g_rmnet = NULL;
+
+	pr_debug("%s: Enter(%zu) port_index=%d", __func__, count, port->index);
+
+	if (!count) {
+		pr_debug("zero length ctrl pkt\n");
+		return -EINVAL;
+	}
+
+	if (count > MAX_QTI_PKT_SIZE) {
+		pr_debug("given pkt size too big:%zu > max_pkt_size:%d\n",
+				count, MAX_QTI_PKT_SIZE);
+		return -EINVAL;
+	}
+
+	if (qti_ctrl_lock(&port->write_excl)) {
+		pr_err("Previous writing not finished yet\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&port->connected)) {
+		pr_debug("USB cable not connected\n");
+		qti_ctrl_unlock(&port->write_excl);
+		return -EPIPE;
+	}
+
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf) {
+		qti_ctrl_unlock(&port->write_excl);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed err:%d\n", ret);
+		kfree(kbuf);
+		qti_ctrl_unlock(&port->write_excl);
+		return -EFAULT;
+	}
+	port->copied_from_modem++;
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (port->port_usb) {
+		if (port->port_type == QTI_PORT_RMNET) {
+			g_rmnet = (struct grmnet *)port->port_usb;
+		} else {
+			spin_unlock_irqrestore(&port->lock, flags);
+			pr_err("%s(): unrecognized gadget type(%d).\n",
+					__func__, port->port_type);
+			/*
+			 * Fix: this early return used to leak kbuf and
+			 * leave write_excl held forever, wedging the node.
+			 */
+			kfree(kbuf);
+			qti_ctrl_unlock(&port->write_excl);
+			return -EINVAL;
+		}
+
+		if (g_rmnet && g_rmnet->send_cpkt_response) {
+			ret = g_rmnet->send_cpkt_response(port->port_usb,
+							kbuf, count);
+			if (ret)
+				pr_err("%d failed to send ctrl packet.\n", ret);
+			port->modem_to_host++;
+		} else {
+			pr_err("send_cpkt_response callback is NULL\n");
+			ret = -EINVAL;
+		}
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+	kfree(kbuf);
+	qti_ctrl_unlock(&port->write_excl);
+
+	pr_debug("%s: Exit(%zu)", __func__, count);
+	return (ret) ? ret : count;
+}
+
+/*
+ * qti_ctrl_ioctl() - control-plane ioctls for the QTI port.
+ *
+ * Supported commands:
+ *   QTI_CTRL_MODEM_OFFLINE/ONLINE - propagate modem state to the function
+ *                                   driver via its disconnect/connect hooks
+ *                                   (not supported on the DPL port).
+ *   QTI_CTRL_GET_LINE_STATE       - copy the cached line_state int to user.
+ *   QTI_CTRL_EP_LOOKUP            - report peripheral ep and IPA pipe info
+ *                                   once the pipes have been updated.
+ *
+ * Serialized by the ioctl_excl pseudo-mutex.  Returns 0 or negative errno.
+ */
+static long qti_ctrl_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	/* port is derived from the miscdevice; container_of never yields NULL,
+	 * so the "port &&" checks below are purely defensive. */
+	struct qti_ctrl_port *port = container_of(fp->private_data,
+						struct qti_ctrl_port,
+						ctrl_device);
+	struct grmnet *gr = NULL;
+	struct ep_info info;
+	int val, ret = 0;
+
+	pr_debug("%s: Received command %d for port type:%d\n",
+				__func__, cmd, port->port_type);
+
+	if (qti_ctrl_lock(&port->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case QTI_CTRL_MODEM_OFFLINE:
+		if (port && (port->port_type == QTI_PORT_DPL)) {
+			pr_err("%s(): Modem Offline not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->disconnect)
+			gr->disconnect(gr);
+		break;
+	case QTI_CTRL_MODEM_ONLINE:
+		if (port && (port->port_type == QTI_PORT_DPL)) {
+			pr_err("%s(): Modem Online not handled\n", __func__);
+			goto exit_ioctl;
+		}
+
+		if (port && port->port_usb)
+			gr = port->port_usb;
+
+		if (gr && gr->connect)
+			gr->connect(gr);
+		break;
+	case QTI_CTRL_GET_LINE_STATE:
+		val = atomic_read(&port->line_state);
+		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		pr_debug("%s: Sent line_state: %d for port type:%d\n", __func__,
+			atomic_read(&port->line_state), port->port_type);
+		break;
+	case QTI_CTRL_EP_LOOKUP:
+
+		pr_debug("%s(): EP_LOOKUP for port type:%d\n", __func__,
+			port->port_type);
+		val = atomic_read(&port->connected);
+		if (!val) {
+			pr_err_ratelimited("EP_LOOKUP failed: not connected\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		/* -1/-1 means gqti_ctrl_update_ipa_pipes() has not run yet */
+		if (port->ipa_prod_idx == -1 && port->ipa_cons_idx == -1) {
+			pr_err_ratelimited("EP_LOOKUP ipa pipes not updated\n");
+			ret = -EAGAIN;
+			break;
+		}
+
+		info.ph_ep_info.ep_type = port->ep_type;
+		info.ph_ep_info.peripheral_iface_id = port->intf;
+		info.ipa_ep_pair.cons_pipe_num = port->ipa_cons_idx;
+		info.ipa_ep_pair.prod_pipe_num = port->ipa_prod_idx;
+
+		pr_debug("%s(): port type:%d ep_type:%d intf:%d\n",
+			__func__, port->port_type, info.ph_ep_info.ep_type,
+			info.ph_ep_info.peripheral_iface_id);
+
+		pr_debug("%s(): ipa_cons_idx:%d ipa_prod_idx:%d\n",
+			__func__, info.ipa_ep_pair.cons_pipe_num,
+			info.ipa_ep_pair.prod_pipe_num);
+
+		ret = copy_to_user((void __user *)arg, &info,
+			sizeof(info));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		break;
+	default:
+		pr_err("wrong parameter");
+		ret = -EINVAL;
+	}
+
+exit_ioctl:
+	qti_ctrl_unlock(&port->ioctl_excl);
+
+	return ret;
+}
+
+/*
+ * qti_ctrl_poll() - report readability of the control device.
+ * Readable (POLLIN | POLLRDNORM) whenever the request queue is non-empty.
+ */
+static unsigned int qti_ctrl_poll(struct file *file, poll_table *wait)
+{
+	struct qti_ctrl_port *port = container_of(file->private_data,
+						  struct qti_ctrl_port,
+						  ctrl_device);
+	unsigned int events = 0;
+	unsigned long flags;
+
+	if (!port) {
+		pr_err("%s on a NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	poll_wait(file, &port->read_wq, wait);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (!list_empty(&port->cpkt_req_q)) {
+		pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
+		events = POLLIN | POLLRDNORM;
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return events;
+}
+
+/*
+ * qti_ctrl_read_stats() - seq_file show callback for the debugfs "status"
+ * file: dumps the packet counters of every allocated control port.
+ */
+static int qti_ctrl_read_stats(struct seq_file *s, void *unused)
+{
+	unsigned long flags;
+	int idx;
+
+	for (idx = 0; idx < QTI_NUM_PORTS; idx++) {
+		struct qti_ctrl_port *port = ctrl_port[idx];
+
+		if (!port)
+			continue;
+
+		/* counters are updated under port->lock; snapshot them here */
+		spin_lock_irqsave(&port->lock, flags);
+		seq_printf(s, "\n#PORT:%d port: %pK\n", idx, port);
+		seq_printf(s, "name: %s\n", port->name);
+		seq_printf(s, "host_to_modem: %d\n", port->host_to_modem);
+		seq_printf(s, "copied_to_modem: %d\n", port->copied_to_modem);
+		seq_printf(s, "copied_from_modem: %d\n",
+				port->copied_from_modem);
+		seq_printf(s, "modem_to_host: %d\n", port->modem_to_host);
+		seq_printf(s, "cpkt_drp_cnt: %d\n", port->drp_cpkt_cnt);
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind qti_ctrl_read_stats() to this file via seq_file */
+static int qti_ctrl_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qti_ctrl_read_stats, inode->i_private);
+}
+
+/*
+ * qti_ctrl_reset_stats() - debugfs write handler: zero the counters of
+ * every allocated control port.  Always consumes the whole write.
+ */
+static ssize_t qti_ctrl_reset_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	/* Fix: dropped the unused seq_file local and the dead initial
+	 * port = s->private read (it was overwritten before first use). */
+	struct qti_ctrl_port *port;
+	int i;
+	unsigned long flags;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		port = ctrl_port[i];
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->lock, flags);
+		port->host_to_modem = 0;
+		port->copied_to_modem = 0;
+		port->copied_from_modem = 0;
+		port->modem_to_host = 0;
+		port->drp_cpkt_cnt = 0;
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+	return count;
+}
+
+/* debugfs "status" file operations (seq_file based, see single_open()) */
+const struct file_operations qti_ctrl_stats_ops = {
+	.open = qti_ctrl_stats_open,
+	.read = seq_read,
+	.write = qti_ctrl_reset_stats,
+	/* Fix: single_open() allocates a seq_file per open; without
+	 * single_release() every close leaked that allocation. */
+	.release = single_release,
+	.llseek = seq_lseek,
+};
+
+/* root of the "usb_qti" debugfs tree; NULL when debugfs setup failed */
+static struct dentry *qti_ctrl_dent;
+
+/*
+ * qti_ctrl_debugfs_init() - create debugfs dir "usb_qti" with a read-only
+ * "status" file.  Best effort: on any failure all partial state is removed
+ * and qti_ctrl_dent is left NULL so qti_ctrl_debugfs_exit() is a no-op.
+ */
+static void qti_ctrl_debugfs_init(void)
+{
+	struct dentry *qti_ctrl_dfile;
+
+	qti_ctrl_dent = debugfs_create_dir("usb_qti", 0);
+	/* Fix: also handle a NULL return, not just ERR_PTR */
+	if (IS_ERR_OR_NULL(qti_ctrl_dent)) {
+		qti_ctrl_dent = NULL;
+		return;
+	}
+
+	qti_ctrl_dfile =
+		debugfs_create_file("status", 0444, qti_ctrl_dent, 0,
+				&qti_ctrl_stats_ops);
+	if (IS_ERR_OR_NULL(qti_ctrl_dfile)) {
+		debugfs_remove(qti_ctrl_dent);
+		/* Fix: clear the pointer so qti_ctrl_debugfs_exit() does not
+		 * debugfs_remove_recursive() a freed dentry. */
+		qti_ctrl_dent = NULL;
+	}
+}
+
+/* Tear down the whole "usb_qti" debugfs tree (no-op if dent is NULL) */
+static void qti_ctrl_debugfs_exit(void)
+{
+	debugfs_remove_recursive(qti_ctrl_dent);
+}
+
+/* file operations for rmnet device /dev/rmnet_ctrl */
+static const struct file_operations qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	.write = qti_ctrl_write,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	/* ioctl args are layout-compatible between 32/64-bit userspace,
+	 * so the native handler doubles as the compat one */
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+/* file operations for DPL device /dev/dpl_ctrl */
+static const struct file_operations dpl_qti_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = qti_ctrl_open,
+	.release = qti_ctrl_release,
+	.read = qti_ctrl_read,
+	/* DPL is read-only from userspace: writes get -EINVAL via NULL op */
+	.write = NULL,
+	.unlocked_ioctl = qti_ctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = qti_ctrl_ioctl,
+#endif
+	.poll = qti_ctrl_poll,
+};
+
+/**
+ * gqti_ctrl_init() - allocate and register all QTI control ports
+ *
+ * Creates QTI_NUM_PORTS misc char devices (rmnet_ctrl, dpl_ctrl, ...) and
+ * the debugfs interface.  On failure every port allocated or registered so
+ * far is torn down before returning.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int gqti_ctrl_init(void)
+{
+	int ret = 0, i, sz = QTI_CTRL_NAME_LEN;
+	struct qti_ctrl_port *port = NULL;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		port = kzalloc(sizeof(struct qti_ctrl_port), GFP_KERNEL);
+		if (!port) {
+			ret = -ENOMEM;
+			goto fail_init;
+		}
+
+		INIT_LIST_HEAD(&port->cpkt_req_q);
+		spin_lock_init(&port->lock);
+
+		atomic_set(&port->open_excl, 0);
+		atomic_set(&port->read_excl, 0);
+		atomic_set(&port->write_excl, 0);
+		atomic_set(&port->ioctl_excl, 0);
+		atomic_set(&port->connected, 0);
+		atomic_set(&port->line_state, 0);
+
+		init_waitqueue_head(&port->read_wq);
+
+		ctrl_port[i] = port;
+		port->index = i;
+		/* -1 marks "IPA pipes not yet reported" for EP_LOOKUP */
+		port->ipa_prod_idx = -1;
+		port->ipa_cons_idx = -1;
+
+		/* name[] is zero-filled by kzalloc, so strlcat copies fresh */
+		if (i == QTI_PORT_RMNET)
+			strlcat(port->name, RMNET_CTRL_QTI_NAME, sz);
+		else if (i == QTI_PORT_DPL)
+			strlcat(port->name, DPL_CTRL_QTI_NAME, sz);
+		else
+			snprintf(port->name, sz, "%s%d",
+				RMNET_CTRL_QTI_NAME, i);
+
+		port->ctrl_device.name = port->name;
+		if (i == QTI_PORT_DPL)
+			port->ctrl_device.fops = &dpl_qti_ctrl_fops;
+		else
+			port->ctrl_device.fops = &qti_ctrl_fops;
+		port->ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+		ret = misc_register(&port->ctrl_device);
+		if (ret) {
+			pr_err("rmnet control driver failed to register");
+			/*
+			 * Fix: the port at index i was allocated but never
+			 * registered; the unwind loop below only covers
+			 * indices < i, so it leaked and left a stale pointer
+			 * in ctrl_port[i].  Release it here.
+			 */
+			kfree(port);
+			ctrl_port[i] = NULL;
+			goto fail_init;
+		}
+	}
+	qti_ctrl_debugfs_init();
+	return ret;
+
+fail_init:
+	/* unwind fully-registered ports only */
+	for (i--; i >= 0; i--) {
+		misc_deregister(&ctrl_port[i]->ctrl_device);
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+	return ret;
+}
+
+/**
+ * gqti_ctrl_cleanup() - deregister and free every QTI control port
+ *
+ * Safe even if ctrl_port[] is only partially populated: NULL slots are
+ * skipped instead of dereferenced.
+ */
+void gqti_ctrl_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < QTI_NUM_PORTS; i++) {
+		/* Fix: guard against slots that were never set up */
+		if (!ctrl_port[i])
+			continue;
+		misc_deregister(&ctrl_port[i]->ctrl_device);
+		kfree(ctrl_port[i]);
+		ctrl_port[i] = NULL;
+	}
+	qti_ctrl_debugfs_exit();
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 000000000000..d9a0b0e0b271
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,1401 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "u_data_ipa.h"
+#include "u_rmnet.h"
+
+/*
+ * Per-function USB IPA data channel state; one instance per accelerated
+ * function, stored in ipa_data_ports[].
+ */
+struct ipa_data_ch_info {
+	struct usb_request *rx_req;	/* endless OUT (device RX) request */
+	struct usb_request *tx_req;	/* endless IN (device TX) request */
+	unsigned long flags;
+	unsigned id;
+	enum ipa_func_type func_type;	/* RNDIS / RMNET / DPL */
+	bool is_connected;	/* BAM-IPA pipes connected (under port_lock) */
+	unsigned port_num;
+	spinlock_t port_lock;	/* protects port_usb, *_req, is_connected */
+
+	/* connect/disconnect/suspend/resume run on ipa_data_wq because the
+	 * BAM/IPA handshake APIs may sleep */
+	struct work_struct connect_w;
+	struct work_struct disconnect_w;
+	struct work_struct suspend_w;
+	struct work_struct resume_w;
+
+	u32 src_pipe_idx;	/* USB BAM pipe index, OUT direction */
+	u32 dst_pipe_idx;	/* USB BAM pipe index, IN direction */
+	u8 src_connection_idx;	/* USB BAM connection index (producer) */
+	u8 dst_connection_idx;	/* USB BAM connection index (consumer) */
+	enum usb_ctrl usb_bam_type;
+	struct gadget_ipa_port *port_usb; /* NULL while cable disconnected */
+	struct usb_gadget *gadget;
+	/* set once rndis_ipa_pipe_connect_notify() succeeded; consumed by
+	 * the disconnect path via atomic_xchg */
+	atomic_t pipe_connect_notified;
+	struct usb_bam_connect_ipa_params ipa_params;
+};
+
+/* Aggregation/transfer parameters handed to the RNDIS IPA driver when the
+ * pipes come up (see rndis_ipa_pipe_connect_notify() in connect_work). */
+struct rndis_data_ch_info {
+	/* this provides downlink (device->host i.e host) side configuration*/
+	u32 dl_max_transfer_size;
+	/* this provides uplink (host->device i.e device) side configuration */
+	u32 ul_max_transfer_size;
+	u32 ul_max_packets_number;
+	bool ul_aggregation_enable;
+	u32 prod_clnt_hdl;	/* IPA producer client handle */
+	u32 cons_clnt_hdl;	/* IPA consumer client handle */
+	void *priv;		/* opaque cookie from rndis_qc_get_ipa_priv() */
+};
+
+static struct workqueue_struct *ipa_data_wq;
+struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happen
+ * @req: USB endless request
+ *
+ * This completion is being called when endless (TX/RX) transfer is terminated
+ * i.e. disconnect or suspend case.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	/* Nothing to recycle: endless requests only complete on dequeue
+	 * (disconnect/suspend), so just trace the terminating status. */
+	pr_debug("%s: endless complete for(%s) with status: %d\n",
+				__func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - configure USB endpoint and
+ * queue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX)
+ *
+ * It is being used to queue endless TX/RX request with UDC driver.
+ * It does set required DBM endpoint configuration before queueing endless
+ * TX/RX request.
+ */
+/*
+ * ipa_data_start_endless_xfer() - queue the endless request on the chosen
+ * endpoint (IN when @in is true, OUT otherwise).  Validates port state
+ * under port_lock, then queues outside the lock.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	struct usb_ep *ep;
+	struct usb_request *req;
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+	ep = in ? port->port_usb->in : port->port_usb->out;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+		req = port->tx_req;
+	} else {
+		pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+		req = port->rx_req;
+	}
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		if (in)
+			pr_err("error enqueuing endless TX_REQ, %d\n", status);
+		else
+			pr_err("error enqueuing endless RX_REQ, %d\n", status);
+	}
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX
+ *
+ * It is being used to terminate and dequeue endless TX/RX request with UDC
+ * driver.
+ */
+/*
+ * ipa_data_stop_endless_xfer() - dequeue the endless request from the
+ * chosen endpoint (IN when @in is true, OUT otherwise).  Validates port
+ * state under port_lock, then dequeues outside the lock.
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	struct usb_ep *ep;
+	struct usb_request *req;
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+	ep = in ? port->port_usb->in : port->port_usb->out;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+		req = port->tx_req;
+	} else {
+		pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+		req = port->rx_req;
+	}
+
+	status = usb_ep_dequeue(ep, req);
+	if (status) {
+		if (in)
+			pr_err("error dequeueing endless TX_REQ, %d\n", status);
+		else
+			pr_err("error dequeueing endless RX_REQ, %d\n", status);
+	}
+}
+
+/*
+ * Called when IPA triggers us that the network interface is up.
+ * Starts the transfers on bulk endpoints.
+ * (optimization reasons, the pipes and bam with IPA are already connected)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_ep *epin, *epout;
+
+	pr_debug("%s: Triggered: starting tx, rx", __func__);
+	/* queue in & out requests */
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL, can't start tx, rx", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* all preconditions (eps enabled, requests allocated, pipes
+	 * connected) are checked under port_lock before queueing */
+	if (!port->port_usb || !port->port_usb->in ||
+		!port->port_usb->out) {
+		pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	if (!port->rx_req || !port->tx_req) {
+		pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+			port->rx_req, port->tx_req);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	if (!port->is_connected) {
+		pr_debug("%s: pipes are disconnected", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	epout = port->port_usb->out;
+	epin = port->port_usb->in;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* queue in & out requests; usb_ep_queue may not be called under
+	 * the spinlock, hence the snapshot above */
+	pr_debug("%s: Starting rx", __func__);
+	if (epout)
+		ipa_data_start_endless_xfer(port, false);
+
+	pr_debug("%s: Starting tx", __func__);
+	if (epin)
+		ipa_data_start_endless_xfer(port, true);
+}
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * It is being schedule from ipa_data_disconnect() API when particular function
+ * is being disable due to USB disconnect or USB composition switch is being
+ * trigger . This API performs disconnect of USB BAM pipe, IPA BAM pipe and also
+ * initiate USB IPA BAM pipe handshake for USB Disconnect sequence. Due to
+ * handshake operation and involvement of SPS related APIs, this functioality
+ * can't be used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								disconnect_w);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* is_connected doubles as a "disconnect already done" latch */
+	if (!port->is_connected) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_debug("Already disconnected.\n");
+		return;
+	}
+	port->is_connected = false;
+	pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+			port->ipa_params.prod_clnt_hdl,
+			port->ipa_params.cons_clnt_hdl);
+
+	/* the BAM/IPA calls below may sleep; drop the spinlock first */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+	if (ret)
+		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		/*
+		 * NOTE: it is required to disconnect USB and IPA BAM related
+		 * pipes before calling IPA tethered function related disconnect
+		 * API. IPA tethered function related disconnect API delete
+		 * depedency graph with IPA RM which would results into IPA not
+		 * pulling data although there is pending data on USB BAM
+		 * producer pipe.
+		 */
+		/* atomic_xchg makes the notify strictly once-per-connect */
+		if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+			void *priv;
+
+			priv = rndis_qc_get_ipa_priv();
+			rndis_ipa_pipe_disconnect_notify(priv);
+		}
+	}
+
+	/* free FIFOs only for handles that were actually connected */
+	if (port->ipa_params.prod_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	if (port->ipa_params.cons_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+	/*
+	 * Decrement usage count which was incremented
+	 * upon cable connect or cable disconnect in suspended state.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @port_num: Port num used by function driver which need to be disable
+ *
+ * It is being called from atomic context from gadget driver when particular
+ * function is being disable due to USB cable disconnect or USB composition
+ * switch is being trigger. This API performs restoring USB endpoint operation
+ * and disable USB endpoint used for accelerated path.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		gadget = port->port_usb->cdev->gadget;
+		/* -1 signals "no IPA pipes" to EP_LOOKUP consumers */
+		port->port_usb->ipa_consumer_ep = -1;
+		port->port_usb->ipa_producer_ep = -1;
+
+		if (port->port_usb->in) {
+			/*
+			 * Disable endpoints.
+			 * Unlocking is needed since disabling the eps might
+			 * stop active transfers and therefore the request
+			 * complete function will be called, where we try
+			 * to obtain the spinlock as well.
+			 */
+			msm_ep_unconfig(port->port_usb->in);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+			/* tx_req may have been freed while unlocked, so
+			 * re-check before freeing */
+			if (port->tx_req) {
+				usb_ep_free_request(port->port_usb->in,
+							port->tx_req);
+				port->tx_req = NULL;
+			}
+			port->port_usb->in->endless = false;
+		}
+
+		if (port->port_usb->out) {
+			/* same unlock/relock dance as the IN ep above */
+			msm_ep_unconfig(port->port_usb->out);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->rx_req) {
+				usb_ep_free_request(port->port_usb->out,
+							port->rx_req);
+				port->rx_req = NULL;
+			}
+			port->port_usb->out->endless = false;
+		}
+
+		port->port_usb = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	/* sleeping BAM/IPA teardown continues in ipa_data_disconnect_work() */
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures USB BAM data fifo using fetched pipe configuraion
+ * using provided index value. This function needs to used before starting
+ * endless transfer.
+ */
+/*
+ * configure_fifo() - program the USB BAM data FIFO for @ep using the pipe
+ * configuration fetched for connection index @idx.  Must run before the
+ * endless transfer is started.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+	u32 pipe_idx;
+	struct sps_mem_buffer data_fifo = {0};
+
+	get_bam2bam_connection_info(bam_type, idx, &pipe_idx,
+						NULL, &data_fifo, NULL);
+	msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+						pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * It is being schedule from ipa_data_connect() API when particular function
+ * which is using USB IPA accelerated path. This API performs allocating request
+ * for USB endpoint (tx/rx) for endless purpose, configure USB endpoint to be
+ * used in accelerated path, connect of USB BAM pipe, IPA BAM pipe and also
+ * initiate USB IPA BAM pipe handshake for connect sequence.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								connect_w);
+	struct gadget_ipa_port *gport;
+	struct usb_gadget *gadget = NULL;
+	struct teth_bridge_connect_params connect_params;
+	struct teth_bridge_init_params teth_bridge_params;
+	u32 sps_params;
+	int ret;
+	unsigned long flags;
+	/* tracks whether the BAM-IPA pipes still need teardown on error */
+	bool is_ipa_disconnected = true;
+
+	pr_debug("%s: Connect workqueue started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		/* drop the PM reference the scheduler of this work took */
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	gport = port->port_usb;
+	if (gport && gport->cdev)
+		gadget = gport->cdev->gadget;
+
+	if (!gadget) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s: gport is NULL.\n", __func__);
+		return;
+	}
+
+	/*
+	 * check if connect_w got called two times during RNDIS resume as
+	 * explicit flow control is called to start data transfers after
+	 * ipa_data_connect()
+	 */
+	if (port->is_connected) {
+		pr_debug("IPA connect is already done & Transfers started\n");
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		return;
+	}
+
+	gport->ipa_consumer_ep = -1;
+	gport->ipa_producer_ep = -1;
+
+	port->is_connected = true;
+
+	/* update IPA Parameteres here. */
+	port->ipa_params.usb_connection_speed = gadget->speed;
+	port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+	port->ipa_params.skip_ep_cfg = true;
+	port->ipa_params.keep_ipa_awake = true;
+	port->ipa_params.cons_clnt_hdl = -1;
+	port->ipa_params.prod_clnt_hdl = -1;
+
+	if (gport->out) {
+		/* usb_bam_alloc_fifos() may sleep: drop the lock, then
+		 * re-validate port_usb/rx_req which a concurrent disconnect
+		 * may have cleared while we were unlocked */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->rx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
+				__func__);
+			goto out;
+		}
+
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+				| MSM_PRODUCER | port->src_pipe_idx;
+		port->rx_req->length = 32*1024;
+		port->rx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->src_connection_idx,
+				port->port_usb->out);
+		ret = msm_ep_config(gport->out, port->rx_req);
+		if (ret) {
+			pr_err("msm_ep_config() failed for OUT EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto out;
+		}
+	}
+
+	if (gport->in) {
+		/* same drop-lock/re-validate pattern for the IN side */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->tx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
+				__func__);
+			goto unconfig_msm_ep_out;
+		}
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+					port->dst_pipe_idx;
+		port->tx_req->length = 32*1024;
+		port->tx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->dst_connection_idx, gport->in);
+		ret = msm_ep_config(gport->in, port->tx_req);
+		if (ret) {
+			pr_err("msm_ep_config() failed for IN EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_out;
+		}
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		teth_bridge_params.client = port->ipa_params.src_client;
+		ret = teth_bridge_init(&teth_bridge_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_init() failed\n", __func__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_in;
+		}
+	}
+
+	/*
+	 * Perform below operations for Tx from Device (OUT transfer)
+	 * 1. Connect with pipe of USB BAM with IPA BAM pipe
+	 * 2. Update USB Endpoint related information using SPS Param.
+	 * 3. Configure USB Endpoint/DBM for the same.
+	 * 4. Override USB ep queue functionality for endless transfer.
+	 */
+	if (gport->out) {
+		pr_debug("configure bam ipa connect for USB OUT\n");
+		port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		/* usb_bam_connect_ipa() sleeps; unlock around it */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+	}
+
+	if (gport->in) {
+		pr_debug("configure bam ipa connect for USB IN\n");
+		port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		/* DPL is IN-only and uses its dedicated IPA consumer */
+		if (port->func_type == USB_IPA_FUNC_DPL)
+			port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		rndis_data->prod_clnt_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		rndis_data->cons_clnt_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		rndis_data->priv = port->ipa_params.priv;
+
+		pr_debug("ul_max_transfer_size:%d\n",
+				rndis_data->ul_max_transfer_size);
+		pr_debug("ul_max_packets_number:%d\n",
+				rndis_data->ul_max_packets_number);
+		pr_debug("dl_max_transfer_size:%d\n",
+				rndis_data->dl_max_transfer_size);
+
+		ret = rndis_ipa_pipe_connect_notify(
+				rndis_data->cons_clnt_hdl,
+				rndis_data->prod_clnt_hdl,
+				rndis_data->ul_max_transfer_size,
+				rndis_data->ul_max_packets_number,
+				rndis_data->dl_max_transfer_size,
+				rndis_data->priv);
+		if (ret) {
+			pr_err("%s: failed to connect IPA: err:%d\n",
+				__func__, ret);
+			/* NOTE(review): this path returns without running the
+			 * disconnect/free ladder below and without the autopm
+			 * put at "out:" — confirm whether that is intentional
+			 * or leaks BAM FIFOs and a PM reference. */
+			return;
+		}
+		atomic_set(&port->pipe_connect_notified, 1);
+	} else if (port->func_type == USB_IPA_FUNC_RMNET ||
+			port->func_type == USB_IPA_FUNC_DPL) {
+		/* For RmNet and DPL need to update_ipa_pipes to qti */
+		enum qti_port_type qti_port_type = port->func_type ==
+			USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
+		gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
+			gport->ipa_producer_ep, gport->ipa_consumer_ep);
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		connect_params.ipa_usb_pipe_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		connect_params.usb_ipa_pipe_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		connect_params.tethering_mode =
+			TETH_TETHERING_MODE_RMNET;
+		connect_params.client_type =
+			port->ipa_params.src_client;
+		ret = teth_bridge_connect(&connect_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_connect() failed\n", __func__);
+			goto disconnect_usb_bam_ipa_out;
+		}
+	}
+
+	pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+			gport->ipa_producer_ep,
+			gport->ipa_consumer_ep);
+
+	pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+			port->src_connection_idx, port->dst_connection_idx);
+
+	/* Don't queue the transfers yet, only after network stack is up */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		pr_debug("%s: Not starting now, waiting for network notify",
+			__func__);
+		return;
+	}
+
+	/* on success the PM reference stays held while connected; it is
+	 * released in ipa_data_disconnect_work() */
+	if (gport->out)
+		ipa_data_start_endless_xfer(port, false);
+	if (gport->in)
+		ipa_data_start_endless_xfer(port, true);
+
+	pr_debug("Connect workqueue done (port %pK)", port);
+	return;
+
+	/* error unwind: each label undoes one stage of the setup above */
+disconnect_usb_bam_ipa_out:
+	if (!is_ipa_disconnected) {
+		usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+		is_ipa_disconnected = true;
+	}
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+unconfig_msm_ep_in:
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->in)
+		msm_ep_unconfig(port->port_usb->in);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+unconfig_msm_ep_out:
+	/* despite the label name, this stage frees the IN-side FIFOs that
+	 * were allocated after the OUT ep was configured */
+	if (gport->in)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->out)
+		msm_ep_unconfig(port->port_usb->out);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+	if (gport->out)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->is_connected = false;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_gadget_autopm_put_async(port->gadget);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @func: IPA function type used by the accelerated function
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * It is being called from accelerated function driver (from set_alt()) to
+ * initiate USB BAM IPA connection. This API is enabling accelerated endpoints
+ * and schedule connect_work() which establishes USB IPA BAM communication.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		u8 src_connection_idx, u8 dst_connection_idx)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+			gp, func, src_connection_idx, dst_connection_idx);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid portno#%d\n", func);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	if (!gp) {
+		pr_err("gadget port is null\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	port = ipa_data_ports[func];
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gp;
+	port->gadget = gp->cdev->gadget;
+
+	/* Endless (zero-length, no-interrupt) requests keep the HW pipe fed */
+	if (gp->out) {
+		port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC);
+		if (!port->rx_req) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: failed to allocate rx_req\n", __func__);
+			/* Fix: ret previously stayed 0, reporting success */
+			ret = -ENOMEM;
+			goto err;
+		}
+		port->rx_req->context = port;
+		port->rx_req->complete = ipa_data_endless_complete;
+		port->rx_req->length = 0;
+		port->rx_req->no_interrupt = 1;
+	}
+
+	if (gp->in) {
+		port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC);
+		if (!port->tx_req) {
+			pr_err("%s: failed to allocate tx_req\n", __func__);
+			/* Fix: ret previously stayed 0, reporting success */
+			ret = -ENOMEM;
+			goto free_rx_req;
+		}
+		port->tx_req->context = port;
+		port->tx_req->complete = ipa_data_endless_complete;
+		port->tx_req->length = 0;
+		port->tx_req->no_interrupt = 1;
+	}
+	port->src_connection_idx = src_connection_idx;
+	port->dst_connection_idx = dst_connection_idx;
+	port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+	port->ipa_params.src_pipe = &(port->src_pipe_idx);
+	port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+	port->ipa_params.src_idx = src_connection_idx;
+	port->ipa_params.dst_idx = dst_connection_idx;
+
+	/*
+	 * Disable Xfer complete and Xfer not ready interrupts by
+	 * marking endless flag which is used in UDC driver to enable
+	 * these interrupts. with this set, these interrupts for selected
+	 * endpoints won't be enabled.
+	 */
+	if (port->port_usb->in) {
+		port->port_usb->in->endless = true;
+		ret = usb_ep_enable(port->port_usb->in);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:IN ep:%pK",
+				port->port_usb->in);
+			usb_ep_free_request(port->port_usb->in, port->tx_req);
+			port->tx_req = NULL;
+			port->port_usb->in->endless = false;
+			goto err_usb_in;
+		}
+	}
+
+	if (port->port_usb->out) {
+		port->port_usb->out->endless = true;
+		ret = usb_ep_enable(port->port_usb->out);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:OUT ep:%pK",
+				port->port_usb->out);
+			usb_ep_free_request(port->port_usb->out, port->rx_req);
+			port->rx_req = NULL;
+			port->port_usb->out->endless = false;
+			goto err_usb_out;
+		}
+	}
+
+	/* Wait for host to enable flow_control */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = 0;
+		return ret;
+	}
+
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work (due to cable disconnect)
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+
+	queue_work(ipa_data_wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return ret;
+
+err_usb_out:
+	if (port->port_usb->in) {
+		usb_ep_disable(port->port_usb->in);
+		port->port_usb->in->endless = false;
+	}
+err_usb_in:
+	if (gp->in && port->tx_req) {
+		usb_ep_free_request(gp->in, port->tx_req);
+		port->tx_req = NULL;
+	}
+free_rx_req:
+	if (gp->out && port->rx_req) {
+		usb_ep_free_request(gp->out, port->rx_req);
+		port->rx_req = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+	pr_debug("%s(): failed with error:%d\n", __func__, ret);
+	return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to restart USB endless transfer for USB bus resume.
+ * For USB consumer case, it restarts USB endless RX transfer, whereas
+ * for USB producer case, it resets DBM endpoint and restart USB endless
+ * TX transfer.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+	struct usb_gadget *gadget = NULL;
+
+	/* Called back by the USB BAM core; validate everything we deref. */
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	gadget = port->port_usb->cdev->gadget;
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): start endless RX\n", __func__);
+		ipa_data_start_endless_xfer(port, false);
+	} else {
+		pr_debug("%s(): start endless TX\n", __func__);
+		/*
+		 * After low power mode the controller may need its HW FIFO
+		 * reprogrammed before the IN pipe is restarted (gated by
+		 * msm_dwc3_reset_ep_after_lpm()).
+		 */
+		if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+			configure_fifo(port->usb_bam_type,
+				port->dst_connection_idx, port->port_usb->in);
+		}
+		ipa_data_start_endless_xfer(port, true);
+	}
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to stop endless Tx/Rx transfers. It is being used
+ * for USB bus suspend functionality.
+ */
+/* BAM suspend callback: halt the endless transfer on the requested pipe. */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+		return;
+	}
+
+	if (dir != USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): stop endless TX transfer\n", __func__);
+		ipa_data_stop_endless_xfer(port, true);
+		return;
+	}
+
+	pr_debug("%s(): stop endless RX transfer\n", __func__);
+	ipa_data_stop_endless_xfer(port, false);
+}
+
+/* Drain all pending connect/disconnect/suspend/resume work items. */
+void ipa_data_flush_workqueue(void)
+{
+	pr_debug("%s(): Flushing workqueue\n", __func__);
+	flush_workqueue(ipa_data_wq);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type used by the accelerated function
+ * @remote_wakeup_enabled: true if remote wakeup is enabled on the bus
+ *
+ * It is being used to initiate USB BAM IPA suspend functionality
+ * for USB bus suspend functionality.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+	pr_debug("%s: suspended port %d\n", __func__, func);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	/* suspend with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/*
+		 * When remote wakeup is disabled, IPA BAM is disconnected
+		 * because it cannot send new data until the USB bus is resumed.
+		 * Endpoint descriptors info is saved before it gets reset by
+		 * the BAM disconnect API. This lets us restore this info when
+		 * the USB bus is resumed.
+		 */
+		if (gp->in) {
+			gp->in_ep_desc_backup = gp->in->desc;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+		}
+		if (gp->out) {
+			gp->out_ep_desc_backup = gp->out->desc;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+		}
+		ipa_data_disconnect(gp, func);
+		return;
+	}
+
+	/*
+	 * Remote wakeup is enabled: defer the BAM suspend handshake to the
+	 * workqueue (see bam2bam_data_suspend_work()).
+	 */
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->suspend_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/* Workqueue body for the USB bus-suspend path with remote wakeup enabled. */
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	/*
+	 * Fix: this work is queued on &port->suspend_w (see
+	 * ipa_data_suspend()), so container_of() must use suspend_w;
+	 * using connect_w computed a bogus port pointer.
+	 */
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+						suspend_w);
+	unsigned long flags;
+	int ret;
+
+	pr_debug("%s: suspend started\n", __func__);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* In case of RNDIS, host enables flow_control invoking connect_w. If it
+	 * is delayed then we may end up having suspend_w run before connect_w.
+	 * In this scenario, connect_w may or may not at all start if cable gets
+	 * disconnected or if host changes configuration e.g. RNDIS --> MBIM
+	 * For these cases don't do runtime_put as there was no _get yet, and
+	 * detect this condition on disconnect to not do extra pm_runtime_get
+	 * for SUSPEND --> DISCONNECT scenario.
+	 */
+	if (!port->is_connected) {
+		pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+			port->dst_connection_idx, NULL, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		/* Fix: drop port_lock before bailing out (was leaked here) */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	usb_bam_register_start_stop_cbs(port->usb_bam_type,
+			port->dst_connection_idx, ipa_data_start,
+			ipa_data_stop, port);
+	/*
+	 * release lock here because bam_data_start() or
+	 * bam_data_stop() called from usb_bam_suspend()
+	 * re-acquires port lock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Decrement usage count after IPA handshake is done
+	 * to allow gadget parent to go to lpm. This counter was
+	 * incremented upon cable connect.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type used by the accelerated function
+ * @remote_wakeup_enabled: true if remote wakeup is enabled on the bus
+ *
+ * It is being used to initiate USB resume functionality
+ * for USB bus resume case.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+	u8 src_connection_idx = 0;
+	u8 dst_connection_idx = 0;
+	enum usb_ctrl usb_bam_type;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	gadget = gp->cdev->gadget;
+	/* resume with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/* DPL uses BAM pipe 1; every other function uses pipe 0 */
+		int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
+		usb_bam_type = usb_bam_get_bam_type(gadget->name);
+		/* Restore endpoint descriptors info. */
+		if (gp->in) {
+			gp->in->desc = gp->in_ep_desc_backup;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+			dst_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		if (gp->out) {
+			gp->out->desc = gp->out_ep_desc_backup;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+			src_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		/* Re-establish the full connection torn down in suspend */
+		ipa_data_connect(gp, func,
+				src_connection_idx, dst_connection_idx);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Increment usage count here to disallow gadget
+	 * parent suspend. This counter will decrement
+	 * after IPA handshake is done in disconnect work
+	 * (due to cable disconnect) or in bam_data_disconnect
+	 * in suspended state.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->resume_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Workqueue body for the USB bus-resume path with remote wakeup enabled. */
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	/*
+	 * Fix: this work is queued on &port->resume_w (see
+	 * ipa_data_resume()), so container_of() must use resume_w;
+	 * using connect_w computed a bogus port pointer.
+	 */
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+						resume_w);
+	struct usb_gadget *gadget;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || !port->port_usb->cdev) {
+		pr_err("port->port_usb or cdev is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev->gadget) {
+		pr_err("port->port_usb->cdev->gadget is NULL");
+		goto exit;
+	}
+
+	pr_debug("%s: resume started\n", __func__);
+	/* non-NULL: checked above (redundant re-check removed) */
+	gadget = port->port_usb->cdev->gadget;
+
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+			port->dst_connection_idx, NULL, NULL);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		goto exit;
+	}
+
+	if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+		configure_fifo(port->usb_bam_type, port->src_connection_idx,
+				port->port_usb->out);
+		configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+				port->port_usb->in);
+		/* DBM endpoint reset may sleep; drop the lock around it */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		msm_dwc3_reset_dbm_ep(port->port_usb->in);
+		spin_lock_irqsave(&port->port_lock, flags);
+	}
+	/*
+	 * Fix: resume the BAM pipes unconditionally. Previously this ran
+	 * only inside the LPM-reset branch, leaving the pipes suspended
+	 * whenever no endpoint reset was required.
+	 */
+	usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate IPA USB Port structure
+ * @func: IPA function type whose port is to be allocated
+ *
+ * It is being used by USB function driver to allocate IPA data port
+ * for USB IPA data accelerated path.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+static int ipa_data_port_alloc(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *ch;
+
+	/* Idempotent: reuse the port if a prior call already allocated it. */
+	if (ipa_data_ports[func] != NULL) {
+		pr_debug("port %d already allocated.\n", func);
+		return 0;
+	}
+
+	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+	if (ch == NULL)
+		return -ENOMEM;
+
+	ipa_data_ports[func] = ch;
+	pr_debug("port:%pK with portno:%d allocated\n", ch, func);
+
+	return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @portno: port number to be used by particular USB function
+ * @func_type: USB gadget function type
+ *
+ * It is being used by USB function driver to select which BAM2BAM IPA
+ * port particular USB function wants to use.
+ *
+ */
+void ipa_data_port_select(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port = NULL;
+
+	pr_debug("portno:%d\n", func);
+
+	port = ipa_data_ports[func];
+	port->port_num = func;
+	port->is_connected = false;
+
+	spin_lock_init(&port->port_lock);
+
+	/* Don't re-init a work item that may already be queued */
+	if (!work_pending(&port->connect_w))
+		INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+	if (!work_pending(&port->disconnect_w))
+		INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+	INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+	INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
+	port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+	port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+	port->func_type = func;
+} /* Fix: dropped stray ';' after the closing brace (empty declaration) */
+
+/* Release the per-function port, the RNDIS state and the shared workqueue. */
+void ipa_data_free(enum ipa_func_type func)
+{
+	pr_debug("freeing %d IPA BAM port", func);
+
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+	if (func == USB_IPA_FUNC_RNDIS) {
+		kfree(rndis_data);
+		/*
+		 * Fix: clear the stale pointer so a later free/setup cycle
+		 * cannot double-free or dereference freed memory.
+		 */
+		rndis_data = NULL;
+	}
+	if (ipa_data_wq) {
+		destroy_workqueue(ipa_data_wq);
+		ipa_data_wq = NULL;
+	}
+}
+
+/**
+ * ipa_data_setup() - setup BAM2BAM IPA port
+ * @func: IPA function type whose port is to be set up
+ *
+ * Each USB function who wants to use BAM2BAM IPA port would
+ * be counting number of IPA port to use and initialize those
+ * ports at time of bind_config() in android gadget driver.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+int ipa_data_setup(enum ipa_func_type func)
+{
+	int ret;
+
+	pr_debug("requested %d IPA BAM port", func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("Invalid num of ports count:%d\n", func);
+		return -EINVAL;
+	}
+
+	ret = ipa_data_port_alloc(func);
+	if (ret) {
+		pr_err("Failed to alloc port:%d\n", func);
+		return ret;
+	}
+
+	if (func == USB_IPA_FUNC_RNDIS) {
+		rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+		if (!rndis_data) {
+			pr_err("%s: fail allocate and initialize new instance\n",
+				__func__);
+			/* Fix: ret was still 0 here, reporting success */
+			ret = -ENOMEM;
+			goto free_ipa_ports;
+		}
+	}
+	/* The workqueue is shared by every function; create it only once */
+	if (ipa_data_wq) {
+		pr_debug("ipa_data_wq is already setup.");
+		return 0;
+	}
+
+	ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+			WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!ipa_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_rndis_data;
+	}
+
+	return 0;
+
+free_rndis_data:
+	if (func == USB_IPA_FUNC_RNDIS) {
+		kfree(rndis_data);
+		rndis_data = NULL;	/* avoid a dangling global */
+	}
+free_ipa_ports:
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+
+	return ret;
+}
+
+/* Store the uplink max aggregated transfer size for RNDIS.
+ * NOTE(review): assumes rndis_data was already allocated by
+ * ipa_data_setup(USB_IPA_FUNC_RNDIS); calling earlier would deref
+ * NULL — confirm callers.
+ */
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+	if (!max_transfer_size) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+	rndis_data->ul_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/* Store the downlink max aggregated transfer size for RNDIS. */
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+	if (max_transfer_size == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data->dl_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/* Store the uplink packet-aggregation limit; aggregation only makes
+ * sense when more than one packet may be batched per transfer.
+ */
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+	if (max_packets_number == 0) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data->ul_max_packets_number = max_packets_number;
+	rndis_data->ul_aggregation_enable = (max_packets_number > 1);
+
+	pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+		__func__, rndis_data->ul_aggregation_enable,
+		max_packets_number);
+}
+
+/* Kick off the deferred RNDIS data path: connect_w is skipped at
+ * set_alt() time for RNDIS and is queued here instead (presumably once
+ * the host enables flow control / the network is up — see the "waiting
+ * for network notify" note in ipa_data_connect_work(); confirm caller).
+ */
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	/* Non-zero means the IPA pipes were already brought up. */
+	if (atomic_read(&port->pipe_connect_notified)) {
+		pr_debug("%s: Transfers already started?\n", __func__);
+		return;
+	}
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work due to cable disconnect
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->connect_w);
+}
+
+/* Tear down the RNDIS data path: stop both endless transfers, unconfig
+ * the endpoints and schedule disconnect_w for the IPA-side teardown.
+ */
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	/* Nothing to stop if the IPA pipes were never brought up. */
+	if (!atomic_read(&port->pipe_connect_notified))
+		return;
+
+	rndis_ipa_reset_trigger();
+	ipa_data_stop_endless_xfer(port, true);
+	ipa_data_stop_endless_xfer(port, false);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb) {
+		msm_ep_unconfig(port->port_usb->in);
+		msm_ep_unconfig(port->port_usb->out);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 000000000000..17dccbc4cf16
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,119 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/miscdevice.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
+/* Index of each accelerated function into the driver's port array. */
+enum ipa_func_type {
+	USB_IPA_FUNC_ECM,
+	USB_IPA_FUNC_MBIM,
+	USB_IPA_FUNC_RMNET,
+	USB_IPA_FUNC_RNDIS,
+	USB_IPA_FUNC_DPL,
+	USB_IPA_NUM_FUNCS,	/* count sentinel, not a valid function */
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
+
+/* Per-function view of the IPA-accelerated USB data port. */
+struct gadget_ipa_port {
+	struct usb_composite_dev *cdev;
+	struct usb_function *func;
+	/* NOTE(review): units/owner not visible here — presumably OUT
+	 * request buffer size in bytes; confirm with function drivers.
+	 */
+	int rx_buffer_size;
+	struct usb_ep *in;	/* IN (device-to-host) data endpoint */
+	struct usb_ep *out;	/* OUT (host-to-device) data endpoint */
+	int ipa_consumer_ep;
+	int ipa_producer_ep;
+	/* Descriptors saved across a no-remote-wakeup suspend so they can
+	 * be restored on resume (see ipa_data_suspend()/ipa_data_resume()).
+	 */
+	const struct usb_endpoint_descriptor *in_ep_desc_backup;
+	const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+};
+
+/* Descriptor material a function driver supplies when binding its
+ * data interface at full, high and super speed.
+ */
+struct ipa_function_bind_info {
+	struct usb_string *string_defs;
+	int data_str_idx;	/* index into string_defs for the data interface */
+	struct usb_interface_descriptor *data_desc;
+	/* per-speed endpoint descriptors */
+	struct usb_endpoint_descriptor *fs_in_desc;
+	struct usb_endpoint_descriptor *fs_out_desc;
+	struct usb_endpoint_descriptor *fs_notify_desc;
+	struct usb_endpoint_descriptor *hs_in_desc;
+	struct usb_endpoint_descriptor *hs_out_desc;
+	struct usb_endpoint_descriptor *hs_notify_desc;
+	struct usb_endpoint_descriptor *ss_in_desc;
+	struct usb_endpoint_descriptor *ss_out_desc;
+	struct usb_endpoint_descriptor *ss_notify_desc;
+
+	/* complete per-speed descriptor lists */
+	struct usb_descriptor_header **fs_desc_hdr;
+	struct usb_descriptor_header **hs_desc_hdr;
+	struct usb_descriptor_header **ss_desc_hdr;
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN 40
+
+/* configfs instance state for the QC RNDIS function */
+struct f_rndis_qc_opts {
+	struct usb_function_instance func_inst;
+	struct f_rndis_qc *rndis;
+	u32 vendor_id;
+	const char *manufacturer;
+	struct net_device *net;
+	int refcnt;	/* users currently bound to this instance */
+};
+
+/* configfs instance state for the RMNET function */
+struct f_rmnet_opts {
+	struct usb_function_instance func_inst;
+	struct f_rmnet *dev;
+	int refcnt;
+};
+
+/* Port setup / teardown */
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		u8 src_connection_idx, u8 dst_connection_idx);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+/* Bus suspend / resume hooks */
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+
+/* RNDIS aggregation parameters (stored in the driver's rndis state) */
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+/* RNDIS deferred data-path start/stop */
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+
+/* NOTE(review): presumably implemented by the rndis/qti function
+ * drivers — confirm against f_gsi / f_rndis sources.
+ */
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+		u32 ipa_prod, u32 ipa_cons);
+#endif
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 0a0eeffc9438..34a337888788 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1075,6 +1075,9 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
struct eth_dev *dev;
u8 new_addr[ETH_ALEN];
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
@@ -1087,6 +1090,9 @@ int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
@@ -1097,6 +1103,9 @@ int gether_set_host_addr(struct net_device *net, const char *host_addr)
struct eth_dev *dev;
u8 new_addr[ETH_ALEN];
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
if (get_ether_addr(host_addr, new_addr))
return -EINVAL;
@@ -1109,6 +1118,9 @@ int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return get_ether_addr_str(dev->host_mac, host_addr, len);
}
@@ -1141,6 +1153,9 @@ void gether_set_qmult(struct net_device *net, unsigned qmult)
{
struct eth_dev *dev;
+ if (!net)
+ return;
+
dev = netdev_priv(net);
dev->qmult = qmult;
}
@@ -1150,6 +1165,9 @@ unsigned gether_get_qmult(struct net_device *net)
{
struct eth_dev *dev;
+ if (!net)
+ return -ENODEV;
+
dev = netdev_priv(net);
return dev->qmult;
}
@@ -1157,6 +1175,9 @@ EXPORT_SYMBOL_GPL(gether_get_qmult);
int gether_get_ifname(struct net_device *net, char *name, int len)
{
+ if (!net)
+ return -ENODEV;
+
rtnl_lock();
strlcpy(name, netdev_name(net), len);
rtnl_unlock();
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 4f47289fcf7c..0468459a5c0f 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -35,6 +35,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -48,6 +53,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -70,6 +80,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -83,6 +98,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -105,6 +125,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
unsigned qmult; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
qmult = gether_get_qmult(opts->net); \
mutex_unlock(&opts->lock); \
@@ -118,6 +143,11 @@
u8 val; \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
@@ -144,6 +174,11 @@ out: \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 60139854e0b1..6e6318c94e93 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -176,6 +176,9 @@ struct ffs_data {
struct usb_request *ep0req; /* P: mutex */
struct completion ep0req_completion; /* P: mutex */
+ struct completion epin_completion;
+ struct completion epout_completion;
+
/* reference counter */
atomic_t ref;
/* how many files are opened (EP0 and others) */
diff --git a/drivers/usb/gadget/function/u_qc_ether.c b/drivers/usb/gadget/function/u_qc_ether.c
new file mode 100644
index 000000000000..bacaf52f42d9
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.c
@@ -0,0 +1,454 @@
+/*
+ * u_qc_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used. Each end of the link uses one address. The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of is network link. (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ *
+ * This utilities is based on Ethernet-over-USB link layer utilities and
+ * contains MSM specific implementation.
+ */
+
+#define UETH__VERSION "29-May-2008"
+
/*
 * Per-netdev state for one QC Ethernet-over-USB link (the data side is
 * handled out-of-band; this struct tracks the netdev/USB-function glue).
 */
struct eth_qc_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t lock;
	struct qc_gether *port_usb;	/* bound USB function, NULL when link is down */

	struct net_device *net;		/* the registered "usbX" interface */
	struct usb_gadget *gadget;	/* UDC this link is associated with */

	unsigned header_len;		/* copied from qc_gether on connect */

	bool zlp;			/* copied from qc_gether->is_zlp_ok on connect */
	u8 host_mac[ETH_ALEN];		/* MAC address of the host side of the link */
};
+
+/*-------------------------------------------------------------------------*/
+
/* Per-device logging helpers; messages are prefixed with the netdev name. */
#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt, (d)->net->name, ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev, KERN_DEBUG, fmt, ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev, KERN_ERR, fmt, ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+static int ueth_qc_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ int status = 0;
+
+ /* don't change MTU on "live" link (peer won't know) */
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ status = -EBUSY;
+ else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+ status = -ERANGE;
+ else
+ net->mtu = new_mtu;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return status;
+}
+
+static void eth_qc_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *p)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+
+ strlcpy(p->driver, "g_qc_ether", sizeof(p->driver));
+ strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+ strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+}
+
/* Minimal ethtool support: driver info plus generic carrier state. */
static const struct ethtool_ops qc_ethtool_ops = {
	.get_drvinfo = eth_qc_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
+
/*
 * ndo_start_xmit stub: data on this link does not flow through the
 * netdev queue, so transmit is a no-op that reports success.
 * NOTE(review): the skb is neither sent nor freed here -- presumably
 * the queue is kept stopped so this never sees real traffic; confirm.
 */
static netdev_tx_t eth_qc_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	return NETDEV_TX_OK;
}
+
/*
 * ndo_open: if the carrier is already up, bounce it off/on so userspace
 * receives a fresh RTM_NEWLINK event reflecting the USB cable status,
 * then forward the open to the bound USB function (if any) under the lock.
 */
static int eth_qc_open(struct net_device *net)
{
	struct eth_qc_dev *dev = netdev_priv(net);
	struct qc_gether *link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net)) {
		/* Force the netif to send the RTM_NEWLINK event
		 * that is used to notify on the USB cable status.
		 */
		netif_carrier_off(dev->net);
		netif_carrier_on(dev->net);
		netif_wake_queue(dev->net);
	}

	/* read port_usb under the lock; disconnect may clear it */
	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
+
+static int eth_qc_stop(struct net_device *net)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ struct qc_gether *link = dev->port_usb;
+
+ VDBG(dev, "%s\n", __func__);
+ netif_stop_queue(net);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb && link->close)
+ link->close(link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *qc_dev_addr;
module_param(qc_dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(qc_dev_addr, "QC Device Ethernet Address");

/* this address is invisible to ifconfig; it is the MAC the USB host
 * side of the link will use
 */
static char *qc_host_addr;
module_param(qc_host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(qc_host_addr, "QC Host Ethernet Address");
+
+static int get_qc_ether_addr(const char *str, u8 *dev_addr)
+{
+ if (str) {
+ unsigned i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = hex_to_bin(*str++) << 4;
+ num |= hex_to_bin(*str++);
+ dev_addr[i] = num;
+ }
+ if (is_valid_ether_addr(dev_addr))
+ return 0;
+ }
+ random_ether_addr(dev_addr);
+ return 1;
+}
+
/* netdev callbacks for the QC link; xmit is a stub (data moves out-of-band) */
static const struct net_device_ops eth_qc_netdev_ops = {
	.ndo_open = eth_qc_open,
	.ndo_stop = eth_qc_stop,
	.ndo_start_xmit = eth_qc_start_xmit,
	.ndo_change_mtu = ueth_qc_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

/* shows up as DEVTYPE=gadget in uevents */
static struct device_type qc_gadget_type = {
	.name = "gadget",
};
+
/*
 * Resolve both link-layer addresses from the module parameters, falling
 * back to random addresses when a parameter is absent or malformed.
 */
void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
{
	if (get_qc_ether_addr(qc_dev_addr, dev_mac))
		pr_debug("using random dev_mac ethernet address\n");
	if (get_qc_ether_addr(qc_host_addr, host_mac))
		pr_debug("using random host_mac ethernet address\n");
}
+
/**
 * gether_qc_setup - initialize one ethernet-over-usb link
 * @g: gadget to associated with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Thin wrapper around gether_qc_setup_name() using the default "usb"
 * device name stem.
 *
 * Returns negative errno, or zero on success
 */
int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	return gether_qc_setup_name(g, ethaddr, "usb");
}
+
+/**
+ * gether_qc_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associated with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname)
+{
+ struct eth_qc_dev *dev;
+ struct net_device *net;
+ int status;
+
+ net = alloc_etherdev(sizeof(*dev));
+ if (!net)
+ return -ENOMEM;
+
+ dev = netdev_priv(net);
+ spin_lock_init(&dev->lock);
+
+ /* network device setup */
+ dev->net = net;
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+ if (get_qc_ether_addr(qc_dev_addr, net->dev_addr))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "self");
+ if (get_qc_ether_addr(qc_host_addr, dev->host_mac))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "host");
+
+ if (ethaddr)
+ ether_addr_copy(ethaddr, dev->host_mac);
+
+ net->netdev_ops = &eth_qc_netdev_ops;
+ net->ethtool_ops = &qc_ethtool_ops;
+
+ netif_carrier_off(net);
+
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &qc_gadget_type);
+
+ status = register_netdev(net);
+ if (status < 0) {
+ dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+ free_netdev(net);
+ } else {
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ }
+
+ return status;
+}
+
+/**
+ * gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_qc_setup().
+ */
+void gether_qc_cleanup_name(const char *netname)
+{
+ struct net_device *net_dev;
+
+ /* Extract the eth_qc_dev from the net device */
+ net_dev = dev_get_by_name(&init_net, netname);
+
+ if (net_dev) {
+ dev_put(net_dev);
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+ }
+}
+
/*
 * Look up the net_device registered for @netname.
 *
 * Returns the device pointer, or ERR_PTR(-EINVAL) when no such
 * interface exists.
 *
 * NOTE(review): the reference taken by dev_get_by_name() is dropped
 * before returning, so the returned pointer is only safe while the
 * caller otherwise guarantees the device stays registered -- confirm
 * against the callers.
 */
struct net_device *gether_qc_get_net(const char *netname)
{
	struct net_device *net_dev;

	net_dev = dev_get_by_name(&init_net, netname);
	if (!net_dev)
		return ERR_PTR(-EINVAL);

	/*
	 * Decrement net_dev refcount as it was incremented in
	 * dev_get_by_name().
	 */
	dev_put(net_dev);
	return net_dev;
}
/**
 * gether_qc_connect_name - notify network layer that USB link
 * is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * @netname: name for network device (for example, "usb")
 * @netif_enable: if true, net interface will be turned on
 * Context: irqs blocked
 *
 * This is called to let the network layer know the connection
 * is active ("carrier detect").
 */
struct net_device *gether_qc_connect_name(struct qc_gether *link,
	const char *netname, bool netif_enable)
{
	struct net_device *net_dev;
	struct eth_qc_dev *dev;

	/* Extract the eth_qc_dev from the net device */
	net_dev = dev_get_by_name(&init_net, netname);
	if (!net_dev)
		return ERR_PTR(-EINVAL);

	/* drop the lookup reference; see note in gether_qc_get_net() */
	dev_put(net_dev);
	dev = netdev_priv(net_dev);

	if (!dev)
		return ERR_PTR(-EINVAL);

	/* cache per-connection framing parameters from the function */
	dev->zlp = link->is_zlp_ok;
	dev->header_len = link->header_len;

	/* publish the link and its backlink atomically */
	spin_lock(&dev->lock);
	dev->port_usb = link;
	link->ioport = dev;
	if (netif_running(dev->net)) {
		if (link->open)
			link->open(link);
	} else {
		if (link->close)
			link->close(link);
	}
	spin_unlock(&dev->lock);

	if (netif_enable) {
		/* "carrier detect": tell the stack the cable is up */
		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			netif_wake_queue(dev->net);
	}

	return dev->net;
}
+
+/**
+ * gether_qc_disconnect_name - notify network layer that USB
+ * link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ */
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname)
+{
+ struct net_device *net_dev;
+ struct eth_qc_dev *dev;
+
+ /* Extract the eth_qc_dev from the net device */
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return;
+
+ dev_put(net_dev);
+ dev = netdev_priv(net_dev);
+
+ if (!dev)
+ return;
+
+ DBG(dev, "%s\n", __func__);
+
+ netif_stop_queue(dev->net);
+ netif_carrier_off(dev->net);
+
+ spin_lock(&dev->lock);
+ dev->port_usb = NULL;
+ link->ioport = NULL;
+ spin_unlock(&dev->lock);
+}
diff --git a/drivers/usb/gadget/function/u_qc_ether.h b/drivers/usb/gadget/function/u_qc_ether.h
new file mode 100644
index 000000000000..c5706edf8d2f
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qc_ether.h
@@ -0,0 +1,101 @@
+/*
+ * u_qc_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __U_QC_ETHER_H
+#define __U_QC_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration. When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ *
+ * This function is based on Ethernet-over-USB link layer utilities and
+ * contains MSM specific implementation.
+ */
+
/*
 * One USB-side "ethernet" link, owned by the USB function driver that
 * provides control and (maybe) framing; connected to the netdev side
 * via gether_qc_connect_name()/gether_qc_disconnect_name().
 */
struct qc_gether {
	struct usb_function func;

	/* updated by gether_{connect,disconnect} */
	struct eth_qc_dev *ioport;

	/* endpoints handle full and/or high speeds */
	struct usb_ep *in_ep;
	struct usb_ep *out_ep;

	bool is_zlp_ok;		/* copied to eth_qc_dev->zlp on connect */

	u16 cdc_filter;		/* NOTE(review): not referenced by u_qc_ether.c */

	/* hooks for added framing, as needed for RNDIS and EEM. */
	u32 header_len;		/* copied to eth_qc_dev->header_len on connect */

	struct sk_buff *(*wrap)(struct qc_gether *port,
			struct sk_buff *skb);
	int (*unwrap)(struct qc_gether *port,
			struct sk_buff *skb,
			struct sk_buff_head *list);

	/* called on network open/close */
	void (*open)(struct qc_gether *);
	void (*close)(struct qc_gether *);
};
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_qc_cleanup_name(const char *netname);
+/* variant of gether_setup that allows customizing network device name */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_qc_connect_name(struct qc_gether *link,
+ const char *netname, bool netif_enable);
+struct net_device *gether_qc_get_net(const char *netname);
+void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
+
+/* each configuration may bind one instance of an ethernet link */
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name);
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer,
+ u8 maxPktPerXfer, u8 pkt_alignment_factor,
+ char *xport_name);
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
+#endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
new file mode 100644
index 000000000000..0ef1e2ab34be
--- /dev/null
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb_bam.h>
+
+#include "f_qdss.h"
+static int alloc_sps_req(struct usb_ep *data_ep)
+{
+ struct usb_request *req = NULL;
+ struct f_qdss *qdss = data_ep->driver_data;
+ u32 sps_params = 0;
+
+ pr_debug("send_sps_req\n");
+
+ req = usb_ep_alloc_request(data_ep, GFP_ATOMIC);
+ if (!req) {
+ pr_err("usb_ep_alloc_request failed\n");
+ return -ENOMEM;
+ }
+
+ req->length = 32*1024;
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ qdss->bam_info.usb_bam_pipe_idx;
+ req->udc_priv = sps_params;
+ qdss->endless_req = req;
+
+ return 0;
+}
+
+static int init_data(struct usb_ep *ep);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable)
+{
+ enum usb_ctrl usb_bam_type;
+ int res = 0;
+ int idx;
+ struct usb_qdss_bam_connect_info bam_info;
+ struct usb_gadget *gadget;
+
+ pr_debug("set_qdss_data_connection\n");
+
+ if (!qdss) {
+ pr_err("%s: qdss ptr is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ gadget = qdss->gadget;
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+
+ bam_info = qdss->bam_info;
+ /* There is only one qdss pipe, so the pipe number can be set to 0 */
+ idx = usb_bam_get_connection_idx(usb_bam_type, QDSS_P_BAM,
+ PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, 0);
+ if (idx < 0) {
+ pr_err("%s: usb_bam_get_connection_idx failed\n", __func__);
+ return idx;
+ }
+
+ if (enable) {
+ usb_bam_alloc_fifos(usb_bam_type, idx);
+ bam_info.data_fifo =
+ kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
+ if (!bam_info.data_fifo) {
+ pr_err("qdss_data_connection: memory alloc failed\n");
+ usb_bam_free_fifos(usb_bam_type, idx);
+ return -ENOMEM;
+ }
+ get_bam2bam_connection_info(usb_bam_type, idx,
+ &bam_info.usb_bam_pipe_idx,
+ NULL, bam_info.data_fifo, NULL);
+
+ alloc_sps_req(qdss->port.data);
+ msm_data_fifo_config(qdss->port.data,
+ bam_info.data_fifo->phys_base,
+ bam_info.data_fifo->size,
+ bam_info.usb_bam_pipe_idx);
+ init_data(qdss->port.data);
+
+ res = usb_bam_connect(usb_bam_type, idx,
+ &(bam_info.usb_bam_pipe_idx));
+ } else {
+ kfree(bam_info.data_fifo);
+ res = usb_bam_disconnect_pipe(usb_bam_type, idx);
+ if (res)
+ pr_err("usb_bam_disconnection error\n");
+ usb_bam_free_fifos(usb_bam_type, idx);
+ }
+
+ return res;
+}
+
+static int init_data(struct usb_ep *ep)
+{
+ struct f_qdss *qdss = ep->driver_data;
+ int res = 0;
+
+ pr_debug("init_data\n");
+
+ res = msm_ep_config(ep, qdss->endless_req);
+ if (res)
+ pr_err("msm_ep_config failed\n");
+
+ return res;
+}
+
/*
 * Undo init_data(): restore @ep to normal (non-BAM) operation.
 * Returns msm_ep_unconfig()'s result.
 */
int uninit_data(struct usb_ep *ep)
{
	int res;

	/* plain entry trace, not an error: was wrongly logged at pr_err */
	pr_debug("uninit_data\n");

	res = msm_ep_unconfig(ep);
	if (res)
		pr_err("msm_ep_unconfig failed\n");

	return res;
}
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 000000000000..e0843794b594
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
/* One control message queued between the USB host and the modem side. */
struct rmnet_ctrl_pkt {
	void *buf;		/* message payload */
	int len;		/* payload length in bytes */
	struct list_head list;	/* entry in a pending-packet queue */
};

/* Logical QTI control ports. */
enum qti_port_type {
	QTI_PORT_RMNET,
	QTI_PORT_DPL,
	QTI_NUM_PORTS
};


/*
 * Callback glue between the USB rmnet function and the control
 * transport; each side fills in its own group of hooks.
 */
struct grmnet {
	/* to usb host, aka laptop, windows pc etc. Will
	 * be filled by usb driver of rmnet functionality
	 */
	int (*send_cpkt_response)(void *g, void *buf, size_t len);

	/* to modem, and to be filled by driver implementing
	 * control function
	 */
	int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
	void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);

	void (*disconnect)(struct grmnet *g);
	void (*connect)(struct grmnet *g);
};

/* Consumers of the control channel. */
enum ctrl_client {
	FRMNET_CTRL_CLIENT,
	GPS_CTRL_CLIENT,

	NR_CTRL_CLIENTS
};

/* QTI control channel lifecycle. */
int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned intf);
void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport);
int gqti_ctrl_init(void);
void gqti_ctrl_cleanup(void);
#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 58a699cfa458..d5fcd3e5f02d 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
*
* This code also borrows from usbserial.c, which is
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
@@ -27,6 +28,8 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
#include "u_serial.h"
@@ -77,9 +80,13 @@
* next layer of buffering. For TX that's a circular buffer; for RX
* consider it a NOP. A third layer is provided by the TTY code.
*/
-#define QUEUE_SIZE 16
+#define TX_QUEUE_SIZE 8
+#define TX_BUF_SIZE 4096
#define WRITE_BUF_SIZE 8192 /* TX only */
+#define RX_QUEUE_SIZE 8
+#define RX_BUF_SIZE 4096
+
/* circular buffer */
struct gs_buf {
unsigned buf_size;
@@ -106,7 +113,7 @@ struct gs_port {
int read_allocated;
struct list_head read_queue;
unsigned n_read;
- struct tasklet_struct push;
+ struct work_struct push;
struct list_head write_pool;
int write_started;
@@ -118,6 +125,10 @@ struct gs_port {
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+ unsigned long nbytes_from_host;
+ unsigned long nbytes_to_tty;
+ unsigned long nbytes_from_tty;
+ unsigned long nbytes_to_host;
};
static struct portmaster {
@@ -125,6 +136,7 @@ static struct portmaster {
struct gs_port *port;
} ports[MAX_U_SERIAL_PORTS];
+static struct workqueue_struct *gserial_wq;
#define GS_CLOSE_TIMEOUT 15 /* seconds */
@@ -360,26 +372,50 @@ __releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
- struct list_head *pool = &port->write_pool;
+ struct list_head *pool;
struct usb_ep *in;
int status = 0;
+ static long prev_len;
bool do_tty_wake = false;
- if (!port->port_usb)
- return status;
+ if (!port || !port->port_usb) {
+ pr_err("Error - port or port->usb is NULL.");
+ return -EIO;
+ }
- in = port->port_usb->in;
+ pool = &port->write_pool;
+ in = port->port_usb->in;
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
- if (port->write_started >= QUEUE_SIZE)
+ if (port->write_started >= TX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
- len = gs_send_packet(port, req->buf, in->maxpacket);
+ len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
if (len == 0) {
+ /* Queue zero length packet explicitly to make it
+ * work with UDCs which don't support req->zero flag
+ */
+ if (prev_len && (prev_len % in->maxpacket == 0)) {
+ req->length = 0;
+ list_del(&req->list);
+ spin_unlock(&port->port_lock);
+ status = usb_ep_queue(in, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+ if (!port->port_usb) {
+ gs_free_req(in, req);
+ break;
+ }
+ if (status) {
+ printk(KERN_ERR "%s: %s err %d\n",
+ __func__, "queue", status);
+ list_add(&req->list, pool);
+ }
+ prev_len = 0;
+ }
wake_up_interruptible(&port->drain_wait);
break;
}
@@ -387,7 +423,6 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
- req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
port->port_num, len, *((u8 *)req->buf),
@@ -405,6 +440,16 @@ __acquires(&port->port_lock)
status = usb_ep_queue(in, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
port->write_busy = false;
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ do_tty_wake = false;
+ gs_free_req(in, req);
+ break;
+ }
if (status) {
pr_debug("%s: %s %s err %d\n",
@@ -413,11 +458,10 @@ __acquires(&port->port_lock)
break;
}
- port->write_started++;
+ prev_len = req->length;
+ port->nbytes_from_tty += req->length;
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
+ port->write_started++;
}
if (do_tty_wake && port->port.tty)
@@ -434,8 +478,17 @@ __releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
- struct list_head *pool = &port->read_pool;
- struct usb_ep *out = port->port_usb->out;
+ struct list_head *pool;
+ struct usb_ep *out;
+ unsigned started = 0;
+
+ if (!port || !port->port_usb) {
+ pr_err("Error - port or port->usb is NULL.");
+ return -EIO;
+ }
+
+ pool = &port->read_pool;
+ out = port->port_usb->out;
while (!list_empty(pool)) {
struct usb_request *req;
@@ -447,12 +500,12 @@ __acquires(&port->port_lock)
if (!tty)
break;
- if (port->read_started >= QUEUE_SIZE)
+ if (port->read_started >= RX_QUEUE_SIZE)
break;
req = list_entry(pool->next, struct usb_request, list);
list_del(&req->list);
- req->length = out->maxpacket;
+ req->length = RX_BUF_SIZE;
/* drop lock while we call out; the controller driver
* may need to call us back (e.g. for disconnect)
@@ -461,6 +514,17 @@ __acquires(&port->port_lock)
status = usb_ep_queue(out, req, GFP_ATOMIC);
spin_lock(&port->port_lock);
+ /*
+ * If port_usb is NULL, gserial disconnect is called
+ * while the spinlock is dropped and all requests are
+ * freed. Free the current request here.
+ */
+ if (!port->port_usb) {
+ started = 0;
+ gs_free_req(out, req);
+ break;
+ }
+
if (status) {
pr_debug("%s: %s %s err %d\n",
__func__, "queue", out->name, status);
@@ -468,10 +532,6 @@ __acquires(&port->port_lock)
break;
}
port->read_started++;
-
- /* abort immediately after disconnect */
- if (!port->port_usb)
- break;
}
return port->read_started;
}
@@ -486,9 +546,9 @@ __acquires(&port->port_lock)
* So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
* can be buffered before the TTY layer's buffers (currently 64 KB).
*/
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *w)
{
- struct gs_port *port = (void *)_port;
+ struct gs_port *port = container_of(w, struct gs_port, push);
struct tty_struct *tty;
struct list_head *queue = &port->read_queue;
bool disconnect = false;
@@ -538,6 +598,7 @@ static void gs_rx_push(unsigned long _port)
count = tty_insert_flip_string(&port->port, packet,
size);
+ port->nbytes_to_tty += count;
if (count)
do_push = true;
if (count != size) {
@@ -566,13 +627,13 @@ static void gs_rx_push(unsigned long _port)
* this time around, there may be trouble unless there's an
* implicit tty_unthrottle() call on its way...
*
- * REVISIT we should probably add a timer to keep the tasklet
+ * REVISIT we should probably add a timer to keep the work queue
* from starving ... but it's not clear that case ever happens.
*/
if (!list_empty(queue) && tty) {
if (!test_bit(TTY_THROTTLED, &tty->flags)) {
if (do_push)
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
else
pr_warn("ttyGS%d: RX not scheduled?\n",
port->port_num);
@@ -589,19 +650,23 @@ static void gs_rx_push(unsigned long _port)
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
/* Queue all received data until the tty layer is ready for it. */
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_from_host += req->actual;
list_add_tail(&req->list, &port->read_queue);
- tasklet_schedule(&port->push);
- spin_unlock(&port->port_lock);
+ queue_work(gserial_wq, &port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_port *port = ep->driver_data;
+ unsigned long flags;
- spin_lock(&port->port_lock);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->nbytes_to_host += req->actual;
list_add(&req->list, &port->write_pool);
port->write_started--;
@@ -613,7 +678,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
/* FALL THROUGH */
case 0:
/* normal completion */
- gs_start_tx(port);
+ if (port->port_usb)
+ gs_start_tx(port);
break;
case -ESHUTDOWN:
@@ -622,7 +688,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
break;
}
- spin_unlock(&port->port_lock);
+ spin_unlock_irqrestore(&port->port_lock, flags);
}
static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
@@ -640,19 +706,20 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
}
static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int queue_size, int req_size,
void (*fn)(struct usb_ep *, struct usb_request *),
int *allocated)
{
int i;
struct usb_request *req;
- int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
+ int n = allocated ? queue_size - *allocated : queue_size;
/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
* do quite that many this time, don't fail ... we just won't
* be as speedy as we might otherwise be.
*/
for (i = 0; i < n; i++) {
- req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ req = gs_alloc_req(ep, req_size, GFP_ATOMIC);
if (!req)
return list_empty(head) ? -ENOMEM : 0;
req->complete = fn;
@@ -674,23 +741,32 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
*/
static int gs_start_io(struct gs_port *port)
{
- struct list_head *head = &port->read_pool;
- struct usb_ep *ep = port->port_usb->out;
+ struct list_head *head;
+ struct usb_ep *ep;
int status;
unsigned started;
+ if (!port || !port->port_usb) {
+ pr_err("Error - port or port->usb is NULL.");
+ return -EIO;
+ }
+
+ head = &port->read_pool;
+ ep = port->port_usb->out;
+
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
- status = gs_alloc_requests(ep, head, gs_read_complete,
- &port->read_allocated);
+ status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
+ gs_read_complete, &port->read_allocated);
if (status)
return status;
status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
+ TX_QUEUE_SIZE, TX_BUF_SIZE,
gs_write_complete, &port->write_allocated);
if (status) {
gs_free_requests(ep, head, &port->read_allocated);
@@ -701,6 +777,9 @@ static int gs_start_io(struct gs_port *port)
port->n_read = 0;
started = gs_start_rx(port);
+ if (!port->port_usb)
+ return -EIO;
+
if (started) {
gs_start_tx(port);
/* Unblock any pending writes into our circular buffer, in case
@@ -785,7 +864,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (status) {
- pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
+ pr_debug("gs_open: ttyGS%d (%pK,%pK) no buffer\n",
port->port_num, tty, file);
port->openclose = false;
goto exit_unlock_port;
@@ -815,7 +894,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
gser->connect(gser);
}
- pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
+ pr_debug("gs_open: ttyGS%d (%pK,%pK)\n", port->port_num, tty, file);
status = 0;
@@ -851,7 +930,8 @@ static void gs_close(struct tty_struct *tty, struct file *file)
goto exit;
}
- pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
+ pr_debug("gs_close: ttyGS%d (%pK,%pK) ...\n",
+ port->port_num, tty, file);
/* mark port as closing but in use; we can drop port lock
* and sleep if necessary
@@ -877,7 +957,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
- * let the push tasklet fire again until we're re-opened.
+ * let the push work queue fire again until we're re-opened.
*/
if (gser == NULL)
gs_buf_free(&port->port_write_buf);
@@ -888,7 +968,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
port->openclose = false;
- pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
+ pr_debug("gs_close: ttyGS%d (%pK,%pK) done!\n",
port->port_num, tty, file);
wake_up(&port->close_wait);
@@ -902,7 +982,10 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
unsigned long flags;
int status;
- pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
+ if (!port)
+ return 0;
+
+ pr_vdebug("gs_write: ttyGS%d (%pK) writing %d bytes\n",
port->port_num, tty, count);
spin_lock_irqsave(&port->port_lock, flags);
@@ -922,7 +1005,9 @@ static int gs_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
int status;
- pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n",
+ if (!port)
+ return 0;
+ pr_vdebug("gs_put_char: (%d,%pK) char=0x%x, called from %pKs\n",
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
@@ -937,7 +1022,9 @@ static void gs_flush_chars(struct tty_struct *tty)
struct gs_port *port = tty->driver_data;
unsigned long flags;
- pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+ if (!port)
+ return;
+ pr_vdebug("gs_flush_chars: (%d,%pK)\n", port->port_num, tty);
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
@@ -951,12 +1038,14 @@ static int gs_write_room(struct tty_struct *tty)
unsigned long flags;
int room = 0;
+ if (!port)
+ return 0;
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
room = gs_buf_space_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
+ pr_vdebug("gs_write_room: (%d,%pK) room=%d\n",
port->port_num, tty, room);
return room;
@@ -972,7 +1061,7 @@ static int gs_chars_in_buffer(struct tty_struct *tty)
chars = gs_buf_data_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+ pr_vdebug("gs_chars_in_buffer: (%d,%pK) chars=%d\n",
port->port_num, tty, chars);
return chars;
@@ -984,13 +1073,20 @@ static void gs_unthrottle(struct tty_struct *tty)
struct gs_port *port = tty->driver_data;
unsigned long flags;
+ /*
+ * tty's driver data is set to NULL during port close. Nothing
+ * to do here.
+ */
+ if (!port)
+ return;
+
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb) {
/* Kickstart read queue processing. We don't do xon/xoff,
* rts/cts, or other handshaking with the host, but if the
* read queue backs up enough we'll be NAKing OUT packets.
*/
- tasklet_schedule(&port->push);
+ queue_work(gserial_wq, &port->push);
pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1002,6 +1098,8 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
int status = 0;
struct gserial *gser;
+ if (!port)
+ return 0;
pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
port->port_num, duration);
@@ -1014,6 +1112,83 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
return status;
}
+static int gs_tiocmget(struct tty_struct *tty)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ unsigned int result = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (!gser) {
+ result = -ENODEV;
+ goto fail;
+ }
+
+ if (gser->get_dtr)
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+
+ if (gser->get_rts)
+ result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
+
+ if (gser->serial_state & TIOCM_CD)
+ result |= TIOCM_CD;
+
+ if (gser->serial_state & TIOCM_RI)
+ result |= TIOCM_RI;
+
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return result;
+}
+
+static int gs_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
+ int status = 0;
+
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+
+ if (!gser) {
+ status = -ENODEV;
+ goto fail;
+ }
+
+ if (set & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state |= TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 1);
+ }
+ }
+
+ if (clear & TIOCM_RI) {
+ if (gser->send_ring_indicator) {
+ gser->serial_state &= ~TIOCM_RI;
+ status = gser->send_ring_indicator(gser, 0);
+ }
+ }
+
+ if (set & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state |= TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 1);
+ }
+ }
+
+ if (clear & TIOCM_CD) {
+ if (gser->send_carrier_detect) {
+ gser->serial_state &= ~TIOCM_CD;
+ status = gser->send_carrier_detect(gser, 0);
+ }
+ }
+fail:
+ spin_unlock_irq(&port->port_lock);
+ return status;
+}
+
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
@@ -1024,6 +1199,8 @@ static const struct tty_operations gs_tty_ops = {
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
+ .tiocmget = gs_tiocmget,
+ .tiocmset = gs_tiocmset,
};
/*-------------------------------------------------------------------------*/
@@ -1053,7 +1230,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
init_waitqueue_head(&port->drain_wait);
init_waitqueue_head(&port->close_wait);
- tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+ INIT_WORK(&port->push, gs_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
@@ -1068,6 +1245,129 @@ out:
return ret;
}
+#if defined(CONFIG_DEBUG_FS)
+
+#define BUF_SIZE 512
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ struct tty_struct *tty;
+ struct gserial *gser;
+ char *buf;
+ unsigned long flags;
+ int i = 0;
+ int ret;
+ int result = 0;
+
+ if (!ui_dev)
+ return -EINVAL;
+
+ tty = ui_dev->port.tty;
+ gser = ui_dev->port_usb;
+
+ buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
+ (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
+
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
+
+ i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
+ (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
+
+ if (tty)
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "tty_flags: %lu\n", tty->flags);
+
+ if (gser->get_dtr) {
+ result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+ i += scnprintf(buf + i, BUF_SIZE - i,
+ "DTR_status: %d\n", result);
+ }
+
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gs_port *ui_dev = file->private_data;
+ unsigned long flags;
+
+ if (!ui_dev)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ui_dev->port_lock, flags);
+ ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
+ ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
+ spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+ return count;
+}
+
+static int serial_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+const struct file_operations debug_rst_ops = {
+ .open = serial_debug_open,
+ .write = debug_write_reset,
+};
+
+const struct file_operations debug_adb_ops = {
+ .open = serial_debug_open,
+ .read = debug_read_status,
+};
+
+struct dentry *gs_dent;
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
+{
+ char buf[48];
+
+ if (!ui_dev)
+ return;
+
+ snprintf(buf, 48, "usb_serial%d", port_num);
+ gs_dent = debugfs_create_dir(buf, 0);
+ if (!gs_dent || IS_ERR(gs_dent))
+ return;
+
+ debugfs_create_file("readstatus", 0444, gs_dent, ui_dev,
+ &debug_adb_ops);
+ debugfs_create_file("reset", S_IRUGO | S_IWUSR,
+ gs_dent, ui_dev, &debug_rst_ops);
+}
+
+static void usb_debugfs_remove(void)
+{
+ debugfs_remove_recursive(gs_dent);
+}
+#else
+static inline void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
+static inline void usb_debugfs_remove(void) {}
+#endif
+
static int gs_closed(struct gs_port *port)
{
int cond;
@@ -1080,7 +1380,7 @@ static int gs_closed(struct gs_port *port)
static void gserial_free_port(struct gs_port *port)
{
- tasklet_kill(&port->push);
+ cancel_work_sync(&port->push);
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
WARN_ON(port->port_usb != NULL);
@@ -1286,6 +1586,9 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
+ port->nbytes_from_host = port->nbytes_to_tty =
+ port->nbytes_from_tty = port->nbytes_to_host = 0;
+
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
@@ -1305,7 +1608,8 @@ static int userial_init(void)
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_RESET_TERMIOS;
gs_tty_driver->init_termios = tty_std_termios;
/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
@@ -1321,6 +1625,12 @@ static int userial_init(void)
for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
mutex_init(&ports[i].lock);
+ gserial_wq = create_singlethread_workqueue("k_gserial");
+ if (!gserial_wq) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/* export the driver ... */
status = tty_register_driver(gs_tty_driver);
if (status) {
@@ -1329,6 +1639,9 @@ static int userial_init(void)
goto fail;
}
+ for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
+ usb_debugfs_init(ports[i].port, i);
+
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
MAX_U_SERIAL_PORTS,
(MAX_U_SERIAL_PORTS == 1) ? "" : "s");
@@ -1336,6 +1649,8 @@ static int userial_init(void)
return status;
fail:
put_tty_driver(gs_tty_driver);
+ if (gserial_wq)
+ destroy_workqueue(gserial_wq);
gs_tty_driver = NULL;
return status;
}
@@ -1343,6 +1658,8 @@ module_init(userial_init);
static void userial_cleanup(void)
{
+ usb_debugfs_remove();
+ destroy_workqueue(gserial_wq);
tty_unregister_driver(gs_tty_driver);
put_tty_driver(gs_tty_driver);
gs_tty_driver = NULL;
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index c20210c0babd..50c801cd16d2 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -45,11 +45,21 @@ struct gserial {
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+ u16 serial_state;
+
+ /* control signal callbacks*/
+ unsigned int (*get_dtr)(struct gserial *p);
+ unsigned int (*get_rts)(struct gserial *p);
/* notification callbacks */
void (*connect)(struct gserial *p);
void (*disconnect)(struct gserial *p);
int (*send_break)(struct gserial *p, int duration);
+ unsigned int (*send_carrier_detect)(struct gserial *p, unsigned int);
+ unsigned int (*send_ring_indicator)(struct gserial *p, unsigned int);
+ int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+ /* notification changes to modem */
+ void (*notify_modem)(void *gser, u8 portno, int ctrl_bits);
};
/* utilities to allocate/free request and buffer */
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 5c2ac8e8456d..3317d3222184 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,82 +1,41 @@
/*
- * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ * u_uac1.h - Utility definitions for UAC1 function
*
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
*
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
-#ifndef __U_AUDIO_H
-#define __U_AUDIO_H
+#ifndef __U_UAC1_H
+#define __U_UAC1_H
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/usb/audio.h>
#include <linux/usb/composite.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
-#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
-#define FILE_CONTROL "/dev/snd/controlC0"
-
#define UAC1_OUT_EP_MAX_PACKET_SIZE 200
-#define UAC1_REQ_COUNT 256
-#define UAC1_AUDIO_BUF_SIZE 48000
-
-/*
- * This represents the USB side of an audio card device, managed by a USB
- * function which provides control and stream interfaces.
- */
-
-struct gaudio_snd_dev {
- struct gaudio *card;
- struct file *filp;
- struct snd_pcm_substream *substream;
- int access;
- int format;
- int channels;
- int rate;
-};
-
-struct gaudio {
- struct usb_function func;
- struct usb_gadget *gadget;
+#define UAC1_DEF_CCHMASK 0x3
+#define UAC1_DEF_CSRATE 48000
+#define UAC1_DEF_CSSIZE 2
+#define UAC1_DEF_PCHMASK 0x3
+#define UAC1_DEF_PSRATE 48000
+#define UAC1_DEF_PSSIZE 2
+#define UAC1_DEF_REQ_NUM 8
- /* ALSA sound device interfaces */
- struct gaudio_snd_dev control;
- struct gaudio_snd_dev playback;
- struct gaudio_snd_dev capture;
-
- /* TODO */
-};
struct f_uac1_opts {
struct usb_function_instance func_inst;
- int req_buf_size;
- int req_count;
- int audio_buf_size;
- char *fn_play;
- char *fn_cap;
- char *fn_cntl;
+ int c_chmask;
+ int c_srate;
+ int c_ssize;
+ int p_chmask;
+ int p_srate;
+ int p_ssize;
+ int req_number;
unsigned bound:1;
- unsigned fn_play_alloc:1;
- unsigned fn_cap_alloc:1;
- unsigned fn_cntl_alloc:1;
+
struct mutex lock;
int refcnt;
};
-int gaudio_setup(struct gaudio *card);
-void gaudio_cleanup(struct gaudio *the_card);
-
-size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
-int u_audio_get_playback_channels(struct gaudio *card);
-int u_audio_get_playback_rate(struct gaudio *card);
-
-#endif /* __U_AUDIO_H */
+#endif /* __U_UAC1_H */
diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index c78c84138a28..4d32492ae084 100644
--- a/drivers/usb/gadget/function/u_uac1.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -18,7 +18,7 @@
#include <linux/random.h>
#include <linux/syscalls.h>
-#include "u_uac1.h"
+#include "u_uac1_legacy.h"
/*
* This component encapsulates the ALSA devices for USB audio gadget
@@ -205,10 +205,11 @@ static int gaudio_open_snd_dev(struct gaudio *card)
{
struct snd_pcm_file *pcm_file;
struct gaudio_snd_dev *snd;
- struct f_uac1_opts *opts;
+ struct f_uac1_legacy_opts *opts;
char *fn_play, *fn_cap, *fn_cntl;
- opts = container_of(card->func.fi, struct f_uac1_opts, func_inst);
+ opts = container_of(card->func.fi, struct f_uac1_legacy_opts,
+ func_inst);
fn_play = opts->fn_play;
fn_cap = opts->fn_cap;
fn_cntl = opts->fn_cntl;
@@ -266,18 +267,24 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
/* Close control device */
snd = &gau->control;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
/* Close PCM playback device and setup substream */
snd = &gau->playback;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
/* Close PCM capture device and setup substream */
snd = &gau->capture;
- if (snd->filp)
+ if (snd->filp) {
filp_close(snd->filp, NULL);
+ snd->filp = NULL;
+ }
return 0;
}
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
new file mode 100644
index 000000000000..d715b1af56a4
--- /dev/null
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -0,0 +1,82 @@
+/*
+ * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_UAC1_LEGACY_H
+#define __U_UAC1_LEGACY_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
+#define FILE_CONTROL "/dev/snd/controlC0"
+
+#define UAC1_OUT_EP_MAX_PACKET_SIZE 200
+#define UAC1_REQ_COUNT 256
+#define UAC1_AUDIO_BUF_SIZE 48000
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+ struct gaudio *card;
+ struct file *filp;
+ struct snd_pcm_substream *substream;
+ int access;
+ int format;
+ int channels;
+ int rate;
+};
+
+struct gaudio {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+
+ /* ALSA sound device interfaces */
+ struct gaudio_snd_dev control;
+ struct gaudio_snd_dev playback;
+ struct gaudio_snd_dev capture;
+
+ /* TODO */
+};
+
+struct f_uac1_legacy_opts {
+ struct usb_function_instance func_inst;
+ int req_buf_size;
+ int req_count;
+ int audio_buf_size;
+ char *fn_play;
+ char *fn_cap;
+ char *fn_cntl;
+ unsigned bound:1;
+ unsigned fn_play_alloc:1;
+ unsigned fn_cap_alloc:1;
+ unsigned fn_cntl_alloc:1;
+ struct mutex lock;
+ int refcnt;
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(struct gaudio *the_card);
+
+size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
+int u_audio_get_playback_channels(struct gaudio *card);
+int u_audio_get_playback_rate(struct gaudio *card);
+
+#endif /* __U_UAC1_LEGACY_H */
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 78dd37279bd4..19eeb83538a5 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -24,6 +24,7 @@
#define UAC2_DEF_CCHMASK 0x3
#define UAC2_DEF_CSRATE 64000
#define UAC2_DEF_CSSIZE 2
+#define UAC2_DEF_REQ_NUM 2
struct f_uac2_opts {
struct usb_function_instance func_inst;
@@ -33,6 +34,7 @@ struct f_uac2_opts {
int c_chmask;
int c_srate;
int c_ssize;
+ int req_number;
bool bound;
struct mutex lock;
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 4d682ad7bf23..4b995c1f9f22 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -54,8 +54,10 @@ config USB_AUDIO
depends on SND
select USB_LIBCOMPOSITE
select SND_PCM
- select USB_F_UAC1 if GADGET_UAC1
+ select USB_F_UAC1 if (GADGET_UAC1 && !GADGET_UAC1_LEGACY)
+ select USB_F_UAC1_LEGACY if (GADGET_UAC1 && GADGET_UAC1_LEGACY)
select USB_F_UAC2 if !GADGET_UAC1
+ select USB_U_AUDIO if (USB_F_UAC2 || USB_F_UAC1)
help
This Gadget Audio driver is compatible with USB Audio Class
specification 2.0. It implements 1 AudioControl interface,
@@ -73,10 +75,17 @@ config USB_AUDIO
dynamically linked module called "g_audio".
config GADGET_UAC1
- bool "UAC 1.0 (Legacy)"
+ bool "UAC 1.0"
depends on USB_AUDIO
help
- If you instead want older UAC Spec-1.0 driver that also has audio
+ If you instead want older USB Audio Class specification 1.0 support
+ with similar driver capabilities.
+
+config GADGET_UAC1_LEGACY
+ bool "UAC 1.0 (Legacy)"
+ depends on GADGET_UAC1
+ help
+ If you instead want legacy UAC Spec-1.0 driver that also has audio
paths hardwired to the Audio codec chip on-board and doesn't work
without one.
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index 0fbe38d5d739..78ee92cdfb59 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -53,8 +53,41 @@ static int c_ssize = UAC2_DEF_CSSIZE;
module_param(c_ssize, uint, S_IRUGO);
MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
#else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
#include "u_uac1.h"
+/* Playback(USB-IN) Default Stereo - Fl/Fr */
+static int p_chmask = UAC1_DEF_PCHMASK;
+module_param(p_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(p_chmask, "Playback Channel Mask");
+
+/* Playback Default 48 KHz */
+static int p_srate = UAC1_DEF_PSRATE;
+module_param(p_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(p_srate, "Playback Sampling Rate");
+
+/* Playback Default 16bits/sample */
+static int p_ssize = UAC1_DEF_PSSIZE;
+module_param(p_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(p_ssize, "Playback Sample Size(bytes)");
+
+/* Capture(USB-OUT) Default Stereo - Fl/Fr */
+static int c_chmask = UAC1_DEF_CCHMASK;
+module_param(c_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(c_chmask, "Capture Channel Mask");
+
+/* Capture Default 48 KHz */
+static int c_srate = UAC1_DEF_CSRATE;
+module_param(c_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(c_srate, "Capture Sampling Rate");
+
+/* Capture Default 16bits/sample */
+static int c_ssize = UAC1_DEF_CSSIZE;
+module_param(c_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
+#else /* CONFIG_GADGET_UAC1_LEGACY */
+#include "u_uac1_legacy.h"
+
static char *fn_play = FILE_PCM_PLAYBACK;
module_param(fn_play, charp, S_IRUGO);
MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
@@ -78,6 +111,7 @@ MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
static int audio_buf_size = UAC1_AUDIO_BUF_SIZE;
module_param(audio_buf_size, int, S_IRUGO);
MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+#endif /* CONFIG_GADGET_UAC1_LEGACY */
#endif
/* string IDs are assigned dynamically */
@@ -125,7 +159,7 @@ static struct usb_device_descriptor device_desc = {
.bcdUSB = cpu_to_le16(0x200),
-#ifdef CONFIG_GADGET_UAC1
+#ifdef CONFIG_GADGET_UAC1_LEGACY
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
@@ -207,7 +241,11 @@ static int audio_bind(struct usb_composite_dev *cdev)
#ifndef CONFIG_GADGET_UAC1
struct f_uac2_opts *uac2_opts;
#else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
struct f_uac1_opts *uac1_opts;
+#else
+ struct f_uac1_legacy_opts *uac1_opts;
+#endif
#endif
int status;
@@ -216,7 +254,11 @@ static int audio_bind(struct usb_composite_dev *cdev)
if (IS_ERR(fi_uac2))
return PTR_ERR(fi_uac2);
#else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
fi_uac1 = usb_get_function_instance("uac1");
+#else
+ fi_uac1 = usb_get_function_instance("uac1_legacy");
+#endif
if (IS_ERR(fi_uac1))
return PTR_ERR(fi_uac1);
#endif
@@ -229,14 +271,26 @@ static int audio_bind(struct usb_composite_dev *cdev)
uac2_opts->c_chmask = c_chmask;
uac2_opts->c_srate = c_srate;
uac2_opts->c_ssize = c_ssize;
+ uac2_opts->req_number = UAC2_DEF_REQ_NUM;
#else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
uac1_opts = container_of(fi_uac1, struct f_uac1_opts, func_inst);
+ uac1_opts->p_chmask = p_chmask;
+ uac1_opts->p_srate = p_srate;
+ uac1_opts->p_ssize = p_ssize;
+ uac1_opts->c_chmask = c_chmask;
+ uac1_opts->c_srate = c_srate;
+ uac1_opts->c_ssize = c_ssize;
+ uac1_opts->req_number = UAC1_DEF_REQ_NUM;
+#else /* CONFIG_GADGET_UAC1_LEGACY */
+ uac1_opts = container_of(fi_uac1, struct f_uac1_legacy_opts, func_inst);
uac1_opts->fn_play = fn_play;
uac1_opts->fn_cap = fn_cap;
uac1_opts->fn_cntl = fn_cntl;
uac1_opts->req_buf_size = req_buf_size;
uac1_opts->req_count = req_count;
uac1_opts->audio_buf_size = audio_buf_size;
+#endif /* CONFIG_GADGET_UAC1_LEGACY */
#endif
status = usb_string_ids_tab(cdev, strings_dev);
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 907f8144813c..18839732c840 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -14,15 +14,14 @@
#include "u_f.h"
#include <linux/usb/ch9.h>
-struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len)
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req) {
- req->length = len ?: default_len;
- if (usb_endpoint_dir_out(ep->desc))
- req->length = usb_ep_align(ep, req->length);
+ req->length = usb_endpoint_dir_out(ep->desc) ?
+ usb_ep_align(ep, len) : len;
req->buf = kmalloc(req->length, GFP_ATOMIC);
if (!req->buf) {
usb_ep_free_request(ep, req);
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 3ee365fbc2e2..2f03334c6874 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -53,14 +53,13 @@ struct usb_request;
*
* @ep: the endpoint to allocate a usb_request
* @len: usb_requests's buffer suggested size
- * @default_len: used if @len is not provided, ie, is 0
*
* In case @ep direction is OUT, the @len will be aligned to ep's
* wMaxPacketSize. In order to avoid memory leaks or drops, *always* use
* usb_requests's length (req->length) to refer to the allocated buffer size.
* Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
*/
-struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len);
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
/* Frees a usb_request previously allocated by alloc_ep_req() */
static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index c6859fdd74bc..3454e263fd82 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -76,7 +76,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
}
req->num_mapped_sgs = mapped;
- } else {
+ } else if (!req->dma_pre_mapped) {
req->dma = dma_map_single(dev, req->buf, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -101,9 +101,15 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
- } else {
+ } else if (!req->dma_pre_mapped && req->dma != DMA_ERROR_CODE) {
+ /*
+ * If the DMA address has not been mapped by a higher layer,
+ * then unmap it here. Otherwise, the DMA address will be
+ * unmapped by the upper layer (where the request was queued).
+ */
dma_unmap_single(gadget->dev.parent, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->dma = DMA_ERROR_CODE;
}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 8e0b9377644b..c1c14d818b5c 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -106,7 +106,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
- ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ ehci_dbg(ehci, "%s td %pK n%08x %08x t%08x p0=%08x\n", label, qtd,
hc32_to_cpup(ehci, &qtd->hw_next),
hc32_to_cpup(ehci, &qtd->hw_alt_next),
hc32_to_cpup(ehci, &qtd->hw_token),
@@ -124,7 +124,7 @@ dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
- ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ ehci_dbg (ehci, "%s qh %pK n%08x info %x %x qtd %x\n", label,
qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
@@ -132,7 +132,7 @@ dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
static void __maybe_unused
dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
{
- ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
+ ehci_dbg (ehci, "%s [%d] itd %pK, next %08x, urb %pK\n",
label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
itd->urb);
ehci_dbg (ehci,
@@ -163,7 +163,7 @@ dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
static void __maybe_unused
dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
- ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
+ ehci_dbg (ehci, "%s [%d] sitd %pK, next %08x, urb %pK\n",
label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
sitd->urb);
ehci_dbg (ehci,
@@ -436,7 +436,7 @@ static void qh_lines (
scratch = hc32_to_cpup(ehci, &hw->hw_info1);
hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
temp = scnprintf (next, size,
- "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
+ "qh/%pK dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
@@ -464,7 +464,7 @@ static void qh_lines (
mark = '/';
}
temp = snprintf (next, size,
- "\n\t%p%c%s len=%d %08x urb %p",
+ "\n\t%pK%c%s len=%d %08x urb %pK",
td, mark, ({ char *tmp;
switch ((scratch>>8)&0x03) {
case 0: tmp = "out"; break;
@@ -662,7 +662,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
hw = p.qh->hw;
- temp = scnprintf (next, size, " qh%d-%04x/%p",
+ temp = scnprintf (next, size, " qh%d-%04x/%pK",
p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
@@ -724,20 +724,20 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
break;
case Q_TYPE_FSTN:
temp = scnprintf (next, size,
- " fstn-%8x/%p", p.fstn->hw_prev,
+ " fstn-%8x/%pK", p.fstn->hw_prev,
p.fstn);
tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = scnprintf (next, size,
- " itd/%p", p.itd);
+ " itd/%pK", p.itd);
tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
temp = scnprintf (next, size,
- " sitd%d-%04x/%p",
+ " sitd%d-%04x/%pK",
p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
@@ -909,7 +909,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
if (!list_empty(&ehci->async_unlink)) {
- temp = scnprintf(next, size, "async unlink qh %p\n",
+ temp = scnprintf(next, size, "async unlink qh %pK\n",
list_first_entry(&ehci->async_unlink,
struct ehci_qh, unlink_node));
size -= temp;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b9ad19d1b400..56a32d4e2cbc 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1013,7 +1013,7 @@ idle_timeout:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
- ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
+ ehci_err (ehci, "qh %pK (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 230c56d40557..37632e9b3e84 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -275,7 +275,7 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
- "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ "%s %s urb %pK ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
@@ -361,7 +361,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* Report Data Buffer Error: non-fatal but useful */
if (token & QTD_STS_DBE)
ehci_dbg(ehci,
- "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ "detected DataBufferErr for urb %pK ep%d%s len %d, qtd %pK [qh %pK]\n",
urb,
usb_endpoint_num(&urb->ep->desc),
usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
@@ -935,7 +935,7 @@ qh_make (
}
break;
default:
- ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
+ ehci_dbg(ehci, "bogus dev %pK speed %d\n", urb->dev,
urb->dev->speed);
done:
qh_destroy(ehci, qh);
@@ -1123,7 +1123,7 @@ submit_async (
struct ehci_qtd *qtd;
qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
ehci_dbg(ehci,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ "%s %s urb %pK ep%d%s len %d, qtd %pK [qh %pK]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f9a332775c47..9e69e4567e6a 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -548,7 +548,7 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
unsigned period = qh->ps.period;
dev_dbg(&qh->ps.udev->dev,
- "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ "link qh%d-%04x/%pK start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
@@ -641,7 +641,7 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
: (qh->ps.usecs * 8);
dev_dbg(&qh->ps.udev->dev,
- "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ "unlink qh%d-%04x/%pK start %d [%d/%d us]\n",
qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
@@ -751,7 +751,7 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
* FIXME kill the now-dysfunctional queued urbs
*/
else {
- ehci_err(ehci, "can't reschedule qh %p, err %d\n",
+ ehci_err(ehci, "can't reschedule qh %pK, err %d\n",
qh, rc);
}
}
@@ -869,7 +869,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
/* reuse the previous schedule slots, if we can */
if (qh->ps.phase != NO_FRAME) {
- ehci_dbg(ehci, "reused qh %p schedule\n", qh);
+ ehci_dbg(ehci, "reused qh %pK schedule\n", qh);
return 0;
}
@@ -1552,7 +1552,7 @@ iso_stream_schedule (
/* no room in the schedule */
if (!done) {
- ehci_dbg(ehci, "iso sched full %p", urb);
+ ehci_dbg(ehci, "iso sched full %pK", urb);
status = -ENOSPC;
goto fail;
}
@@ -1606,7 +1606,7 @@ iso_stream_schedule (
/* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
- ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
+ ehci_dbg(ehci, "request %pK would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
status = -EFBIG;
goto fail;
@@ -1635,7 +1635,7 @@ iso_stream_schedule (
/* How many uframes and packets do we need to skip? */
skip = (now2 - start + period - 1) & -period;
if (skip >= span) { /* Entirely in the past? */
- ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
+ ehci_dbg(ehci, "iso underrun %pK (%u+%u < %u) [%u]\n",
urb, start + base, span - period, now2 + base,
base);
@@ -1662,7 +1662,7 @@ iso_stream_schedule (
use_start:
/* Tried to schedule too far into the future? */
if (unlikely(start + span - period >= mod + wrap)) {
- ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
+ ehci_dbg(ehci, "request %pK would overflow (%u+%u >= %u)\n",
urb, start, span - period, mod + wrap);
status = -EFBIG;
goto fail;
@@ -1957,7 +1957,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
- "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
+ "%s %s urb %pK ep%d%s len %d, %d pkts %d uframes [%pK]\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
@@ -2337,7 +2337,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
- "submit %p dev%s ep%d%s-iso len %d\n",
+ "submit %pK dev%s ep%d%s-iso len %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
@@ -2490,7 +2490,7 @@ restart:
q = *q_p;
break;
default:
- ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
+ ehci_dbg(ehci, "corrupt type %d frame %d shadow %pK\n",
type, frame, q.ptr);
// BUG ();
/* FALL THROUGH */
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index c3eded317495..56176222b0b6 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -256,7 +256,7 @@ static void ohci_dump_td (const struct ohci_hcd *ohci, const char *label,
{
u32 tmp = hc32_to_cpup (ohci, &td->hwINFO);
- ohci_dbg (ohci, "%s td %p%s; urb %p index %d; hw next td %08x\n",
+ ohci_dbg (ohci, "%s td %pK%s; urb %pK index %d; hw next td %08x\n",
label, td,
(tmp & TD_DONE) ? " (DONE)" : "",
td->urb, td->index,
@@ -314,7 +314,7 @@ ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
u32 tmp = hc32_to_cpu (ohci, ed->hwINFO);
char *type = "";
- ohci_dbg (ohci, "%s, ed %p state 0x%x type %s; next ed %08x\n",
+ ohci_dbg (ohci, "%s, ed %pK state 0x%x type %s; next ed %08x\n",
label,
ed, ed->state, edstring (ed->type),
hc32_to_cpup (ohci, &ed->hwNextED));
@@ -415,7 +415,7 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
struct td *td;
temp = scnprintf (buf, size,
- "ed/%p %cs dev%d ep%d%s max %d %08x%s%s %s",
+ "ed/%pK %cs dev%d ep%d%s max %d %08x%s%s %s",
ed,
(info & ED_LOWSPEED) ? 'l' : 'f',
info & 0x7f,
@@ -437,7 +437,7 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
cbp = hc32_to_cpup (ohci, &td->hwCBP);
be = hc32_to_cpup (ohci, &td->hwBE);
temp = scnprintf (buf, size,
- "\n\ttd %p %s %d cc=%x urb %p (%08x)",
+ "\n\ttd %pK %s %d cc=%x urb %pK (%08x)",
td,
({ char *pid;
switch (info & TD_DP) {
@@ -516,7 +516,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
do {
- temp = scnprintf (next, size, " ed%d/%p",
+ temp = scnprintf (next, size, " ed%d/%pK",
ed->interval, ed);
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 570b3fd1f5d0..5137b1d5b312 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -278,7 +278,7 @@ static int ohci_urb_enqueue (
ed->interval);
if (urb_priv->td_cnt >= urb_priv->length) {
++urb_priv->td_cnt; /* Mark it */
- ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+ ohci_dbg(ohci, "iso underrun %pK (%u+%u < %u)\n",
urb, frame, length,
next);
}
@@ -386,7 +386,7 @@ sanitize:
/* caller was supposed to have unlinked any requests;
* that's not our job. can't recover; must leak ed.
*/
- ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
+ ohci_err (ohci, "leak ed %pK (#%02x) state %d%s\n",
ed, ep->desc.bEndpointAddress, ed->state,
list_empty (&ed->td_list) ? "" : " (has tds)");
td_free (ohci, ed->dummy);
@@ -1042,7 +1042,7 @@ int ohci_restart(struct ohci_hcd *ohci)
case ED_UNLINK:
break;
default:
- ohci_dbg(ohci, "bogus ed %p state %d\n",
+ ohci_dbg(ohci, "bogus ed %pK state %d\n",
ed, ed->state);
}
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index c9e315c6808a..99576f3a1970 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -109,7 +109,7 @@ td_free (struct ohci_hcd *hc, struct td *td)
if (*prev)
*prev = td->td_hash;
else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
- ohci_dbg (hc, "no hash for td %p\n", td);
+ ohci_dbg (hc, "no hash for td %pK\n", td);
dma_pool_free (hc->td_cache, td, td->td_dma);
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 24edb7674710..48200a89f7aa 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -143,7 +143,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
unsigned i;
- ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "link %sed %pK branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
@@ -287,7 +287,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
}
ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
- ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "unlink %sed %pK branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
@@ -787,7 +787,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
if (cc != TD_CC_NOERROR)
ohci_dbg(ohci,
- "urb %p iso td %p (%d) len %d cc %d\n",
+ "urb %pK iso td %pK (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
/* BULK, INT, CONTROL ... drivers see aggregate length/status,
@@ -819,7 +819,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
if (cc != TD_CC_NOERROR && cc < 0x0E)
ohci_dbg(ohci,
- "urb %p td %p (%d) cc %d, len=%d/%d\n",
+ "urb %pK td %pK (%d) cc %d, len=%d/%d\n",
urb, td, 1 + td->index, cc,
urb->actual_length,
urb->transfer_buffer_length);
@@ -885,7 +885,7 @@ static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
/* fallthrough */
default:
ohci_dbg (ohci,
- "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
+ "urb %pK path %s ep%d%s %08x cc %d --> status %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 1b28a000d5c6..466ab4fa289e 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -47,7 +47,7 @@ static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
u32 status, token;
status = td_status(uhci, td);
- out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
+ out += sprintf(out, "%*s[%pK] link (%08x) ", space, "", td,
hc32_to_cpu(uhci, td->link));
out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
((status >> 27) & 3),
@@ -105,9 +105,9 @@ static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
char *ptype;
- out += sprintf(out, "urb_priv [%p] ", urbp);
- out += sprintf(out, "urb [%p] ", urbp->urb);
- out += sprintf(out, "qh [%p] ", urbp->qh);
+ out += sprintf(out, "urb_priv [%pK] ", urbp);
+ out += sprintf(out, "urb [%pK] ", urbp->urb);
+ out += sprintf(out, "qh [%pK] ", urbp->qh);
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
@@ -177,13 +177,13 @@ static int uhci_show_qh(struct uhci_hcd *uhci,
default: qtype = "Skel" ; break;
}
- out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
+ out += sprintf(out, "%*s[%pK] %s QH link (%08x) element (%08x)\n",
space, "", qh, qtype,
hc32_to_cpu(uhci, qh->link),
hc32_to_cpu(uhci, element));
if (qh->type == USB_ENDPOINT_XFER_ISOC)
out += sprintf(out,
- "%*s period %d phase %d load %d us, frame %x desc [%p]\n",
+ "%*s period %d phase %d load %d us, frame %x desc [%pK]\n",
space, "", qh->period, qh->phase, qh->load,
qh->iso_frame, qh->iso_packet_desc);
else if (qh->type == USB_ENDPOINT_XFER_INT)
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index da6f56d996ce..9ca86cf5c9a9 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -124,9 +124,9 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
if (!list_empty(&td->list))
- dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
+ dev_WARN(uhci_dev(uhci), "td %pK still in list!\n", td);
if (!list_empty(&td->fl_list))
- dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+ dev_WARN(uhci_dev(uhci), "td %pK still in fl_list!\n", td);
dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
@@ -294,7 +294,7 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
if (!list_empty(&qh->queue))
- dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+ dev_WARN(uhci_dev(uhci), "qh %pK list not empty!\n", qh);
list_del(&qh->node);
if (qh->udev) {
@@ -744,7 +744,7 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
struct uhci_td *td, *tmp;
if (!list_empty(&urbp->node))
- dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
+ dev_WARN(uhci_dev(uhci), "urb %pK still on QH's list!\n",
urbp->urb);
list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
@@ -1317,7 +1317,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
else if (!uhci_frame_before_eq(next,
frame + (urb->number_of_packets - 1) *
qh->period))
- dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+ dev_dbg(uhci_dev(uhci), "iso underrun %pK (%u+%u < %u)\n",
urb, frame,
(urb->number_of_packets - 1) *
qh->period,
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 3425154baf8b..a190c97d11e4 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -30,10 +30,10 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
{
u32 temp;
- xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+ xhci_dbg(xhci, "// xHCI capability registers at %pK:\n",
xhci->cap_regs);
temp = readl(&xhci->cap_regs->hc_capbase);
- xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+ xhci_dbg(xhci, "// @%pK = 0x%x (CAPLENGTH AND HCIVERSION)\n",
&xhci->cap_regs->hc_capbase, temp);
xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
(unsigned int) HC_LENGTH(temp));
@@ -42,17 +42,17 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
(unsigned int) HC_VERSION(temp));
#endif
- xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+ xhci_dbg(xhci, "// xHCI operational registers at %pK:\n", xhci->op_regs);
temp = readl(&xhci->cap_regs->run_regs_off);
- xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+ xhci_dbg(xhci, "// @%pK = 0x%x RTSOFF\n",
&xhci->cap_regs->run_regs_off,
(unsigned int) temp & RTSOFF_MASK);
- xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+ xhci_dbg(xhci, "// xHCI runtime registers at %pK:\n", xhci->run_regs);
temp = readl(&xhci->cap_regs->db_off);
- xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
- xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+ xhci_dbg(xhci, "// @%pK = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+ xhci_dbg(xhci, "// Doorbell array at %pK:\n", xhci->dba);
}
static void xhci_print_cap_regs(struct xhci_hcd *xhci)
@@ -60,7 +60,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
u32 temp;
u32 hci_version;
- xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+ xhci_dbg(xhci, "xHCI capability registers at %pK:\n", xhci->cap_regs);
temp = readl(&xhci->cap_regs->hc_capbase);
hci_version = HC_VERSION(temp);
@@ -157,7 +157,7 @@ static void xhci_print_status(struct xhci_hcd *xhci)
static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
- xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+ xhci_dbg(xhci, "xHCI operational registers at %pK:\n", xhci->op_regs);
xhci_print_command_reg(xhci);
xhci_print_status(xhci);
}
@@ -178,7 +178,7 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
addr = &xhci->op_regs->port_status_base;
for (i = 0; i < ports; i++) {
for (j = 0; j < NUM_PORT_REGS; ++j) {
- xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+ xhci_dbg(xhci, "%pK port %s reg = 0x%x\n",
addr, names[j],
(unsigned int) readl(addr));
addr++;
@@ -198,35 +198,35 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
if (temp == XHCI_INIT_VALUE)
return;
- xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
+ xhci_dbg(xhci, " %pK: ir_set[%i]\n", ir_set, set_num);
- xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
+ xhci_dbg(xhci, " %pK: ir_set.pending = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->irq_control;
temp = readl(addr);
- xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
+ xhci_dbg(xhci, " %pK: ir_set.control = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->erst_size;
temp = readl(addr);
- xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
+ xhci_dbg(xhci, " %pK: ir_set.erst_size = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->rsvd;
temp = readl(addr);
if (temp != XHCI_INIT_VALUE)
- xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
+ xhci_dbg(xhci, " WARN: %pK: ir_set.rsvd = 0x%x\n",
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
temp_64 = xhci_read_64(xhci, addr);
- xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
+ xhci_dbg(xhci, " %pK: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
temp_64 = xhci_read_64(xhci, addr);
- xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
+ xhci_dbg(xhci, " %pK: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
@@ -235,15 +235,15 @@ void xhci_print_run_regs(struct xhci_hcd *xhci)
u32 temp;
int i;
- xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+ xhci_dbg(xhci, "xHCI runtime registers at %pK:\n", xhci->run_regs);
temp = readl(&xhci->run_regs->microframe_index);
- xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
+ xhci_dbg(xhci, " %pK: Microframe index = 0x%x\n",
&xhci->run_regs->microframe_index,
(unsigned int) temp);
for (i = 0; i < 7; ++i) {
temp = readl(&xhci->run_regs->rsvd[i]);
if (temp != XHCI_INIT_VALUE)
- xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
+ xhci_dbg(xhci, " WARN: %pK: Rsvd[%i] = 0x%x\n",
&xhci->run_regs->rsvd[i],
i, (unsigned int) temp);
}
@@ -345,13 +345,13 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+ xhci_dbg(xhci, "Ring deq = %pK (virt), 0x%llx (dma)\n",
ring->dequeue,
(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
ring->dequeue));
xhci_dbg(xhci, "Ring deq updated %u times\n",
ring->deq_updates);
- xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+ xhci_dbg(xhci, "Ring enq = %pK (virt), 0x%llx (dma)\n",
ring->enqueue,
(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
ring->enqueue));
@@ -441,7 +441,7 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
{
int i;
for (i = 0; i < 4; ++i) {
- xhci_dbg(xhci, "@%p (virt) @%08llx "
+ xhci_dbg(xhci, "@%pK (virt) @%08llx "
"(dma) %#08llx - rsvd64[%d]\n",
&ctx[4 + i], (unsigned long long)dma,
ctx[4 + i], i);
@@ -480,24 +480,24 @@ static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
xhci_dbg(xhci, "Slot Context:\n");
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_info\n",
&slot_ctx->dev_info,
(unsigned long long)dma, slot_ctx->dev_info);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_info2\n",
&slot_ctx->dev_info2,
(unsigned long long)dma, slot_ctx->dev_info2);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - tt_info\n",
&slot_ctx->tt_info,
(unsigned long long)dma, slot_ctx->tt_info);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - dev_state\n",
&slot_ctx->dev_state,
(unsigned long long)dma, slot_ctx->dev_state);
dma += field_size;
for (i = 0; i < 4; ++i) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
&slot_ctx->reserved[i], (unsigned long long)dma,
slot_ctx->reserved[i], i);
dma += field_size;
@@ -528,24 +528,24 @@ static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
usb_endpoint_out(epaddr) ? "OUT" : "IN",
epaddr & USB_ENDPOINT_NUMBER_MASK, i);
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - ep_info\n",
&ep_ctx->ep_info,
(unsigned long long)dma, ep_ctx->ep_info);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - ep_info2\n",
&ep_ctx->ep_info2,
(unsigned long long)dma, ep_ctx->ep_info2);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08llx - deq\n",
&ep_ctx->deq,
(unsigned long long)dma, ep_ctx->deq);
dma += 2*field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - tx_info\n",
&ep_ctx->tx_info,
(unsigned long long)dma, ep_ctx->tx_info);
dma += field_size;
for (j = 0; j < 3; ++j) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
&ep_ctx->reserved[j],
(unsigned long long)dma,
ep_ctx->reserved[j], j);
@@ -575,16 +575,16 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
return;
}
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - drop flags\n",
&ctrl_ctx->drop_flags, (unsigned long long)dma,
ctrl_ctx->drop_flags);
dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - add flags\n",
&ctrl_ctx->add_flags, (unsigned long long)dma,
ctrl_ctx->add_flags);
dma += field_size;
for (i = 0; i < 6; ++i) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+ xhci_dbg(xhci, "@%pK (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
&ctrl_ctx->rsvd2[i], (unsigned long long)dma,
ctrl_ctx->rsvd2[i], i);
dma += field_size;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 6113b9da00c6..6321943965e9 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -20,7 +20,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
+#include <linux/gfp.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -376,10 +376,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
int i;
ret = 0;
- virt_dev = xhci->devs[slot_id];
- if (!virt_dev)
- return -ENODEV;
-
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
@@ -387,6 +383,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
}
spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cmd);
+ return -ENODEV;
+ }
+
for (i = LAST_EP_INDEX; i > 0; i--) {
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
struct xhci_command *command;
@@ -887,6 +890,151 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
return status;
}
+static void xhci_single_step_completion(struct urb *urb)
+{
+ struct completion *done = urb->context;
+
+ complete(done);
+}
+
+/*
+ * Allocate a URB and initialize the various fields of it.
+ * This API is used by the single_step_set_feature test of
+ * EHSET where IN packet of the GetDescriptor request is
+ * sent 15secs after the SETUP packet.
+ * Return NULL if failed.
+ */
+static struct urb *xhci_request_single_step_set_feature_urb(
+ struct usb_device *udev,
+ void *dr,
+ void *buf,
+ struct completion *done)
+{
+ struct urb *urb;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_host_endpoint *ep;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ urb->pipe = usb_rcvctrlpipe(udev, 0);
+ ep = udev->ep_in[usb_pipeendpoint(urb->pipe)];
+ if (!ep) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ /*
+ * Initialize the various URB fields as these are used by the HCD
+ * driver to queue it and as well as when completion happens.
+ */
+ urb->ep = ep;
+ urb->dev = udev;
+ urb->setup_packet = dr;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+ urb->complete = xhci_single_step_completion;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags = URB_DIR_IN;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ usb_hcd_map_urb_for_dma(hcd, urb, GFP_KERNEL);
+ urb->context = done;
+ return urb;
+}
+
+/*
+ * This function implements the USB_PORT_FEAT_TEST handling of the
+ * SINGLE_STEP_SET_FEATURE test mode as defined in the Embedded
+ * High-Speed Electrical Test (EHSET) specification. This simply
+ * issues a GetDescriptor control transfer, with an inserted 15-second
+ * delay after the end of the SETUP stage and before the IN token of
+ * the DATA stage is set. The idea is that this gives the test operator
+ * enough time to configure the oscilloscope to perform a measurement
+ * of the response time between the DATA and ACK packets that follow.
+ */
+static int xhci_ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ int retval;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct usb_device *udev;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_device_descriptor *buf;
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /* Obtain udev of the rhub's child port */
+ udev = usb_hub_find_child(hcd->self.root_hub, port);
+ if (!udev) {
+ xhci_err(xhci, "No device attached to the RootHub\n");
+ return -ENODEV;
+ }
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Fill Setup packet for GetDescriptor */
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ urb = xhci_request_single_step_set_feature_urb(udev, dr, buf, &done);
+ if (!urb) {
+ retval = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Now complete just the SETUP stage */
+ spin_lock_irqsave(&xhci->lock, flags);
+ retval = xhci_submit_single_step_set_feature(hcd, urb, 1);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ if (retval)
+ goto out1;
+
+ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ xhci_err(xhci, "%s SETUP stage timed out on ep0\n", __func__);
+ goto out1;
+ }
+
+ /* Sleep for 15 seconds; HC will send SOFs during this period */
+ msleep(15 * 1000);
+
+ /* Complete remaining DATA and status stages. Re-use same URB */
+ urb->status = -EINPROGRESS;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ retval = xhci_submit_single_step_set_feature(hcd, urb, 0);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ if (!retval && !wait_for_completion_timeout(&done,
+ msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ xhci_err(xhci, "%s IN stage timed out on ep0\n", __func__);
+ }
+out1:
+ usb_free_urb(urb);
+cleanup:
+ kfree(dr);
+ kfree(buf);
+ return retval;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -901,6 +1049,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 link_state = 0;
u16 wake_mask = 0;
u16 timeout = 0;
+ u16 test_mode = 0;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -974,8 +1123,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
link_state = (wIndex & 0xff00) >> 3;
if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
wake_mask = wIndex & 0xff00;
- /* The MSB of wIndex is the U1/U2 timeout */
- timeout = (wIndex & 0xff00) >> 8;
+ /* The MSB of wIndex is the U1/U2 timeout OR TEST mode*/
+ test_mode = timeout = (wIndex & 0xff00) >> 8;
wIndex &= 0xff;
if (!wIndex || wIndex > max_ports)
goto error;
@@ -1057,6 +1206,40 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(port_array[wIndex]);
break;
}
+
+ /*
+ * For xHCI 1.1 according to section 4.19.1.2.4.1 a
+ * root hub port's transition to compliance mode upon
+ * detecting LFPS timeout may be controlled by an
+ * Compliance Transition Enabled (CTE) flag (not
+ * software visible). This flag is set by writing 0xA
+ * to PORTSC PLS field which will allow transition to
+ * compliance mode the next time LFPS timeout is
+ * encountered. A warm reset will clear it.
+ *
+ * The CTE flag is only supported if the HCCPARAMS2 CTC
+ * flag is set, otherwise, the compliance substate is
+ * automatically entered as on 1.0 and prior.
+ */
+ if (link_state == USB_SS_PORT_LS_COMP_MOD) {
+ if (!HCC2_CTC(xhci->hcc_params2)) {
+ xhci_dbg(xhci, "CTC flag is 0, port already supports entering compliance mode\n");
+ break;
+ }
+
+ if ((temp & PORT_CONNECT)) {
+ xhci_warn(xhci, "Can't set compliance mode when port is connected\n");
+ goto error;
+ }
+
+ xhci_dbg(xhci, "Enable compliance mode transition for port %d\n",
+ wIndex);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ link_state);
+ temp = readl(port_array[wIndex]);
+ break;
+ }
+
/* Port must be enabled */
if (!(temp & PORT_PE)) {
retval = -ENODEV;
@@ -1149,6 +1332,32 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_U2_TIMEOUT(timeout);
writel(temp, port_array[wIndex] + PORTPMSC);
break;
+ case USB_PORT_FEAT_TEST:
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
+ if (test_mode && test_mode <= 5) {
+ /* unlock to execute stop endpoint commands */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_stop_device(xhci, slot_id, 1);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_halt(xhci);
+
+ temp = readl_relaxed(port_array[wIndex] +
+ PORTPMSC);
+ temp |= test_mode << 28;
+ writel_relaxed(temp, port_array[wIndex] +
+ PORTPMSC);
+ /* to make sure above write goes through */
+ mb();
+ } else if (test_mode == 6) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ retval = xhci_ehset_single_step_set_feature(hcd,
+ wIndex);
+ spin_lock_irqsave(&xhci->lock, flags);
+ } else {
+ goto error;
+ }
+ break;
default:
goto error;
}
@@ -1181,7 +1390,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(USB_RESUME_TIMEOUT);
+ usleep_range(21000, 21500);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1500,7 +1709,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(USB_RESUME_TIMEOUT);
+ usleep_range(21000, 21500);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f274e7e4e659..cfd163c7e2ec 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1064,7 +1064,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
- xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+ xhci_dbg(xhci, "Set slot id %d dcbaa entry %pK to 0x%llx\n",
slot_id,
&xhci->dcbaa->dev_context_ptrs[slot_id],
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
@@ -1235,7 +1235,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
if (udev->tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
}
- xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+ xhci_dbg(xhci, "udev->tt = %pK\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
/* Step 4 - ring already allocated */
@@ -1527,6 +1527,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
}
break;
case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_bulk(&ep->desc) && max_packet < 8)
+ max_packet = 8;
case USB_SPEED_LOW:
break;
default:
@@ -1840,25 +1842,151 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
-void xhci_mem_cleanup(struct xhci_hcd *xhci)
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
 {
+	union xhci_trb *erdp_trb, *current_trb;
+	struct xhci_segment *seg;
+	u64 erdp_reg;
+	u32 iman_reg;
+	dma_addr_t deq;
+	unsigned long segment_offset;
+
+	/* disable irq, ack pending interrupt and ack all pending events */
+
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg &= ~IMAN_IE;
+	writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	if (iman_reg & IMAN_IP)
+		writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+
+	/* last acked event trb is in erdp reg */
+	erdp_reg =
+		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+	if (!deq) {
+		pr_debug("%s: event ring handling not required\n", __func__);
+		return;
+	}
+
+	seg = xhci->sec_event_ring[intr_num]->first_seg;
+	segment_offset = deq - seg->dma;
+
+	/* find out virtual address of the last acked event trb */
+	erdp_trb = current_trb = &seg->trbs[0] +
+				(segment_offset/sizeof(*current_trb));
+
+	/* read cycle state of the last acked trb to find out CCS */
+	xhci->sec_event_ring[intr_num]->cycle_state =
+		(le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE);
+
+	while (1) {
+		/* last trb of the event ring: toggle cycle state */
+		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+			current_trb = &seg->trbs[0];
+		} else {
+			current_trb++;
+		}
+
+		/* cycle state transition */
+		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+		    xhci->sec_event_ring[intr_num]->cycle_state)
+			break;
+	}
+
+	if (erdp_trb != current_trb) {
+		deq =
+		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+					current_trb);
+		if (deq == 0)
+			xhci_warn(xhci,
+				"WARN invalid SW event ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		erdp_reg &= ERST_PTR_MASK;
+		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	erdp_reg |= ERST_EHB;
+	xhci_write_64(xhci, erdp_reg,
+			&xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
+{
+ int size;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ if (intr_num >= xhci->max_interrupters) {
+ xhci_err(xhci, "invalid secondary interrupter num %d\n",
+ intr_num);
+ return -EINVAL;
+ }
+
+ size =
+ sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
+ if (xhci->sec_erst[intr_num].entries) {
+ xhci_handle_sec_intr_events(xhci, intr_num);
+ dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
+ xhci->sec_erst[intr_num].erst_dma_addr);
+ xhci->sec_erst[intr_num].entries = NULL;
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
+ intr_num);
+ if (xhci->sec_event_ring[intr_num])
+ xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
+ xhci->sec_event_ring[intr_num] = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed sec event ring");
+
+ return 0;
+}
+
+void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+{
 	int size;
-	int i, j, num_ports;
+	unsigned int i;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
-	cancel_delayed_work_sync(&xhci->cmd_timer);
+	/* sec event ring clean up */
+	for (i = 1; i < xhci->max_interrupters; i++)
+		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
-	/* Free the Event Ring Segment Table and the actual Event Ring */
+	kfree(xhci->sec_ir_set);
+	xhci->sec_ir_set = NULL;
+	kfree(xhci->sec_erst);
+	xhci->sec_erst = NULL;
+	kfree(xhci->sec_event_ring);
+	xhci->sec_event_ring = NULL;
+
+	/* primary event ring clean up */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ int i, j, num_ports;
+
+ cancel_delayed_work_sync(&xhci->cmd_timer);
+
+ xhci_event_ring_cleanup(xhci);
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
@@ -1964,15 +2092,15 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
if (seg != result_seg) {
xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
test_name, test_number);
- xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+ xhci_warn(xhci, "Tested TRB math w/ seg %pK and "
"input DMA 0x%llx\n",
input_seg,
(unsigned long long) input_dma);
- xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
- "ending TRB %p (0x%llx DMA)\n",
+ xhci_warn(xhci, "starting TRB %pK (0x%llx DMA), "
+ "ending TRB %pK (0x%llx DMA)\n",
start_trb, start_dma,
end_trb, end_dma);
- xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+ xhci_warn(xhci, "Expected seg %pK, got seg %pK\n",
result_seg, seg);
trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
true);
@@ -2103,30 +2231,6 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
return 0;
}
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
- u64 temp;
- dma_addr_t deq;
-
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0 && !in_interrupt())
- xhci_warn(xhci, "WARN something wrong with SW event ring "
- "dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
- /* Don't clear the EHB bit (which is RW1C) because
- * there might be more events to service.
- */
- temp &= ~ERST_EHB;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write event ring dequeue pointer, "
- "preserving EHB bit");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
-}
-
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
__le32 __iomem *addr, u8 major_revision, int max_caps)
{
@@ -2142,7 +2246,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
rhub = &xhci->usb2_rhub;
} else {
xhci_warn(xhci, "Ignoring unknown port speed, "
- "Ext Cap %p, revision = 0x%x\n",
+ "Ext Cap %pK, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
@@ -2155,7 +2259,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Ext Cap %p, port offset = %u, "
+ "Ext Cap %pK, port offset = %u, "
"count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
@@ -2217,7 +2321,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
for (i = port_offset; i < (port_offset + port_count); i++) {
/* Duplicate entry. Ignore the port if the revisions differ. */
if (xhci->port_array[i] != 0) {
- xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+ xhci_warn(xhci, "Duplicate port entry, Ext Cap %pK,"
" port %u\n", addr, i);
xhci_warn(xhci, "Port was marked as USB %u, "
"duplicated as USB %u\n",
@@ -2373,7 +2477,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 2.0 port at index %u, "
- "addr = %p", i,
+ "addr = %pK", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
@@ -2394,7 +2498,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 3.0 port at index %u, "
- "addr = %p", i,
+ "addr = %pK", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
@@ -2404,13 +2508,184 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
return 0;
}
+int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
+ struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
+ unsigned int intr_num, gfp_t flags)
+{
+ dma_addr_t dma, deq;
+ u64 val_64;
+ unsigned int val;
+ struct xhci_segment *seg;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ *er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1,
+ TYPE_EVENT, flags);
+ if (!*er)
+ return -ENOMEM;
+
+ erst->entries = dma_alloc_coherent(dev,
+ sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+ flags);
+ if (!erst->entries) {
+ xhci_ring_free(xhci, *er);
+ return -ENOMEM;
+ }
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "intr# %d: Allocated event ring segment table at 0x%llx",
+ intr_num, (unsigned long long)dma);
+
+ memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+ erst->num_entries = ERST_NUM_SEGS;
+ erst->erst_dma_addr = dma;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
+ intr_num,
+ erst->num_entries,
+ erst->entries,
+ (unsigned long long)erst->erst_dma_addr);
+
+ /* set ring base address and size for each segment table entry */
+ for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
+ struct xhci_erst_entry *entry = &erst->entries[val];
+
+ entry->seg_addr = cpu_to_le64(seg->dma);
+ entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ /* set ERST count with the number of entries in the segment table */
+ val = readl_relaxed(&ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Write ERST size = %i to ir_set %d (some bits preserved)", val,
+ intr_num);
+ writel_relaxed(val, &ir_set->erst_size);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "intr# %d: Set ERST entries to point to event ring.",
+ intr_num);
+ /* set the segment table base address */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Set ERST base address for ir_set %d = 0x%llx",
+ intr_num,
+ (unsigned long long)erst->erst_dma_addr);
+ val_64 = xhci_read_64(xhci, &ir_set->erst_base);
+ val_64 &= ERST_PTR_MASK;
+ val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+ xhci_write_64(xhci, val_64, &ir_set->erst_base);
+
+ /* Set the event ring dequeue address */
+ deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci,
+ "intr# %d:WARN something wrong with SW event ring deq ptr.\n",
+ intr_num);
+ /* Update HC event ring dequeue pointer */
+ val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
+ val_64 &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ val_64 &= ~ERST_EHB;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "intr# %d:Write event ring dequeue pointer, preserving EHB bit",
+ intr_num);
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
+ &ir_set->erst_dequeue);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wrote ERST address to ir_set %d.", intr_num);
+ xhci_print_ir_set(xhci, intr_num);
+
+ return 0;
+}
+
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned intr_num)
+{
+ int ret;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
+ || !xhci->sec_event_ring || !xhci->sec_erst ||
+ intr_num >= xhci->max_interrupters) {
+ xhci_err(xhci,
+ "%s:state %x ir_set %pK evt_ring %pK erst %pK intr# %d\n",
+ __func__, xhci->xhc_state, xhci->sec_ir_set,
+ xhci->sec_event_ring, xhci->sec_erst, intr_num);
+ return -EINVAL;
+ }
+
+ if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+ && xhci->sec_event_ring[intr_num]->first_seg)
+ goto done;
+
+ xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
+ ret = xhci_event_ring_setup(xhci,
+ &xhci->sec_event_ring[intr_num],
+ xhci->sec_ir_set[intr_num],
+ &xhci->sec_erst[intr_num],
+ intr_num, GFP_KERNEL);
+ if (ret) {
+ xhci_err(xhci, "sec event ring setup failed inter#%d\n",
+ intr_num);
+ return ret;
+ }
+done:
+ return 0;
+}
+
+int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+ int ret = 0;
+
+ /* primary + secondary */
+ xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocating primary event ring");
+
+ /* Set ir_set to interrupt register set 0 */
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
+ ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
+ &xhci->erst, 0, flags);
+ if (ret) {
+ xhci_err(xhci, "failed to setup primary event ring\n");
+ goto fail;
+ }
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocating sec event ring related pointers");
+
+ xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
+ sizeof(*xhci->sec_ir_set), flags);
+ if (!xhci->sec_ir_set) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
+ sizeof(*xhci->sec_event_ring), flags);
+ if (!xhci->sec_event_ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ xhci->sec_erst = kcalloc(xhci->max_interrupters,
+ sizeof(*xhci->sec_erst), flags);
+ if (!xhci->sec_erst)
+ ret = -ENOMEM;
+fail:
+ return ret;
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
unsigned int val, val2;
u64 val_64;
- struct xhci_segment *seg;
u32 page_size, temp;
int i;
@@ -2463,7 +2738,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Device context base array address = 0x%llx (DMA), %p (virt)",
+ "// Device context base array address = 0x%llx (DMA), %pK (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
@@ -2504,7 +2779,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocated command ring at %p", xhci->cmd_ring);
+ "Allocated command ring at %pK", xhci->cmd_ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -2536,73 +2811,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
- /* Set ir_set to interrupt register set 0 */
- xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- flags);
- if (!xhci->event_ring)
- goto fail;
- if (xhci_check_trb_in_td_math(xhci) < 0)
+ if (xhci_event_ring_init(xhci, GFP_KERNEL))
goto fail;
- xhci->erst.entries = dma_alloc_coherent(dev,
- sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
- flags);
- if (!xhci->erst.entries)
+ if (xhci_check_trb_in_td_math(xhci) < 0)
goto fail;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Allocated event ring segment table at 0x%llx",
- (unsigned long long)dma);
-
- memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
- xhci->erst.num_entries = ERST_NUM_SEGS;
- xhci->erst.erst_dma_addr = dma;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
- xhci->erst.num_entries,
- xhci->erst.entries,
- (unsigned long long)xhci->erst.erst_dma_addr);
-
- /* set ring base address and size for each segment table entry */
- for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
- struct xhci_erst_entry *entry = &xhci->erst.entries[val];
- entry->seg_addr = cpu_to_le64(seg->dma);
- entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
- entry->rsvd = 0;
- seg = seg->next;
- }
-
- /* set ERST count with the number of entries in the segment table */
- val = readl(&xhci->ir_set->erst_size);
- val &= ERST_SIZE_MASK;
- val |= ERST_NUM_SEGS;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write ERST size = %i to ir_set 0 (some bits preserved)",
- val);
- writel(val, &xhci->ir_set->erst_size);
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST entries to point to event ring.");
- /* set the segment table base address */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST base address for ir_set 0 = 0x%llx",
- (unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- val_64 &= ERST_PTR_MASK;
- val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
- /* Set the event ring dequeue address */
- xhci_set_hc_event_deq(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Wrote ERST address to ir_set 0.");
- xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c4c40e9d4247..a0917a135c5d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -38,12 +38,19 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct device_node *node = dev->of_node;
+ struct usb_xhci_pdata *pdata = dev_get_platdata(dev);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
+ (pdata && pdata->usb3_lpm_capable))
+ xhci->quirks |= XHCI_LPM_SUPPORT;
}
/* called during probe() after chip reset completes */
@@ -73,9 +80,62 @@ static int xhci_plat_start(struct usb_hcd *hcd)
return xhci_run(hcd);
}
+static ssize_t config_imod_store(struct device *pdev,
+		struct device_attribute *attr, const char *buff, size_t size)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	u32 imod;
+	unsigned long flags;
+
+	if (kstrtouint(buff, 10, &imod) < 0)
+		return -EINVAL;
+
+	imod &= ER_IRQ_INTERVAL_MASK;
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= imod;
+	writel_relaxed(temp, &xhci->ir_set->irq_control);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return size;
+}
+
+static ssize_t config_imod_show(struct device *pdev,
+ struct device_attribute *attr, char *buff)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(pdev);
+ struct xhci_hcd *xhci;
+ u32 temp;
+ unsigned long flags;
+
+ xhci = hcd_to_xhci(hcd);
+
+ if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+ && hcd->state == HC_STATE_SUSPENDED)
+ return -EACCES;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl_relaxed(&xhci->ir_set->irq_control) &
+ ER_IRQ_INTERVAL_MASK;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return snprintf(buff, PAGE_SIZE, "%08u\n", temp);
+}
+
+static DEVICE_ATTR(config_imod, S_IRUGO | S_IWUSR,
+ config_imod_show, config_imod_store);
+
static int xhci_plat_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct usb_xhci_pdata *pdata = dev_get_platdata(&pdev->dev);
const struct hc_driver *driver;
struct xhci_hcd *xhci;
@@ -84,6 +144,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
int irq;
+ u32 temp, imod;
+ unsigned long flags;
if (usb_disabled())
return -ENODEV;
@@ -113,6 +175,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
+ hcd_to_bus(hcd)->skip_resume = true;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
@@ -137,6 +201,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_hcd;
}
+ if (pdev->dev.parent)
+ pm_runtime_resume(pdev->dev.parent);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-375-xhci") ||
of_device_is_compatible(pdev->dev.of_node,
@@ -158,9 +231,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
- if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
- (pdata && pdata->usb3_lpm_capable))
- xhci->quirks |= XHCI_LPM_SUPPORT;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
@@ -178,6 +249,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto disable_usb_phy;
+ device_wakeup_enable(&hcd->self.root_hub->dev);
+
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
@@ -185,6 +258,28 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto dealloc_usb2_hcd;
+ device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+
+ /* override imod interval if specified */
+ if (pdata && pdata->imod_interval) {
+ imod = pdata->imod_interval & ER_IRQ_INTERVAL_MASK;
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl_relaxed(&xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= imod;
+ writel_relaxed(temp, &xhci->ir_set->irq_control);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ dev_dbg(&pdev->dev, "%s: imod set to %u\n", __func__, imod);
+ }
+
+ ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
+ if (ret)
+ dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
+ __func__);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
@@ -198,6 +293,8 @@ put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_clk:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (!IS_ERR(clk))
clk_disable_unprepare(clk);
@@ -213,8 +310,8 @@ static int xhci_plat_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
+ device_remove_file(&dev->dev, &dev_attr_config_imod);
xhci->xhc_state |= XHCI_STATE_REMOVING;
-
usb_remove_hcd(xhci->shared_hcd);
usb_phy_shutdown(hcd->usb_phy);
@@ -225,36 +322,128 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(clk);
usb_put_hcd(hcd);
+ pm_runtime_set_suspended(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int xhci_plat_suspend(struct device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM suspend\n");
+ return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM resume\n");
+
+ return (!hcd_to_bus(hcd)->skip_resume) ? xhci_resume(xhci, false) : 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int xhci_plat_runtime_idle(struct device *dev)
+{
/*
- * xhci_suspend() needs `do_wakeup` to know whether host is allowed
- * to do wakeup during suspend. Since xhci_plat_suspend is currently
- * only designed for system suspend, device_may_wakeup() is enough
- * to dertermine whether host is allowed to do wakeup. Need to
- * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
- * also applies to runtime suspend.
+ * When pm_runtime_put_autosuspend() is called on this device,
+ * after this idle callback returns the PM core will schedule the
+ * autosuspend if there is any remaining time until expiry. However,
+ * when reaching this point because the child_count becomes 0, the
+ * core does not honor autosuspend in that case and results in
+ * idle/suspend happening immediately. In order to have a delay
+ * before suspend we have to call pm_runtime_autosuspend() manually.
*/
- return xhci_suspend(xhci, device_may_wakeup(dev));
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ return -EBUSY;
}
-static int xhci_plat_resume(struct device *dev)
+static int xhci_plat_pm_freeze(struct device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat freeze\n");
+
+ return xhci_suspend(xhci, false);
+}
+
+static int xhci_plat_pm_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
- return xhci_resume(xhci, 0);
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat restore\n");
+
+ ret = xhci_resume(xhci, true);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+
+ return ret;
+}
+
+static int xhci_plat_runtime_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat runtime suspend\n");
+
+ return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_runtime_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat runtime resume\n");
+
+ ret = xhci_resume(xhci, false);
+ pm_runtime_mark_last_busy(dev);
+
+ return ret;
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+ .suspend = xhci_plat_suspend,
+ .resume = xhci_plat_resume,
+ .freeze = xhci_plat_pm_freeze,
+ .restore = xhci_plat_pm_restore,
+ .thaw = xhci_plat_pm_restore,
+ SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
+ xhci_plat_runtime_idle)
};
#define DEV_PM_OPS (&xhci_plat_pm_ops)
#else
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a39b7a49b7cf..9d2cc0de92e1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -310,7 +310,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
i_cmd->status = COMP_CMD_STOP;
- xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ xhci_dbg(xhci, "Turn aborted command %pK to no-op\n",
i_cmd->command_trb);
/* get cycle state from the original cmd trb */
cycle_state = le32_to_cpu(
@@ -354,29 +354,20 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
&xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should
- * time the completion od all xHCI commands, including
+ * time the completion of all xHCI commands, including
* the Command Abort operation. If software doesn't see
- * CRR negated in a timely manner (e.g. longer than 5
- * seconds), then it should assume that the there are
- * larger problems with the xHC and assert HCRST.
+ * CRR negated in a timely manner, then it should assume
+ * that the there are larger problems with the xHC and assert HCRST.
*/
- ret = xhci_handshake(&xhci->op_regs->cmd_ring,
- CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 1000 * 1000);
if (ret < 0) {
- /* we are about to kill xhci, give it one more chance */
- xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
- &xhci->op_regs->cmd_ring);
- udelay(1000);
- ret = xhci_handshake(&xhci->op_regs->cmd_ring,
- CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
- if (ret < 0) {
- xhci_err(xhci, "Stopped the command ring failed, "
- "maybe the host is dead\n");
- xhci->xhc_state |= XHCI_STATE_DYING;
- xhci_quiesce(xhci);
- xhci_halt(xhci);
- return -ESHUTDOWN;
- }
+ xhci_err(xhci,
+ "Stop command ring failed, maybe the host is dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_quiesce(xhci);
+ xhci_halt(xhci);
+ return -ESHUTDOWN;
}
/*
* Writing the CMD_RING_ABORT bit should cause a cmd completion event,
@@ -592,7 +583,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
"Cycle state = 0x%x", state->new_cycle_state);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "New dequeue segment = %p (virtual)",
+ "New dequeue segment = %pK (virtual)",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -627,8 +618,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel (unchain) link TRB");
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Address = %p (0x%llx dma); "
- "in seg %p (0x%llx dma)",
+ "Address = %pK (0x%llx dma); "
+ "in seg %pK (0x%llx dma)",
cur_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
cur_seg,
@@ -764,7 +755,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* short, don't muck with the stream ID after
* submission.
*/
- xhci_warn(xhci, "WARN Cancelled URB %p "
+ xhci_warn(xhci, "WARN Cancelled URB %pK "
"has invalid stream ID %u.\n",
cur_td->urb,
cur_td->urb->stream_id);
@@ -1103,7 +1094,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
- xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+ xhci_warn(xhci, "ep deq seg = %pK, deq ptr = %pK\n",
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
@@ -2623,7 +2614,7 @@ cleanup:
URB_SHORT_NOT_OK)) ||
(status != 0 &&
!usb_endpoint_xfer_isoc(&urb->ep->desc)))
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ xhci_dbg(xhci, "Giveback URB %pK, len = %d, "
"expected = %d, status = %d\n",
urb, urb->actual_length,
urb->transfer_buffer_length,
@@ -3575,6 +3566,156 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return 0;
}
+/*
+ * Variant of xhci_queue_ctrl_tx() used to implement EHSET
+ * SINGLE_STEP_SET_FEATURE test mode. It differs in that the control
+ * transfer is broken up so that the SETUP stage can happen and call
+ * the URB's completion handler before the DATA/STATUS stages are
+ * executed by the xHC hardware. This assumes the control transfer is a
+ * GetDescriptor, with a DATA stage in the IN direction, and an OUT
+ * STATUS stage.
+ *
+ * This function is called twice, usually with a 15-second delay in between.
+ * - with is_setup==true, the SETUP stage for the control request
+ * (GetDescriptor) is queued in the TRB ring and sent to HW immediately
+ * - with is_setup==false, the DATA and STATUS TRBs are queued and executed
+ *
+ * Caller must have locked xhci->lock
+ */
+int xhci_submit_single_step_set_feature(struct usb_hcd *hcd, struct urb *urb,
+ int is_setup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_ring *ep_ring;
+ int num_trbs;
+ int ret;
+ unsigned int slot_id, ep_index;
+ struct usb_ctrlrequest *setup;
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+ u32 field, length_field, remainder;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
+
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
+
+ /* Need buffer for data stage */
+ if (urb->transfer_buffer_length <= 0)
+ return -EINVAL;
+
+ /*
+ * Need to copy setup packet into setup TRB, so we can't use the setup
+ * DMA address.
+ */
+ if (!urb->setup_packet)
+ return -EINVAL;
+ setup = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+ urb_priv = kzalloc(sizeof(struct urb_priv) +
+ sizeof(struct xhci_td *), GFP_ATOMIC);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ td = urb_priv->td[0] = kzalloc(sizeof(struct xhci_td), GFP_ATOMIC);
+ if (!td) {
+ kfree(urb_priv);
+ return -ENOMEM;
+ }
+
+ urb_priv->length = 1;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+
+ num_trbs = is_setup ? 1 : 2;
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ num_trbs, urb, 0, GFP_ATOMIC);
+ if (ret < 0) {
+ kfree(td);
+ kfree(urb_priv);
+ return ret;
+ }
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ if (is_setup) {
+ /* Queue only the setup TRB */
+ field = TRB_IDT | TRB_IOC | TRB_TYPE(TRB_SETUP);
+ if (start_cycle == 0)
+ field |= 0x1;
+
+ /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version == 0x100) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_TX_TYPE(TRB_DATA_IN);
+ else
+ field |= TRB_TX_TYPE(TRB_DATA_OUT);
+ }
+
+ /* Save the DMA address of the last TRB in the TD */
+ td->last_trb = ep_ring->enqueue;
+
+ queue_trb(xhci, ep_ring, false,
+ setup->bRequestType | setup->bRequest << 8 |
+ le16_to_cpu(setup->wValue) << 16,
+ le16_to_cpu(setup->wIndex) |
+ le16_to_cpu(setup->wLength) << 16,
+ TRB_LEN(8) | TRB_INTR_TARGET(0),
+ field);
+ } else {
+ /* Queue data TRB */
+ field = TRB_ISP | TRB_TYPE(TRB_DATA);
+ if (start_cycle == 0)
+ field |= 0x1;
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_DIR_IN;
+
+ remainder = xhci_td_remainder(xhci, 0,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ urb, 1);
+
+ length_field = TRB_LEN(urb->transfer_buffer_length) |
+ TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ queue_trb(xhci, ep_ring, true,
+ lower_32_bits(urb->transfer_dma),
+ upper_32_bits(urb->transfer_dma),
+ length_field,
+ field);
+
+ /* Save the DMA address of the last TRB in the TD */
+ td->last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB */
+ field = TRB_IOC | TRB_TYPE(TRB_STATUS);
+ if (!(setup->bRequestType & USB_DIR_IN))
+ field |= TRB_DIR_IN;
+
+ queue_trb(xhci, ep_ring, false,
+ 0,
+ 0,
+ TRB_INTR_TARGET(0),
+ field | ep_ring->cycle_state);
+ }
+
+ giveback_first_trb(xhci, slot_id, ep_index, 0, start_cycle, start_trb);
+ return 0;
+}
+
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
struct urb *urb, int i)
{
@@ -4179,7 +4320,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
int ret;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
+ "Set TR Deq Ptr cmd, new deq seg = %pK (0x%llx dma), new deq ptr = %pK (0x%llx dma), new cycle = %u",
deq_state->new_deq_seg,
(unsigned long long)deq_state->new_deq_seg->dma,
deq_state->new_deq_ptr,
@@ -4191,7 +4332,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
deq_state->new_deq_ptr);
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+ xhci_warn(xhci, "WARN deq seg = %pK, deq pt = %pK\n",
deq_state->new_deq_seg, deq_state->new_deq_ptr);
return;
}
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 59c05653b2ea..4ef95ac3d976 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -103,7 +103,7 @@ DECLARE_EVENT_CLASS(xhci_log_ctx,
((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
),
- TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+ TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%pK",
__entry->ctx_64, __entry->ctx_type,
(unsigned long long) __entry->ctx_dma, __entry->ctx_va
)
@@ -134,7 +134,7 @@ DECLARE_EVENT_CLASS(xhci_log_event,
memcpy(__get_dynamic_array(trb), trb_va,
sizeof(struct xhci_generic_trb));
),
- TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
+ TP_printk("\ntrb_dma=@%llx, trb_va=@%pK, status=%08x, flags=%08x",
(unsigned long long) __entry->dma, __entry->va,
__entry->status, __entry->flags
)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b1994b03341f..85eb0fe2183c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -75,6 +75,27 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
return ret;
}
+int xhci_handshake_check_state(struct xhci_hcd *xhci,
+ void __iomem *ptr, u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = readl_relaxed(ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ /* host removed. Bail out */
+ if (xhci->xhc_state & XHCI_STATE_REMOVING)
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
/*
* Disable interrupts and begin the xHCI halting process.
*/
@@ -112,10 +133,20 @@ int xhci_halt(struct xhci_hcd *xhci)
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (!ret) {
xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- } else
+ } else {
xhci_warn(xhci, "Host not halted after %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+ if (delayed_work_pending(&xhci->cmd_timer)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Cleanup command queue");
+ cancel_delayed_work(&xhci->cmd_timer);
+ xhci_cleanup_command_queue(xhci);
+ }
+
return ret;
}
@@ -126,7 +157,13 @@ static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ /*
+ * disable irq to avoid xhci_irq flooding due to an unhandled port
+ * change event in halt state, as soon as xhci_start clears the halt bit
+ */
+ disable_irq(hcd->irq);
temp = readl(&xhci->op_regs->command);
temp |= (CMD_RUN);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
@@ -147,6 +184,8 @@ static int xhci_start(struct xhci_hcd *xhci)
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ enable_irq(hcd->irq);
+
return ret;
}
@@ -645,7 +684,7 @@ int xhci_run(struct usb_hcd *hcd)
temp = readl(&xhci->ir_set->irq_pending);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
+ "// Enabling event ring interrupter %pK by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
@@ -743,6 +782,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
spin_lock_irq(&xhci->lock);
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
xhci_halt(xhci);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
@@ -930,7 +973,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- if (!hcd->state)
+ if (!hcd->state || xhci->suspended)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
@@ -1000,6 +1043,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
+ xhci->suspended = true;
return rc;
}
@@ -1020,7 +1064,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
bool comp_timer_running = false;
bool pending_portevent = false;
- if (!hcd->state)
+ if (!hcd->state || !xhci->suspended)
return 0;
/* Wait a bit if either of the roothubs need to settle from the
@@ -1179,6 +1223,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ xhci->suspended = false;
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -1503,7 +1548,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
exit:
return ret;
dying:
- xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+ xhci_dbg(xhci, "Ep 0x%x: URB %pK submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
ret = -ESHUTDOWN;
@@ -1639,7 +1684,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
i = urb_priv->td_cnt;
if (i < urb_priv->length)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cancel URB %p, dev %s, ep 0x%x, "
+ "Cancel URB %pK, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
@@ -1707,7 +1752,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (xhci->xhc_state & XHCI_STATE_DYING)
return -ENODEV;
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1735,7 +1780,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ xhci_warn(xhci, "xHCI %s called with disabled ep %pK\n",
__func__, ep);
return 0;
}
@@ -1827,7 +1872,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* ignore this request.
*/
if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
- xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ xhci_warn(xhci, "xHCI %s called with enabled ep %pK\n",
__func__, ep);
return 0;
}
@@ -2808,7 +2853,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
(xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
@@ -2905,7 +2950,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
return;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) {
@@ -2958,7 +3003,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit config ep after "
"reset ep command\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+ xhci_warn(xhci, "WARN deq seg = %pK, deq ptr = %pK\n",
deq_state->new_deq_seg,
deq_state->new_deq_ptr);
return;
@@ -3692,6 +3737,7 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
+ virt_dev->udev = NULL;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev->udev = NULL;
@@ -3985,7 +4031,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
- "Slot ID %d dcbaa entry @%p = %#016llx",
+ "Slot ID %d dcbaa entry @%pK = %#016llx",
udev->slot_id,
&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
(unsigned long long)
@@ -5037,6 +5083,61 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+ unsigned intr_num)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (intr_num >= xhci->max_interrupters) {
+ xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+ xhci->max_interrupters);
+ return 0;
+ }
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+ xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+ && xhci->sec_event_ring[intr_num]->first_seg)
+ return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+ return 0;
+}
+
+static dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+ return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+ return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+ int ret;
+ unsigned int ep_index;
+ struct xhci_virt_device *virt_dev;
+
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+ if (ret <= 0) {
+ xhci_err(xhci, "%s: invalid args\n", __func__);
+ return 0;
+ }
+
+ virt_dev = xhci->devs[udev->slot_id];
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ if (virt_dev->eps[ep_index].ring &&
+ virt_dev->eps[ep_index].ring->first_seg)
+ return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+ return 0;
+}
+
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5096,6 +5197,11 @@ static const struct hc_driver xhci_hc_driver = {
.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
.find_raw_port_number = xhci_find_raw_port_number,
+ .sec_event_ring_setup = xhci_sec_event_ring_setup,
+ .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
+ .get_sec_event_ring_dma_addr = xhci_get_sec_event_ring_dma_addr,
+ .get_xfer_ring_dma_addr = xhci_get_xfer_ring_dma_addr,
+ .get_dcba_dma_addr = xhci_get_dcba_dma_addr,
};
void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a7f346529f91..d6504885fa55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1522,6 +1522,9 @@ struct xhci_hcd {
/* Our HCD's current interrupter register set */
struct xhci_intr_reg __iomem *ir_set;
+ /* secondary interrupter */
+ struct xhci_intr_reg __iomem **sec_ir_set;
+
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
__u32 hcs_params2;
@@ -1563,6 +1566,11 @@ struct xhci_hcd {
struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
+
+ /* secondary event ring and erst */
+ struct xhci_ring **sec_event_ring;
+ struct xhci_erst *sec_erst;
+
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
/* Store LPM test failed devices' information */
@@ -1665,6 +1673,7 @@ struct xhci_hcd {
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+ bool suspended;
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
};
@@ -1822,10 +1831,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
void xhci_urb_free_priv(struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned intr_num);
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+int xhci_handshake_check_state(struct xhci_hcd *xhci,
+ void __iomem *ptr, u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
@@ -1961,4 +1974,8 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+/* EHSET */
+int xhci_submit_single_step_set_feature(struct usb_hcd *hcd, struct urb *urb,
+ int is_setup);
+
#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index c7383c41c90e..8006d75efc09 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -258,3 +258,37 @@ config USB_CHAOSKEY
To compile this driver as a module, choose M here: the
module will be called chaoskey.
+
+config USB_QTI_KS_BRIDGE
+ tristate "USB QTI kick start bridge"
+ depends on USB
+ help
+ Say Y here if you have a QTI modem device connected via USB that
+ will be bridged in kernel space. This driver works as a bridge to pass
+ boot images, ram-dumps and efs sync.
+ To compile this driver as a module, choose M here: the module
+ will be called ks_bridge. If unsure, choose N.
+
+config USB_QCOM_IPC_BRIDGE
+ tristate "USB QTI IPC bridge driver"
+ depends on USB
+ depends on USB_QCOM_DIAG_BRIDGE
+ help
+ Say Y here if you have a QTI modem device connected via USB that
+ will be bridged in kernel space. This driver works as a transport
+ layer for IPC router module that enables communication between
+ APPS processor and MODEM processor. This config depends on
+ USB_QCOM_DIAG_BRIDGE because the core USB support for the transports
+ of both diag and IPC messages is in the same driver. Select this
+ config manually if you want to compile HSIC transport IPC router.
+
+config USB_QCOM_DIAG_BRIDGE
+ tristate "USB QTI diagnostic bridge driver"
+ depends on USB
+ help
+ Say Y here if you have a QTI modem device connected via USB that
+ will be bridged in kernel space. This driver communicates with the
+ diagnostic interface and allows for bridging with the diag forwarding
+ driver.
+ To compile this driver as a module, choose M here: the
+ module will be called diag_bridge. If unsure, choose N.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0cbdd77363f2..4986df051c22 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -28,3 +28,6 @@ obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
+
+obj-$(CONFIG_USB_QTI_KS_BRIDGE) += ks_bridge.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diag_ipc_bridge.o
diff --git a/drivers/usb/misc/diag_ipc_bridge.c b/drivers/usb/misc/diag_ipc_bridge.c
new file mode 100644
index 000000000000..780746e8e630
--- /dev/null
+++ b/drivers/usb/misc/diag_ipc_bridge.c
@@ -0,0 +1,859 @@
+/* Copyright (c) 2011-2015, 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/usb/diag_bridge.h>
+#include <linux/usb/ipc_bridge.h>
+
+#define DRIVER_DESC "USB host diag bridge driver"
+#define DRIVER_VERSION "1.0"
+
+enum {
+ DIAG_BRIDGE,
+ IPC_BRIDGE,
+ MAX_BRIDGE_DEVS,
+};
+
+#define AUTOSUSP_DELAY_WITH_USB 1000
+
+#define IPC_BRIDGE_MAX_READ_SZ (8 * 1024)
+#define IPC_BRIDGE_MAX_WRITE_SZ (8 * 1024)
+
+struct diag_bridge {
+ struct usb_device *udev;
+ struct usb_interface *ifc;
+ struct usb_anchor submitted;
+ __u8 in_epAddr;
+ __u8 out_epAddr;
+ int err;
+ struct kref kref;
+ struct mutex ifc_mutex;
+ struct mutex read_mutex;
+ struct mutex write_mutex;
+ bool opened;
+ struct completion read_done;
+ struct completion write_done;
+ int read_result;
+ int write_result;
+ struct diag_bridge_ops *ops;
+ struct platform_device *pdev;
+ unsigned default_autosusp_delay;
+ int id;
+
+ /* Support INT IN instead of BULK IN */
+ bool use_int_in_pipe;
+ unsigned int period;
+
+ /* debugging counters */
+ unsigned long bytes_to_host;
+ unsigned long bytes_to_mdm;
+ unsigned pending_reads;
+ unsigned pending_writes;
+ unsigned drop_count;
+};
+struct diag_bridge *__dev[MAX_BRIDGE_DEVS];
+
+int diag_bridge_open(int id, struct diag_bridge_ops *ops)
+{
+ struct diag_bridge *dev;
+
+ if (id < 0 || id >= MAX_BRIDGE_DEVS) {
+ pr_err("%s: Invalid device ID\n", __func__);
+ return -ENODEV;
+ }
+
+ dev = __dev[id];
+ if (!dev) {
+ pr_err("%s: dev is null\n", __func__);
+ return -ENODEV;
+ }
+
+ if (dev->ops) {
+ pr_err("%s: bridge already opened\n", __func__);
+ return -EALREADY;
+ }
+
+ mutex_lock(&dev->ifc_mutex);
+ if (dev->opened) {
+ mutex_unlock(&dev->ifc_mutex);
+ pr_err("%s: Bridge already opened\n", __func__);
+ return -EBUSY;
+ }
+
+ dev->opened = true;
+ mutex_unlock(&dev->ifc_mutex);
+
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+ dev->ops = ops;
+ dev->err = 0;
+
+#ifdef CONFIG_PM_RUNTIME
+ dev->default_autosusp_delay =
+ dev->udev->dev.power.autosuspend_delay;
+#endif
+ pm_runtime_set_autosuspend_delay(&dev->udev->dev,
+ AUTOSUSP_DELAY_WITH_USB);
+
+ kref_get(&dev->kref);
+
+ return 0;
+}
+EXPORT_SYMBOL(diag_bridge_open);
+
+static int ipc_bridge_open(struct platform_device *pdev)
+{
+ if (__dev[IPC_BRIDGE]->pdev != pdev)
+ return -EINVAL;
+
+ return diag_bridge_open(IPC_BRIDGE, NULL);
+}
+
+static void diag_bridge_delete(struct kref *kref)
+{
+ struct diag_bridge *dev = container_of(kref, struct diag_bridge, kref);
+ struct usb_interface *ifc = dev->ifc;
+ int id = dev->id;
+
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+ usb_set_intfdata(ifc, NULL);
+ usb_put_intf(ifc);
+ usb_put_dev(dev->udev);
+ __dev[id] = 0;
+ kfree(dev);
+}
+
+void diag_bridge_close(int id)
+{
+ struct diag_bridge *dev;
+
+ if (id < 0 || id >= MAX_BRIDGE_DEVS) {
+ pr_err("%s: Invalid device ID\n", __func__);
+ return;
+ }
+
+ dev = __dev[id];
+ if (!dev) {
+ pr_err("%s: dev is null\n", __func__);
+ return;
+ }
+
+ if (id == DIAG_BRIDGE && !dev->ops) {
+ pr_err("%s: can't close bridge that was not open\n", __func__);
+ return;
+ }
+
+ mutex_lock(&dev->ifc_mutex);
+ if (!dev->opened) {
+ mutex_unlock(&dev->ifc_mutex);
+ pr_err("%s: Bridge not opened\n", __func__);
+ return;
+ }
+
+ dev->opened = false;
+ mutex_unlock(&dev->ifc_mutex);
+
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+
+ usb_kill_anchored_urbs(&dev->submitted);
+ dev->ops = 0;
+
+ pm_runtime_set_autosuspend_delay(&dev->udev->dev,
+ dev->default_autosusp_delay);
+
+ kref_put(&dev->kref, diag_bridge_delete);
+}
+EXPORT_SYMBOL(diag_bridge_close);
+
+static void ipc_bridge_close(struct platform_device *pdev)
+{
+ WARN_ON(__dev[IPC_BRIDGE]->pdev != pdev);
+ WARN_ON(__dev[IPC_BRIDGE]->udev->state != USB_STATE_NOTATTACHED);
+ diag_bridge_close(IPC_BRIDGE);
+}
+
+static void diag_bridge_read_cb(struct urb *urb)
+{
+ struct diag_bridge *dev = urb->context;
+ struct diag_bridge_ops *cbs = dev->ops;
+
+ dev_dbg(&dev->ifc->dev, "%s: status:%d actual:%d\n", __func__,
+ urb->status, urb->actual_length);
+
+ /* save error so that subsequent read/write returns ENODEV */
+ if (urb->status == -EPROTO)
+ dev->err = urb->status;
+
+ if (cbs && cbs->read_complete_cb) {
+ cbs->read_complete_cb(cbs->ctxt,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ urb->status < 0 ? urb->status : urb->actual_length);
+ } else {
+ if (urb->dev->state == USB_STATE_NOTATTACHED)
+ dev->read_result = -ENODEV;
+ else if (urb->status < 0)
+ dev->read_result = urb->status;
+ else
+ dev->read_result = urb->actual_length;
+
+ complete(&dev->read_done);
+ }
+
+ dev->bytes_to_host += urb->actual_length;
+ dev->pending_reads--;
+ kref_put(&dev->kref, diag_bridge_delete);
+}
+
+int diag_bridge_read(int id, char *data, int size)
+{
+ struct urb *urb = NULL;
+ unsigned int pipe;
+ struct diag_bridge *dev;
+ int ret;
+
+ if (id < 0 || id >= MAX_BRIDGE_DEVS) {
+ pr_err("%s: Invalid device ID\n", __func__);
+ return -ENODEV;
+ }
+
+ pr_debug("%s: reading %d bytes\n", __func__, size);
+
+ dev = __dev[id];
+ if (!dev) {
+ pr_err("%s: device is disconnected\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->read_mutex);
+ if (!dev->ifc) {
+ pr_err("%s: device is disconnected\n", __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (id == DIAG_BRIDGE && !dev->ops) {
+ pr_err("%s: bridge is not open\n", __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (!size) {
+ dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
+ dev->drop_count++;
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* if there was a previous unrecoverable error, just quit */
+ if (id == DIAG_BRIDGE && dev->err) {
+ pr_err("%s: EPROTO error occurred, or device disconnected\n",
+ __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ kref_get(&dev->kref);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&dev->ifc->dev, "unable to allocate urb\n");
+ ret = -ENOMEM;
+ goto put_error;
+ }
+
+ ret = usb_autopm_get_interface(dev->ifc);
+ if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("%s: read: autopm_get failed:%d\n",
+ __func__, ret);
+ goto free_error;
+ }
+
+ if (dev->use_int_in_pipe) {
+ pipe = usb_rcvintpipe(dev->udev, dev->in_epAddr);
+ usb_fill_int_urb(urb, dev->udev, pipe, data, size,
+ diag_bridge_read_cb, dev, dev->period);
+ } else {
+ pipe = usb_rcvbulkpipe(dev->udev, dev->in_epAddr);
+ usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
+ diag_bridge_read_cb, dev);
+ }
+
+ usb_anchor_urb(urb, &dev->submitted);
+ dev->pending_reads++;
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ pr_err_ratelimited("%s: submitting urb failed err:%d\n",
+ __func__, ret);
+ dev->pending_reads--;
+ usb_unanchor_urb(urb);
+ usb_autopm_put_interface(dev->ifc);
+ goto free_error;
+ }
+
+ usb_autopm_put_interface(dev->ifc);
+
+ if (id == IPC_BRIDGE) {
+ wait_for_completion(&dev->read_done);
+ ret = dev->read_result;
+ }
+
+ usb_free_urb(urb);
+ mutex_unlock(&dev->read_mutex);
+ return ret;
+
+free_error:
+ usb_free_urb(urb);
+put_error:
+ /* If URB submit successful, this is done in the completion handler */
+ kref_put(&dev->kref, diag_bridge_delete);
+error:
+ mutex_unlock(&dev->read_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(diag_bridge_read);
+
+static int
+ipc_bridge_read(struct platform_device *pdev, char *buf, unsigned int count)
+{
+ if (__dev[IPC_BRIDGE]->pdev != pdev)
+ return -EINVAL;
+ if (!__dev[IPC_BRIDGE]->opened)
+ return -EPERM;
+ if (count > IPC_BRIDGE_MAX_READ_SZ)
+ return -ENOSPC;
+ if (__dev[IPC_BRIDGE]->udev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+
+ return diag_bridge_read(IPC_BRIDGE, buf, count);
+}
+
+static void diag_bridge_write_cb(struct urb *urb)
+{
+ struct diag_bridge *dev = urb->context;
+ struct diag_bridge_ops *cbs = dev->ops;
+
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+
+ usb_autopm_put_interface_async(dev->ifc);
+
+ /* save error so that subsequent read/write returns ENODEV */
+ if (urb->status == -EPROTO)
+ dev->err = urb->status;
+
+ if (cbs && cbs->write_complete_cb) {
+ cbs->write_complete_cb(cbs->ctxt,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ urb->status < 0 ? urb->status : urb->actual_length);
+ } else {
+ if (urb->dev->state == USB_STATE_NOTATTACHED)
+ dev->write_result = -ENODEV;
+ else if (urb->status < 0)
+ dev->write_result = urb->status;
+ else
+ dev->write_result = urb->actual_length;
+
+ complete(&dev->write_done);
+ }
+
+ dev->bytes_to_mdm += urb->actual_length;
+ dev->pending_writes--;
+ kref_put(&dev->kref, diag_bridge_delete);
+}
+
+int diag_bridge_write(int id, char *data, int size)
+{
+ struct urb *urb = NULL;
+ unsigned int pipe;
+ struct diag_bridge *dev;
+ int ret;
+
+ if (id < 0 || id >= MAX_BRIDGE_DEVS) {
+ pr_err("%s: Invalid device ID\n", __func__);
+ return -ENODEV;
+ }
+
+ pr_debug("%s: writing %d bytes\n", __func__, size);
+
+ dev = __dev[id];
+ if (!dev) {
+ pr_err("%s: device is disconnected\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->write_mutex);
+ if (!dev->ifc) {
+ pr_err("%s: device is disconnected\n", __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (id == DIAG_BRIDGE && !dev->ops) {
+ pr_err("%s: bridge is not open\n", __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (!size) {
+ dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* if there was a previous unrecoverable error, just quit */
+ if (id == DIAG_BRIDGE && dev->err) {
+ pr_err("%s: EPROTO error occurred, or device disconnected\n",
+ __func__);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ kref_get(&dev->kref);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&dev->ifc->dev, "unable to allocate urb\n");
+ ret = -ENOMEM;
+ goto put_error;
+ }
+
+ ret = usb_autopm_get_interface(dev->ifc);
+ if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("%s: write: autopm_get failed:%d\n",
+ __func__, ret);
+ goto free_error;
+ }
+
+ pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
+ usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
+ diag_bridge_write_cb, dev);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ usb_anchor_urb(urb, &dev->submitted);
+ dev->pending_writes++;
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ pr_err_ratelimited("%s: submitting urb failed err:%d\n",
+ __func__, ret);
+ dev->pending_writes--;
+ usb_unanchor_urb(urb);
+ usb_autopm_put_interface(dev->ifc);
+ goto free_error;
+ }
+
+ if (id == IPC_BRIDGE) {
+ wait_for_completion(&dev->write_done);
+ ret = dev->write_result;
+ }
+
+ usb_free_urb(urb);
+ mutex_unlock(&dev->write_mutex);
+ return ret;
+
+free_error:
+ usb_free_urb(urb);
+put_error:
+ /* If URB submit successful, this is done in the completion handler */
+ kref_put(&dev->kref, diag_bridge_delete);
+error:
+ mutex_unlock(&dev->write_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(diag_bridge_write);
+
+/*
+ * ipc_bridge_write - platform-driver write hook for the IPC bridge
+ *
+ * Validates that @pdev is the registered IPC bridge platform device,
+ * that the bridge has been opened, and that @count fits in a single
+ * transfer, then forwards the buffer to diag_bridge_write(), which for
+ * IPC_BRIDGE blocks until the URB completes.
+ */
+static int
+ipc_bridge_write(struct platform_device *pdev, char *buf, unsigned int count)
+{
+	if (__dev[IPC_BRIDGE]->pdev != pdev)
+		return -EINVAL;
+	if (!__dev[IPC_BRIDGE]->opened)
+		return -EPERM;
+	if (count > IPC_BRIDGE_MAX_WRITE_SZ)
+		return -EINVAL;
+
+	return diag_bridge_write(IPC_BRIDGE, buf, count);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 512
+/*
+ * diag_read_stats - debugfs "status" read handler
+ *
+ * Formats each bridge instance's endpoint addresses, byte counters,
+ * pending URB counts, drop count and last error into a temporary
+ * buffer and copies the requested window to user space.
+ */
+static ssize_t diag_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf;
+	int i, ret = 0;
+
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_BRIDGE_DEVS; i++) {
+		struct diag_bridge *dev = __dev[i];
+
+		if (!dev)
+			continue;
+
+		/*
+		 * Append after the previous device's output.  Writing at
+		 * buf[0] every iteration would overwrite earlier stats
+		 * while ret still counted the overwritten bytes.
+		 */
+		ret += scnprintf(buf + ret, DEBUG_BUF_SIZE - ret,
+				"epin:%d, epout:%d\n"
+				"bytes to host: %lu\n"
+				"bytes to mdm: %lu\n"
+				"pending reads: %u\n"
+				"pending writes: %u\n"
+				"drop count:%u\n"
+				"last error: %d\n",
+				dev->in_epAddr, dev->out_epAddr,
+				dev->bytes_to_host, dev->bytes_to_mdm,
+				dev->pending_reads, dev->pending_writes,
+				dev->drop_count,
+				dev->err);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * diag_reset_stats - debugfs "status" write handler
+ *
+ * Any write (content ignored) zeroes the byte, pending-URB and drop
+ * counters for every registered bridge instance.
+ */
+static ssize_t diag_reset_stats(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	int i;
+
+	for (i = 0; i < MAX_BRIDGE_DEVS; i++) {
+		struct diag_bridge *dev = __dev[i];
+
+		if (dev) {
+			dev->bytes_to_host = dev->bytes_to_mdm = 0;
+			dev->pending_reads = dev->pending_writes = 0;
+			dev->drop_count = 0;
+		}
+	}
+
+	return count;
+}
+
+const struct file_operations diag_stats_ops = {
+ .read = diag_read_stats,
+ .write = diag_reset_stats,
+};
+
+static struct dentry *dent;
+
+/*
+ * diag_bridge_debugfs_init - create the "diag_bridge/status" debugfs
+ * node.  Best effort: on any failure the partial tree is removed and
+ * dent is left NULL so the cleanup path stays safe.
+ */
+static void diag_bridge_debugfs_init(void)
+{
+	struct dentry *dfile;
+
+	/*
+	 * debugfs_create_dir() returns NULL on failure and an ERR_PTR
+	 * when debugfs is disabled; reject both so later lookups and
+	 * cleanup never touch an invalid dentry.
+	 */
+	dent = debugfs_create_dir("diag_bridge", NULL);
+	if (IS_ERR_OR_NULL(dent)) {
+		dent = NULL;
+		return;
+	}
+
+	dfile = debugfs_create_file("status", 0444, dent, NULL,
+				    &diag_stats_ops);
+	if (IS_ERR_OR_NULL(dfile)) {
+		debugfs_remove(dent);
+		/* don't leave a dangling dentry for the cleanup path */
+		dent = NULL;
+	}
+}
+
+/* Remove the debugfs tree created by diag_bridge_debugfs_init(). */
+static void diag_bridge_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(dent);
+	dent = NULL;
+}
+#else
+static inline void diag_bridge_debugfs_init(void) { }
+static inline void diag_bridge_debugfs_cleanup(void) { }
+#endif
+
+static const struct ipc_bridge_platform_data ipc_bridge_pdata = {
+ .max_read_size = IPC_BRIDGE_MAX_READ_SZ,
+ .max_write_size = IPC_BRIDGE_MAX_WRITE_SZ,
+ .open = ipc_bridge_open,
+ .read = ipc_bridge_read,
+ .write = ipc_bridge_write,
+ .close = ipc_bridge_close,
+};
+
+/*
+ * diag_bridge_probe - bind to a diag/IPC-capable USB interface
+ *
+ * Allocates the per-bridge state, discovers one IN endpoint (bulk or
+ * interrupt) and one bulk OUT endpoint, and registers either a
+ * "diag_bridge" or an "ipc_bridge" platform device depending on the
+ * matched device ID.  Returns 0 on success or a negative errno.
+ */
+static int
+diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+	struct diag_bridge *dev;
+	struct usb_host_interface *ifc_desc;
+	struct usb_endpoint_descriptor *ep_desc;
+	int i, devid, ret = -ENOMEM;
+
+	pr_debug("%s: id:%lu\n", __func__, id->driver_info);
+
+	/* low byte of driver_info selects the bridge instance slot */
+	devid = id->driver_info & 0xFF;
+	if (devid < 0 || devid >= MAX_BRIDGE_DEVS) {
+		pr_err("%s: Invalid device ID\n", __func__);
+		return -ENODEV;
+	}
+
+	/* already probed? */
+	if (__dev[devid]) {
+		pr_err("%s: Diag device already probed\n", __func__);
+		return -ENODEV;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	__dev[devid] = dev;
+	dev->id = devid;
+
+	/* hold refs on the udev/intf for the lifetime of this bridge */
+	dev->udev = usb_get_dev(interface_to_usbdev(ifc));
+	dev->ifc = usb_get_intf(ifc);
+	kref_init(&dev->kref);
+	mutex_init(&dev->ifc_mutex);
+	mutex_init(&dev->read_mutex);
+	mutex_init(&dev->write_mutex);
+	init_completion(&dev->read_done);
+	init_completion(&dev->write_done);
+	init_usb_anchor(&dev->submitted);
+
+	/*
+	 * Take the first IN endpoint (bulk or interrupt) and the first
+	 * bulk OUT endpoint.  For an interrupt IN endpoint, remember its
+	 * polling interval for URB submission.
+	 */
+	ifc_desc = ifc->cur_altsetting;
+	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &ifc_desc->endpoint[i].desc;
+		if (!dev->in_epAddr && (usb_endpoint_is_bulk_in(ep_desc) ||
+			usb_endpoint_is_int_in(ep_desc))) {
+			dev->in_epAddr = ep_desc->bEndpointAddress;
+			if (usb_endpoint_is_int_in(ep_desc)) {
+				dev->use_int_in_pipe = 1;
+				dev->period = ep_desc->bInterval;
+			}
+		}
+		if (!dev->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+			dev->out_epAddr = ep_desc->bEndpointAddress;
+	}
+
+	if (!(dev->in_epAddr && dev->out_epAddr)) {
+		pr_err("%s: could not find bulk in and bulk out endpoints\n",
+			__func__);
+		ret = -ENODEV;
+		goto error;
+	}
+
+	usb_set_intfdata(ifc, dev);
+	diag_bridge_debugfs_init();
+	/*
+	 * DIAG bridge exposes a bare platform device; the IPC bridge
+	 * carries the ipc_bridge_pdata callback table as platform data.
+	 */
+	if (devid == DIAG_BRIDGE) {
+		dev->pdev = platform_device_register_simple("diag_bridge",
+			devid, NULL, 0);
+		if (IS_ERR(dev->pdev)) {
+			pr_err("%s: unable to allocate platform device\n",
+				__func__);
+			ret = PTR_ERR(dev->pdev);
+			goto error;
+		}
+	} else {
+		dev->pdev = platform_device_alloc("ipc_bridge", -1);
+		if (!dev->pdev) {
+			pr_err("%s: unable to allocate platform device\n",
+				__func__);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		ret = platform_device_add_data(dev->pdev, &ipc_bridge_pdata,
+			sizeof(struct ipc_bridge_platform_data));
+		if (ret) {
+			pr_err("%s: fail to add pdata\n", __func__);
+			goto put_pdev;
+		}
+
+		ret = platform_device_add(dev->pdev);
+		if (ret) {
+			pr_err("%s: fail to add pdev\n", __func__);
+			goto put_pdev;
+		}
+	}
+
+	dev_dbg(&dev->ifc->dev, "%s: complete\n", __func__);
+
+	return 0;
+
+put_pdev:
+	platform_device_put(dev->pdev);
+error:
+	/*
+	 * NOTE(review): __dev[devid] is not cleared here; presumably
+	 * diag_bridge_delete() (the kref release, defined earlier in
+	 * this file) resets the slot -- verify, otherwise a failed
+	 * probe would block all future probes of this instance.
+	 */
+	diag_bridge_debugfs_cleanup();
+	mutex_destroy(&dev->write_mutex);
+	mutex_destroy(&dev->read_mutex);
+	mutex_destroy(&dev->ifc_mutex);
+	if (dev)
+		kref_put(&dev->kref, diag_bridge_delete);
+
+	return ret;
+}
+
+/*
+ * diag_bridge_disconnect - USB disconnect callback
+ *
+ * Unregisters the platform device, tears down debugfs, marks the
+ * bridge dead (dev->err = -ENODEV, checked by the I/O paths) and drops
+ * the reference taken at probe time.
+ */
+static void diag_bridge_disconnect(struct usb_interface *ifc)
+{
+	struct diag_bridge *dev = usb_get_intfdata(ifc);
+
+	dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+
+	platform_device_unregister(dev->pdev);
+	diag_bridge_debugfs_cleanup();
+	dev->err = -ENODEV;
+	kref_put(&dev->kref, diag_bridge_delete);
+}
+
+/*
+ * diag_bridge_suspend - USB suspend callback
+ *
+ * Gives the bridge client a chance to veto suspend via its ->suspend()
+ * callback; if it agrees (or has no callback), kills every anchored
+ * in-flight URB so the bus can idle.
+ */
+static int diag_bridge_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+	struct diag_bridge *dev = usb_get_intfdata(ifc);
+	struct diag_bridge_ops *cbs = dev->ops;
+	int ret = 0;
+
+	if (cbs && cbs->suspend) {
+		ret = cbs->suspend(cbs->ctxt);
+		if (ret) {
+			dev_dbg(&dev->ifc->dev,
+				"%s: diag veto'd suspend\n", __func__);
+			return ret;
+		}
+	}
+
+	usb_kill_anchored_urbs(&dev->submitted);
+
+	return ret;
+}
+
+/*
+ * diag_bridge_resume - USB resume/reset_resume callback
+ *
+ * Notifies the bridge client (if registered) so it can restart I/O.
+ */
+static int diag_bridge_resume(struct usb_interface *ifc)
+{
+	struct diag_bridge *dev = usb_get_intfdata(ifc);
+	struct diag_bridge_ops *cbs = dev->ops;
+
+
+	if (cbs && cbs->resume)
+		cbs->resume(cbs->ctxt);
+
+	return 0;
+}
+
+#define DEV_ID(n) (n)
+
+static const struct usb_device_id diag_bridge_ids[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9001, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x901D, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x901F, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9034, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9048, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x904C, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9075, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9079, 0),
+ .driver_info = DEV_ID(1), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908A, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908E, 0),
+ .driver_info = DEV_ID(0), },
+ /* 908E, ifc#1 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908E, 1),
+ .driver_info = DEV_ID(1), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909C, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909D, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909E, 0),
+ .driver_info = DEV_ID(0), },
+ /* 909E, ifc#1 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909E, 1),
+ .driver_info = DEV_ID(1), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909F, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A0, 0),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A4, 0),
+ .driver_info = DEV_ID(0), },
+ /* 909E, ifc#1 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A4, 1),
+ .driver_info = DEV_ID(1), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90EF, 4),
+ .driver_info = DEV_ID(0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90F0, 4),
+ .driver_info = DEV_ID(0), },
+ /* 9900, ifc#2 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9900, 2),
+ .driver_info = DEV_ID(0), },
+ /* 9900, ifc#1 refers to IPC client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9900, 1),
+ .driver_info = DEV_ID(1), },
+ /* 9901, ifc#4 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9901, 4),
+ .driver_info = DEV_ID(0), },
+ /* 9901, ifc#3 refers to IPC client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9901, 3),
+ .driver_info = DEV_ID(1), },
+ /* 9902, ifc#2 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9902, 2),
+ .driver_info = DEV_ID(0), },
+ /* 9902, ifc#1 refers to IPC client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9902, 1),
+ .driver_info = DEV_ID(1), },
+ /* 9903, ifc#4 refers to diag client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9903, 4),
+ .driver_info = DEV_ID(0), },
+ /* 9903, ifc#3 refers to IPC client interface */
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9903, 3),
+ .driver_info = DEV_ID(1), },
+
+ {} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, diag_bridge_ids);
+
+static struct usb_driver diag_bridge_driver = {
+ .name = "diag_bridge",
+ .probe = diag_bridge_probe,
+ .disconnect = diag_bridge_disconnect,
+ .suspend = diag_bridge_suspend,
+ .resume = diag_bridge_resume,
+ .reset_resume = diag_bridge_resume,
+ .id_table = diag_bridge_ids,
+ .supports_autosuspend = 1,
+};
+
+/* Module init: register the USB driver; no other global setup. */
+static int __init diag_bridge_init(void)
+{
+	int ret;
+
+	ret = usb_register(&diag_bridge_driver);
+	if (ret) {
+		pr_err("%s: unable to register diag driver\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Module exit: unregister the USB driver. */
+static void __exit diag_bridge_exit(void)
+{
+	usb_deregister(&diag_bridge_driver);
+}
+
+module_init(diag_bridge_init);
+module_exit(diag_bridge_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index c31b4a33e6bb..0efcd485c02a 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,69 +26,89 @@
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
-static int ehset_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static u8 numPorts;
+
+/*
+ * ehset_get_port_num - parse a hub port number written via sysfs
+ *
+ * Parses @buf as a base-10 number into @val and validates it against
+ * the hub's advertised port count (valid range 1..numPorts).
+ * Returns 0 on success or a negative errno.
+ */
+static int ehset_get_port_num(struct device *dev, const char *buf,
+						unsigned long *val)
+{
+	int ret;
+
+	ret = kstrtoul(buf, 10, val);
+	if (ret < 0) {
+		dev_err(dev, "couldn't parse string %d\n", ret);
+		return ret;
+	}
+
+	if (!*val || *val > numPorts) {
+		dev_err(dev, "Invalid port num entered\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ehset_clear_port_feature(struct usb_device *udev, int feature,
+ int port1)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
+ NULL, 0, 1000);
+}
+
+static int ehset_set_port_feature(struct usb_device *udev, int feature,
+ int port1, int timeout)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
+ NULL, 0, timeout);
+}
+
+static int ehset_set_testmode(struct device *dev, struct usb_device *child_udev,
+ struct usb_device *hub_udev, int test_id, int port)
{
- int ret = -EINVAL;
- struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_device *hub_udev = dev->parent;
struct usb_device_descriptor *buf;
- u8 portnum = dev->portnum;
- u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
+ int ret = -EINVAL;
- switch (test_pid) {
+ switch (test_id) {
case TEST_SE0_NAK_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_SE0_NAK << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_SE0_NAK << 8) | port, 1000);
break;
case TEST_J_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_J << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_J << 8) | port, 1000);
break;
case TEST_K_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_K << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_K << 8) | port, 1000);
break;
case TEST_PACKET_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_PACKET << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_PACKET << 8) | port, 1000);
break;
case TEST_HS_HOST_PORT_SUSPEND_RESUME:
/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
msleep(15 * 1000);
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_SUSPEND, portnum,
- NULL, 0, 1000);
- if (ret < 0)
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_SUSPEND,
+ port, 1000);
+ if (ret)
break;
msleep(15 * 1000);
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_SUSPEND, portnum,
- NULL, 0, 1000);
+ ret = ehset_clear_port_feature(hub_udev, USB_PORT_FEAT_SUSPEND,
+ port);
break;
case TEST_SINGLE_STEP_GET_DEV_DESC:
/* Test: wait for 15secs -> GetDescriptor request */
msleep(15 * 1000);
buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ break;
+ }
- ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ ret = usb_control_msg(child_udev,
+ usb_rcvctrlpipe(child_udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, USB_DT_DEVICE_SIZE,
@@ -103,28 +123,212 @@ static int ehset_probe(struct usb_interface *intf,
* SetPortFeature handling can only be done inside the HCD's
* hub_control callback function.
*/
- if (hub_udev != dev->bus->root_hub) {
- dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
+ if (hub_udev != child_udev->bus->root_hub) {
+ dev_err(dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
break;
}
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (6 << 8) | portnum,
- NULL, 0, 60 * 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (6 << 8) | port, 60 * 1000);
break;
default:
- dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
- __func__, test_pid);
+ dev_err(dev, "%s: unsupported test ID: 0x%x\n",
+ __func__, test_id);
+ }
+
+ return ret;
+}
+
+static ssize_t test_se0_nak_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_SE0_NAK_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while SE0_NAK test\n", ret);
+ return ret;
}
+ return count;
+}
+static DEVICE_ATTR_WO(test_se0_nak_portnum);
+
+static ssize_t test_j_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_J_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while J state test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_j_portnum);
+
+static ssize_t test_k_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_K_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while K state test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_k_portnum);
+
+static ssize_t test_packet_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_PACKET_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while sending test packets\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_packet_portnum);
+
+static ssize_t test_port_susp_resume_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev,
+ TEST_HS_HOST_PORT_SUSPEND_RESUME, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while port suspend resume test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_port_susp_resume_portnum);
+
+static struct attribute *ehset_attributes[] = {
+ &dev_attr_test_se0_nak_portnum.attr,
+ &dev_attr_test_j_portnum.attr,
+ &dev_attr_test_k_portnum.attr,
+ &dev_attr_test_packet_portnum.attr,
+ &dev_attr_test_port_susp_resume_portnum.attr,
+ NULL
+};
+
+static const struct attribute_group ehset_attr_group = {
+ .attrs = ehset_attributes,
+};
+
+static int ehset_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret = -EINVAL;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_device *hub_udev = dev->parent;
+ u8 portnum = dev->portnum;
+ u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
+
+ /*
+ * If an external hub does not support the EHSET test fixture, then user
+ * can forcefully unbind the external hub from the hub driver (to which
+ * an external hub gets bound by default) and bind it to this driver, so
+ * as to send test signals on any downstream port of the hub.
+ */
+ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) {
+ struct usb_hub_descriptor *descriptor;
+
+ descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+ if (!descriptor)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
+ USB_DT_HUB << 8, 0, descriptor,
+ USB_DT_HUB_NONVAR_SIZE, USB_CTRL_GET_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&intf->dev, "%s: Failed to get hub desc %d\n",
+ __func__, ret);
+ kfree(descriptor);
+ return ret;
+ }
+
+ numPorts = descriptor->bNbrPorts;
+ ret = sysfs_create_group(&intf->dev.kobj, &ehset_attr_group);
+ if (ret < 0)
+ dev_err(&intf->dev, "%s: Failed to create sysfs nodes %d\n",
+ __func__, ret);
+
+ kfree(descriptor);
+ return ret;
+ }
+
+ ret = ehset_set_testmode(&intf->dev, dev, hub_udev, test_pid, portnum);
+
return (ret < 0) ? ret : 0;
}
static void ehset_disconnect(struct usb_interface *intf)
{
+ struct usb_device *dev = interface_to_usbdev(intf);
+
+ numPorts = 0;
+ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ sysfs_remove_group(&intf->dev.kobj, &ehset_attr_group);
}
static const struct usb_device_id ehset_id_table[] = {
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
new file mode 100644
index 000000000000..3c8badc41fbc
--- /dev/null
+++ b/drivers/usb/misc/ks_bridge.c
@@ -0,0 +1,1155 @@
+/*
+ * Copyright (c) 2012-2014, 2017-2019, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/kobject.h>
+
+#define DRIVER_DESC "USB host ks bridge driver"
+
+enum bus_id {
+ BUS_HSIC,
+ BUS_USB,
+ BUS_UNDEF,
+};
+
+#define BUSNAME_LEN 20
+
+/*
+ * str_to_busid - map a host-controller device name to a bus id.
+ * Comparison is case-insensitive and limited to BUSNAME_LEN chars;
+ * unknown names yield BUS_UNDEF.
+ */
+static enum bus_id str_to_busid(const char *name)
+{
+	if (!strncasecmp("msm_hsic_host", name, BUSNAME_LEN))
+		return BUS_HSIC;
+	if (!strncasecmp("msm_ehci_host.0", name, BUSNAME_LEN))
+		return BUS_USB;
+	if (!strncasecmp("xhci-hcd.0.auto", name, BUSNAME_LEN) ||
+			!strncasecmp("xhci-hcd.1.auto", name, BUSNAME_LEN))
+		return BUS_USB;
+
+	return BUS_UNDEF;
+}
+
+struct data_pkt {
+ int n_read;
+ char *buf;
+ size_t len;
+ struct list_head list;
+ void *ctxt;
+};
+
+#define FILE_OPENED BIT(0)
+#define USB_DEV_CONNECTED BIT(1)
+#define NO_RX_REQS 10
+#define NO_BRIDGE_INSTANCES 4
+#define EFS_HSIC_BRIDGE_INDEX 2
+#define EFS_USB_BRIDGE_INDEX 3
+#define MAX_DATA_PKT_SIZE 16384
+#define PENDING_URB_TIMEOUT 10
+
+struct ksb_dev_info {
+ const char *name;
+};
+
+struct ks_bridge {
+ char *name;
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct to_mdm_work;
+ struct work_struct start_rx_work;
+ struct list_head to_mdm_list;
+ struct list_head to_ks_list;
+ wait_queue_head_t ks_wait_q;
+ wait_queue_head_t pending_urb_wait;
+ atomic_t tx_pending_cnt;
+ atomic_t rx_pending_cnt;
+
+ struct ksb_dev_info id_info;
+
+ /* cdev interface */
+ dev_t cdev_start_no;
+ struct cdev cdev;
+ struct class *class;
+ struct device *device;
+
+ /* usb specific */
+ struct usb_device *udev;
+ struct usb_interface *ifc;
+ __u8 in_epAddr;
+ __u8 out_epAddr;
+ unsigned int in_pipe;
+ unsigned int out_pipe;
+ struct usb_anchor submitted;
+
+ unsigned long flags;
+
+ /* to handle INT IN ep */
+ unsigned int period;
+
+#define DBG_MSG_LEN 40
+#define DBG_MAX_MSG 500
+ unsigned int dbg_idx;
+ rwlock_t dbg_lock;
+
+ char (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN]; /* buffer */
+};
+
+struct ks_bridge *__ksb[NO_BRIDGE_INSTANCES];
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg = 1;
+module_param(enable_dbg, uint, S_IRUGO | S_IWUSR);
+
+/*
+ * dbg_log_event - append a timestamped entry to the per-bridge debug
+ * ring buffer (DBG_MAX_MSG slots, oldest entry overwritten).  No-op
+ * when the enable_dbg module parameter is 0.
+ */
+static void
+dbg_log_event(struct ks_bridge *ksb, char *event, int d1, int d2)
+{
+	unsigned long flags;
+	unsigned long long t;
+	unsigned long nanosec;
+
+	if (!enable_dbg)
+		return;
+
+	write_lock_irqsave(&ksb->dbg_lock, flags);
+	t = cpu_clock(smp_processor_id());
+	/* split the ns clock into seconds and microseconds for printing */
+	nanosec = do_div(t, 1000000000)/1000;
+	scnprintf(ksb->dbgbuf[ksb->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s:%x:%x",
+			(unsigned long)t, nanosec, event, d1, d2);
+
+	ksb->dbg_idx++;
+	ksb->dbg_idx = ksb->dbg_idx % DBG_MAX_MSG;
+	write_unlock_irqrestore(&ksb->dbg_lock, flags);
+}
+
+/*
+ * ksb_alloc_data_pkt - allocate a data_pkt with a @count-byte payload.
+ * @ctxt is stored opaquely (callers pass the owning ks_bridge).
+ * Returns ERR_PTR(-ENOMEM) on failure; free with ksb_free_data_pkt().
+ */
+static
+struct data_pkt *ksb_alloc_data_pkt(size_t count, gfp_t flags, void *ctxt)
+{
+	struct data_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct data_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(count, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->len = count;
+	INIT_LIST_HEAD(&pkt->list);
+	pkt->ctxt = ctxt;
+
+	return pkt;
+}
+
+/* Free a data_pkt and its payload buffer. */
+static void ksb_free_data_pkt(struct data_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt);
+/*
+ * ksb_fs_read - char-dev read: drain packets queued by the USB RX path
+ *
+ * Sleeps until data shows up on to_ks_list (or the USB device goes
+ * away), then copies as many queued packets as fit into the user
+ * buffer.  A fully consumed packet is recycled as a fresh RX URB via
+ * submit_one_urb(); a partially consumed one is pushed back to the
+ * head of the list for the next read.
+ */
+static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	int ret;
+	unsigned long flags;
+	struct ks_bridge *ksb = fp->private_data;
+	struct data_pkt *pkt = NULL;
+	size_t space, copied;
+
+read_start:
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	if (list_empty(&ksb->to_ks_list)) {
+		spin_unlock_irqrestore(&ksb->lock, flags);
+		ret = wait_event_interruptible(ksb->ks_wait_q,
+				!list_empty(&ksb->to_ks_list) ||
+				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
+		if (ret < 0)
+			return ret;
+
+		/* re-check connection state and list under the lock */
+		goto read_start;
+	}
+
+	space = count;
+	copied = 0;
+	while (!list_empty(&ksb->to_ks_list) && space &&
+			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+		size_t len;
+
+		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+		list_del_init(&pkt->list);
+		len = min_t(size_t, space, pkt->len - pkt->n_read);
+		/* drop the lock while touching user memory */
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
+		if (ret) {
+			dev_err(ksb->device,
+					"copy_to_user failed err:%d\n", ret);
+			ksb_free_data_pkt(pkt);
+			return -EFAULT;
+		}
+
+		pkt->n_read += len;
+		space -= len;
+		copied += len;
+
+		if (pkt->n_read == pkt->len) {
+			/*
+			 * re-init the packet and queue it
+			 * for more data.
+			 */
+			pkt->n_read = 0;
+			pkt->len = MAX_DATA_PKT_SIZE;
+			submit_one_urb(ksb, GFP_KERNEL, pkt);
+			pkt = NULL;
+		}
+		spin_lock_irqsave(&ksb->lock, flags);
+	}
+
+	/* put the partial packet back in the list */
+	if (!space && pkt && pkt->n_read != pkt->len) {
+		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+			list_add(&pkt->list, &ksb->to_ks_list);
+		else
+			ksb_free_data_pkt(pkt);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	dbg_log_event(ksb, "KS_READ", copied, 0);
+
+	dev_dbg(ksb->device, "count:%zu space:%zu copied:%zu", count,
+			space, copied);
+
+	return copied;
+}
+
+/*
+ * ksb_tx_cb - bulk OUT URB completion handler
+ *
+ * Releases the autopm reference taken at submit time (only while the
+ * device is still connected), frees the packet, and wakes anyone
+ * waiting in release for the pending-TX count to drain.
+ */
+static void ksb_tx_cb(struct urb *urb)
+{
+	struct data_pkt *pkt = urb->context;
+	struct ks_bridge *ksb = pkt->ctxt;
+
+	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
+	dev_dbg(&ksb->udev->dev, "status:%d", urb->status);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		usb_autopm_put_interface_async(ksb->ifc);
+
+	if (urb->status < 0)
+		pr_err_ratelimited("%s: urb failed with err:%d",
+				ksb->id_info.name, urb->status);
+
+	ksb_free_data_pkt(pkt);
+
+	atomic_dec(&ksb->tx_pending_cnt);
+	wake_up(&ksb->pending_urb_wait);
+}
+
+/*
+ * ksb_tomdm_work - worker that flushes to_mdm_list over the bulk OUT
+ * pipe, one URB per queued packet.
+ *
+ * On an allocation, autopm or submit failure the current packet is
+ * dropped and the worker returns; any remaining packets stay queued
+ * until the next write re-queues the work item.
+ */
+static void ksb_tomdm_work(struct work_struct *w)
+{
+	struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
+	struct data_pkt *pkt;
+	unsigned long flags;
+	struct urb *urb;
+	int ret;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_mdm_list)
+			&& test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			dbg_log_event(ksb, "TX_URB_MEM_FAIL", -ENOMEM, 0);
+			pr_err_ratelimited("%s: unable to allocate urb",
+					ksb->id_info.name);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+
+		/* -EAGAIN/-EACCES mean resume is in progress or PM is
+		 * disabled; submission is still allowed in those cases.
+		 */
+		ret = usb_autopm_get_interface(ksb->ifc);
+		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+			dbg_log_event(ksb, "TX_URB_AUTOPM_FAIL", ret, 0);
+			pr_err_ratelimited("%s: autopm_get failed:%d",
+					ksb->id_info.name, ret);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+		usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
+				pkt->buf, pkt->len, ksb_tx_cb, pkt);
+		usb_anchor_urb(urb, &ksb->submitted);
+
+		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
+
+		atomic_inc(&ksb->tx_pending_cnt);
+		ret = usb_submit_urb(urb, GFP_KERNEL);
+		if (ret) {
+			dev_err(&ksb->udev->dev, "out urb submission failed");
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			usb_autopm_put_interface(ksb->ifc);
+			atomic_dec(&ksb->tx_pending_cnt);
+			wake_up(&ksb->pending_urb_wait);
+			return;
+		}
+
+		/* drop our ref; the USB core holds one until completion */
+		usb_free_urb(urb);
+
+		spin_lock_irqsave(&ksb->lock, flags);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+}
+
+/*
+ * ksb_fs_write - char-dev write: queue a packet for the modem
+ *
+ * Copies at most MAX_DATA_PKT_SIZE bytes from user space into a
+ * data_pkt, appends it to to_mdm_list and kicks the TX worker.
+ * Returns the number of bytes accepted or a negative errno.
+ */
+static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct data_pkt *pkt;
+	unsigned long flags;
+	struct ks_bridge *ksb = fp->private_data;
+
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return -ENODEV;
+
+	/* silently truncate oversized writes to a single packet */
+	if (count > MAX_DATA_PKT_SIZE)
+		count = MAX_DATA_PKT_SIZE;
+
+	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
+	if (IS_ERR(pkt)) {
+		dev_err(ksb->device,
+				"unable to allocate data packet");
+		return PTR_ERR(pkt);
+	}
+
+	/*
+	 * copy_from_user() returns the number of bytes NOT copied;
+	 * propagating that positive value would look like a successful
+	 * short write to the caller, so report -EFAULT instead.
+	 */
+	if (copy_from_user(pkt->buf, buf, count)) {
+		dev_err(ksb->device, "copy_from_user failed");
+		ksb_free_data_pkt(pkt);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	list_add_tail(&pkt->list, &ksb->to_mdm_list);
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	queue_work(ksb->wq, &ksb->to_mdm_work);
+
+	dbg_log_event(ksb, "KS_WRITE", count, 0);
+
+	return count;
+}
+
+/*
+ * ksb_fs_open - char-dev open: attach the ks_bridge instance to the
+ * file and, if the USB device is already present, start the RX path.
+ */
+static int ksb_fs_open(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb =
+			container_of(ip->i_cdev, struct ks_bridge, cdev);
+
+	/* NOTE(review): container_of() cannot yield an ERR_PTR, so this
+	 * check looks vestigial -- confirm before removing.
+	 */
+	if (IS_ERR(ksb)) {
+		pr_err("ksb device not found");
+		return -ENODEV;
+	}
+
+	dev_dbg(ksb->device, ":%s", ksb->id_info.name);
+	dbg_log_event(ksb, "FS-OPEN", 0, 0);
+
+	fp->private_data = ksb;
+	set_bit(FILE_OPENED, &ksb->flags);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+/*
+ * ksb_fs_poll - char-dev poll: readable when to_ks_list has data,
+ * POLLERR once the USB device has gone away.
+ */
+static unsigned int ksb_fs_poll(struct file *file, poll_table *wait)
+{
+	struct ks_bridge *ksb = file->private_data;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return POLLERR;
+
+	poll_wait(file, &ksb->ks_wait_q, wait);
+	/* device may have vanished while registering the waitqueue */
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return POLLERR;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	if (!list_empty(&ksb->to_ks_list))
+		ret = POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	return ret;
+}
+
+/*
+ * ksb_fs_release - char-dev close
+ *
+ * Cancels all anchored URBs, waits (bounded by PENDING_URB_TIMEOUT ms)
+ * for outstanding TX/RX completions to drain, then frees every packet
+ * still queued in either direction and clears the open flag.
+ */
+static int ksb_fs_release(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb = fp->private_data;
+	struct data_pkt *pkt;
+	unsigned long flags;
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		dev_dbg(ksb->device, ":%s", ksb->id_info.name);
+	dbg_log_event(ksb, "FS-RELEASE", 0, 0);
+
+	usb_kill_anchored_urbs(&ksb->submitted);
+
+	wait_event_interruptible_timeout(
+					ksb->pending_urb_wait,
+					!atomic_read(&ksb->tx_pending_cnt) &&
+					!atomic_read(&ksb->rx_pending_cnt),
+					msecs_to_jiffies(PENDING_URB_TIMEOUT));
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_ks_list)) {
+		pkt = list_first_entry(&ksb->to_ks_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	while (!list_empty(&ksb->to_mdm_list)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+	clear_bit(FILE_OPENED, &ksb->flags);
+	fp->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations ksb_fops = {
+ .owner = THIS_MODULE,
+ .read = ksb_fs_read,
+ .write = ksb_fs_write,
+ .open = ksb_fs_open,
+ .release = ksb_fs_release,
+ .poll = ksb_fs_poll,
+};
+
+/* Flashless-boot bridge names; indexed by bus id (HSIC vs USB) in probe. */
+static struct ksb_dev_info ksb_fboot_dev[] = {
+ {
+ .name = "ks_hsic_bridge",
+ },
+ {
+ .name = "ks_usb_bridge",
+ },
+};
+
+/* EFS sync bridge over HSIC. */
+static struct ksb_dev_info ksb_efs_hsic_dev = {
+ .name = "efs_hsic_bridge",
+};
+
+/* EFS sync bridge over USB. */
+static struct ksb_dev_info ksb_efs_usb_dev = {
+ .name = "efs_usb_bridge",
+};
+/* Qualcomm (VID 0x05c6) PID + interface-number pairs served here. */
+static const struct usb_device_id ksb_usb_ids[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9008, 0),
+ .driver_info = (unsigned long)&ksb_fboot_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9025, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9091, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x901D, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x901F, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x900E, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9900, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9901, 0), },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9902, 3),
+ .driver_info = (unsigned long)&ksb_fboot_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9903, 5),
+ .driver_info = (unsigned long)&ksb_fboot_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9048, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x904C, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9075, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x9079, 2),
+ .driver_info = (unsigned long)&ksb_efs_usb_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908A, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x908E, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909C, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909D, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909E, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x909F, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A0, 2),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+ { USB_DEVICE_INTERFACE_NUMBER(0x5c6, 0x90A4, 3),
+ .driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+
+ {} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+static void ksb_rx_cb(struct urb *urb);
+/*
+ * Allocate and submit one IN URB that reuses @pkt's buffer.
+ * On any failure the packet is freed here; on successful submission the
+ * USB core holds its own URB reference and the local one is dropped.
+ */
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
+{
+ struct urb *urb;
+ int ret;
+
+ urb = usb_alloc_urb(0, flags);
+ if (!urb) {
+ dev_err(&ksb->udev->dev, "unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ /* Interrupt endpoint if a polling period was found, else bulk. */
+ if (ksb->period)
+ usb_fill_int_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt, ksb->period);
+ else
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ /* Disconnect may have raced us; don't submit to a dead device. */
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ atomic_inc(&ksb->rx_pending_cnt);
+ ret = usb_submit_urb(urb, flags);
+ if (ret) {
+ dev_err(&ksb->udev->dev, "in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+ return;
+ }
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ /* The USB core now owns a reference; drop ours. */
+ usb_free_urb(urb);
+}
+/*
+ * Completion handler for IN URBs (runs in atomic context).
+ * The packet is either recycled into a fresh URB (zero-length transfer),
+ * freed (error with no data), or appended to to_ks_list for the reader.
+ * rx_pending_cnt is dropped and pending_urb_wait woken on every path.
+ */
+static void ksb_rx_cb(struct urb *urb)
+{
+ struct data_pkt *pkt = urb->context;
+ struct ks_bridge *ksb = pkt->ctxt;
+ bool wakeup = true;
+
+ dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
+
+ dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status,
+ urb->actual_length);
+
+ /*non zero len of data received while unlinking urb*/
+ if (urb->status == -ENOENT && (urb->actual_length > 0)) {
+ /*
+ * If we wakeup the reader process now, it may
+ * queue the URB before its reject flag gets
+ * cleared.
+ */
+ wakeup = false;
+ goto add_to_list;
+ }
+
+ if (urb->status < 0) {
+ /* ESHUTDOWN/ENOENT/EPROTO are expected during unlink or
+ * disconnect, so only other errors are worth logging.
+ */
+ if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
+ && urb->status != -EPROTO)
+ pr_err_ratelimited("%s: urb failed with err:%d",
+ ksb->id_info.name, urb->status);
+
+ if (!urb->actual_length) {
+ ksb_free_data_pkt(pkt);
+ goto done;
+ }
+ }
+
+ /* Keep the autosuspend timer pushed out while data flows. */
+ usb_mark_last_busy(ksb->udev);
+
+ /* Zero-length transfer: recycle the buffer into a fresh URB. */
+ if (urb->actual_length == 0) {
+ submit_one_urb(ksb, GFP_ATOMIC, pkt);
+ goto done;
+ }
+
+add_to_list:
+ spin_lock(&ksb->lock);
+ pkt->len = urb->actual_length;
+ list_add_tail(&pkt->list, &ksb->to_ks_list);
+ spin_unlock(&ksb->lock);
+ /* wake up read thread */
+ if (wakeup)
+ wake_up(&ksb->ks_wait_q);
+done:
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+}
+
+/*
+ * Work item: prime the IN endpoint with up to NO_RX_REQS receive URBs.
+ * NOTE(review): -EAGAIN/-EACCES from usb_autopm_get_interface() is
+ * treated as "no PM reference taken" (put stays false) while submission
+ * still proceeds -- confirm against the USB core PM semantics in this
+ * kernel tree.
+ */
+static void ksb_start_rx_work(struct work_struct *w)
+{
+ struct ks_bridge *ksb =
+ container_of(w, struct ks_bridge, start_rx_work);
+ struct data_pkt *pkt;
+ struct urb *urb;
+ int i = 0;
+ int ret;
+ bool put = true;
+
+ ret = usb_autopm_get_interface(ksb->ifc);
+ if (ret < 0) {
+ if (ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("%s: autopm_get failed:%d",
+ ksb->id_info.name, ret);
+ return;
+ }
+ put = false;
+ }
+ for (i = 0; i < NO_RX_REQS; i++) {
+
+ /* Stop priming as soon as the device disappears. */
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ break;
+
+ pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
+ if (IS_ERR(pkt)) {
+ dev_err(&ksb->udev->dev, "unable to allocate data pkt");
+ break;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&ksb->udev->dev, "unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ break;
+ }
+
+ /* Interrupt endpoint if a period was discovered, else bulk. */
+ if (ksb->period)
+ usb_fill_int_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt, ksb->period);
+ else
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ atomic_inc(&ksb->rx_pending_cnt);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&ksb->udev->dev, "in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ atomic_dec(&ksb->rx_pending_cnt);
+ wake_up(&ksb->pending_urb_wait);
+ break;
+ }
+
+ /* The USB core holds its own reference; drop ours. */
+ usb_free_urb(urb);
+ }
+ if (put)
+ usb_autopm_put_interface_async(ksb->ifc);
+}
+
+/*
+ * Send a KOBJ_ONLINE uevent carrying a PRODUCT=<vid>/<pid>/<proto>
+ * string so userspace can identify which bridge device appeared.
+ */
+static void ks_bridge_notify_status(struct kobject *kobj,
+ const struct usb_device_id *id)
+{
+ char product[32];
+ char *uevent_envp[2] = { product, NULL };
+
+ snprintf(product, sizeof(product), "PRODUCT=%x/%x/%x",
+ id->idVendor, id->idProduct, id->bDeviceProtocol);
+ kobject_uevent_env(kobj, KOBJ_ONLINE, uevent_envp);
+}
+
+/*
+ * Probe: bind the interface to one of the pre-allocated ks_bridge
+ * instances (selected by PID, root-hub port or bus id), discover its
+ * IN (bulk or interrupt) and bulk OUT endpoints, and create the
+ * character device node for userspace.
+ *
+ * Returns 0 on success or a negative errno. Fixes over the previous
+ * version: the created class is now destroyed when cdev_add() or
+ * device_create() fails, and every error path taken after usb_get_dev()
+ * drops the device reference and clears ksb->ifc/intfdata.
+ */
+static int
+ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+ __u8 ifc_num, ifc_count, ksb_port_num;
+ struct usb_host_interface *ifc_desc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i;
+ struct ks_bridge *ksb;
+ unsigned long flags;
+ struct data_pkt *pkt;
+ struct ksb_dev_info *mdev, *fbdev;
+ struct usb_device *udev;
+ unsigned int bus_id;
+ int ret;
+ bool free_mdev = false;
+
+ ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
+
+ udev = interface_to_usbdev(ifc);
+ ifc_count = udev->actconfig->desc.bNumInterfaces;
+ fbdev = mdev = (struct ksb_dev_info *)id->driver_info;
+
+ bus_id = str_to_busid(udev->bus->bus_name);
+ if (bus_id == BUS_UNDEF) {
+ dev_err(&udev->dev, "unknown usb bus %s, probe failed\n",
+ udev->bus->bus_name);
+ return -ENODEV;
+ }
+
+ /* Map the PID to a bridge instance and its device-info name. */
+ switch (id->idProduct) {
+ case 0x900E:
+ case 0x9025:
+ case 0x9091:
+ case 0x901D:
+ case 0x901F:
+ /* 1-1 mapping between ksb and udev port which starts with 1 */
+ ksb_port_num = udev->portnum - 1;
+ dev_dbg(&udev->dev, "ifc_count: %u, port_num:%u\n", ifc_count,
+ ksb_port_num);
+ if (ifc_count > 1)
+ return -ENODEV;
+ if (ksb_port_num >= NO_BRIDGE_INSTANCES) {
+ dev_err(&udev->dev, "port-num:%u invalid. Try first\n",
+ ksb_port_num);
+ ksb_port_num = 0;
+ }
+ ksb = __ksb[ksb_port_num];
+ if (ksb->ifc) {
+ dev_err(&udev->dev, "port already in use\n");
+ return -ENODEV;
+ }
+ /* Per-port instances need a heap copy of the dev info. */
+ mdev = kzalloc(sizeof(struct ksb_dev_info), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+ free_mdev = true;
+ mdev->name = ksb->name;
+ break;
+ case 0x9008:
+ case 0x9902:
+ case 0x9903:
+ ksb = __ksb[bus_id];
+ mdev = &fbdev[bus_id];
+ break;
+ case 0x9048:
+ case 0x904C:
+ case 0x9075:
+ case 0x908A:
+ case 0x908E:
+ case 0x90A0:
+ case 0x909C:
+ case 0x909D:
+ case 0x909E:
+ case 0x909F:
+ case 0x90A4:
+ ksb = __ksb[EFS_HSIC_BRIDGE_INDEX];
+ break;
+ case 0x9079:
+ if (ifc_num != 2)
+ return -ENODEV;
+ ksb = __ksb[EFS_USB_BRIDGE_INDEX];
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!ksb) {
+ pr_err("ksb is not initialized");
+ ret = -ENODEV;
+ goto fail_free_mdev;
+ }
+
+ ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
+ ksb->ifc = ifc;
+ ifc_desc = ifc->cur_altsetting;
+ ksb->id_info = *mdev;
+
+ /* Find the first IN endpoint (bulk preferred, else interrupt)
+ * and the first bulk OUT endpoint.
+ */
+ for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &ifc_desc->endpoint[i].desc;
+
+ if (!ksb->in_epAddr && (usb_endpoint_is_bulk_in(ep_desc))) {
+ ksb->in_epAddr = ep_desc->bEndpointAddress;
+ ksb->period = 0;
+ }
+
+ if (!ksb->in_epAddr && (usb_endpoint_is_int_in(ep_desc))) {
+ ksb->in_epAddr = ep_desc->bEndpointAddress;
+ ksb->period = ep_desc->bInterval;
+ }
+
+ if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+ ksb->out_epAddr = ep_desc->bEndpointAddress;
+ }
+
+ if (!(ksb->in_epAddr && ksb->out_epAddr)) {
+ dev_err(&udev->dev,
+ "could not find bulk in and bulk out endpoints");
+ ret = -ENODEV;
+ goto fail_put_dev;
+ }
+
+ ksb->in_pipe = ksb->period ?
+ usb_rcvintpipe(ksb->udev, ksb->in_epAddr) :
+ usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
+
+ ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);
+
+ usb_set_intfdata(ifc, ksb);
+ set_bit(USB_DEV_CONNECTED, &ksb->flags);
+ atomic_set(&ksb->tx_pending_cnt, 0);
+ atomic_set(&ksb->rx_pending_cnt, 0);
+
+ dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
+
+ /*free up stale buffers if any from previous disconnect*/
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_ks_list)) {
+ pkt = list_first_entry(&ksb->to_ks_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ while (!list_empty(&ksb->to_mdm_list)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ ret = alloc_chrdev_region(&ksb->cdev_start_no, 0, 1, mdev->name);
+ if (ret < 0) {
+ dbg_log_event(ksb, "chr reg failed", ret, 0);
+ goto fail_chrdev_region;
+ }
+
+ ksb->class = class_create(THIS_MODULE, mdev->name);
+ if (IS_ERR(ksb->class)) {
+ ret = PTR_ERR(ksb->class);
+ dbg_log_event(ksb, "clscr failed", ret, 0);
+ goto fail_class_create;
+ }
+
+ cdev_init(&ksb->cdev, &ksb_fops);
+ ksb->cdev.owner = THIS_MODULE;
+
+ ret = cdev_add(&ksb->cdev, ksb->cdev_start_no, 1);
+ if (ret < 0) {
+ dbg_log_event(ksb, "cdev_add failed", ret, 0);
+ goto fail_cdev_add;
+ }
+
+ ksb->device = device_create(ksb->class, &udev->dev, ksb->cdev_start_no,
+ NULL, mdev->name);
+ if (IS_ERR(ksb->device)) {
+ ret = PTR_ERR(ksb->device);
+ dbg_log_event(ksb, "devcrfailed", ret, 0);
+ goto fail_device_create;
+ }
+
+ if (device_can_wakeup(&ksb->udev->dev))
+ ifc->needs_remote_wakeup = 1;
+
+ if (free_mdev)
+ kfree(mdev);
+
+ ks_bridge_notify_status(&ksb->device->kobj, id);
+ dev_dbg(&udev->dev, "usb dev connected");
+
+ return 0;
+
+fail_device_create:
+ cdev_del(&ksb->cdev);
+fail_cdev_add:
+ /* The class was leaked on this path before; destroy it now. */
+ class_destroy(ksb->class);
+fail_class_create:
+ unregister_chrdev_region(ksb->cdev_start_no, 1);
+fail_chrdev_region:
+ usb_set_intfdata(ifc, NULL);
+ clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+fail_put_dev:
+ /* Balance usb_get_dev() and free the per-port instance binding. */
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+fail_free_mdev:
+ if (free_mdev)
+ kfree(mdev);
+
+ return ret;
+}
+
+/*
+ * Bus suspend callback: refuse suspend (-EBUSY) when the autosuspend
+ * timer has been re-armed or when unread data is still queued for
+ * userspace; otherwise cancel outstanding URBs and allow it.
+ */
+static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+ unsigned long flags;
+
+ dbg_log_event(ksb, "SUSPEND", 0, 0);
+
+ /* Abort if activity pushed the autosuspend deadline forward. */
+ if (pm_runtime_autosuspend_expiration(&ksb->udev->dev)) {
+ dbg_log_event(ksb, "SUSP ABORT-TimeCheck", 0, 0);
+ return -EBUSY;
+ }
+
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (!list_empty(&ksb->to_ks_list)) {
+ spin_unlock_irqrestore(&ksb->lock, flags);
+ dbg_log_event(ksb, "SUSPEND ABORT", 0, 0);
+ /*
+ * Now wakeup the reader process and queue
+ * Rx URBs for more data.
+ */
+ wake_up(&ksb->ks_wait_q);
+ queue_work(ksb->wq, &ksb->start_rx_work);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Bus resume callback: restart RX submission, but only when userspace
+ * actually holds the character device open.
+ */
+static int ksb_usb_resume(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+ dbg_log_event(ksb, "RESUME", 0, 0);
+
+ if (!test_bit(FILE_OPENED, &ksb->flags))
+ return 0;
+
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
+/*
+ * Disconnect: mark the device gone, tell userspace (KOBJ_OFFLINE),
+ * flush the workers, tear down the char device, cancel URBs, wait
+ * (bounded) for completions, then free all queued packets.
+ */
+static void ksb_usb_disconnect(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+ unsigned long flags;
+ struct data_pkt *pkt;
+
+ dbg_log_event(ksb, "PID-DETACH", 0, 0);
+
+ /* Clear first so readers/pollers and workers see the device gone. */
+ clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+ kobject_uevent(&ksb->device->kobj, KOBJ_OFFLINE);
+ wake_up(&ksb->ks_wait_q);
+ cancel_work_sync(&ksb->to_mdm_work);
+ cancel_work_sync(&ksb->start_rx_work);
+
+ device_destroy(ksb->class, ksb->cdev_start_no);
+ cdev_del(&ksb->cdev);
+ class_destroy(ksb->class);
+ unregister_chrdev_region(ksb->cdev_start_no, 1);
+
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ /* Give completion handlers a bounded window to finish. */
+ wait_event_interruptible_timeout(
+ ksb->pending_urb_wait,
+ !atomic_read(&ksb->tx_pending_cnt) &&
+ !atomic_read(&ksb->rx_pending_cnt),
+ msecs_to_jiffies(PENDING_URB_TIMEOUT));
+
+ /* Free everything still queued in either direction. */
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_ks_list)) {
+ pkt = list_first_entry(&ksb->to_ks_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ while (!list_empty(&ksb->to_mdm_list)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ ifc->needs_remote_wakeup = 0;
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+ usb_set_intfdata(ifc, NULL);
+}
+
+/* USB driver glue; PM callbacks allow runtime autosuspend of the link. */
+static struct usb_driver ksb_usb_driver = {
+ .name = "ks_bridge",
+ .probe = ksb_usb_probe,
+ .disconnect = ksb_usb_disconnect,
+ .suspend = ksb_usb_suspend,
+ .resume = ksb_usb_resume,
+ .reset_resume = ksb_usb_resume,
+ .id_table = ksb_usb_ids,
+ .supports_autosuspend = 1,
+};
+
+/*
+ * debugfs show: dump the circular event log; the "-->" prefix marks the
+ * most recently written entry (dbg_idx points one past it).
+ */
+static int ksb_debug_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct ks_bridge *ksb = s->private;
+ int i;
+
+ read_lock_irqsave(&ksb->dbg_lock, flags);
+ for (i = 0; i < DBG_MAX_MSG; i++) {
+ if (i == (ksb->dbg_idx - 1))
+ seq_printf(s, "-->%s\n", ksb->dbgbuf[i]);
+ else
+ seq_printf(s, "%s\n", ksb->dbgbuf[i]);
+ }
+ read_unlock_irqrestore(&ksb->dbg_lock, flags);
+
+ return 0;
+}
+
+/*
+ * debugfs open: bind the per-instance ks_bridge (stashed in i_private
+ * by debugfs_create_file) to the seq_file rendered by ksb_debug_show().
+ * The unreachable "return 0;" that followed the single_open() call has
+ * been removed.
+ */
+static int ksb_debug_open(struct inode *ip, struct file *fp)
+{
+ return single_open(fp, ksb_debug_show, ip->i_private);
+}
+
+/* seq_file plumbing for the per-bridge debugfs log file. */
+static const struct file_operations dbg_fops = {
+ .open = ksb_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Root debugfs directory ("ks_bridge") holding one file per instance. */
+static struct dentry *dbg_dir;
+
+/*
+ * Module init: allocate NO_BRIDGE_INSTANCES bridge instances, each with
+ * its own workqueue, wait queues, lists and debugfs log file, then
+ * register the USB driver. Partial failures unwind every instance that
+ * was fully constructed (tracked by num_instances).
+ */
+static int __init ksb_init(void)
+{
+ struct ks_bridge *ksb;
+ int num_instances = 0;
+ int ret = 0;
+ int i;
+
+ /* debugfs is optional; init proceeds even if this fails. */
+ dbg_dir = debugfs_create_dir("ks_bridge", NULL);
+ if (IS_ERR(dbg_dir))
+ pr_err("unable to create debug dir");
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
+ if (!ksb) {
+ pr_err("unable to allocat mem for ks_bridge");
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+ __ksb[i] = ksb;
+
+ ksb->name = kasprintf(GFP_KERNEL, "ks_usb_bridge.%i", i);
+ if (!ksb->name) {
+ pr_info("unable to allocate name");
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ spin_lock_init(&ksb->lock);
+ INIT_LIST_HEAD(&ksb->to_mdm_list);
+ INIT_LIST_HEAD(&ksb->to_ks_list);
+ init_waitqueue_head(&ksb->ks_wait_q);
+ init_waitqueue_head(&ksb->pending_urb_wait);
+ ksb->wq = create_singlethread_workqueue(ksb->name);
+ if (!ksb->wq) {
+ pr_err("unable to allocate workqueue");
+ kfree(ksb->name);
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
+ INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
+ init_usb_anchor(&ksb->submitted);
+
+ ksb->dbg_idx = 0;
+ ksb->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
+ ksb, &dbg_fops);
+
+ num_instances++;
+ }
+
+ ret = usb_register(&ksb_usb_driver);
+ if (ret) {
+ pr_err("unable to register ks bridge driver");
+ goto dev_free;
+ }
+
+ pr_info("init done");
+
+ return 0;
+
+dev_free:
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ /* Unwind only the instances that were fully set up. */
+ for (i = 0; i < num_instances; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+
+ return ret;
+
+}
+
+/*
+ * Module exit: remove debugfs, deregister the USB driver (which runs
+ * disconnect for bound interfaces), then free every bridge instance.
+ */
+static void __exit ksb_exit(void)
+{
+ struct ks_bridge *ksb;
+ int i;
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ usb_deregister(&ksb_usb_driver);
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+}
+
+module_init(ksb_init);
+module_exit(ksb_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index bda82e63c1a9..218b26aab667 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -180,6 +180,28 @@ static ssize_t hot_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(hot_reset);
+/*
+ * Sysfs write handler: issue a warm (BH) reset on the requested
+ * root-hub port. An unparsable or out-of-range port number falls back
+ * to the port the LVS device itself is attached to.
+ */
+static ssize_t warm_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *hdev = interface_to_usbdev(intf);
+ struct lvs_rh *lvs = usb_get_intfdata(intf);
+ int port;
+ int ret;
+
+ if (kstrtoint(buf, 0, &port) || port < 1 || port > 255)
+ port = lvs->portnum;
+
+ ret = lvs_rh_set_port_feature(hdev, port, USB_PORT_FEAT_BH_PORT_RESET);
+ if (ret < 0) {
+ dev_err(dev, "can't issue warm reset %d\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(warm_reset);
+
static ssize_t u2_timeout_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -278,13 +300,39 @@ free_desc:
}
static DEVICE_ATTR_WO(get_dev_desc);
+/*
+ * Sysfs write handler: put the requested root-hub port into compliance
+ * mode via SetPortFeature(PORT_LINK_STATE) with the compliance link
+ * state encoded in the upper selector bits of the port field.
+ * Falls back to the LVS device's own port on bad input.
+ */
+static ssize_t enable_compliance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *hdev = interface_to_usbdev(intf);
+ struct lvs_rh *lvs = usb_get_intfdata(intf);
+ int port;
+ int ret;
+
+ if (kstrtoint(buf, 0, &port) || port < 1 || port > 255)
+ port = lvs->portnum;
+
+ ret = lvs_rh_set_port_feature(hdev,
+ port | (USB_SS_PORT_LS_COMP_MOD << 3),
+ USB_PORT_FEAT_LINK_STATE);
+ if (ret < 0) {
+ dev_err(dev, "can't enable compliance mode %d\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(enable_compliance);
+
static struct attribute *lvs_attributes[] = {
&dev_attr_get_dev_desc.attr,
&dev_attr_u1_timeout.attr,
&dev_attr_u2_timeout.attr,
&dev_attr_hot_reset.attr,
+ &dev_attr_warm_reset.attr,
&dev_attr_u3_entry.attr,
&dev_attr_u3_exit.attr,
+ &dev_attr_enable_compliance.attr,
NULL
};
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 108dcc5f5350..d0c7f4949f6f 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -349,7 +349,7 @@ static int mon_text_open(struct inode *inode, struct file *file)
rp->r.rnf_error = mon_text_error;
rp->r.rnf_complete = mon_text_complete;
- snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
+ snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%pK", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
mon_text_ctor);
diff --git a/drivers/usb/pd/Kconfig b/drivers/usb/pd/Kconfig
new file mode 100644
index 000000000000..cc88df495f6e
--- /dev/null
+++ b/drivers/usb/pd/Kconfig
@@ -0,0 +1,25 @@
+#
+# USB Power Delivery driver configuration
+#
+menu "USB Power Delivery"
+
+config USB_PD
+ def_bool n
+
+config USB_PD_POLICY
+ tristate "USB Power Delivery Protocol and Policy Engine"
+ depends on EXTCON
+ depends on DUAL_ROLE_USB_INTF
+ select USB_PD
+ help
+ Say Y here to enable USB PD protocol and policy engine.
+
+config QPNP_USB_PDPHY
+ tristate "QPNP USB Power Delivery PHY"
+ depends on SPMI
+ help
+ Say Y here to enable QPNP USB PD PHY peripheral driver
+ which communicates over the SPMI bus. This is used to handle
+ the PHY layer communication of the Power Delivery stack.
+
+endmenu
diff --git a/drivers/usb/pd/Makefile b/drivers/usb/pd/Makefile
new file mode 100644
index 000000000000..f48707026799
--- /dev/null
+++ b/drivers/usb/pd/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for USB Power Delivery drivers
+#
+
+obj-$(CONFIG_USB_PD_POLICY) += policy_engine.o
+obj-$(CONFIG_QPNP_USB_PDPHY) += qpnp-pdphy.o
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
new file mode 100644
index 000000000000..540498548978
--- /dev/null
+++ b/drivers/usb/pd/policy_engine.c
@@ -0,0 +1,4038 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/extcon.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/usb/usbpd.h>
+#include "usbpd.h"
+
+/* To start the USB stack for USB 3.1 compliance testing */
+static bool usb_compliance_mode;
+module_param(usb_compliance_mode, bool, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(usb_compliance_mode, "Start USB stack for USB3.1 compliance testing");
+
+static bool disable_usb_pd;
+module_param(disable_usb_pd, bool, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(disable_usb_pd, "Disable USB PD for USB3.1 compliance testing");
+
+static bool rev3_sink_only;
+module_param(rev3_sink_only, bool, 0644);
+MODULE_PARM_DESC(rev3_sink_only, "Enable power delivery rev3.0 sink only mode");
+
+enum usbpd_state {
+ PE_UNKNOWN,
+ PE_ERROR_RECOVERY,
+ PE_SRC_DISABLED,
+ PE_SRC_STARTUP,
+ PE_SRC_SEND_CAPABILITIES,
+ PE_SRC_SEND_CAPABILITIES_WAIT, /* substate to wait for Request */
+ PE_SRC_NEGOTIATE_CAPABILITY,
+ PE_SRC_TRANSITION_SUPPLY,
+ PE_SRC_READY,
+ PE_SRC_HARD_RESET,
+ PE_SRC_SOFT_RESET,
+ PE_SRC_SEND_SOFT_RESET,
+ PE_SRC_DISCOVERY,
+ PE_SRC_TRANSITION_TO_DEFAULT,
+ PE_SNK_STARTUP,
+ PE_SNK_DISCOVERY,
+ PE_SNK_WAIT_FOR_CAPABILITIES,
+ PE_SNK_EVALUATE_CAPABILITY,
+ PE_SNK_SELECT_CAPABILITY,
+ PE_SNK_TRANSITION_SINK,
+ PE_SNK_READY,
+ PE_SNK_HARD_RESET,
+ PE_SNK_SOFT_RESET,
+ PE_SNK_SEND_SOFT_RESET,
+ PE_SNK_TRANSITION_TO_DEFAULT,
+ PE_DRS_SEND_DR_SWAP,
+ PE_PRS_SNK_SRC_SEND_SWAP,
+ PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
+ PE_PRS_SNK_SRC_SOURCE_ON,
+ PE_PRS_SRC_SNK_SEND_SWAP,
+ PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
+ PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+ PE_VCS_WAIT_FOR_VCONN,
+};
+
+static const char * const usbpd_state_strings[] = {
+ "UNKNOWN",
+ "ERROR_RECOVERY",
+ "SRC_Disabled",
+ "SRC_Startup",
+ "SRC_Send_Capabilities",
+ "SRC_Send_Capabilities (Wait for Request)",
+ "SRC_Negotiate_Capability",
+ "SRC_Transition_Supply",
+ "SRC_Ready",
+ "SRC_Hard_Reset",
+ "SRC_Soft_Reset",
+ "SRC_Send_Soft_Reset",
+ "SRC_Discovery",
+ "SRC_Transition_to_default",
+ "SNK_Startup",
+ "SNK_Discovery",
+ "SNK_Wait_for_Capabilities",
+ "SNK_Evaluate_Capability",
+ "SNK_Select_Capability",
+ "SNK_Transition_Sink",
+ "SNK_Ready",
+ "SNK_Hard_Reset",
+ "SNK_Soft_Reset",
+ "SNK_Send_Soft_Reset",
+ "SNK_Transition_to_default",
+ "DRS_Send_DR_Swap",
+ "PRS_SNK_SRC_Send_Swap",
+ "PRS_SNK_SRC_Transition_to_off",
+ "PRS_SNK_SRC_Source_on",
+ "PRS_SRC_SNK_Send_Swap",
+ "PRS_SRC_SNK_Transition_to_off",
+ "PRS_SRC_SNK_Wait_Source_on",
+ "VCS_Wait_for_VCONN",
+};
+
+enum usbpd_control_msg_type {
+ MSG_RESERVED = 0,
+ MSG_GOODCRC,
+ MSG_GOTOMIN,
+ MSG_ACCEPT,
+ MSG_REJECT,
+ MSG_PING,
+ MSG_PS_RDY,
+ MSG_GET_SOURCE_CAP,
+ MSG_GET_SINK_CAP,
+ MSG_DR_SWAP,
+ MSG_PR_SWAP,
+ MSG_VCONN_SWAP,
+ MSG_WAIT,
+ MSG_SOFT_RESET,
+ MSG_NOT_SUPPORTED = 0x10,
+ MSG_GET_SOURCE_CAP_EXTENDED,
+ MSG_GET_STATUS,
+ MSG_FR_SWAP,
+ MSG_GET_PPS_STATUS,
+ MSG_GET_COUNTRY_CODES,
+};
+
+enum usbpd_data_msg_type {
+ MSG_SOURCE_CAPABILITIES = 1,
+ MSG_REQUEST,
+ MSG_BIST,
+ MSG_SINK_CAPABILITIES,
+ MSG_BATTERY_STATUS,
+ MSG_ALERT,
+ MSG_GET_COUNTRY_INFO,
+ MSG_VDM = 0xF,
+};
+
+enum usbpd_ext_msg_type {
+ MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
+ MSG_STATUS,
+ MSG_GET_BATTERY_CAP,
+ MSG_GET_BATTERY_STATUS,
+ MSG_BATTERY_CAPABILITIES,
+ MSG_GET_MANUFACTURER_INFO,
+ MSG_MANUFACTURER_INFO,
+ MSG_SECURITY_REQUEST,
+ MSG_SECURITY_RESPONSE,
+ MSG_FIRMWARE_UPDATE_REQUEST,
+ MSG_FIRMWARE_UPDATE_RESPONSE,
+ MSG_PPS_STATUS,
+ MSG_COUNTRY_INFO,
+ MSG_COUNTRY_CODES,
+};
+
+enum vdm_state {
+ VDM_NONE,
+ DISCOVERED_ID,
+ DISCOVERED_SVIDS,
+ DISCOVERED_MODES,
+ MODE_ENTERED,
+ MODE_EXITED,
+};
+
+static void *usbpd_ipc_log;
+#define usbpd_dbg(dev, fmt, ...) do { \
+ ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+ ##__VA_ARGS__); \
+ dev_dbg(dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define usbpd_info(dev, fmt, ...) do { \
+ ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+ ##__VA_ARGS__); \
+ dev_info(dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define usbpd_warn(dev, fmt, ...) do { \
+ ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+ ##__VA_ARGS__); \
+ dev_warn(dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define usbpd_err(dev, fmt, ...) do { \
+ ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+ ##__VA_ARGS__); \
+ dev_err(dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define NUM_LOG_PAGES 10
+
+/* Timeouts (in ms) */
+#define ERROR_RECOVERY_TIME 25
+#define SENDER_RESPONSE_TIME 26
+#define SINK_WAIT_CAP_TIME 500
+#define PS_TRANSITION_TIME 450
+#define SRC_CAP_TIME 120
+#define SRC_TRANSITION_TIME 25
+#define SRC_RECOVER_TIME 750
+#define PS_HARD_RESET_TIME 25
+#define PS_SOURCE_ON 400
+#define PS_SOURCE_OFF 750
+#define FIRST_SOURCE_CAP_TIME 200
+#define VDM_BUSY_TIME 50
+#define VCONN_ON_TIME 100
+
+/* tPSHardReset + tSafe0V */
+#define SNK_HARD_RESET_VBUS_OFF_TIME (35 + 650)
+
+/* tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_VBUS_ON_TIME (1000 + 275)
+
+#define PD_CAPS_COUNT 50
+
+#define PD_MAX_MSG_ID 7
+
+#define PD_MAX_DATA_OBJ 7
+
+#define PD_SRC_CAP_EXT_DB_LEN 24
+#define PD_STATUS_DB_LEN 5
+#define PD_BATTERY_CAP_DB_LEN 9
+
+#define PD_MAX_EXT_MSG_LEN 260
+#define PD_MAX_EXT_MSG_LEGACY_LEN 26
+
+#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
+ (((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
+ ((pr) << 8) | ((id) << 9) | ((cnt) << 12))
+#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F)
+#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_EXTENDED BIT(15)
+#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED)
+
+#define PD_MSG_EXT_HDR(chunked, num, req, size) \
+ (((chunked) << 15) | (((num) & 0xF) << 11) | \
+ ((req) << 10) | ((size) & 0x1FF))
+#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000)
+#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF)
+#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400)
+#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF)
+
+#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
+ (((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
+ ((usb_comm) << 25) | ((no_usb_susp) << 24) | \
+ ((curr1) << 10) | (curr2))
+
+#define PD_RDO_AUGMENTED(obj, mismatch, usb_comm, no_usb_susp, volt, curr) \
+ (((obj) << 28) | ((mismatch) << 26) | ((usb_comm) << 25) | \
+ ((no_usb_susp) << 24) | ((volt) << 9) | (curr))
+
+#define PD_RDO_OBJ_POS(rdo) ((rdo) >> 28 & 7)
+#define PD_RDO_GIVEBACK(rdo) ((rdo) >> 27 & 1)
+#define PD_RDO_MISMATCH(rdo) ((rdo) >> 26 & 1)
+#define PD_RDO_USB_COMM(rdo) ((rdo) >> 25 & 1)
+#define PD_RDO_NO_USB_SUSP(rdo) ((rdo) >> 24 & 1)
+#define PD_RDO_FIXED_CURR(rdo) ((rdo) >> 10 & 0x3FF)
+#define PD_RDO_FIXED_CURR_MINMAX(rdo) ((rdo) & 0x3FF)
+#define PD_RDO_PROG_VOLTAGE(rdo) ((rdo) >> 9 & 0x7FF)
+#define PD_RDO_PROG_CURR(rdo) ((rdo) & 0x7F)
+
+#define PD_SRC_PDO_TYPE(pdo) (((pdo) >> 30) & 3)
+#define PD_SRC_PDO_TYPE_FIXED 0
+#define PD_SRC_PDO_TYPE_BATTERY 1
+#define PD_SRC_PDO_TYPE_VARIABLE 2
+#define PD_SRC_PDO_TYPE_AUGMENTED 3
+
+#define PD_SRC_PDO_FIXED_PR_SWAP(pdo) (((pdo) >> 29) & 1)
+#define PD_SRC_PDO_FIXED_USB_SUSP(pdo) (((pdo) >> 28) & 1)
+#define PD_SRC_PDO_FIXED_EXT_POWERED(pdo) (((pdo) >> 27) & 1)
+#define PD_SRC_PDO_FIXED_USB_COMM(pdo) (((pdo) >> 26) & 1)
+#define PD_SRC_PDO_FIXED_DR_SWAP(pdo) (((pdo) >> 25) & 1)
+#define PD_SRC_PDO_FIXED_PEAK_CURR(pdo) (((pdo) >> 20) & 3)
+#define PD_SRC_PDO_FIXED_VOLTAGE(pdo) (((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_FIXED_MAX_CURR(pdo) ((pdo) & 0x3FF)
+
+#define PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) (((pdo) >> 20) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) (((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MAX(pdo) ((pdo) & 0x3FF)
+
+#define PD_APDO_PPS(pdo) (((pdo) >> 28) & 3)
+#define PD_APDO_MAX_VOLT(pdo) (((pdo) >> 17) & 0xFF)
+#define PD_APDO_MIN_VOLT(pdo) (((pdo) >> 8) & 0xFF)
+#define PD_APDO_MAX_CURR(pdo) ((pdo) & 0x7F)
+
+/* Vendor Defined Messages */
+#define MAX_CRC_RECEIVE_TIME 9 /* ~(2 * tReceive_max(1.1ms) * # retry 4) */
+#define MAX_VDM_RESPONSE_TIME 60 /* 2 * tVDMSenderResponse_max(30ms) */
+#define MAX_VDM_BUSY_TIME 100 /* 2 * tVDMBusy (50ms) */
+
+#define PD_SNK_PDO_FIXED(prs, hc, uc, usb_comm, drs, volt, curr) \
+ (((prs) << 29) | ((hc) << 28) | ((uc) << 27) | ((usb_comm) << 26) | \
+ ((drs) << 25) | ((volt) << 10) | (curr))
+
+/* VDM header is the first 32-bit object following the 16-bit PD header */
+#define VDM_HDR_SVID(hdr) ((hdr) >> 16)
+#define VDM_IS_SVDM(hdr) ((hdr) & 0x8000)
+#define SVDM_HDR_OBJ_POS(hdr) (((hdr) >> 8) & 0x7)
+#define SVDM_HDR_CMD_TYPE(hdr) (((hdr) >> 6) & 0x3)
+#define SVDM_HDR_CMD(hdr) ((hdr) & 0x1f)
+
+#define SVDM_HDR(svid, ver, obj, cmd_type, cmd) \
+ (((svid) << 16) | (1 << 15) | ((ver) << 13) \
+ | ((obj) << 8) | ((cmd_type) << 6) | (cmd))
+
+/* discover id response vdo bit fields */
+#define ID_HDR_USB_HOST BIT(31)
+#define ID_HDR_USB_DEVICE BIT(30)
+#define ID_HDR_MODAL_OPR BIT(26)
+#define ID_HDR_PRODUCT_TYPE(n) ((n) >> 27)
+#define ID_HDR_PRODUCT_PER_MASK (2 << 27)
+#define ID_HDR_PRODUCT_HUB 1
+#define ID_HDR_PRODUCT_PER 2
+#define ID_HDR_PRODUCT_AMA 5
+#define ID_HDR_VID 0x05c6 /* qcom */
+#define PROD_VDO_PID 0x0a00 /* TBD */
+
+static bool check_vsafe0v = true;
+module_param(check_vsafe0v, bool, S_IRUSR | S_IWUSR);
+
+static int min_sink_current = 900;
+module_param(min_sink_current, int, S_IRUSR | S_IWUSR);
+
+static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
+static const u32 default_snk_caps[] = { 0x2601912C }; /* VSafe5V @ 3A */
+
+struct vdm_tx {
+ u32 data[PD_MAX_DATA_OBJ];
+ int size;
+};
+
+struct rx_msg {
+ u16 hdr;
+ u16 data_len; /* size of payload in bytes */
+ struct list_head entry;
+ u8 payload[];
+};
+
+#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+
/*
 * Per-port USB PD policy engine instance.
 * The state machine work (sm_work) runs on @wq; @timer re-queues it for
 * timed transitions (see kick_sm()). PHY callbacks queue received
 * messages on @rx_q under @rx_lock.
 */
struct usbpd {
	struct device dev;
	struct workqueue_struct *wq;
	struct work_struct sm_work;	/* policy engine state machine */
	struct hrtimer timer;		/* delayed kick of sm_work */
	bool sm_queued;			/* set by kick_sm() until work runs */

	struct extcon_dev *extcon;	/* USB role/speed/CC notifications */

	enum usbpd_state current_state;
	bool hard_reset_recvd;		/* set in phy_sig_received() */
	ktime_t hard_reset_recvd_time;
	struct list_head rx_q;		/* pending rx_msg entries */
	spinlock_t rx_lock;		/* protects rx_q */
	struct rx_msg *rx_ext_msg;	/* extended msg under reassembly */

	u32 received_pdos[PD_MAX_DATA_OBJ];	/* peer's source capabilities */
	u32 received_ado;
	u16 src_cap_id;
	u8 selected_pdo;
	u8 requested_pdo;		/* 1-based PDO position requested */
	u32 rdo; /* can be either source or sink */
	int current_voltage; /* uV */
	int requested_voltage; /* uV */
	int requested_current; /* mA */
	bool pd_connected;		/* peer confirmed PD capable */
	bool in_explicit_contract;
	bool peer_usb_comm;		/* from first source PDO */
	bool peer_pr_swap;
	bool peer_dr_swap;

	u32 sink_caps[7];
	int num_sink_caps;

	struct power_supply *usb_psy;
	struct notifier_block psy_nb;

	enum power_supply_typec_mode typec_mode;
	enum power_supply_type psy_type;
	enum power_supply_typec_power_role forced_pr;
	bool vbus_present;

	enum pd_spec_rev spec_rev;	/* negotiated PD revision */
	enum data_role current_dr;
	enum power_role current_pr;
	bool in_pr_swap;
	bool pd_phy_opened;
	bool send_request;
	struct completion is_ready;	/* completed on SRC/SNK_READY entry */
	struct completion tx_chunk_request; /* peer requested next Tx chunk */
	u8 next_tx_chunk;		/* next chunk number to transmit */

	struct mutex swap_lock;
	struct dual_role_phy_instance *dual_role;
	struct dual_role_phy_desc dr_desc;
	bool send_pr_swap;
	bool send_dr_swap;

	struct regulator *vbus;
	struct regulator *vconn;
	bool vbus_enabled;
	bool vconn_enabled;
	bool vconn_is_external;		/* VCONN not sourced from VBUS input */

	u8 tx_msgid;			/* next Tx MessageID (mod PD_MAX_MSG_ID) */
	u8 rx_msgid;			/* last Rx MessageID; -1 = none yet */
	int caps_count;
	int hard_reset_count;

	enum vdm_state vdm_state;	/* discovery progress */
	u16 *discovered_svids;		/* kcalloc/krealloc'd SVID list */
	int num_svids;
	struct vdm_tx *vdm_tx;		/* VDM queued for transmit */
	struct vdm_tx *vdm_tx_retry;	/* kept for retry after RESP_BUSY */
	struct list_head svid_handlers;	/* registered usbpd_svid_handler list */

	struct list_head instance;	/* presumably links into _usbpd — confirm */

	/* ext msg support */
	bool send_get_src_cap_ext;
	u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
	bool send_get_pps_status;
	u32 pps_status_db;
	u8 status_db[PD_STATUS_DB_LEN];
	bool send_get_battery_cap;
	u8 get_battery_cap_db;
	u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN];
	u8 get_battery_status_db;
	bool send_get_battery_status;
	u32 battery_sts_dobj;
};
+
/* Global list of all usbpd instances */
static LIST_HEAD(_usbpd); /* useful for debugging */

/* extcon cables this driver publishes to the USB stack */
static const unsigned int usbpd_extcon_cable[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_USB_CC,
	EXTCON_USB_SPEED,
	EXTCON_USB_TYPEC_MED_HIGH_CURRENT,
	EXTCON_NONE,
};

/* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
static const u32 usbpd_extcon_exclusive[] = {0x3, 0};
+
+enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+ int ret;
+ union power_supply_propval val;
+
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, &val);
+ if (ret)
+ return ORIENTATION_NONE;
+
+ return val.intval;
+}
+EXPORT_SYMBOL(usbpd_get_plug_orientation);
+
/* Deassert EXTCON_USB_HOST so the USB stack leaves host mode. */
static inline void stop_usb_host(struct usbpd *pd)
{
	extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
}
+
+static inline void start_usb_host(struct usbpd *pd, bool ss)
+{
+ enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ cc == ORIENTATION_CC2);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
+}
+
/* Deassert EXTCON_USB so the USB stack leaves peripheral mode. */
static inline void stop_usb_peripheral(struct usbpd *pd)
{
	extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
}
+
+static inline void start_usb_peripheral(struct usbpd *pd)
+{
+ enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ cc == ORIENTATION_CC2);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, 1);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_TYPEC_MED_HIGH_CURRENT,
+ pd->typec_mode > POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ? 1 : 0);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
+}
+
+static int set_power_role(struct usbpd *pd, enum power_role pr)
+{
+ union power_supply_propval val = {0};
+
+ switch (pr) {
+ case PR_NONE:
+ val.intval = POWER_SUPPLY_TYPEC_PR_NONE;
+ break;
+ case PR_SINK:
+ val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ break;
+ case PR_SRC:
+ val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ break;
+ }
+
+ return power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+}
+
+static struct usbpd_svid_handler *find_svid_handler(struct usbpd *pd, u16 svid)
+{
+ struct usbpd_svid_handler *handler;
+
+ list_for_each_entry(handler, &pd->svid_handlers, entry)
+ if (svid == handler->svid)
+ return handler;
+
+ return NULL;
+}
+
+/* Reset protocol layer */
+static inline void pd_reset_protocol(struct usbpd *pd)
+{
+ /*
+ * first Rx ID should be 0; set this to a sentinel of -1 so that in
+ * phy_msg_received() we can check if we had seen it before.
+ */
+ pd->rx_msgid = -1;
+ pd->tx_msgid = 0;
+ pd->send_request = false;
+ pd->send_pr_swap = false;
+ pd->send_dr_swap = false;
+}
+
+static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
+ size_t num_data, enum pd_sop_type sop)
+{
+ int ret;
+ u16 hdr;
+
+ if (pd->hard_reset_recvd)
+ return -EBUSY;
+
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, num_data, pd->spec_rev);
+
+ ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), sop);
+ if (ret)
+ return ret;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+ return 0;
+}
+
+static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
+ const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+ int ret;
+ size_t len_remain, chunk_len;
+ u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
+ u16 hdr;
+ u16 ext_hdr;
+ u8 num_objs;
+
+ if (data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ data_len = PD_MAX_EXT_MSG_LEN;
+ }
+
+ pd->next_tx_chunk = 0;
+ len_remain = data_len;
+ do {
+ ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
+ memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));
+
+ chunk_len = min_t(size_t, len_remain,
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+ memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);
+
+ num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
+ len_remain -= chunk_len;
+
+ reinit_completion(&pd->tx_chunk_request);
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, num_objs, pd->spec_rev) |
+ PD_MSG_HDR_EXTENDED;
+ ret = pd_phy_write(hdr, chunked_payload,
+ num_objs * sizeof(u32), sop);
+ if (ret)
+ return ret;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+
+ /* Wait for request chunk */
+ if (len_remain &&
+ !wait_for_completion_timeout(&pd->tx_chunk_request,
+ msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
+ usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
+ return -EPROTO;
+ }
+ } while (len_remain);
+
+ return 0;
+}
+
/*
 * Build an RDO for the source PDO at @pdo_pos (1-based).
 * @uv/@ua are used only for Augmented (PPS) PDOs: requested microvolts
 * and microamps. For Fixed PDOs the operating current comes from the PDO
 * itself, with the Capability Mismatch flag set if it is below
 * min_sink_current.
 * On success fills pd->rdo/requested_voltage/requested_current/
 * requested_pdo and returns 0; returns -EINVAL or -ENOTSUPP otherwise.
 */
static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
	int curr;
	int max_current;
	bool mismatch = false;
	u8 type;
	u32 pdo = pd->received_pdos[pdo_pos - 1];

	type = PD_SRC_PDO_TYPE(pdo);
	if (type == PD_SRC_PDO_TYPE_FIXED) {
		/* PDO current field is in 10mA units; convert to mA */
		curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;

		/*
		 * Check if the PDO has enough current, otherwise set the
		 * Capability Mismatch flag
		 */
		if (curr < min_sink_current) {
			mismatch = true;
			max_current = min_sink_current;
		}

		/* PDO voltage field is in 50mV units; convert to uV */
		pd->requested_voltage =
			PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
		pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
				max_current / 10);
	} else if (type == PD_SRC_PDO_TYPE_AUGMENTED) {
		/* APDO limits: voltage in 100mV units, current in 50mA units */
		if ((uv / 100000) > PD_APDO_MAX_VOLT(pdo) ||
			(uv / 100000) < PD_APDO_MIN_VOLT(pdo) ||
			(ua / 50000) > PD_APDO_MAX_CURR(pdo) || (ua < 0)) {
			usbpd_err(&pd->dev, "uv (%d) and ua (%d) out of range of APDO\n",
					uv, ua);
			return -EINVAL;
		}

		curr = ua / 1000; /* mA */
		pd->requested_voltage = uv;
		/* programmable RDO: voltage in 20mV units, current in 50mA */
		pd->rdo = PD_RDO_AUGMENTED(pdo_pos, mismatch, 1, 1,
				uv / 20000, ua / 50000);
	} else {
		usbpd_err(&pd->dev, "Only Fixed or Programmable PDOs supported\n");
		return -ENOTSUPP;
	}

	/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
	if (pd->vconn_enabled && !pd->vconn_is_external &&
			pd->requested_voltage > 5000000)
		return -ENOTSUPP;

	pd->requested_current = curr;
	pd->requested_pdo = pdo_pos;

	return 0;
}
+
/*
 * Evaluate the received Source_Capabilities.
 * Caches the peer's capability bits from the first (Fixed vSafe5V) PDO,
 * optionally downgrades a PD 3.0 sink to 2.0 when no PPS APDO is
 * offered, then pre-selects PDO #1 (vSafe5V).
 * Returns -EINVAL if the first PDO is not a Fixed supply.
 */
static int pd_eval_src_caps(struct usbpd *pd)
{
	int i;
	union power_supply_propval val;
	u32 first_pdo = pd->received_pdos[0];

	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
		usbpd_err(&pd->dev, "First src_cap invalid! %08x\n", first_pdo);
		return -EINVAL;
	}

	pd->peer_usb_comm = PD_SRC_PDO_FIXED_USB_COMM(first_pdo);
	pd->peer_pr_swap = PD_SRC_PDO_FIXED_PR_SWAP(first_pdo);
	pd->peer_dr_swap = PD_SRC_PDO_FIXED_DR_SWAP(first_pdo);

	val.intval = PD_SRC_PDO_FIXED_USB_SUSP(first_pdo);
	power_supply_set_property(pd->usb_psy,
			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);

	if (pd->spec_rev == USBPD_REV_30 && !rev3_sink_only) {
		bool pps_found = false;

		/* downgrade to 2.0 if no PPS */
		for (i = 1; i < PD_MAX_DATA_OBJ; i++) {
			/*
			 * NOTE(review): treats a zero PD_APDO_PPS() value as
			 * "is a PPS APDO" — confirm against the macro
			 * definition (APDO type field 0 == PPS).
			 */
			if ((PD_SRC_PDO_TYPE(pd->received_pdos[i]) ==
					PD_SRC_PDO_TYPE_AUGMENTED) &&
				!PD_APDO_PPS(pd->received_pdos[i])) {
				pps_found = true;
				break;
			}
		}
		if (!pps_found)
			pd->spec_rev = USBPD_REV_20;
	}

	/* Select the first PDO (vSafe5V) immediately. */
	pd_select_pdo(pd, 1, 0, 0);

	return 0;
}
+
+static void pd_send_hard_reset(struct usbpd *pd)
+{
+ union power_supply_propval val = {0};
+
+ usbpd_dbg(&pd->dev, "send hard reset");
+
+ /* Force CC logic to source/sink to keep Rp/Rd unchanged */
+ set_power_role(pd, pd->current_pr);
+ pd->hard_reset_count++;
+ pd_phy_signal(HARD_RESET_SIG);
+ pd->in_pr_swap = false;
+ power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
+}
+
+static void kick_sm(struct usbpd *pd, int ms)
+{
+ pm_stay_awake(&pd->dev);
+ pd->sm_queued = true;
+
+ if (ms)
+ hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
+ else
+ queue_work(pd->wq, &pd->sm_work);
+}
+
/*
 * PD PHY callback: a PD signal was received. Only Hard Reset is valid;
 * the event is recorded and actual handling is deferred to usbpd_sm via
 * kick_sm().
 */
static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
{
	/* intval = 1: we are now inside a hard reset */
	union power_supply_propval val = {1};

	if (sig != HARD_RESET_SIG) {
		usbpd_err(&pd->dev, "invalid signal (%d) received\n", sig);
		return;
	}

	/* record flag + timestamp before notifying anyone */
	pd->hard_reset_recvd = true;
	pd->hard_reset_recvd_time = ktime_get();

	usbpd_err(&pd->dev, "hard reset received\n");

	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
	set_power_role(pd, pd->current_pr);
	power_supply_set_property(pd->usb_psy,
			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

	kick_sm(pd, 0);
}
+
/* Deferred work item that requests the next chunk of an extended message */
struct pd_request_chunk {
	struct work_struct w;
	struct usbpd *pd;
	u8 msg_type;		/* extended message type being reassembled */
	u8 chunk_num;		/* chunk number to request */
	enum pd_sop_type sop;
};
+
/*
 * Worker: transmit a Chunk Request for the next chunk of the extended
 * message being reassembled. If the request cannot be sent, the partial
 * message in pd->rx_ext_msg is queued to the state machine as-is rather
 * than dropped. Frees the request item in all cases.
 */
static void pd_request_chunk_work(struct work_struct *w)
{
	struct pd_request_chunk *req =
		container_of(w, struct pd_request_chunk, w);
	struct usbpd *pd = req->pd;
	unsigned long flags;
	int ret;
	u8 payload[4] = {0}; /* ext_hdr + padding */
	u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
			pd->tx_msgid, 1, pd->spec_rev) | PD_MSG_HDR_EXTENDED;

	/* extended header: chunked, Request Chunk bit set, zero data size */
	*(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);

	ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop);
	if (!ret) {
		pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
	} else {
		usbpd_err(&pd->dev, "could not send chunk request\n");

		/* queue what we have anyway */
		spin_lock_irqsave(&pd->rx_lock, flags);
		list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
		spin_unlock_irqrestore(&pd->rx_lock, flags);

		pd->rx_ext_msg = NULL;
	}

	kfree(req);
}
+
/*
 * Handle one received chunk of a PD 3.0 chunked extended message.
 *
 * Three possibilities:
 *  - the peer is requesting our next Tx chunk: complete
 *    pd->tx_chunk_request so pd_send_ext_msg() proceeds; return NULL;
 *  - first chunk (chunk 0): allocate an rx_msg sized for the total data
 *    length advertised in the extended header;
 *  - continuation chunk: append into the pending pd->rx_ext_msg.
 *
 * Returns the fully reassembled rx_msg (caller queues it for usbpd_sm),
 * or NULL while chunks are outstanding or on error. Runs in the PHY rx
 * path, hence GFP_ATOMIC allocations.
 */
static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
		size_t len, enum pd_sop_type sop)
{
	struct rx_msg *rx_msg;
	u16 bytes_to_copy;
	u16 ext_hdr = *(u16 *)buf;
	u8 chunk_num;

	if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
		usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
		return NULL;
	}

	/* request for next Tx chunk */
	if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
		/* must carry no data and name exactly the chunk we staged */
		if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
			PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
				pd->next_tx_chunk) {
			usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n",
					ext_hdr);
			return NULL;
		}

		if (!completion_done(&pd->tx_chunk_request))
			complete(&pd->tx_chunk_request);

		return NULL;
	}

	chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
	if (!chunk_num) {
		/* allocate new message if first chunk */
		rx_msg = kzalloc(sizeof(*rx_msg) +
				PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
				GFP_ATOMIC);
		if (!rx_msg)
			return NULL;

		rx_msg->hdr = header;
		rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);

		if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
			usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
			rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
		}
	} else {
		/* continuation chunk must have a pending first chunk */
		if (!pd->rx_ext_msg) {
			usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
			return NULL;
		}

		rx_msg = pd->rx_ext_msg;
	}

	/*
	 * The amount to copy is derived as follows:
	 *
	 * - if extended data_len < 26, then copy data_len bytes
	 * - for chunks 0..N-2, copy 26 bytes
	 * - for the last chunk (N-1), copy the remainder
	 */
	bytes_to_copy =
		min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
			PD_MAX_EXT_MSG_LEGACY_LEN);

	/* check against received length to avoid overrun */
	if (bytes_to_copy > len - sizeof(ext_hdr)) {
		usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n",
				bytes_to_copy, len - sizeof(ext_hdr));
		bytes_to_copy = len - sizeof(ext_hdr);
	}

	/* skip the 2-byte extended header when copying payload */
	memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
			bytes_to_copy);

	/* request next chunk? */
	if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
			PD_MAX_EXT_MSG_LEGACY_LEN) {
		struct pd_request_chunk *req;

		if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
			usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
			kfree(pd->rx_ext_msg);
		}

		pd->rx_ext_msg = rx_msg;

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req)
			goto queue_rx; /* return what we have anyway */

		INIT_WORK(&req->w, pd_request_chunk_work);
		req->pd = pd;
		req->msg_type = PD_MSG_HDR_TYPE(header);
		req->chunk_num = chunk_num + 1;
		req->sop = sop;
		queue_work(pd->wq, &req->w);

		return NULL;
	}

queue_rx:
	pd->rx_ext_msg = NULL;
	return rx_msg; /* queue it for usbpd_sm */
}
+
/*
 * PD PHY callback: a complete message arrived. Validates length and
 * header consistency, de-duplicates by MessageID, downgrades the
 * negotiated spec revision if the peer's is older, then queues the
 * message on pd->rx_q and kicks the state machine. Runs in the PHY rx
 * context (atomic).
 */
static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
		u8 *buf, size_t len)
{
	struct rx_msg *rx_msg;
	unsigned long flags;
	u16 header;

	if (sop != SOP_MSG) {
		usbpd_err(&pd->dev, "invalid msg type (%d) received; only SOP supported\n",
				sop);
		return;
	}

	if (len < 2) {
		usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
		return;
	}

	/* strip the 16-bit message header from the payload */
	header = *((u16 *)buf);
	buf += sizeof(u16);
	len -= sizeof(u16);

	/* payload must be whole 32-bit data objects */
	if (len % 4 != 0) {
		usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
		return;
	}

	/* if MSGID already seen, discard */
	if (PD_MSG_HDR_ID(header) == pd->rx_msgid &&
			PD_MSG_HDR_TYPE(header) != MSG_SOFT_RESET) {
		usbpd_dbg(&pd->dev, "MessageID already seen, discarding\n");
		return;
	}

	pd->rx_msgid = PD_MSG_HDR_ID(header);

	/* discard Pings */
	if (PD_MSG_HDR_TYPE(header) == MSG_PING && !len)
		return;

	/* check header's count field to see if it matches len */
	if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
		usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
				PD_MSG_HDR_COUNT(header), len);
		return;
	}

	/* if spec rev differs (i.e. is older), update PHY */
	if (PD_MSG_HDR_REV(header) < pd->spec_rev)
		pd->spec_rev = PD_MSG_HDR_REV(header);

	usbpd_dbg(&pd->dev, "received message: type(%d) num_objs(%d)\n",
			PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header));

	if (!PD_MSG_HDR_IS_EXTENDED(header)) {
		rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_ATOMIC);
		if (!rx_msg)
			return;

		rx_msg->hdr = header;
		rx_msg->data_len = len;
		memcpy(rx_msg->payload, buf, len);
	} else {
		/* chunked extended message; NULL until fully reassembled */
		rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
		if (!rx_msg)
			return;
	}

	spin_lock_irqsave(&pd->rx_lock, flags);
	list_add_tail(&rx_msg->entry, &pd->rx_q);
	spin_unlock_irqrestore(&pd->rx_lock, flags);

	kick_sm(pd, 0);
}
+
/* PD PHY shutdown callback; only logs, no cleanup needed here. */
static void phy_shutdown(struct usbpd *pd)
{
	usbpd_dbg(&pd->dev, "shutdown");
}
+
+static enum hrtimer_restart pd_timeout(struct hrtimer *timer)
+{
+ struct usbpd *pd = container_of(timer, struct usbpd, timer);
+
+ usbpd_dbg(&pd->dev, "timeout");
+ queue_work(pd->wq, &pd->sm_work);
+
+ return HRTIMER_NORESTART;
+}
+
/*
 * Enters new state and executes actions on entry.
 *
 * Bails out immediately if a Hard Reset has been received — usbpd_sm
 * owns that path. May recurse through pseudo-states (e.g. Accept ->
 * PS_RDY -> PE_SRC_READY) and uses kick_sm() for timed waits instead of
 * sleeping, with the one documented exception of tSrcTransition.
 */
static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
{
	struct pd_phy_params phy_params = {
		.signal_cb = phy_sig_received,
		.msg_rx_cb = phy_msg_received,
		.shutdown_cb = phy_shutdown,
		.frame_filter_val = FRAME_FILTER_EN_SOP |
			FRAME_FILTER_EN_HARD_RESET,
	};
	union power_supply_propval val = {0};
	unsigned long flags;
	int ret;

	if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
		return;

	usbpd_dbg(&pd->dev, "%s -> %s\n",
			usbpd_state_strings[pd->current_state],
			usbpd_state_strings[next_state]);

	pd->current_state = next_state;

	switch (next_state) {
	case PE_ERROR_RECOVERY: /* perform hard disconnect/reconnect */
		pd->in_pr_swap = false;
		pd->current_pr = PR_NONE;
		set_power_role(pd, PR_NONE);
		pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
		kick_sm(pd, 0);
		break;

	/* Source states */
	case PE_SRC_DISABLED:
		/* are we still connected? */
		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
			pd->current_pr = PR_NONE;
			kick_sm(pd, 0);
		}

		break;

	case PE_SRC_STARTUP:
		if (pd->current_dr == DR_NONE) {
			pd->current_dr = DR_DFP;
			/*
			 * Defer starting USB host mode until PE_SRC_READY or
			 * when PE_SRC_SEND_CAPABILITIES fails
			 */
		}

		dual_role_instance_changed(pd->dual_role);

		/* Set CC back to DRP toggle for the next disconnect */
		val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);

		/* support only PD 2.0 as a source */
		pd->spec_rev = USBPD_REV_20;
		pd_reset_protocol(pd);

		if (!pd->in_pr_swap) {
			/* (re)open the PHY with current DFP/source roles */
			if (pd->pd_phy_opened) {
				pd_phy_close();
				pd->pd_phy_opened = false;
			}

			phy_params.data_role = pd->current_dr;
			phy_params.power_role = pd->current_pr;

			ret = pd_phy_open(&phy_params);
			if (ret) {
				WARN_ON_ONCE(1);
				usbpd_err(&pd->dev, "error opening PD PHY %d\n",
						ret);
				pd->current_state = PE_UNKNOWN;
				return;
			}

			pd->pd_phy_opened = true;
		}

		if (pd->in_pr_swap) {
			pd->in_pr_swap = false;
			val.intval = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PR_SWAP, &val);
		}

		/*
		 * A sink might remove its terminations (during some Type-C
		 * compliance tests or a sink attempting to do Try.SRC)
		 * at this point just after we enabled VBUS. Sending PD
		 * messages now would delay detecting the detach beyond the
		 * required timing. Instead, delay sending out the first
		 * source capabilities to allow for the other side to
		 * completely settle CC debounce and allow HW to detect detach
		 * sooner in the meantime. PD spec allows up to
		 * tFirstSourceCap (250ms).
		 */
		pd->current_state = PE_SRC_SEND_CAPABILITIES;
		kick_sm(pd, FIRST_SOURCE_CAP_TIME);
		break;

	case PE_SRC_SEND_CAPABILITIES:
		kick_sm(pd, 0);
		break;

	case PE_SRC_NEGOTIATE_CAPABILITY:
		/* we advertise a single vSafe5V PDO; reject anything else */
		if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
			PD_RDO_FIXED_CURR(pd->rdo) >
				PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
			/* send Reject */
			ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Reject\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			usbpd_err(&pd->dev, "Invalid request: %08x\n", pd->rdo);

			if (pd->in_explicit_contract)
				usbpd_set_state(pd, PE_SRC_READY);
			else
				/*
				 * bypass PE_SRC_Capability_Response and
				 * PE_SRC_Wait_New_Capabilities in this
				 * implementation for simplicity.
				 */
				usbpd_set_state(pd, PE_SRC_SEND_CAPABILITIES);
			break;
		}

		/* PE_SRC_TRANSITION_SUPPLY pseudo-state */
		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending Accept\n");
			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
			break;
		}

		/* tSrcTransition required after ACCEPT */
		usleep_range(SRC_TRANSITION_TIME * USEC_PER_MSEC,
				(SRC_TRANSITION_TIME + 5) * USEC_PER_MSEC);

		/*
		 * Normally a voltage change should occur within tSrcReady
		 * but since we only support VSafe5V there is nothing more to
		 * prepare from the power supply so send PS_RDY right away.
		 */
		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
			break;
		}

		usbpd_set_state(pd, PE_SRC_READY);
		break;

	case PE_SRC_READY:
		pd->in_explicit_contract = true;

		/* flush any queued VDM, else begin DFP discovery */
		if (pd->vdm_tx)
			kick_sm(pd, 0);
		else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
			usbpd_send_svdm(pd, USBPD_SID,
					USBPD_SVDM_DISCOVER_IDENTITY,
					SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);

		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
		complete(&pd->is_ready);
		dual_role_instance_changed(pd->dual_role);
		break;

	case PE_SRC_HARD_RESET:
	case PE_SNK_HARD_RESET:
		/* are we still connected? */
		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE)
			pd->current_pr = PR_NONE;

		/* hard reset may sleep; handle it in the workqueue */
		kick_sm(pd, 0);
		break;

	case PE_SRC_SEND_SOFT_RESET:
	case PE_SNK_SEND_SOFT_RESET:
		pd_reset_protocol(pd);

		ret = pd_send_msg(pd, MSG_SOFT_RESET, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending Soft Reset, do Hard Reset\n");
			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
			break;
		}

		/* wait for ACCEPT */
		kick_sm(pd, SENDER_RESPONSE_TIME);
		break;

	/* Sink states */
	case PE_SNK_STARTUP:
		if (pd->current_dr == DR_NONE || pd->current_dr == DR_UFP) {
			pd->current_dr = DR_UFP;

			if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
				pd->psy_type == POWER_SUPPLY_TYPE_USB_FLOAT ||
				usb_compliance_mode)
				start_usb_peripheral(pd);
		}

		dual_role_instance_changed(pd->dual_role);

		ret = power_supply_get_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_ALLOWED, &val);
		if (ret) {
			usbpd_err(&pd->dev, "Unable to read USB PROP_PD_ALLOWED: %d\n",
					ret);
			break;
		}

		if (!val.intval || disable_usb_pd)
			break;

		/*
		 * support up to PD 3.0 as a sink; if source is 2.0
		 * phy_msg_received() will handle the downgrade.
		 */
		pd->spec_rev = USBPD_REV_30;
		pd_reset_protocol(pd);

		if (!pd->in_pr_swap) {
			/* (re)open the PHY with current UFP/sink roles */
			if (pd->pd_phy_opened) {
				pd_phy_close();
				pd->pd_phy_opened = false;
			}

			phy_params.data_role = pd->current_dr;
			phy_params.power_role = pd->current_pr;

			ret = pd_phy_open(&phy_params);
			if (ret) {
				WARN_ON_ONCE(1);
				usbpd_err(&pd->dev, "error opening PD PHY %d\n",
						ret);
				pd->current_state = PE_UNKNOWN;
				return;
			}

			pd->pd_phy_opened = true;
		}

		pd->current_voltage = pd->requested_voltage = 5000000;
		val.intval = pd->requested_voltage; /* set max range to 5V */
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);

		if (!pd->vbus_present) {
			pd->current_state = PE_SNK_DISCOVERY;
			/* max time for hard reset to turn vbus back on */
			kick_sm(pd, SNK_HARD_RESET_VBUS_ON_TIME);
			break;
		}

		pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
		/* fall-through */

	case PE_SNK_WAIT_FOR_CAPABILITIES:
		spin_lock_irqsave(&pd->rx_lock, flags);
		if (list_empty(&pd->rx_q))
			kick_sm(pd, SINK_WAIT_CAP_TIME);
		spin_unlock_irqrestore(&pd->rx_lock, flags);
		break;

	case PE_SNK_EVALUATE_CAPABILITY:
		pd->pd_connected = true; /* we know peer is PD capable */
		pd->hard_reset_count = 0;

		/* evaluate PDOs and select one */
		ret = pd_eval_src_caps(pd);
		if (ret < 0) {
			usbpd_err(&pd->dev, "Invalid src_caps received. Skipping request\n");
			break;
		}
		pd->current_state = PE_SNK_SELECT_CAPABILITY;
		/* fall-through */

	case PE_SNK_SELECT_CAPABILITY:
		ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending Request\n");
			usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
			break;
		}

		/* wait for ACCEPT */
		kick_sm(pd, SENDER_RESPONSE_TIME);
		break;

	case PE_SNK_TRANSITION_SINK:
		/* wait for PS_RDY */
		kick_sm(pd, PS_TRANSITION_TIME);
		break;

	case PE_SNK_READY:
		pd->in_explicit_contract = true;

		/* flush any queued VDM, else begin DFP discovery */
		if (pd->vdm_tx)
			kick_sm(pd, 0);
		else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
			usbpd_send_svdm(pd, USBPD_SID,
					USBPD_SVDM_DISCOVER_IDENTITY,
					SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);

		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
		complete(&pd->is_ready);
		dual_role_instance_changed(pd->dual_role);
		break;

	case PE_SNK_TRANSITION_TO_DEFAULT:
		/* hard reset forces UFP data role and drops VCONN */
		if (pd->current_dr != DR_UFP) {
			stop_usb_host(pd);
			start_usb_peripheral(pd);
			pd->current_dr = DR_UFP;
			pd_phy_update_roles(pd->current_dr, pd->current_pr);
		}
		if (pd->vconn_enabled) {
			regulator_disable(pd->vconn);
			pd->vconn_enabled = false;
		}

		/* max time for hard reset to turn vbus off */
		kick_sm(pd, SNK_HARD_RESET_VBUS_OFF_TIME);
		break;

	case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
		val.intval = pd->requested_current = 0; /* suspend charging */
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);

		pd->in_explicit_contract = false;

		/*
		 * need to update PR bit in message header so that
		 * proper GoodCRC is sent when receiving next PS_RDY
		 */
		pd_phy_update_roles(pd->current_dr, PR_SRC);

		/* wait for PS_RDY */
		kick_sm(pd, PS_SOURCE_OFF);
		break;

	default:
		usbpd_dbg(&pd->dev, "No action for state %s\n",
				usbpd_state_strings[pd->current_state]);
		break;
	}
}
+
+int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
+{
+ if (find_svid_handler(pd, hdlr->svid)) {
+ usbpd_err(&pd->dev, "SVID 0x%04x already registered\n",
+ hdlr->svid);
+ return -EINVAL;
+ }
+
+ /* require connect/disconnect callbacks be implemented */
+ if (!hdlr->connect || !hdlr->disconnect) {
+ usbpd_err(&pd->dev, "SVID 0x%04x connect/disconnect must be non-NULL\n",
+ hdlr->svid);
+ return -EINVAL;
+ }
+
+ usbpd_dbg(&pd->dev, "registered handler for SVID 0x%04x\n", hdlr->svid);
+
+ list_add_tail(&hdlr->entry, &pd->svid_handlers);
+
+ /* already connected with this SVID discovered? */
+ if (pd->vdm_state >= DISCOVERED_SVIDS) {
+ int i;
+
+ for (i = 0; i < pd->num_svids; i++) {
+ if (pd->discovered_svids[i] == hdlr->svid) {
+ hdlr->connect(hdlr);
+ hdlr->discovered = true;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usbpd_register_svid);
+
/*
 * Remove a previously registered SVID handler.
 * NOTE(review): does not invoke hdlr->disconnect() nor clear
 * hdlr->discovered — presumably the caller's responsibility; confirm.
 */
void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
{
	list_del_init(&hdlr->entry);
}
EXPORT_SYMBOL(usbpd_unregister_svid);
+
+int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
+{
+ struct vdm_tx *vdm_tx;
+
+ if (!pd->in_explicit_contract || pd->vdm_tx)
+ return -EBUSY;
+
+ vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
+ if (!vdm_tx)
+ return -ENOMEM;
+
+ vdm_tx->data[0] = vdm_hdr;
+ if (vdos && num_vdos)
+ memcpy(&vdm_tx->data[1], vdos, num_vdos * sizeof(u32));
+ vdm_tx->size = num_vdos + 1; /* include the header */
+
+ /* VDM will get sent in PE_SRC/SNK_READY state handling */
+ pd->vdm_tx = vdm_tx;
+
+ /* slight delay before queuing to prioritize handling of incoming VDM */
+ kick_sm(pd, 2);
+
+ return 0;
+}
+EXPORT_SYMBOL(usbpd_send_vdm);
+
+int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+ enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+ const u32 *vdos, int num_vdos)
+{
+ u32 svdm_hdr = SVDM_HDR(svid, 0, obj_pos, cmd_type, cmd);
+
+ usbpd_dbg(&pd->dev, "VDM tx: svid:%x cmd:%x cmd_type:%x svdm_hdr:%x\n",
+ svid, cmd, cmd_type, svdm_hdr);
+
+ return usbpd_send_vdm(pd, svdm_hdr, vdos, num_vdos);
+}
+EXPORT_SYMBOL(usbpd_send_svdm);
+
+static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+ u32 vdm_hdr =
+ rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0;
+
+ u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
+ u16 svid = VDM_HDR_SVID(vdm_hdr);
+ u16 *psvid;
+ u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
+ u8 cmd = SVDM_HDR_CMD(vdm_hdr);
+ u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
+ bool has_dp = false;
+ struct usbpd_svid_handler *handler;
+
+ usbpd_dbg(&pd->dev, "VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x\n",
+ svid, cmd, cmd_type, vdm_hdr);
+
+ /* if it's a supported SVID, pass the message to the handler */
+ handler = find_svid_handler(pd, svid);
+
+ /* Unstructured VDM */
+ if (!VDM_IS_SVDM(vdm_hdr)) {
+ if (handler && handler->vdm_received)
+ handler->vdm_received(handler, vdm_hdr, vdos, num_vdos);
+ return;
+ }
+
+ /* if this interrupts a previous exchange, abort queued response */
+ if (cmd_type == SVDM_CMD_TYPE_INITIATOR && pd->vdm_tx) {
+ usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
+ VDM_HDR_SVID(pd->vdm_tx->data[0]));
+
+ kfree(pd->vdm_tx);
+ pd->vdm_tx = NULL;
+ }
+
+ if (handler && handler->svdm_received) {
+ handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
+ return;
+ }
+
+ /* Standard Discovery or unhandled messages go here */
+ switch (cmd_type) {
+ case SVDM_CMD_TYPE_INITIATOR:
+ if (svid == USBPD_SID && cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
+ u32 tx_vdos[3] = {
+ ID_HDR_USB_HOST | ID_HDR_USB_DEVICE |
+ ID_HDR_PRODUCT_PER_MASK | ID_HDR_VID,
+ 0x0, /* TBD: Cert Stat VDO */
+ (PROD_VDO_PID << 16),
+ /* TBD: Get these from gadget */
+ };
+
+ usbpd_send_svdm(pd, USBPD_SID, cmd,
+ SVDM_CMD_TYPE_RESP_ACK, 0, tx_vdos, 3);
+ } else if (cmd != USBPD_SVDM_ATTENTION) {
+ usbpd_send_svdm(pd, svid, cmd, SVDM_CMD_TYPE_RESP_NAK,
+ SVDM_HDR_OBJ_POS(vdm_hdr), NULL, 0);
+ }
+ break;
+
+ case SVDM_CMD_TYPE_RESP_ACK:
+ if (svid != USBPD_SID) {
+ usbpd_err(&pd->dev, "unhandled ACK for SVID:0x%x\n",
+ svid);
+ break;
+ }
+
+ switch (cmd) {
+ case USBPD_SVDM_DISCOVER_IDENTITY:
+ kfree(pd->vdm_tx_retry);
+ pd->vdm_tx_retry = NULL;
+
+ pd->vdm_state = DISCOVERED_ID;
+ usbpd_send_svdm(pd, USBPD_SID,
+ USBPD_SVDM_DISCOVER_SVIDS,
+ SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+ break;
+
+ case USBPD_SVDM_DISCOVER_SVIDS:
+ pd->vdm_state = DISCOVERED_SVIDS;
+
+ kfree(pd->vdm_tx_retry);
+ pd->vdm_tx_retry = NULL;
+
+ if (!pd->discovered_svids) {
+ pd->num_svids = 2 * num_vdos;
+ pd->discovered_svids = kcalloc(pd->num_svids,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!pd->discovered_svids) {
+ usbpd_err(&pd->dev, "unable to allocate SVIDs\n");
+ break;
+ }
+
+ psvid = pd->discovered_svids;
+ } else { /* handle > 12 SVIDs */
+ void *ptr;
+ size_t oldsize = pd->num_svids * sizeof(u16);
+ size_t newsize = oldsize +
+ (2 * num_vdos * sizeof(u16));
+
+ ptr = krealloc(pd->discovered_svids, newsize,
+ GFP_KERNEL);
+ if (!ptr) {
+ usbpd_err(&pd->dev, "unable to realloc SVIDs\n");
+ break;
+ }
+
+ pd->discovered_svids = ptr;
+ psvid = pd->discovered_svids + pd->num_svids;
+ memset(psvid, 0, (2 * num_vdos));
+ pd->num_svids += 2 * num_vdos;
+ }
+
+ /* convert 32-bit VDOs to list of 16-bit SVIDs */
+ for (i = 0; i < num_vdos * 2; i++) {
+ /*
+ * Within each 32-bit VDO,
+ * SVID[i]: upper 16-bits
+ * SVID[i+1]: lower 16-bits
+ * where i is even.
+ */
+ if (!(i & 1))
+ svid = vdos[i >> 1] >> 16;
+ else
+ svid = vdos[i >> 1] & 0xFFFF;
+
+ /*
+ * There are some devices that incorrectly
+ * swap the order of SVIDs within a VDO. So in
+ * case of an odd-number of SVIDs it could end
+ * up with SVID[i] as 0 while SVID[i+1] is
+ * non-zero. Just skip over the zero ones.
+ */
+ if (svid) {
+ usbpd_dbg(&pd->dev, "Discovered SVID: 0x%04x\n",
+ svid);
+ *psvid++ = svid;
+ }
+ }
+
+ /* if more than 12 SVIDs, resend the request */
+ if (num_vdos == 6 && vdos[5] != 0) {
+ usbpd_send_svdm(pd, USBPD_SID,
+ USBPD_SVDM_DISCOVER_SVIDS,
+ SVDM_CMD_TYPE_INITIATOR, 0,
+ NULL, 0);
+ break;
+ }
+
+ /* now that all SVIDs are discovered, notify handlers */
+ for (i = 0; i < pd->num_svids; i++) {
+ svid = pd->discovered_svids[i];
+ if (svid) {
+ handler = find_svid_handler(pd, svid);
+ if (handler) {
+ handler->connect(handler);
+ handler->discovered = true;
+ }
+ }
+
+ if (svid == 0xFF01)
+ has_dp = true;
+ }
+
+ /*
+ * Finally start USB host now that we have determined
+ * if DisplayPort mode is present or not and limit USB
+ * to HS-only mode if so.
+ */
+ start_usb_host(pd, !has_dp);
+
+ break;
+
+ default:
+ usbpd_dbg(&pd->dev, "unhandled ACK for command:0x%x\n",
+ cmd);
+ break;
+ }
+ break;
+
+ case SVDM_CMD_TYPE_RESP_NAK:
+ usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:0x%x\n",
+ svid, cmd);
+
+ switch (cmd) {
+ case USBPD_SVDM_DISCOVER_IDENTITY:
+ case USBPD_SVDM_DISCOVER_SVIDS:
+ start_usb_host(pd, true);
+ break;
+ default:
+ break;
+ }
+
+ break;
+
+ case SVDM_CMD_TYPE_RESP_BUSY:
+ switch (cmd) {
+ case USBPD_SVDM_DISCOVER_IDENTITY:
+ case USBPD_SVDM_DISCOVER_SVIDS:
+ if (!pd->vdm_tx_retry) {
+ usbpd_err(&pd->dev, "Discover command %d VDM was unexpectedly freed\n",
+ cmd);
+ break;
+ }
+
+ /* wait tVDMBusy, then retry */
+ pd->vdm_tx = pd->vdm_tx_retry;
+ pd->vdm_tx_retry = NULL;
+ kick_sm(pd, VDM_BUSY_TIME);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+}
+
+static void handle_vdm_tx(struct usbpd *pd)
+{
+ int ret;
+ unsigned long flags;
+
+ /* only send one VDM at a time */
+ if (pd->vdm_tx) {
+ u32 vdm_hdr = pd->vdm_tx->data[0];
+
+ /* bail out and try again later if a message just arrived */
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ if (!list_empty(&pd->rx_q)) {
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+ ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data,
+ pd->vdm_tx->size, SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
+ ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
+
+ /* retry when hitting PE_SRC/SNK_Ready again */
+ if (ret != -EBUSY)
+ usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+ PE_SRC_SEND_SOFT_RESET :
+ PE_SNK_SEND_SOFT_RESET);
+
+ return;
+ }
+
+ /*
+ * special case: keep initiated Discover ID/SVIDs
+ * around in case we need to re-try when receiving BUSY
+ */
+ if (VDM_IS_SVDM(vdm_hdr) &&
+ SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
+ SVDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
+ if (pd->vdm_tx_retry) {
+ usbpd_dbg(&pd->dev, "Previous Discover VDM command %d not ACKed/NAKed\n",
+ SVDM_HDR_CMD(
+ pd->vdm_tx_retry->data[0]));
+ kfree(pd->vdm_tx_retry);
+ }
+ pd->vdm_tx_retry = pd->vdm_tx;
+ } else {
+ kfree(pd->vdm_tx);
+ }
+
+ pd->vdm_tx = NULL;
+ }
+}
+
+static void reset_vdm_state(struct usbpd *pd)
+{
+ struct usbpd_svid_handler *handler;
+
+ list_for_each_entry(handler, &pd->svid_handlers, entry) {
+ if (handler->discovered) {
+ handler->disconnect(handler);
+ handler->discovered = false;
+ }
+ }
+
+ pd->vdm_state = VDM_NONE;
+ kfree(pd->vdm_tx_retry);
+ pd->vdm_tx_retry = NULL;
+ kfree(pd->discovered_svids);
+ pd->discovered_svids = NULL;
+ pd->num_svids = 0;
+ kfree(pd->vdm_tx);
+ pd->vdm_tx = NULL;
+}
+
+static void dr_swap(struct usbpd *pd)
+{
+ reset_vdm_state(pd);
+
+ if (pd->current_dr == DR_DFP) {
+ stop_usb_host(pd);
+ start_usb_peripheral(pd);
+ pd->current_dr = DR_UFP;
+ } else if (pd->current_dr == DR_UFP) {
+ stop_usb_peripheral(pd);
+ pd->current_dr = DR_DFP;
+
+ /* don't start USB host until after SVDM discovery */
+ usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
+ SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+ }
+
+ pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ dual_role_instance_changed(pd->dual_role);
+}
+
+
+static void vconn_swap(struct usbpd *pd)
+{
+ int ret;
+
+ if (pd->vconn_enabled) {
+ pd->current_state = PE_VCS_WAIT_FOR_VCONN;
+ kick_sm(pd, VCONN_ON_TIME);
+ } else {
+ ret = regulator_enable(pd->vconn);
+ if (ret) {
+ usbpd_err(&pd->dev, "Unable to enable vconn\n");
+ return;
+ }
+
+ pd->vconn_enabled = true;
+
+ /*
+ * Small delay to ensure Vconn has ramped up. This is well
+ * below tVCONNSourceOn (100ms) so we still send PS_RDY within
+ * the allowed time.
+ */
+ usleep_range(5000, 10000);
+
+ ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+ usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+ PE_SRC_SEND_SOFT_RESET :
+ PE_SNK_SEND_SOFT_RESET);
+ return;
+ }
+ }
+}
+
+static int enable_vbus(struct usbpd *pd)
+{
+ union power_supply_propval val = {0};
+ int count = 100;
+ int ret;
+
+ if (!check_vsafe0v)
+ goto enable_reg;
+
+ /*
+ * Check to make sure there's no lingering charge on
+ * VBUS before enabling it as a source. If so poll here
+ * until it goes below VSafe0V (0.8V) before proceeding.
+ */
+ while (count--) {
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+ if (ret || val.intval <= 800000)
+ break;
+ usleep_range(20000, 30000);
+ }
+
+ if (count < 99)
+ msleep(100); /* need to wait an additional tCCDebounce */
+
+enable_reg:
+ ret = regulator_enable(pd->vbus);
+ if (ret)
+ usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret);
+ else
+ pd->vbus_enabled = true;
+
+ count = 10;
+ /*
+ * Check to make sure VBUS voltage reaches above Vsafe5Vmin (4.75v)
+ * before proceeding.
+ */
+ while (count--) {
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+ if (ret || val.intval >= 4750000) /*vsafe5Vmin*/
+ break;
+ usleep_range(10000, 12000); /* Delay between two reads */
+ }
+
+ if (ret)
+ msleep(100); /* Delay to wait for VBUS ramp up if read fails */
+
+ return ret;
+}
+
+static inline void rx_msg_cleanup(struct usbpd *pd)
+{
+ struct rx_msg *msg, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ list_for_each_entry_safe(msg, tmp, &pd->rx_q, entry) {
+ list_del(&msg->entry);
+ kfree(msg);
+ }
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+}
+
+/* For PD 3.0, check SinkTxOk before allowing initiating AMS */
+static inline bool is_sink_tx_ok(struct usbpd *pd)
+{
+ if (pd->spec_rev == USBPD_REV_30)
+ return pd->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+
+ return true;
+}
+
/* Handles current state and determines transitions */
/*
 * Main policy engine worker. Each invocation dequeues at most one RX
 * message, first handles the two asynchronous overriding events
 * (cable disconnect via current_pr == PR_NONE, and a received Hard
 * Reset), then dispatches on pd->current_state. Re-kicks itself if
 * more RX messages are pending when it finishes.
 */
static void usbpd_sm(struct work_struct *w)
{
	struct usbpd *pd = container_of(w, struct usbpd, sm_work);
	union power_supply_propval val = {0};
	int ret;
	struct rx_msg *rx_msg = NULL;
	unsigned long flags;

	usbpd_dbg(&pd->dev, "handle state %s\n",
			usbpd_state_strings[pd->current_state]);

	hrtimer_cancel(&pd->timer);
	pd->sm_queued = false;

	/* take a single pending RX message off the queue, if any */
	spin_lock_irqsave(&pd->rx_lock, flags);
	if (!list_empty(&pd->rx_q)) {
		rx_msg = list_first_entry(&pd->rx_q, struct rx_msg, entry);
		list_del(&rx_msg->entry);
	}
	spin_unlock_irqrestore(&pd->rx_lock, flags);

	/* Disconnect? */
	if (pd->current_pr == PR_NONE) {
		if (pd->current_state == PE_UNKNOWN)
			goto sm_done;

		if (pd->vconn_enabled) {
			regulator_disable(pd->vconn);
			pd->vconn_enabled = false;
		}

		usbpd_info(&pd->dev, "USB Type-C disconnect\n");

		if (pd->pd_phy_opened) {
			pd_phy_close();
			pd->pd_phy_opened = false;
		}

		pd->in_pr_swap = false;
		pd->pd_connected = false;
		pd->in_explicit_contract = false;
		pd->hard_reset_recvd = false;
		pd->caps_count = 0;
		pd->hard_reset_count = 0;
		pd->requested_voltage = 0;
		pd->requested_current = 0;
		pd->selected_pdo = pd->requested_pdo = 0;
		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
		rx_msg_cleanup(pd);

		/* val.intval is still 0 here: clear the PD charger flags */
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
				&val);

		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_ACTIVE, &val);

		if (pd->vbus_enabled) {
			regulator_disable(pd->vbus);
			pd->vbus_enabled = false;
		}

		if (pd->current_dr == DR_UFP)
			stop_usb_peripheral(pd);
		else if (pd->current_dr == DR_DFP)
			stop_usb_host(pd);

		pd->current_dr = DR_NONE;

		reset_vdm_state(pd);

		if (pd->current_state == PE_ERROR_RECOVERY)
			/* forced disconnect, wait before resetting to DRP */
			usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
				(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);

		val.intval = 0;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PR_SWAP, &val);

		/* set due to dual_role class "mode" change */
		if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
			val.intval = pd->forced_pr;
		else if (rev3_sink_only)
			val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
		else
			/* Set CC back to DRP toggle */
			val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;

		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
		pd->forced_pr = POWER_SUPPLY_TYPEC_PR_NONE;

		pd->current_state = PE_UNKNOWN;

		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
		dual_role_instance_changed(pd->dual_role);

		goto sm_done;
	}

	/* Hard reset? */
	if (pd->hard_reset_recvd) {
		pd->hard_reset_recvd = false;

		if (pd->requested_current) {
			val.intval = pd->requested_current = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
		}

		/* contract reverts to vSafe5V after a hard reset */
		pd->requested_voltage = 5000000;
		val.intval = pd->requested_voltage;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);

		pd->in_pr_swap = false;
		val.intval = 0;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PR_SWAP, &val);

		pd->in_explicit_contract = false;
		pd->selected_pdo = pd->requested_pdo = 0;
		pd->rdo = 0;
		rx_msg_cleanup(pd);
		reset_vdm_state(pd);
		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);

		if (pd->current_pr == PR_SINK) {
			usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
		} else {
			/* as source, wait out tPSHardReset from when the
			 * hard reset was actually received before acting */
			s64 delta = ktime_ms_delta(ktime_get(),
					pd->hard_reset_recvd_time);
			pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
			if (delta >= PS_HARD_RESET_TIME)
				kick_sm(pd, 0);
			else
				kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
		}

		goto sm_done;
	}

	/* Soft reset? */
	if (IS_CTRL(rx_msg, MSG_SOFT_RESET)) {
		usbpd_dbg(&pd->dev, "Handle soft reset\n");

		if (pd->current_pr == PR_SRC)
			pd->current_state = PE_SRC_SOFT_RESET;
		else if (pd->current_pr == PR_SINK)
			pd->current_state = PE_SNK_SOFT_RESET;
	}

	switch (pd->current_state) {
	case PE_UNKNOWN:
		val.intval = 0;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

		if (pd->current_pr == PR_SINK) {
			usbpd_set_state(pd, PE_SNK_STARTUP);
		} else if (pd->current_pr == PR_SRC) {
			if (!pd->vconn_enabled &&
					pd->typec_mode ==
					POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
				ret = regulator_enable(pd->vconn);
				if (ret)
					usbpd_err(&pd->dev, "Unable to enable vconn\n");
				else
					pd->vconn_enabled = true;
			}
			enable_vbus(pd);

			usbpd_set_state(pd, PE_SRC_STARTUP);
		}
		break;

	case PE_SRC_STARTUP:
		/* re-run the PE_SRC_STARTUP entry actions */
		usbpd_set_state(pd, PE_SRC_STARTUP);
		break;

	case PE_SRC_SEND_CAPABILITIES:
		ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
				ARRAY_SIZE(default_src_caps), SOP_MSG);
		if (ret) {
			pd->caps_count++;

			if (pd->caps_count == 10 && pd->current_dr == DR_DFP) {
				/* Likely not PD-capable, start host now */
				start_usb_host(pd, true);
			} else if (pd->caps_count >= PD_CAPS_COUNT) {
				usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
				usbpd_set_state(pd, PE_SRC_DISABLED);

				val.intval = 0;
				power_supply_set_property(pd->usb_psy,
						POWER_SUPPLY_PROP_PD_ACTIVE,
						&val);
				break;
			}

			kick_sm(pd, SRC_CAP_TIME);
			break;
		}

		/* transmit was successful if GoodCRC was received */
		pd->caps_count = 0;
		pd->hard_reset_count = 0;
		pd->pd_connected = true; /* we know peer is PD capable */

		/* wait for REQUEST */
		pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
		kick_sm(pd, SENDER_RESPONSE_TIME);

		val.intval = 1;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
		break;

	case PE_SRC_SEND_CAPABILITIES_WAIT:
		if (IS_DATA(rx_msg, MSG_REQUEST)) {
			pd->rdo = *(u32 *)rx_msg->payload;
			usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
		} else if (rx_msg) {
			usbpd_err(&pd->dev, "Unexpected message received\n");
			usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
		} else {
			/* SenderResponse timer expired with no REQUEST */
			usbpd_set_state(pd, PE_SRC_HARD_RESET);
		}
		break;

	case PE_SRC_READY:
		if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
			pd->current_state = PE_SRC_SEND_CAPABILITIES;
			kick_sm(pd, 0);
		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
					pd->sink_caps, pd->num_sink_caps,
					SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
			}
		} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
			pd->rdo = *(u32 *)rx_msg->payload;
			usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
		} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
			if (pd->vdm_state == MODE_ENTERED) {
				usbpd_set_state(pd, PE_SRC_HARD_RESET);
				break;
			}

			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			dr_swap(pd);
		} else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
			/* lock in current mode */
			set_power_role(pd, pd->current_pr);

			/* we'll happily accept Src->Sink requests anytime */
			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
			kick_sm(pd, SRC_TRANSITION_TIME);
			break;
		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			vconn_swap(pd);
		} else if (IS_DATA(rx_msg, MSG_VDM)) {
			handle_vdm_rx(pd, rx_msg);
		} else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
			/* unhandled messages */
			ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
					SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Not supported\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
			}
			break;
		} else if (pd->send_pr_swap) {
			pd->send_pr_swap = false;
			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev, "Error sending PR Swap\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			pd->current_state = PE_PRS_SRC_SNK_SEND_SWAP;
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (pd->send_dr_swap) {
			pd->send_dr_swap = false;
			ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev, "Error sending DR Swap\n");
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			pd->current_state = PE_DRS_SEND_DR_SWAP;
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else {
			/* nothing else pending: service outgoing VDMs */
			handle_vdm_tx(pd);
		}
		break;

	case PE_SRC_TRANSITION_TO_DEFAULT:
		if (pd->vconn_enabled)
			regulator_disable(pd->vconn);
		pd->vconn_enabled = false;

		if (pd->vbus_enabled)
			regulator_disable(pd->vbus);
		pd->vbus_enabled = false;

		if (pd->current_dr != DR_DFP) {
			extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
			pd->current_dr = DR_DFP;
			pd_phy_update_roles(pd->current_dr, pd->current_pr);
		}

		/* PE_UNKNOWN will turn on VBUS and go back to PE_SRC_STARTUP */
		pd->current_state = PE_UNKNOWN;
		kick_sm(pd, SRC_RECOVER_TIME);
		break;

	case PE_SRC_HARD_RESET:
		val.intval = 1;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

		pd_send_hard_reset(pd);
		pd->in_explicit_contract = false;
		pd->rdo = 0;
		rx_msg_cleanup(pd);
		reset_vdm_state(pd);
		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);

		pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
		kick_sm(pd, PS_HARD_RESET_TIME);
		break;

	case PE_SNK_STARTUP:
		/* re-run the PE_SNK_STARTUP entry actions */
		usbpd_set_state(pd, PE_SNK_STARTUP);
		break;

	case PE_SNK_DISCOVERY:
		if (!rx_msg) {
			if (pd->vbus_present)
				usbpd_set_state(pd,
						PE_SNK_WAIT_FOR_CAPABILITIES);

			/*
			 * Handle disconnection in the middle of PR_Swap.
			 * Since in psy_changed() if pd->in_pr_swap is true
			 * we ignore the typec_mode==NONE change since that is
			 * expected to happen. However if the cable really did
			 * get disconnected we need to check for it here after
			 * waiting for VBUS presence times out.
			 */
			if (!pd->typec_mode) {
				pd->current_pr = PR_NONE;
				kick_sm(pd, 0);
			}

			break;
		}
		/* else fall-through */

	case PE_SNK_WAIT_FOR_CAPABILITIES:
		pd->in_pr_swap = false;
		val.intval = 0;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PR_SWAP, &val);

		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
			val.intval = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
					&val);

			/* save the PDOs so userspace can further evaluate */
			memset(&pd->received_pdos, 0,
					sizeof(pd->received_pdos));
			memcpy(&pd->received_pdos, rx_msg->payload,
					min_t(size_t, rx_msg->data_len,
						sizeof(pd->received_pdos)));
			pd->src_cap_id++;

			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);

			val.intval = 1;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
		} else if (pd->hard_reset_count < 3) {
			usbpd_set_state(pd, PE_SNK_HARD_RESET);
		} else {
			usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");

			val.intval = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
					&val);

			val.intval = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_ACTIVE, &val);

			pd_phy_close();
			pd->pd_phy_opened = false;
		}
		break;

	case PE_SNK_SELECT_CAPABILITY:
		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
			u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
			bool same_pps = (pd->selected_pdo == pd->requested_pdo)
				&& (PD_SRC_PDO_TYPE(pdo) ==
					PD_SRC_PDO_TYPE_AUGMENTED);

			usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);

			/* prepare for voltage increase/decrease */
			val.intval = pd->requested_voltage;
			power_supply_set_property(pd->usb_psy,
				pd->requested_voltage >= pd->current_voltage ?
					POWER_SUPPLY_PROP_PD_VOLTAGE_MAX :
					POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
					&val);

			/*
			 * if changing voltages (not within the same PPS PDO),
			 * we must lower input current to pSnkStdby (2.5W).
			 * Calculate it and set PD_CURRENT_MAX accordingly.
			 */
			if (!same_pps &&
				pd->requested_voltage != pd->current_voltage) {
				int mv = max(pd->requested_voltage,
						pd->current_voltage) / 1000;
				val.intval = (2500000 / mv) * 1000;
				power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
			} else {
				/* decreasing current? */
				ret = power_supply_get_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
				if (!ret &&
					pd->requested_current < val.intval) {
					val.intval =
						pd->requested_current * 1000;
					power_supply_set_property(pd->usb_psy,
					     POWER_SUPPLY_PROP_PD_CURRENT_MAX,
					     &val);
				}
			}

			pd->selected_pdo = pd->requested_pdo;
		} else if (IS_CTRL(rx_msg, MSG_REJECT) ||
				IS_CTRL(rx_msg, MSG_WAIT)) {
			if (pd->in_explicit_contract)
				usbpd_set_state(pd, PE_SNK_READY);
			else
				usbpd_set_state(pd,
						PE_SNK_WAIT_FOR_CAPABILITIES);
		} else if (rx_msg) {
			usbpd_err(&pd->dev, "Invalid response to sink request\n");
			usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
		} else {
			/* timed out; go to hard reset */
			usbpd_set_state(pd, PE_SNK_HARD_RESET);
		}
		break;

	case PE_SNK_TRANSITION_SINK:
		if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
			val.intval = pd->requested_voltage;
			power_supply_set_property(pd->usb_psy,
				pd->requested_voltage >= pd->current_voltage ?
					POWER_SUPPLY_PROP_PD_VOLTAGE_MIN :
					POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
			pd->current_voltage = pd->requested_voltage;

			/* resume charging */
			val.intval = pd->requested_current * 1000; /* mA->uA */
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);

			usbpd_set_state(pd, PE_SNK_READY);
		} else {
			/* timed out; go to hard reset */
			usbpd_set_state(pd, PE_SNK_HARD_RESET);
		}
		break;

	case PE_SNK_READY:
		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
			/* save the PDOs so userspace can further evaluate */
			memset(&pd->received_pdos, 0,
					sizeof(pd->received_pdos));
			memcpy(&pd->received_pdos, rx_msg->payload,
					min_t(size_t, rx_msg->data_len,
						sizeof(pd->received_pdos)));
			pd->src_cap_id++;

			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
					pd->sink_caps, pd->num_sink_caps,
					SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
			}
		} else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) &&
				pd->spec_rev == USBPD_REV_20) {
			ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
					default_src_caps,
					ARRAY_SIZE(default_src_caps), SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
		} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
			if (pd->vdm_state == MODE_ENTERED) {
				usbpd_set_state(pd, PE_SNK_HARD_RESET);
				break;
			}

			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				/*
				 * NOTE(review): we are sink here; the other
				 * PE_SNK_READY error paths use
				 * PE_SNK_SEND_SOFT_RESET. This looks like a
				 * copy-paste from the PE_SRC_READY handler —
				 * confirm intent before changing.
				 */
				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
				break;
			}

			dr_swap(pd);
		} else if (IS_CTRL(rx_msg, MSG_PR_SWAP) &&
				pd->spec_rev == USBPD_REV_20) {
			/* lock in current mode */
			set_power_role(pd, pd->current_pr);

			/* TODO: should we Reject in certain circumstances? */
			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}

			pd->in_pr_swap = true;
			val.intval = 1;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PR_SWAP, &val);
			usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
			break;
		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) &&
				pd->spec_rev == USBPD_REV_20) {
			/*
			 * if VCONN is connected to VBUS, make sure we are
			 * not in high voltage contract, otherwise reject.
			 */
			if (!pd->vconn_is_external &&
					(pd->requested_voltage > 5000000)) {
				ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
						SOP_MSG);
				if (ret) {
					usbpd_err(&pd->dev, "Error sending Reject\n");
					usbpd_set_state(pd,
							PE_SNK_SEND_SOFT_RESET);
				}

				break;
			}

			ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Accept\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}

			vconn_swap(pd);
		} else if (IS_DATA(rx_msg, MSG_VDM)) {
			handle_vdm_rx(pd, rx_msg);
		} else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) {
			pd->send_get_src_cap_ext = false;
			ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
				0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev,
					"Error sending get_src_cap_ext\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (rx_msg &&
			IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) {
			if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
				usbpd_err(&pd->dev, "Invalid src cap ext db\n");
				break;
			}
			memcpy(&pd->src_cap_ext_db, rx_msg->payload,
				sizeof(pd->src_cap_ext_db));
			/* wake up any waiter in the sysfs/uapi path */
			complete(&pd->is_ready);
		} else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) {
			pd->send_get_pps_status = false;
			ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
				0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev,
					"Error sending get_pps_status\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (rx_msg &&
			IS_EXT(rx_msg, MSG_PPS_STATUS)) {
			if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
				usbpd_err(&pd->dev, "Invalid pps status db\n");
				break;
			}
			memcpy(&pd->pps_status_db, rx_msg->payload,
				sizeof(pd->pps_status_db));
			complete(&pd->is_ready);
		} else if (IS_DATA(rx_msg, MSG_ALERT)) {
			if (rx_msg->data_len != sizeof(pd->received_ado)) {
				usbpd_err(&pd->dev, "Invalid ado\n");
				break;
			}
			memcpy(&pd->received_ado, rx_msg->payload,
				sizeof(pd->received_ado));
			/* Alert is followed up by a Get_Status request */
			ret = pd_send_msg(pd, MSG_GET_STATUS, NULL,
				0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev,
					"Error sending get_status\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (rx_msg &&
			IS_EXT(rx_msg, MSG_STATUS)) {
			if (rx_msg->data_len != PD_STATUS_DB_LEN) {
				usbpd_err(&pd->dev, "Invalid status db\n");
				break;
			}
			memcpy(&pd->status_db, rx_msg->payload,
				sizeof(pd->status_db));
			kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
		} else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) {
			pd->send_get_battery_cap = false;
			ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
				&pd->get_battery_cap_db, 1, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev,
					"Error sending get_battery_cap\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (rx_msg &&
			IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) {
			if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
				usbpd_err(&pd->dev, "Invalid battery cap db\n");
				break;
			}
			memcpy(&pd->battery_cap_db, rx_msg->payload,
				sizeof(pd->battery_cap_db));
			complete(&pd->is_ready);
		} else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) {
			pd->send_get_battery_status = false;
			ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
				&pd->get_battery_status_db, 1, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev,
					"Error sending get_battery_status\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (rx_msg &&
			IS_EXT(rx_msg, MSG_BATTERY_STATUS)) {
			if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
				usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
				break;
			}
			memcpy(&pd->battery_sts_dobj, rx_msg->payload,
				sizeof(pd->battery_sts_dobj));
			complete(&pd->is_ready);
		} else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
			/* unhandled messages */
			ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
					SOP_MSG);
			if (ret) {
				usbpd_err(&pd->dev, "Error sending Not supported\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
			}
			break;
		} else if (pd->send_request) {
			pd->send_request = false;
			usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
		} else if (pd->send_pr_swap && is_sink_tx_ok(pd)) {
			pd->send_pr_swap = false;
			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev, "Error sending PR Swap\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}

			pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP;
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (pd->send_dr_swap && is_sink_tx_ok(pd)) {
			pd->send_dr_swap = false;
			ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
			if (ret) {
				dev_err(&pd->dev, "Error sending DR Swap\n");
				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
				break;
			}

			pd->current_state = PE_DRS_SEND_DR_SWAP;
			kick_sm(pd, SENDER_RESPONSE_TIME);
		} else if (is_sink_tx_ok(pd)) {
			handle_vdm_tx(pd);
		}
		break;

	case PE_SNK_TRANSITION_TO_DEFAULT:
		usbpd_set_state(pd, PE_SNK_STARTUP);
		break;

	case PE_SRC_SOFT_RESET:
	case PE_SNK_SOFT_RESET:
		pd_reset_protocol(pd);

		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "%s: Error sending Accept, do Hard Reset\n",
					usbpd_state_strings[pd->current_state]);
			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
			break;
		}

		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
				PE_SRC_SEND_CAPABILITIES :
				PE_SNK_WAIT_FOR_CAPABILITIES);
		break;

	case PE_SRC_SEND_SOFT_RESET:
	case PE_SNK_SEND_SOFT_RESET:
		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
					PE_SRC_SEND_CAPABILITIES :
					PE_SNK_WAIT_FOR_CAPABILITIES);
		} else {
			usbpd_err(&pd->dev, "%s: Did not see Accept, do Hard Reset\n",
					usbpd_state_strings[pd->current_state]);
			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
		}
		break;

	case PE_SNK_HARD_RESET:
		/* prepare charger for VBUS change */
		val.intval = 1;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

		pd->requested_voltage = 5000000;

		if (pd->requested_current) {
			val.intval = pd->requested_current = 0;
			power_supply_set_property(pd->usb_psy,
					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
		}

		val.intval = pd->requested_voltage;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);

		pd_send_hard_reset(pd);
		pd->in_explicit_contract = false;
		pd->selected_pdo = pd->requested_pdo = 0;
		pd->rdo = 0;
		reset_vdm_state(pd);
		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
		usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
		break;

	case PE_DRS_SEND_DR_SWAP:
		if (IS_CTRL(rx_msg, MSG_ACCEPT))
			dr_swap(pd);

		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
				PE_SRC_READY : PE_SNK_READY);
		break;

	case PE_PRS_SRC_SNK_SEND_SWAP:
		if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
			pd->current_state = PE_SRC_READY;
			break;
		}

		pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
		kick_sm(pd, SRC_TRANSITION_TIME);
		break;

	case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
		pd->in_pr_swap = true;
		val.intval = 1;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PR_SWAP, &val);
		pd->in_explicit_contract = false;

		if (pd->vbus_enabled) {
			regulator_disable(pd->vbus);
			pd->vbus_enabled = false;
		}

		/* PE_PRS_SRC_SNK_Assert_Rd */
		pd->current_pr = PR_SINK;
		set_power_role(pd, pd->current_pr);
		pd_phy_update_roles(pd->current_dr, pd->current_pr);

		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
		msleep(500);

		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
			usbpd_set_state(pd, PE_ERROR_RECOVERY);
			break;
		}

		pd->current_state = PE_PRS_SRC_SNK_WAIT_SOURCE_ON;
		kick_sm(pd, PS_SOURCE_ON);
		break;

	case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
		if (IS_CTRL(rx_msg, MSG_PS_RDY))
			usbpd_set_state(pd, PE_SNK_STARTUP);
		else
			usbpd_set_state(pd, PE_ERROR_RECOVERY);
		break;

	case PE_PRS_SNK_SRC_SEND_SWAP:
		if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
			pd->current_state = PE_SNK_READY;
			break;
		}

		pd->in_pr_swap = true;
		val.intval = 1;
		power_supply_set_property(pd->usb_psy,
				POWER_SUPPLY_PROP_PR_SWAP, &val);
		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
		break;

	case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
		if (!IS_CTRL(rx_msg, MSG_PS_RDY)) {
			usbpd_set_state(pd, PE_ERROR_RECOVERY);
			break;
		}

		/* PE_PRS_SNK_SRC_Assert_Rp */
		pd->current_pr = PR_SRC;
		set_power_role(pd, pd->current_pr);
		pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;

		/* fall-through */

	case PE_PRS_SNK_SRC_SOURCE_ON:
		enable_vbus(pd);

		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending PS_RDY\n");
			usbpd_set_state(pd, PE_ERROR_RECOVERY);
			break;
		}

		usbpd_set_state(pd, PE_SRC_STARTUP);
		break;

	case PE_VCS_WAIT_FOR_VCONN:
		if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
			/*
			 * hopefully redundant check but in case not enabled
			 * avoids unbalanced regulator disable count
			 */
			if (pd->vconn_enabled)
				regulator_disable(pd->vconn);
			pd->vconn_enabled = false;

			pd->current_state = pd->current_pr == PR_SRC ?
				PE_SRC_READY : PE_SNK_READY;
		} else {
			/* timed out; go to hard reset */
			usbpd_set_state(pd, pd->current_pr == PR_SRC ?
					PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
		}

		break;

	default:
		usbpd_err(&pd->dev, "Unhandled state %s\n",
				usbpd_state_strings[pd->current_state]);
		break;
	}

sm_done:
	kfree(rx_msg);

	spin_lock_irqsave(&pd->rx_lock, flags);
	ret = list_empty(&pd->rx_q);
	spin_unlock_irqrestore(&pd->rx_lock, flags);

	/* requeue if there are any new/pending RX messages */
	if (!ret)
		kick_sm(pd, 0);

	if (!pd->sm_queued)
		pm_relax(&pd->dev);
}
+
+/*
+ * Map the advertised Type-C source current (Rp pull-up level) to a
+ * human-readable label for logging. Unknown modes map to "".
+ */
+static inline const char *src_current(enum power_supply_typec_mode typec_mode)
+{
+	if (typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		return "default";
+
+	if (typec_mode == POWER_SUPPLY_TYPEC_SOURCE_MEDIUM)
+		return "medium - 1.5A";
+
+	if (typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH)
+		return "high - 3.0A";
+
+	return "";
+}
+
+/*
+ * power_supply notifier callback for the "usb" psy.
+ *
+ * Reads the current Type-C mode, VBUS presence and charger type, caches
+ * them in the pd instance and kicks the policy-engine state machine when
+ * the CC state changes (or, during sink hard reset, when VBUS changes).
+ * Returns 0, or a negative errno if a property read fails.
+ */
+static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
+{
+	struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
+	union power_supply_propval val;
+	enum power_supply_typec_mode typec_mode;
+	int ret;
+
+	/* only interested in property changes on our own usb psy */
+	if (ptr != pd->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
+		return 0;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
+		return ret;
+	}
+
+	typec_mode = val.intval;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PE_START, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB PROP_PE_START: %d\n",
+				ret);
+		return ret;
+	}
+
+	/* Don't proceed if PE_START=0 as other props may still change */
+	if (!val.intval && !pd->pd_connected &&
+			typec_mode != POWER_SUPPLY_TYPEC_NONE)
+		return 0;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
+		return ret;
+	}
+
+	pd->vbus_present = val.intval;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_REAL_TYPE, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n", ret);
+		return ret;
+	}
+
+	pd->psy_type = val.intval;
+
+	/*
+	 * For sink hard reset, state machine needs to know when VBUS changes
+	 * - when in PE_SNK_TRANSITION_TO_DEFAULT, notify when VBUS falls
+	 * - when in PE_SNK_DISCOVERY, notify when VBUS rises
+	 */
+	if (typec_mode && ((!pd->vbus_present &&
+			pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) ||
+		(pd->vbus_present && pd->current_state == PE_SNK_DISCOVERY))) {
+		usbpd_dbg(&pd->dev, "hard reset: typec mode:%d present:%d\n",
+			typec_mode, pd->vbus_present);
+		pd->typec_mode = typec_mode;
+		kick_sm(pd, 0);
+		return 0;
+	}
+
+	/* no CC change; nothing else to do */
+	if (pd->typec_mode == typec_mode)
+		return 0;
+
+	pd->typec_mode = typec_mode;
+
+	usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d orientation:%d\n",
+			typec_mode, pd->vbus_present, pd->psy_type,
+			usbpd_get_plug_orientation(pd));
+
+	switch (typec_mode) {
+	/* Disconnect */
+	case POWER_SUPPLY_TYPEC_NONE:
+		if (pd->in_pr_swap) {
+			usbpd_dbg(&pd->dev, "Ignoring disconnect due to PR swap\n");
+			return 0;
+		}
+
+		pd->current_pr = PR_NONE;
+		break;
+
+	/* Sink states */
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
+				src_current(typec_mode));
+
+		/* if waiting for SinkTxOk to start an AMS */
+		if (pd->spec_rev == USBPD_REV_30 &&
+			typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH &&
+			(pd->send_pr_swap || pd->send_dr_swap || pd->vdm_tx))
+			break;
+
+		if (pd->current_pr == PR_SINK)
+			return 0;
+
+		/*
+		 * Unexpected if not in PR swap; need to force disconnect from
+		 * source so we can turn off VBUS, Vconn, PD PHY etc.
+		 */
+		if (pd->current_pr == PR_SRC) {
+			usbpd_info(&pd->dev, "Forcing disconnect from source mode\n");
+			pd->current_pr = PR_NONE;
+			break;
+		}
+
+		pd->current_pr = PR_SINK;
+		break;
+
+	/* Source states */
+	case POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE:
+	case POWER_SUPPLY_TYPEC_SINK:
+		usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
+				typec_mode == POWER_SUPPLY_TYPEC_SINK ?
+					"" : " (powered)");
+
+		if (pd->current_pr == PR_SRC)
+			return 0;
+
+		pd->current_pr = PR_SRC;
+		break;
+
+	/* accessory modes: logged only; no PD role to assume */
+	case POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY:
+		usbpd_info(&pd->dev, "Type-C Debug Accessory connected\n");
+		break;
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
+		usbpd_info(&pd->dev, "Type-C Analog Audio Adapter connected\n");
+		break;
+	default:
+		usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n",
+				typec_mode);
+		break;
+	}
+
+	/* queue state machine due to CC state change */
+	kick_sm(pd, 0);
+	return 0;
+}
+
+/* Properties exposed through the Android dual_role_usb class device */
+static enum dual_role_property usbpd_dr_properties[] = {
+	DUAL_ROLE_PROP_SUPPORTED_MODES,
+	DUAL_ROLE_PROP_MODE,
+	DUAL_ROLE_PROP_PR,
+	DUAL_ROLE_PROP_DR,
+};
+
+/*
+ * dual_role get_property callback: translate the driver's current
+ * power/data role into the dual_role class's enum values.
+ * Returns 0 on success, -ENODEV if no drvdata, -ENODATA for
+ * unsupported properties.
+ */
+static int usbpd_dr_get_property(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop, unsigned int *val)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
+	if (!pd)
+		return -ENODEV;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		/* For now associate UFP/DFP with data role only */
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_MODE_UFP;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_MODE_DFP;
+		else
+			*val = DUAL_ROLE_PROP_MODE_NONE;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		if (pd->current_pr == PR_SRC)
+			*val = DUAL_ROLE_PROP_PR_SRC;
+		else if (pd->current_pr == PR_SINK)
+			*val = DUAL_ROLE_PROP_PR_SNK;
+		else
+			*val = DUAL_ROLE_PROP_PR_NONE;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_DR_DEVICE;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_DR_HOST;
+		else
+			*val = DUAL_ROLE_PROP_DR_NONE;
+		break;
+	default:
+		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+/*
+ * dual_role set_property callback: change MODE (forced CC re-connect),
+ * DR (PD Data_Role swap) or PR (PD Power_Role swap).
+ *
+ * Blocks (msleep / wait_for_completion), so it must be called from
+ * process context. DR/PR swaps are serialized by pd->swap_lock and
+ * signalled complete via pd->is_ready by the state machine.
+ * Returns 0 on success or a negative errno.
+ */
+static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop, const unsigned int *val)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+	bool do_swap = false;
+	int wait_count = 5;
+
+	if (!pd)
+		return -ENODEV;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		usbpd_dbg(&pd->dev, "Setting mode to %d\n", *val);
+
+		if (pd->current_state == PE_UNKNOWN) {
+			usbpd_warn(&pd->dev, "No active connection. Don't allow MODE change\n");
+			return -EAGAIN;
+		}
+
+		/*
+		 * Forces disconnect on CC and re-establishes connection.
+		 * This does not use PD-based PR/DR swap
+		 */
+		if (*val == DUAL_ROLE_PROP_MODE_UFP)
+			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SINK;
+		else if (*val == DUAL_ROLE_PROP_MODE_DFP)
+			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SOURCE;
+
+		/* new mode will be applied in disconnect handler */
+		set_power_role(pd, PR_NONE);
+
+		/*
+		 * wait until it takes effect (up to ~100ms).
+		 * NOTE(review): if forced_pr clears during the final
+		 * msleep this still reports -ETIMEDOUT — confirm whether
+		 * that spurious timeout matters to callers.
+		 */
+		while (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE &&
+							--wait_count)
+			msleep(20);
+
+		if (!wait_count) {
+			usbpd_err(&pd->dev, "setting mode timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		break;
+
+	case DUAL_ROLE_PROP_DR:
+		usbpd_dbg(&pd->dev, "Setting data_role to %d\n", *val);
+
+		/* swap only needed if requested role differs from current */
+		if (*val == DUAL_ROLE_PROP_DR_HOST) {
+			if (pd->current_dr == DR_UFP)
+				do_swap = true;
+		} else if (*val == DUAL_ROLE_PROP_DR_DEVICE) {
+			if (pd->current_dr == DR_DFP)
+				do_swap = true;
+		} else {
+			usbpd_warn(&pd->dev, "setting data_role to 'none' unsupported\n");
+			return -ENOTSUPP;
+		}
+
+		if (do_swap) {
+			/* swaps are only legal in an explicit contract */
+			if (pd->current_state != PE_SRC_READY &&
+					pd->current_state != PE_SNK_READY) {
+				usbpd_err(&pd->dev, "data_role swap not allowed: PD not in Ready state\n");
+				return -EAGAIN;
+			}
+
+			/* PD 3.0 sinks may only start an AMS on SinkTxOk */
+			if (pd->current_state == PE_SNK_READY &&
+					!is_sink_tx_ok(pd)) {
+				usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
+				return -EAGAIN;
+			}
+
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
+			pd->send_dr_swap = true;
+			kick_sm(pd, 0);
+
+			/* wait for operation to complete */
+			if (!wait_for_completion_timeout(&pd->is_ready,
+					msecs_to_jiffies(100))) {
+				usbpd_err(&pd->dev, "data_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
+				return -ETIMEDOUT;
+			}
+
+			mutex_unlock(&pd->swap_lock);
+
+			/* verify the state machine actually switched roles */
+			if ((*val == DUAL_ROLE_PROP_DR_HOST &&
+					pd->current_dr != DR_DFP) ||
+				(*val == DUAL_ROLE_PROP_DR_DEVICE &&
+					pd->current_dr != DR_UFP)) {
+				usbpd_err(&pd->dev, "incorrect state (%s) after data_role swap\n",
+						pd->current_dr == DR_DFP ?
+						"dfp" : "ufp");
+				return -EPROTO;
+			}
+		}
+
+		break;
+
+	case DUAL_ROLE_PROP_PR:
+		usbpd_dbg(&pd->dev, "Setting power_role to %d\n", *val);
+
+		/* swap only needed if requested role differs from current */
+		if (*val == DUAL_ROLE_PROP_PR_SRC) {
+			if (pd->current_pr == PR_SINK)
+				do_swap = true;
+		} else if (*val == DUAL_ROLE_PROP_PR_SNK) {
+			if (pd->current_pr == PR_SRC)
+				do_swap = true;
+		} else {
+			usbpd_warn(&pd->dev, "setting power_role to 'none' unsupported\n");
+			return -ENOTSUPP;
+		}
+
+		if (do_swap) {
+			/* swaps are only legal in an explicit contract */
+			if (pd->current_state != PE_SRC_READY &&
+					pd->current_state != PE_SNK_READY) {
+				usbpd_err(&pd->dev, "power_role swap not allowed: PD not in Ready state\n");
+				return -EAGAIN;
+			}
+
+			/* PD 3.0 sinks may only start an AMS on SinkTxOk */
+			if (pd->current_state == PE_SNK_READY &&
+					!is_sink_tx_ok(pd)) {
+				usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
+				return -EAGAIN;
+			}
+
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
+			pd->send_pr_swap = true;
+			kick_sm(pd, 0);
+
+			/* wait for operation to complete */
+			if (!wait_for_completion_timeout(&pd->is_ready,
+					msecs_to_jiffies(2000))) {
+				usbpd_err(&pd->dev, "power_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
+				return -ETIMEDOUT;
+			}
+
+			mutex_unlock(&pd->swap_lock);
+
+			/* verify the state machine actually switched roles */
+			if ((*val == DUAL_ROLE_PROP_PR_SRC &&
+					pd->current_pr != PR_SRC) ||
+				(*val == DUAL_ROLE_PROP_PR_SNK &&
+					pd->current_pr != PR_SINK)) {
+				usbpd_err(&pd->dev, "incorrect state (%s) after power_role swap\n",
+						pd->current_pr == PR_SRC ?
+						"source" : "sink");
+				return -EPROTO;
+			}
+		}
+		break;
+
+	default:
+		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+/*
+ * dual_role property_is_writeable callback. MODE is always writeable;
+ * PR/DR only while the contract is established (SRC/SNK Ready).
+ */
+static int usbpd_dr_prop_writeable(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
+	if (prop == DUAL_ROLE_PROP_MODE)
+		return 1;
+
+	if ((prop == DUAL_ROLE_PROP_DR || prop == DUAL_ROLE_PROP_PR) && pd)
+		return pd->current_state == PE_SNK_READY ||
+			pd->current_state == PE_SRC_READY;
+
+	return 0;
+}
+
+/*
+ * Class dev_uevent callback: export the current PD connection state
+ * (roles, PDOs, RDO, contract type, alt mode, ADO/SDB) as uevent
+ * variables for userspace consumers. Always returns 0.
+ */
+static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	add_uevent_var(env, "DATA_ROLE=%s", pd->current_dr == DR_DFP ?
+			"dfp" : "ufp");
+
+	if (pd->current_pr == PR_SINK) {
+		/* as sink: report the peer's source capabilities */
+		add_uevent_var(env, "POWER_ROLE=sink");
+		add_uevent_var(env, "SRC_CAP_ID=%d", pd->src_cap_id);
+
+		for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					pd->received_pdos[i]);
+
+		add_uevent_var(env, "REQUESTED_PDO=%d", pd->requested_pdo);
+		add_uevent_var(env, "SELECTED_PDO=%d", pd->selected_pdo);
+	} else {
+		/* as source: report our own advertised capabilities */
+		add_uevent_var(env, "POWER_ROLE=source");
+		for (i = 0; i < ARRAY_SIZE(default_src_caps); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					default_src_caps[i]);
+	}
+
+	add_uevent_var(env, "RDO=%08x", pd->rdo);
+	add_uevent_var(env, "CONTRACT=%s", pd->in_explicit_contract ?
+				"explicit" : "implicit");
+	add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+
+	add_uevent_var(env, "ADO=%08x", pd->received_ado);
+	for (i = 0; i < PD_STATUS_DB_LEN; i++)
+		add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]);
+
+	return 0;
+}
+
+/* sysfs: "explicit" once a PD contract is negotiated, else "implicit" */
+static ssize_t contract_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			pd->in_explicit_contract ? "explicit" : "implicit");
+}
+static DEVICE_ATTR_RO(contract);
+
+/* sysfs: current power role ("source"/"sink"/"none") */
+static ssize_t current_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->current_pr == PR_SINK)
+		pr = "sink";
+	else if (pd->current_pr == PR_SRC)
+		pr = "source";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(current_pr);
+
+/*
+ * sysfs: power role implied by the initial CC attach (before any PR
+ * swap), derived from the cached typec_mode enum ordering.
+ */
+static ssize_t initial_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		pr = "sink";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		pr = "source";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(initial_pr);
+
+/* sysfs: current data role ("dfp"/"ufp"/"none") */
+static ssize_t current_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->current_dr == DR_UFP)
+		dr = "ufp";
+	else if (pd->current_dr == DR_DFP)
+		dr = "dfp";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(current_dr);
+
+/* sysfs: data role implied by the initial CC attach (before DR swap) */
+static ssize_t initial_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		dr = "ufp";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		dr = "dfp";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(initial_dr);
+
+/* sysfs: id incremented each time new Source_Capabilities are received */
+static ssize_t src_cap_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->src_cap_id);
+}
+static DEVICE_ATTR_RO(src_cap_id);
+
+/*
+ * Dump received source PDOs in human-readable format.
+ *
+ * Decodes each non-zero entry of pd->received_pdos (fixed, battery,
+ * variable or augmented/PPS supply) into the sysfs page. Output stops
+ * at the first zero PDO or when the page is full.
+ */
+static ssize_t pdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+	ssize_t cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++) {
+		u32 pdo = pd->received_pdos[i];
+
+		if (pdo == 0)
+			break;
+
+		cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "PDO %d\n", i + 1);
+
+		if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_FIXED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tFixed supply\n"
+					"\tDual-Role Power:%d\n"
+					"\tUSB Suspend Supported:%d\n"
+					"\tExternally Powered:%d\n"
+					"\tUSB Communications Capable:%d\n"
+					"\tData Role Swap:%d\n"
+					"\tPeak Current:%d\n"
+					"\tVoltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_FIXED_PR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_USB_SUSP(pdo),
+					PD_SRC_PDO_FIXED_EXT_POWERED(pdo),
+					PD_SRC_PDO_FIXED_USB_COMM(pdo),
+					PD_SRC_PDO_FIXED_DR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_PEAK_CURR(pdo),
+					PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50,
+					PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_BATTERY) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tBattery supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Power:%d (mW)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 250);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_VARIABLE) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tVariable supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_AUGMENTED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tProgrammable Power supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_APDO_MAX_VOLT(pdo) * 100,
+					PD_APDO_MIN_VOLT(pdo) * 100,
+					PD_APDO_MAX_CURR(pdo) * 50);
+		} else {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"Invalid PDO\n");
+		}
+
+		/*
+		 * scnprintf() keeps cnt <= PAGE_SIZE - 1, but the raw
+		 * append below could previously advance cnt to PAGE_SIZE
+		 * and then write one byte past the sysfs page on the next
+		 * iteration; bound it explicitly.
+		 */
+		if (cnt < PAGE_SIZE - 1)
+			buf[cnt++] = '\n';
+	}
+
+	return cnt;
+}
+static DEVICE_ATTR_RO(pdo_h);
+
+static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
+		char *buf);
+
+/* One read-only sysfs attribute per PDO slot, all backed by pdo_n_show() */
+#define PDO_ATTR(n) {					\
+	.attr	= { .name = __stringify(pdo##n), .mode = S_IRUGO },	\
+	.show	= pdo_n_show,				\
+}
+static struct device_attribute dev_attr_pdos[] = {
+	PDO_ATTR(1),
+	PDO_ATTR(2),
+	PDO_ATTR(3),
+	PDO_ATTR(4),
+	PDO_ATTR(5),
+	PDO_ATTR(6),
+	PDO_ATTR(7),
+};
+
+/*
+ * Shared show handler for pdo1..pdo7: identify which attribute was read
+ * by pointer comparison against dev_attr_pdos[] and dump that raw PDO.
+ */
+static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_pdos); i++)
+		if (attr == &dev_attr_pdos[i])
+			/* dump the PDO as a hex string */
+			return snprintf(buf, PAGE_SIZE, "%08x\n",
+					pd->received_pdos[i]);
+
+	usbpd_err(&pd->dev, "Invalid PDO index\n");
+	return -EINVAL;
+}
+
+/*
+ * sysfs: request a specific source PDO (and optionally uV/uA for PPS).
+ * Input: "<src_cap_id> <pdo#> [<uV> <uA>]". The src_cap_id must match
+ * the currently cached capabilities to avoid racing a re-advertisement.
+ * Blocks up to 1s waiting for the Request/Accept/PS_RDY exchange;
+ * serialized against role swaps via swap_lock.
+ */
+static ssize_t select_pdo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int src_cap_id;
+	int pdo, uv = 0, ua = 0;
+	int ret;
+
+	mutex_lock(&pd->swap_lock);
+
+	/* Only allowed if we are already in explicit sink contract */
+	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "select_pdo: Cannot select new PDO yet\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* 2 fields = fixed/variable request, 4 fields = PPS request */
+	ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
+	if (ret != 2 && ret != 4) {
+		usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO> [<uV> <uA>]\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (src_cap_id != pd->src_cap_id) {
+		usbpd_err(&pd->dev, "select_pdo: src_cap_id mismatch.  Requested:%d, current:%d\n",
+				src_cap_id, pd->src_cap_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* PDO object positions are 1-based, max 7 per the PD spec */
+	if (pdo < 1 || pdo > 7) {
+		usbpd_err(&pd->dev, "select_pdo: invalid PDO:%d\n", pdo);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = pd_select_pdo(pd, pdo, uv, ua);
+	if (ret)
+		goto out;
+
+	reinit_completion(&pd->is_ready);
+	pd->send_request = true;
+	kick_sm(pd, 0);
+
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "select_pdo: request timed out\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* determine if request was accepted/rejected */
+	if (pd->selected_pdo != pd->requested_pdo ||
+			pd->current_voltage != pd->requested_voltage) {
+		usbpd_err(&pd->dev, "select_pdo: request rejected\n");
+		ret = -EINVAL;
+	}
+
+out:
+	pd->send_request = false;
+	mutex_unlock(&pd->swap_lock);
+	return ret ? ret : size;
+}
+
+/* sysfs: index (1-based) of the currently selected source PDO */
+static ssize_t select_pdo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->selected_pdo);
+}
+static DEVICE_ATTR_RW(select_pdo);
+
+/* sysfs: raw Request Data Object last sent to the source */
+static ssize_t rdo_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	/* dump the RDO as a hex string */
+	return snprintf(buf, PAGE_SIZE, "%08x\n", pd->rdo);
+}
+static DEVICE_ATTR_RO(rdo);
+
+/*
+ * sysfs: decode the last-sent RDO into human-readable form, using the
+ * type of the referenced source PDO to pick the field layout.
+ *
+ * NOTE(review): PD_RDO_OBJ_POS() is 1-based per the PD spec, yet it is
+ * used directly to index received_pdos[], which the rest of this file
+ * (pdo_n_show, select_pdo) treats as 0-based — confirm whether this
+ * should be received_pdos[pos - 1].
+ */
+static ssize_t rdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int pos = PD_RDO_OBJ_POS(pd->rdo);
+	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos]);
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
+			"\tObj Pos:%d\n"
+			"\tGiveback:%d\n"
+			"\tCapability Mismatch:%d\n"
+			"\tUSB Communications Capable:%d\n"
+			"\tNo USB Suspend:%d\n",
+			PD_RDO_OBJ_POS(pd->rdo),
+			PD_RDO_GIVEBACK(pd->rdo),
+			PD_RDO_MISMATCH(pd->rdo),
+			PD_RDO_USB_COMM(pd->rdo),
+			PD_RDO_NO_USB_SUSP(pd->rdo));
+
+	switch (type) {
+	case PD_SRC_PDO_TYPE_FIXED:
+	case PD_SRC_PDO_TYPE_VARIABLE:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Fixed/Variable)\n"
+				"\tOperating Current:%d (mA)\n"
+				"\t%s Current:%d (mA)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 10,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10);
+		break;
+
+	case PD_SRC_PDO_TYPE_BATTERY:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Battery)\n"
+				"\tOperating Power:%d (mW)\n"
+				"\t%s Power:%d (mW)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 250,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
+		break;
+
+	case PD_SRC_PDO_TYPE_AUGMENTED:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Programmable)\n"
+				"\tOutput Voltage:%d (mV)\n"
+				"\tOperating Current:%d (mA)\n",
+				PD_RDO_PROG_VOLTAGE(pd->rdo) * 20,
+				PD_RDO_PROG_CURR(pd->rdo) * 50);
+		break;
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(rdo_h);
+
+/*
+ * sysfs: writing any non-zero value forces a PD Hard Reset appropriate
+ * to the current power role (source or sink).
+ */
+static ssize_t hard_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int val = 0;
+
+	if (sscanf(buf, "%d\n", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+
+	return size;
+}
+static DEVICE_ATTR_WO(hard_reset);
+
+/*
+ * Helper for the GET_* sysfs handlers: set the given state-machine TX
+ * flag, kick the SM and block up to 1s for the reply (signalled via
+ * pd->is_ready). The flag is always cleared on exit.
+ *
+ * NOTE(review): unlike select_pdo_store(), this runs without holding
+ * swap_lock, so concurrent sysfs readers could race on is_ready —
+ * confirm whether serialization is needed here.
+ */
+static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
+{
+	int ret = 0;
+
+	/* Only allowed if we are already in explicit sink contract */
+	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "%s: Cannot send msg\n", __func__);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	reinit_completion(&pd->is_ready);
+	*msg_tx_flag = true;
+	kick_sm(pd, 0);
+
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "%s: request timed out\n", __func__);
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	*msg_tx_flag = false;
+	return ret;
+
+}
+
+/*
+ * sysfs: send Get_Source_Cap_Extended (PD 3.0 only) and dump the
+ * returned extended-capabilities bytes, one per line.
+ */
+static ssize_t get_src_cap_ext_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int i, ret, len = 0;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->spec_rev == USBPD_REV_20)
+		return -EINVAL;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+				pd->src_cap_ext_db[i]);
+	return len;
+}
+static DEVICE_ATTR_RO(get_src_cap_ext);
+
+/* sysfs: send Get_PPS_Status (PD 3.0 only) and show the reply */
+static ssize_t get_pps_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->spec_rev == USBPD_REV_20)
+		return -EINVAL;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db);
+}
+static DEVICE_ATTR_RO(get_pps_status);
+
+/* sysfs: last Alert Data Object received from the port partner */
+static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	/* dump the ADO as a hex string */
+	return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado);
+}
+static DEVICE_ATTR_RO(rx_ado);
+
+/*
+ * sysfs: write a battery reference index and send Get_Battery_Cap
+ * (PD 3.0 only); a failed parse poisons the cached index with -EINVAL
+ * so the paired show handler refuses stale data.
+ */
+static ssize_t get_battery_cap_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int val, ret;
+
+	if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+		pd->get_battery_cap_db = -EINVAL;
+		return -EINVAL;
+	}
+
+	pd->get_battery_cap_db = val;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
+
+	return ret ? ret : size;
+}
+
+/* sysfs: dump the Battery_Capabilities reply, one byte per line */
+static ssize_t get_battery_cap_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int i, len = 0;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->get_battery_cap_db == -EINVAL)
+		return -EINVAL;
+
+	for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+				pd->battery_cap_db[i]);
+	return len;
+}
+static DEVICE_ATTR_RW(get_battery_cap);
+
+/*
+ * sysfs: write a battery reference index and send Get_Battery_Status
+ * (PD 3.0 only); same -EINVAL poisoning scheme as get_battery_cap.
+ */
+static ssize_t get_battery_status_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int val, ret;
+
+	if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+		pd->get_battery_status_db = -EINVAL;
+		return -EINVAL;
+	}
+
+	pd->get_battery_status_db = val;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
+
+	return ret ? ret : size;
+}
+
+/* sysfs: show the Battery_Status data object from the reply */
+static ssize_t get_battery_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->get_battery_status_db == -EINVAL)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj);
+}
+static DEVICE_ATTR_RW(get_battery_status);
+
+/* All sysfs attributes published under /sys/class/usbpd/usbpdN/ */
+static struct attribute *usbpd_attrs[] = {
+	&dev_attr_contract.attr,
+	&dev_attr_initial_pr.attr,
+	&dev_attr_current_pr.attr,
+	&dev_attr_initial_dr.attr,
+	&dev_attr_current_dr.attr,
+	&dev_attr_src_cap_id.attr,
+	&dev_attr_pdo_h.attr,
+	&dev_attr_pdos[0].attr,
+	&dev_attr_pdos[1].attr,
+	&dev_attr_pdos[2].attr,
+	&dev_attr_pdos[3].attr,
+	&dev_attr_pdos[4].attr,
+	&dev_attr_pdos[5].attr,
+	&dev_attr_pdos[6].attr,
+	&dev_attr_select_pdo.attr,
+	&dev_attr_rdo.attr,
+	&dev_attr_rdo_h.attr,
+	&dev_attr_hard_reset.attr,
+	&dev_attr_get_src_cap_ext.attr,
+	&dev_attr_get_pps_status.attr,
+	&dev_attr_rx_ado.attr,
+	&dev_attr_get_battery_cap.attr,
+	&dev_attr_get_battery_status.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(usbpd);
+
+/* "usbpd" device class; uevents carry the PD state (see usbpd_uevent) */
+static struct class usbpd_class = {
+	.name = "usbpd",
+	.owner = THIS_MODULE,
+	.dev_uevent = usbpd_uevent,
+	.dev_groups = usbpd_groups,
+};
+
+/* class_find_device() match: the usbpd dev whose parent is @data */
+static int match_usbpd_device(struct device *dev, const void *data)
+{
+	return dev->parent == data;
+}
+
+/* devres release: drop the device reference taken in devm_usbpd_get_by_phandle() */
+static void devm_usbpd_put(struct device *dev, void *res)
+{
+	struct usbpd **ppd = res;
+
+	put_device(&(*ppd)->dev);
+}
+
+/*
+ * devm_usbpd_get_by_phandle - look up a usbpd instance via a DT phandle
+ * @dev: consumer device; the resolved instance is devres-managed on it
+ * @phandle: property name in @dev's OF node referencing the PD device
+ *
+ * Returns the usbpd instance, or an ERR_PTR: -EAGAIN before class init,
+ * -EINVAL/-ENXIO for missing OF data, -ENODEV if the platform device
+ * doesn't exist, -EPROBE_DEFER if it hasn't finished probing yet.
+ * The reference on the usbpd device is dropped automatically when @dev
+ * is unbound (via devm_usbpd_put).
+ */
+struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
+{
+	struct usbpd **ptr, *pd = NULL;
+	struct device_node *pd_np;
+	struct platform_device *pdev;
+	struct device *pd_dev;
+
+	if (!usbpd_class.p) /* usbpd_init() not yet called */
+		return ERR_PTR(-EAGAIN);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	pd_np = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!pd_np)
+		return ERR_PTR(-ENXIO);
+
+	pdev = of_find_device_by_node(pd_np);
+	/* node reference no longer needed once the pdev is resolved */
+	of_node_put(pd_np);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pd_dev = class_find_device(&usbpd_class, NULL, &pdev->dev,
+			match_usbpd_device);
+	if (!pd_dev) {
+		platform_device_put(pdev);
+		/* device was found but maybe hadn't probed yet, so defer */
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr) {
+		put_device(pd_dev);
+		platform_device_put(pdev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pd = dev_get_drvdata(pd_dev);
+	if (!pd) {
+		/*
+		 * drvdata not set yet: release everything acquired above
+		 * (previously leaked ptr, pd_dev and pdev refs) and defer.
+		 */
+		devres_free(ptr);
+		put_device(pd_dev);
+		platform_device_put(pdev);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	*ptr = pd;
+	devres_add(dev, ptr);
+
+	return pd;
+}
+EXPORT_SYMBOL(devm_usbpd_get_by_phandle);
+
+/* running count used to name usbpd0, usbpd1, ... */
+static int num_pd_instances;
+
+/**
+ * usbpd_create - Create a new instance of USB PD protocol/policy engine
+ * @parent - parent device to associate with
+ *
+ * This creates a new usbpd class device which manages the state of a
+ * USB PD-capable port. The parent device that is passed in should be
+ * associated with the physical device port, e.g. a PD PHY.
+ *
+ * Return: struct usbpd pointer, or an ERR_PTR value
+ */
+struct usbpd *usbpd_create(struct device *parent)
+{
+	int ret;
+	struct usbpd *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	device_initialize(&pd->dev);
+	pd->dev.class = &usbpd_class;
+	pd->dev.parent = parent;
+	dev_set_drvdata(&pd->dev, pd);
+
+	ret = dev_set_name(&pd->dev, "usbpd%d", num_pd_instances++);
+	if (ret)
+		goto free_pd;
+
+	ret = device_init_wakeup(&pd->dev, true);
+	if (ret)
+		goto free_pd;
+
+	ret = device_add(&pd->dev);
+	if (ret)
+		goto free_pd;
+
+	/* ordered wq: SM work items must never run concurrently */
+	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
+	if (!pd->wq) {
+		ret = -ENOMEM;
+		goto del_pd;
+	}
+	INIT_WORK(&pd->sm_work, usbpd_sm);
+	hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	pd->timer.function = pd_timeout;
+	mutex_init(&pd->swap_lock);
+
+	pd->usb_psy = power_supply_get_by_name("usb");
+	if (!pd->usb_psy) {
+		usbpd_dbg(&pd->dev, "Could not get USB power_supply, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto destroy_wq;
+	}
+
+	/*
+	 * associate extcon with the parent dev as it could have a DT
+	 * node which will be useful for extcon_get_edev_by_phandle()
+	 */
+	pd->extcon = devm_extcon_dev_allocate(parent, usbpd_extcon_cable);
+	if (IS_ERR(pd->extcon)) {
+		usbpd_err(&pd->dev, "failed to allocate extcon device\n");
+		ret = PTR_ERR(pd->extcon);
+		goto put_psy;
+	}
+
+	pd->extcon->mutually_exclusive = usbpd_extcon_exclusive;
+	ret = devm_extcon_dev_register(parent, pd->extcon);
+	if (ret) {
+		usbpd_err(&pd->dev, "failed to register extcon device\n");
+		goto put_psy;
+	}
+
+	pd->vbus = devm_regulator_get(parent, "vbus");
+	if (IS_ERR(pd->vbus)) {
+		ret = PTR_ERR(pd->vbus);
+		goto put_psy;
+	}
+
+	pd->vconn = devm_regulator_get(parent, "vconn");
+	if (IS_ERR(pd->vconn)) {
+		ret = PTR_ERR(pd->vconn);
+		goto put_psy;
+	}
+
+	pd->vconn_is_external = device_property_present(parent,
+					"qcom,vconn-uses-external-source");
+
+	/* optional DT override of the default sink capabilities */
+	pd->num_sink_caps = device_property_read_u32_array(parent,
+			"qcom,default-sink-caps", NULL, 0);
+	if (pd->num_sink_caps > 0) {
+		int i;
+		u32 sink_caps[14];
+
+		if (pd->num_sink_caps % 2 || pd->num_sink_caps > 14) {
+			ret = -EINVAL;
+			usbpd_err(&pd->dev, "default-sink-caps must be be specified as voltage/current, max 7 pairs\n");
+			goto put_psy;
+		}
+
+		ret = device_property_read_u32_array(parent,
+				"qcom,default-sink-caps", sink_caps,
+				pd->num_sink_caps);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error reading default-sink-caps\n");
+			goto put_psy;
+		}
+
+		pd->num_sink_caps /= 2;
+
+		for (i = 0; i < pd->num_sink_caps; i++) {
+			/* DT values are mV/mA; PDO units are 50mV/10mA */
+			int v = sink_caps[i * 2] / 50;
+			int c = sink_caps[i * 2 + 1] / 10;
+
+			pd->sink_caps[i] =
+				PD_SNK_PDO_FIXED(0, 0, 0, 0, 0, v, c);
+		}
+
+		/* First PDO includes additional capabilities */
+		pd->sink_caps[0] |= PD_SNK_PDO_FIXED(1, 0, 0, 1, 1, 0, 0);
+	} else {
+		memcpy(pd->sink_caps, default_snk_caps,
+				sizeof(default_snk_caps));
+		pd->num_sink_caps = ARRAY_SIZE(default_snk_caps);
+	}
+
+	/*
+	 * Register the Android dual-role class (/sys/class/dual_role_usb/).
+	 * The first instance should be named "otg_default" as that's what
+	 * Android expects.
+	 * Note this is different than the /sys/class/usbpd/ created above.
+	 */
+	pd->dr_desc.name = (num_pd_instances == 1) ?
+				"otg_default" : dev_name(&pd->dev);
+	pd->dr_desc.supported_modes = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP;
+	pd->dr_desc.properties = usbpd_dr_properties;
+	pd->dr_desc.num_properties = ARRAY_SIZE(usbpd_dr_properties);
+	pd->dr_desc.get_property = usbpd_dr_get_property;
+	pd->dr_desc.set_property = usbpd_dr_set_property;
+	pd->dr_desc.property_is_writeable = usbpd_dr_prop_writeable;
+
+	pd->dual_role = devm_dual_role_instance_register(&pd->dev,
+			&pd->dr_desc);
+	if (IS_ERR(pd->dual_role)) {
+		usbpd_err(&pd->dev, "could not register dual_role instance\n");
+		/*
+		 * Fix: propagate the real error. Previously ret was left
+		 * at 0 here, making this function return ERR_PTR(0) ==
+		 * NULL, which callers using IS_ERR() would dereference.
+		 */
+		ret = PTR_ERR(pd->dual_role);
+		goto put_psy;
+	} else {
+		pd->dual_role->drv_data = pd;
+	}
+
+	pd->current_pr = PR_NONE;
+	pd->current_dr = DR_NONE;
+	list_add_tail(&pd->instance, &_usbpd);
+
+	spin_lock_init(&pd->rx_lock);
+	INIT_LIST_HEAD(&pd->rx_q);
+	INIT_LIST_HEAD(&pd->svid_handlers);
+	init_completion(&pd->is_ready);
+	init_completion(&pd->tx_chunk_request);
+
+	pd->psy_nb.notifier_call = psy_changed;
+	ret = power_supply_reg_notifier(&pd->psy_nb);
+	if (ret)
+		goto del_inst;
+
+	/* force read initial power_supply values */
+	psy_changed(&pd->psy_nb, PSY_EVENT_PROP_CHANGED, pd->usb_psy);
+
+	return pd;
+
+del_inst:
+	list_del(&pd->instance);
+put_psy:
+	power_supply_put(pd->usb_psy);
+destroy_wq:
+	destroy_workqueue(pd->wq);
+del_pd:
+	device_del(&pd->dev);
+free_pd:
+	num_pd_instances--;
+	kfree(pd);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(usbpd_create);
+
+/**
+ * usbpd_destroy - Removes and frees a usbpd instance
+ * @pd: the instance to destroy
+ *
+ * Tears down in reverse order of usbpd_create(): notifier, psy
+ * reference, workqueue, class device. Safe to call with NULL.
+ */
+void usbpd_destroy(struct usbpd *pd)
+{
+	if (!pd)
+		return;
+
+	list_del(&pd->instance);
+	power_supply_unreg_notifier(&pd->psy_nb);
+	power_supply_put(pd->usb_psy);
+	destroy_workqueue(pd->wq);
+	device_del(&pd->dev);
+	kfree(pd);
+}
+EXPORT_SYMBOL(usbpd_destroy);
+
+/* Module init: set up the IPC log context and register the usbpd class */
+static int __init usbpd_init(void)
+{
+	usbpd_ipc_log = ipc_log_context_create(NUM_LOG_PAGES, "usb_pd", 0);
+	return class_register(&usbpd_class);
+}
+module_init(usbpd_init);
+
+/* Module exit: unregister the class (instances are destroyed by owners) */
+static void __exit usbpd_exit(void)
+{
+	class_unregister(&usbpd_class);
+}
+module_exit(usbpd_exit);
+
+MODULE_DESCRIPTION("USB Power Delivery Policy Engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
new file mode 100644
index 000000000000..85acc9f943e0
--- /dev/null
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -0,0 +1,914 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "usbpd.h"
+
+#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
+#define USB_PDPHY_MSG_HDR_LEN 2
+
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG 0x40
+#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL 0x46
+#define CONTROL_ENABLE BIT(0)
+
+#define USB_PDPHY_RX_STATUS 0x4A
+#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER 0x4C
+#define FRAME_FILTER_EN_HARD_RESET BIT(5)
+#define FRAME_FILTER_EN_SOP BIT(0)
+
+#define USB_PDPHY_TX_SIZE 0x42
+#define TX_SIZE_MASK 0xF
+
+#define USB_PDPHY_TX_CONTROL 0x44
+#define TX_CONTROL_RETRY_COUNT (BIT(6) | BIT(5))
+#define TX_CONTROL_FRAME_TYPE (BIT(4) | BIT(3) | BIT(2))
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL BIT(1)
+#define TX_CONTROL_SEND_MSG BIT(0)
+
+#define USB_PDPHY_RX_SIZE 0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE 0x4B
+#define RX_BUFFER_TOKEN BIT(0)
+
+#define USB_PDPHY_BIST_MODE 0x4E
+#define BIST_MODE_MASK 0xF
+#define BIST_ENABLE BIT(7)
+#define PD_MSG_BIST 0x3
+#define PD_BIST_TEST_DATA_MODE 0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR 0x60
+#define USB_PDPHY_TX_BUFFER_DATA 0x62
+
+#define USB_PDPHY_RX_BUFFER 0x80
+
+#define USB_PDPHY_SEC_ACCESS 0xD0
+#define USB_PDPHY_TRIM_3 0xF3
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
+/*
+ * Per-device state for the QPNP PD PHY. A single instance is kept in the
+ * file-scope __pdphy pointer and used by the exported pd_phy_* API.
+ */
+struct usb_pdphy {
+	struct device		*dev;
+	struct regmap		*regmap;
+
+	u16			base;		/* register block base offset */
+	struct regulator	*vdd_pdphy;
+
+	/* irqs */
+	int			sig_tx_irq;
+	int			sig_rx_irq;
+	int			msg_tx_irq;
+	int			msg_rx_irq;
+	int			msg_tx_failed_irq;
+	int			msg_tx_discarded_irq;
+	int			msg_rx_discarded_irq;
+
+	/* callbacks installed by pd_phy_open() */
+	void (*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+	void (*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
+			  u8 *buf, size_t len);
+	void (*shutdown_cb)(struct usbpd *pd);
+
+	/* write waitq */
+	wait_queue_head_t	tx_waitq;
+
+	bool			is_opened;
+	/* -EINPROGRESS while a TX is pending; result code once irq fires */
+	int			tx_status;
+	u8			frame_filter_val;
+	bool			in_test_data_mode;	/* BIST test-data mode */
+
+	enum data_role		data_role;
+	enum power_role		power_role;
+
+	struct usbpd		*usbpd;
+
+	/* debug */
+	struct dentry		*debug_root;
+	unsigned int		tx_bytes; /* hdr + data */
+	unsigned int		rx_bytes; /* hdr + data */
+	unsigned int		sig_tx_cnt;
+	unsigned int		sig_rx_cnt;
+	unsigned int		msg_tx_cnt;
+	unsigned int		msg_rx_cnt;
+	unsigned int		msg_tx_failed_cnt;
+	unsigned int		msg_tx_discarded_cnt;
+	unsigned int		msg_rx_discarded_cnt;
+};
+
+static struct usb_pdphy *__pdphy;
+
+/* debugfs "status" show: dump current PHY state and event counters. */
+static int pdphy_dbg_status(struct seq_file *s, void *p)
+{
+	struct usb_pdphy *pdphy = s->private;
+
+	seq_printf(s,
+		"PD Phy driver status\n"
+		"==================================================\n");
+	seq_printf(s, "opened:         %10d\n", pdphy->is_opened);
+	seq_printf(s, "tx status:      %10d\n", pdphy->tx_status);
+	seq_printf(s, "tx bytes:       %10u\n", pdphy->tx_bytes);
+	seq_printf(s, "rx bytes:       %10u\n", pdphy->rx_bytes);
+	seq_printf(s, "data role:      %10u\n", pdphy->data_role);
+	seq_printf(s, "power role:     %10u\n", pdphy->power_role);
+	seq_printf(s, "frame filter:   %10u\n", pdphy->frame_filter_val);
+	seq_printf(s, "sig tx cnt:     %10u\n", pdphy->sig_tx_cnt);
+	seq_printf(s, "sig rx cnt:     %10u\n", pdphy->sig_rx_cnt);
+	seq_printf(s, "msg tx cnt:     %10u\n", pdphy->msg_tx_cnt);
+	seq_printf(s, "msg rx cnt:     %10u\n", pdphy->msg_rx_cnt);
+	seq_printf(s, "msg tx failed cnt:    %10u\n",
+			pdphy->msg_tx_failed_cnt);
+	seq_printf(s, "msg tx discarded cnt: %10u\n",
+			pdphy->msg_tx_discarded_cnt);
+	seq_printf(s, "msg rx discarded cnt: %10u\n",
+			pdphy->msg_rx_discarded_cnt);
+
+	return 0;
+}
+
+/* debugfs open: bind pdphy_dbg_status() to the file's private pdphy. */
+static int pdphy_dbg_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pdphy_dbg_status, inode->i_private);
+}
+
+static const struct file_operations status_ops = {
+ .owner = THIS_MODULE,
+ .open = pdphy_dbg_status_open,
+ .llseek = seq_lseek,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/*
+ * Create the "usb-pdphy" debugfs directory with a "status" file.
+ * Failures are non-fatal: a warning is logged and the partially-created
+ * directory is removed.
+ */
+static void pdphy_create_debugfs_entries(struct usb_pdphy *pdphy)
+{
+	struct dentry *ent;
+
+	pdphy->debug_root = debugfs_create_dir("usb-pdphy", NULL);
+	if (!pdphy->debug_root) {
+		dev_warn(pdphy->dev, "Couldn't create debug dir\n");
+		return;
+	}
+
+	ent = debugfs_create_file("status", S_IRUSR, pdphy->debug_root, pdphy,
+			&status_ops);
+	if (!ent) {
+		dev_warn(pdphy->dev, "Couldn't create status file\n");
+		debugfs_remove(pdphy->debug_root);
+	}
+}
+
+/*
+ * Turn the vdd-pdphy supply on or off.
+ *
+ * Enable path: set HPM load, set voltage, then enable; each failure
+ * unwinds through the labels below in reverse order.  Disable path
+ * (@on == false) jumps straight to disable_pdphy_vdd and deliberately
+ * falls through every label so the voltage and load are reset to 0 too.
+ */
+static int pdphy_enable_power(struct usb_pdphy *pdphy, bool on)
+{
+	int ret = 0;
+
+	dev_dbg(pdphy->dev, "%s turn %s regulator.\n", __func__,
+			on ? "on" : "off");
+
+	if (!on)
+		goto disable_pdphy_vdd;
+
+	ret = regulator_set_load(pdphy->vdd_pdphy, VDD_PDPHY_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to set HPM of vdd_pdphy:%d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(pdphy->vdd_pdphy, VDD_PDPHY_VOL_MIN,
+						VDD_PDPHY_VOL_MAX);
+	if (ret) {
+		dev_err(pdphy->dev,
+				"set voltage failed for vdd_pdphy:%d\n", ret);
+		goto put_pdphy_vdd_lpm;
+	}
+
+	ret = regulator_enable(pdphy->vdd_pdphy);
+	if (ret) {
+		dev_err(pdphy->dev, "Unable to enable vdd_pdphy:%d\n", ret);
+		goto unset_pdphy_vdd;
+	}
+
+	dev_dbg(pdphy->dev, "%s: PD PHY regulator turned ON.\n", __func__);
+	return ret;
+
+disable_pdphy_vdd:
+	ret = regulator_disable(pdphy->vdd_pdphy);
+	if (ret)
+		dev_err(pdphy->dev, "Unable to disable vdd_pdphy:%d\n", ret);
+
+unset_pdphy_vdd:
+	ret = regulator_set_voltage(pdphy->vdd_pdphy, 0, VDD_PDPHY_VOL_MAX);
+	if (ret)
+		dev_err(pdphy->dev,
+			"Unable to set (0) voltage for vdd_pdphy:%d\n", ret);
+
+put_pdphy_vdd_lpm:
+	/* NOTE: ret reflects only the last call on the unwind path */
+	ret = regulator_set_load(pdphy->vdd_pdphy, 0);
+	if (ret < 0)
+		dev_err(pdphy->dev, "Unable to set (0) HPM of vdd_pdphy\n");
+
+	return ret;
+}
+
+/*
+ * Enable or disable all PD PHY interrupts; the two RX irqs are also
+ * wake-enabled so messages/signals can wake the system.  The msg-rx irq
+ * is skipped while in BIST test-data mode, where pdphy_msg_rx_irq() has
+ * already disabled it with disable_irq_nosync().
+ */
+void pdphy_enable_irq(struct usb_pdphy *pdphy, bool enable)
+{
+	if (enable) {
+		enable_irq(pdphy->sig_tx_irq);
+		enable_irq(pdphy->sig_rx_irq);
+		enable_irq_wake(pdphy->sig_rx_irq);
+		enable_irq(pdphy->msg_tx_irq);
+		if (!pdphy->in_test_data_mode) {
+			enable_irq(pdphy->msg_rx_irq);
+			enable_irq_wake(pdphy->msg_rx_irq);
+		}
+		enable_irq(pdphy->msg_tx_failed_irq);
+		enable_irq(pdphy->msg_tx_discarded_irq);
+		enable_irq(pdphy->msg_rx_discarded_irq);
+		return;
+	}
+
+	disable_irq(pdphy->sig_tx_irq);
+	disable_irq(pdphy->sig_rx_irq);
+	disable_irq_wake(pdphy->sig_rx_irq);
+	disable_irq(pdphy->msg_tx_irq);
+	if (!pdphy->in_test_data_mode) {
+		disable_irq(pdphy->msg_rx_irq);
+		disable_irq_wake(pdphy->msg_rx_irq);
+	}
+	disable_irq(pdphy->msg_tx_failed_irq);
+	disable_irq(pdphy->msg_tx_discarded_irq);
+	disable_irq(pdphy->msg_rx_discarded_irq);
+}
+
+/* Read @count bytes into @val starting at @addr (offset from the PHY base). */
+static int pdphy_reg_read(struct usb_pdphy *pdphy, u8 *val, u16 addr, int count)
+{
+	int rc = regmap_bulk_read(pdphy->regmap, pdphy->base + addr, val,
+			count);
+
+	if (!rc)
+		return 0;
+
+	dev_err(pdphy->dev, "read failed: addr=0x%04x, ret=%d\n",
+			pdphy->base + addr, rc);
+	return rc;
+}
+
+/* Write a block of @val_cnt bytes to the device starting at @addr. */
+static int pdphy_bulk_reg_write(struct usb_pdphy *pdphy, u16 addr,
+		const void *val, u8 val_cnt)
+{
+	int rc = regmap_bulk_write(pdphy->regmap, pdphy->base + addr,
+			val, val_cnt);
+
+	if (!rc)
+		return 0;
+
+	dev_err(pdphy->dev, "bulk write failed: addr=0x%04x, ret=%d\n",
+			pdphy->base + addr, rc);
+	return rc;
+}
+
+/* Writes the single byte @val to register @addr (offset from the PHY base) */
+static inline int pdphy_reg_write(struct usb_pdphy *pdphy, u16 addr, u8 val)
+{
+	return pdphy_bulk_reg_write(pdphy, addr, &val, 1);
+}
+
+/* Read-modify-write register @addr, changing only the bits in @mask. */
+static int pdphy_masked_write(struct usb_pdphy *pdphy, u16 addr,
+		u8 mask, u8 val)
+{
+	int rc = regmap_update_bits(pdphy->regmap, pdphy->base + addr, mask,
+			val);
+
+	if (!rc)
+		return 0;
+
+	dev_err(pdphy->dev, "write failed: addr=0x%04x, ret=%d\n",
+			pdphy->base + addr, rc);
+	return rc;
+}
+
+/*
+ * pd_phy_update_roles() - program the data/power role bits in MSG_CONFIG
+ * so the PHY generates GoodCRC with the correct port roles.
+ *
+ * Returns -ENODEV if no PHY has been probed (the other exported pd_phy_*
+ * entry points guard __pdphy the same way).
+ */
+int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+		(MSG_CONFIG_PORT_DATA_ROLE | MSG_CONFIG_PORT_POWER_ROLE),
+		((dr == DR_DFP ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+		 (pr == PR_SRC ? MSG_CONFIG_PORT_POWER_ROLE : 0)));
+}
+EXPORT_SYMBOL(pd_phy_update_roles);
+
+/**
+ * pd_phy_open() - Power up and configure the PD PHY for RX/TX
+ * @params: callbacks, initial roles and frame filter value
+ *
+ * Returns 0 on success, -ENODEV if no PHY is registered, -EBUSY if it is
+ * already open, or a regulator/register error code.  On any failure after
+ * the supply was enabled, the supply is turned back off again (previously
+ * such paths returned with vdd-pdphy left enabled in HPM).
+ */
+int pd_phy_open(struct pd_phy_params *params)
+{
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	pdphy->signal_cb = params->signal_cb;
+	pdphy->msg_rx_cb = params->msg_rx_cb;
+	pdphy->shutdown_cb = params->shutdown_cb;
+	pdphy->data_role = params->data_role;
+	pdphy->power_role = params->power_role;
+	pdphy->frame_filter_val = params->frame_filter_val;
+
+	dev_dbg(pdphy->dev, "%s: DR %x PR %x frame filter val %x\n", __func__,
+		pdphy->data_role, pdphy->power_role, pdphy->frame_filter_val);
+
+	ret = pdphy_enable_power(pdphy, true);
+	if (ret)
+		return ret;
+
+	/* update data and power role to be used in GoodCRC generation */
+	ret = pd_phy_update_roles(pdphy->data_role, pdphy->power_role);
+	if (ret)
+		goto power_off;
+
+	/* PD 2.0 phy */
+	ret = pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+			MSG_CONFIG_SPEC_REV_MASK, USBPD_REV_20);
+	if (ret)
+		goto power_off;
+
+	/* disable, then re-enable the PHY block */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+	if (ret)
+		goto power_off;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, CONTROL_ENABLE);
+	if (ret)
+		goto power_off;
+
+	/* update frame filter */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER,
+			pdphy->frame_filter_val);
+	if (ret)
+		goto power_off;
+
+	/* initialize Rx buffer ownership to PDPHY HW */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+	if (ret)
+		goto power_off;
+
+	pdphy->is_opened = true;
+	pdphy_enable_irq(pdphy, true);
+
+	return 0;
+
+power_off:
+	pdphy_enable_power(pdphy, false);
+	return ret;
+}
+EXPORT_SYMBOL(pd_phy_open);
+
+/**
+ * pd_phy_signal() - Transmit a Hard Reset or Cable Reset signal
+ * @sig: HARD_RESET_SIG or CABLE_RESET_SIG
+ *
+ * Blocks until the sig-tx irq reports completion or tHardResetComplete
+ * expires.  After a Hard Reset the frame filter is cleared; it is
+ * reprogrammed by the next pd_phy_open().
+ */
+int pd_phy_signal(enum pd_sig_type sig)
+{
+	u8 val;
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	/* check before any pdphy dereference (was: dev_dbg on NULL pdphy) */
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: type %d\n", __func__, sig);
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	usleep_range(2, 3);
+
+	val = (sig == CABLE_RESET_SIG ? TX_CONTROL_FRAME_TYPE_CABLE_RESET : 0)
+		| TX_CONTROL_SEND_SIGNAL;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	/* tx_status is updated by pdphy_sig_tx_irq_thread() */
+	ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS,
+		ms_to_ktime(HARD_RESET_COMPLETE_TIME));
+	if (ret) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+
+	if (pdphy->tx_status)
+		return pdphy->tx_status;
+
+	if (sig == HARD_RESET_SIG)
+		/* Frame filter is reconfigured in pd_phy_open() */
+		return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(pd_phy_signal);
+
+/**
+ * pd_phy_write() - Transmit a PD message
+ * @hdr: 16-bit PD message header
+ * @data: data objects, may be NULL when @data_len is 0
+ * @data_len: length of @data in bytes, max USB_PDPHY_MAX_DATA_OBJ_LEN
+ * @sop: SOP* framing type
+ *
+ * Writes header and data into the TX buffer, kicks the transmit and
+ * blocks until the TX-result irq fires or tReceiverResponse expires.
+ * Returns 0 on success or a negative error code.
+ */
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+	u8 val;
+	int ret;
+	size_t total_len = data_len + USB_PDPHY_MSG_HDR_LEN;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	/* check before any pdphy dereference (was: dev_dbg on NULL pdphy) */
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: hdr %x frame sop_type %d\n",
+			__func__, hdr, sop);
+
+	if (data && data_len)
+		print_hex_dump_debug("tx data obj:", DUMP_PREFIX_NONE, 32, 4,
+				data, data_len, false);
+
+	if (data_len > USB_PDPHY_MAX_DATA_OBJ_LEN) {
+		dev_err(pdphy->dev, "%s: invalid data object len %zu\n",
+			__func__, data_len);
+		return -EINVAL;
+	}
+
+	/* a non-zero RX_ACKNOWLEDGE means an unread RX message is pending */
+	ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
+	if (ret || val) {
+		dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
+		return -EBUSY;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	/* write 2 byte SOP message header */
+	ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_HDR, (u8 *)&hdr,
+			USB_PDPHY_MSG_HDR_LEN);
+	if (ret)
+		return ret;
+
+	if (data && data_len) {
+		/* write data objects of SOP message */
+		ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_DATA,
+				data, data_len);
+		if (ret)
+			return ret;
+	}
+
+	/* TX_SIZE holds length minus one */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_SIZE, total_len - 1);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	usleep_range(2, 3);
+
+	val = TX_CONTROL_RETRY_COUNT | (sop << 2) | TX_CONTROL_SEND_MSG;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	/* tx_status is updated by pdphy_msg_tx_irq() */
+	ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS,
+		ms_to_ktime(RECEIVER_RESPONSE_TIME));
+	if (ret) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret;
+	}
+
+	if (hdr && !pdphy->tx_status)
+		pdphy->tx_bytes += data_len + USB_PDPHY_MSG_HDR_LEN;
+
+	return pdphy->tx_status ? pdphy->tx_status : 0;
+}
+EXPORT_SYMBOL(pd_phy_write);
+
+/*
+ * pd_phy_close() - Disable the PHY and power it down.
+ *
+ * Aborts any in-flight TX by forcing tx_status to -ESHUTDOWN and waking
+ * all waiters, clears BIST mode, and disables the PHY block.  If either
+ * of the final register writes fails the function returns early and the
+ * vdd-pdphy supply is left enabled.
+ */
+void pd_phy_close(void)
+{
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return;
+	}
+
+	if (!pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: not opened\n", __func__);
+		return;
+	}
+
+	pdphy->is_opened = false;
+	pdphy_enable_irq(pdphy, false);
+
+	pdphy->tx_status = -ESHUTDOWN;
+
+	wake_up_all(&pdphy->tx_waitq);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+	pdphy->in_test_data_mode = false;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+	if (ret)
+		return;
+
+	pdphy_enable_power(pdphy, false);
+}
+EXPORT_SYMBOL(pd_phy_close);
+
+/*
+ * Shared hard-irq handler for msg-tx, msg-tx-discarded and msg-tx-failed.
+ * Maps the firing irq to a tx_status code (0 / -EBUSY / -EFAULT) and
+ * wakes the waiter in pd_phy_write().  If tx_status is no longer
+ * -EINPROGRESS the TX was already aborted (e.g. by a received signal)
+ * and the event is ignored.
+ */
+static irqreturn_t pdphy_msg_tx_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	/* TX already aborted by received signal */
+	if (pdphy->tx_status != -EINPROGRESS)
+		return IRQ_HANDLED;
+
+	if (irq == pdphy->msg_tx_irq) {
+		pdphy->msg_tx_cnt++;
+		pdphy->tx_status = 0;
+	} else if (irq == pdphy->msg_tx_discarded_irq) {
+		pdphy->msg_tx_discarded_cnt++;
+		pdphy->tx_status = -EBUSY;
+	} else if (irq == pdphy->msg_tx_failed_irq) {
+		pdphy->msg_tx_failed_cnt++;
+		pdphy->tx_status = -EFAULT;
+	} else {
+		dev_err(pdphy->dev, "spurious irq #%d received\n", irq);
+		return IRQ_NONE;
+	}
+
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/* msg-rx-discarded irq: only counted for the debugfs status file. */
+static irqreturn_t pdphy_msg_rx_discarded_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_discarded_cnt++;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Threaded sig-rx handler: only Hard Reset is handled.  Clears the frame
+ * filter (restored on the next pd_phy_open()), notifies the policy engine
+ * via signal_cb, and aborts any in-flight TX with -EBUSY.
+ */
+static irqreturn_t pdphy_sig_rx_irq_thread(int irq, void *data)
+{
+	u8 rx_status, frame_type;
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->sig_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type != HARD_RESET_SIG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	/* Frame filter is reconfigured in pd_phy_open() */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	if (pdphy->signal_cb)
+		pdphy->signal_cb(pdphy->usbpd, frame_type);
+
+	/* abort any TX that was racing with the reset */
+	if (pdphy->tx_status == -EINPROGRESS) {
+		pdphy->tx_status = -EBUSY;
+		wake_up(&pdphy->tx_waitq);
+	}
+done:
+	return IRQ_HANDLED;
+}
+
+/*
+ * Threaded sig-tx handler: a reset signal finished transmitting.
+ * Clears BIST_MODE (in case of exit from BIST Carrier Mode 2), records
+ * success and wakes the waiter in pd_phy_signal().
+ */
+static irqreturn_t pdphy_sig_tx_irq_thread(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	/* in case of exit from BIST Carrier Mode 2, clear BIST_MODE */
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	pdphy->sig_tx_cnt++;
+	pdphy->tx_status = 0;
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Enter the requested BIST mode: clear BIST_MODE, wait briefly, then
+ * program the mode bits together with BIST_ENABLE.  Only called from
+ * pdphy_msg_rx_irq() after probe, so __pdphy is valid here.
+ */
+static int pd_phy_bist_mode(u8 bist_mode)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	dev_dbg(pdphy->dev, "%s: enter BIST mode %d\n", __func__, bist_mode);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	udelay(5);
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_BIST_MODE,
+			BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
+}
+
+/*
+ * msg-rx hard-irq handler: read a received SOP message out of the RX
+ * buffer and hand it to the policy engine via msg_rx_cb.
+ *
+ * RX_SIZE appears to hold (message length - 1): size+1 bytes are read
+ * and size is limited to 31 to fit the 32-byte local buffer.  BIST
+ * request messages are intercepted here; entering test-data mode
+ * disables this irq until pd_phy_close() resets in_test_data_mode.
+ */
+static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
+{
+	u8 size, rx_status, frame_type;
+	u8 buf[32];
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &size, USB_PDPHY_RX_SIZE, 1);
+	if (ret)
+		goto done;
+
+	if (!size || size > 31) {
+		dev_err(pdphy->dev, "%s: invalid size %d\n", __func__, size);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type != SOP_MSG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, buf, USB_PDPHY_RX_BUFFER, size + 1);
+	if (ret)
+		goto done;
+
+	/* ack to change ownership of rx buffer back to PDPHY RX HW */
+	pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+	if (((buf[0] & 0xf) == PD_MSG_BIST) && size >= 5) { /* BIST */
+		u8 mode = buf[5] >> 4; /* [31:28] of 1st data object */
+
+		pd_phy_bist_mode(mode);
+		pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+		if (mode == PD_BIST_TEST_DATA_MODE) {
+			pdphy->in_test_data_mode = true;
+			disable_irq_nosync(irq);
+		}
+		goto done;
+	}
+
+	if (pdphy->msg_rx_cb)
+		pdphy->msg_rx_cb(pdphy->usbpd, frame_type, buf, size + 1);
+
+	print_hex_dump_debug("rx msg:", DUMP_PREFIX_NONE, 32, 4, buf, size + 1,
+		false);
+	pdphy->rx_bytes += size + 1;
+done:
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up the named irq on @node, mark it IRQ_NOAUTOEN (irqs are enabled
+ * later by pdphy_enable_irq()) and request it with devm.
+ *
+ * Returns 0 on success or -ENXIO on failure.  Previously the error paths
+ * only set a local ret without returning, so a failed lookup fell through
+ * to irq_set_status_flags() with a negative irq number and the function
+ * always returned 0.
+ */
+static int pdphy_request_irq(struct usb_pdphy *pdphy,
+				struct device_node *node,
+				int *irq_num, const char *irq_name,
+				irqreturn_t (irq_handler)(int irq, void *data),
+				irqreturn_t (thread_fn)(int irq, void *data),
+				int flags)
+{
+	int ret;
+
+	*irq_num = of_irq_get_byname(node, irq_name);
+	if (*irq_num < 0) {
+		dev_err(pdphy->dev, "Unable to get %s irq\n", irq_name);
+		return -ENXIO;
+	}
+
+	irq_set_status_flags(*irq_num, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(pdphy->dev, *irq_num, irq_handler,
+			thread_fn, flags, irq_name, pdphy);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to request %s irq: %d\n",
+				irq_name, ret);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Platform probe: map the parent regmap, read the "reg" base, get the
+ * vdd-pdphy supply, request all seven irqs (left disabled until
+ * pd_phy_open()), apply the SEC_ACCESS/TRIM_3 sequence, then create the
+ * usbpd policy-engine instance and the debugfs entries.
+ */
+static int pdphy_probe(struct platform_device *pdev)
+{
+	int ret;
+	unsigned int base;
+	struct usb_pdphy *pdphy;
+
+	pdphy = devm_kzalloc(&pdev->dev, sizeof(*pdphy), GFP_KERNEL);
+	if (!pdphy)
+		return -ENOMEM;
+
+	pdphy->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pdphy->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	dev_set_drvdata(&pdev->dev, pdphy);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get reg base address ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	pdphy->base = base;
+	pdphy->dev = &pdev->dev;
+
+	init_waitqueue_head(&pdphy->tx_waitq);
+
+	pdphy->vdd_pdphy = devm_regulator_get(&pdev->dev, "vdd-pdphy");
+	if (IS_ERR(pdphy->vdd_pdphy)) {
+		dev_err(&pdev->dev, "unable to get vdd-pdphy\n");
+		return PTR_ERR(pdphy->vdd_pdphy);
+	}
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_tx_irq, "sig-tx", NULL,
+		pdphy_sig_tx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_rx_irq, "sig-rx", NULL,
+		pdphy_sig_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_irq, "msg-tx", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_failed_irq, "msg-tx-failed", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_discarded_irq, "msg-tx-discarded",
+		pdphy_msg_tx_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_discarded_irq, "msg-rx-discarded",
+		pdphy_msg_rx_discarded_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	/* NOTE(review): SEC_ACCESS unlock + TRIM_3 looks like a trim
+	 * workaround sequence — confirm against the hardware programming
+	 * guide before changing. */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_SEC_ACCESS, 0xA5);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TRIM_3, 0x2);
+	if (ret)
+		return ret;
+
+	/* usbpd_create() could call back to us, so have __pdphy ready */
+	__pdphy = pdphy;
+
+	pdphy->usbpd = usbpd_create(&pdev->dev);
+	if (IS_ERR(pdphy->usbpd)) {
+		dev_err(&pdev->dev, "usbpd_create failed: %ld\n",
+			PTR_ERR(pdphy->usbpd));
+		__pdphy = NULL;
+		return PTR_ERR(pdphy->usbpd);
+	}
+
+	pdphy_create_debugfs_entries(pdphy);
+
+	return 0;
+}
+
+/*
+ * Platform remove: tear down debugfs and the policy engine, close the
+ * PHY if still open, then clear the global instance pointer.
+ */
+static int pdphy_remove(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(pdphy->debug_root);
+	usbpd_destroy(pdphy->usbpd);
+
+	if (pdphy->is_opened)
+		pd_phy_close();
+
+	__pdphy = NULL;
+
+	return 0;
+}
+
+/* System shutdown hook: delegate to the policy engine's shutdown_cb. */
+static void pdphy_shutdown(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	/* let protocol engine shutdown the pdphy synchronously */
+	if (pdphy->shutdown_cb)
+		pdphy->shutdown_cb(pdphy->usbpd);
+}
+
+static const struct of_device_id pdphy_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-pdphy",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pdphy_match_table);
+
+static struct platform_driver pdphy_driver = {
+ .driver = {
+ .name = "qpnp-pdphy",
+ .of_match_table = pdphy_match_table,
+ },
+ .probe = pdphy_probe,
+ .remove = pdphy_remove,
+ .shutdown = pdphy_shutdown,
+};
+
+module_platform_driver(pdphy_driver);
+
+MODULE_DESCRIPTION("QPNP PD PHY Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-pdphy");
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
new file mode 100644
index 000000000000..9b6053e940e9
--- /dev/null
+++ b/drivers/usb/pd/usbpd.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _USBPD_H
+#define _USBPD_H
+
+#include <linux/device.h>
+
+struct usbpd;
+
+#if IS_ENABLED(CONFIG_USB_PD_POLICY)
+struct usbpd *usbpd_create(struct device *parent);
+void usbpd_destroy(struct usbpd *pd);
+#else
+static inline struct usbpd *usbpd_create(struct device *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void usbpd_destroy(struct usbpd *pd) { }
+#endif
+
+/* Type-C data role */
+enum data_role {
+	DR_NONE = -1,
+	DR_UFP = 0,
+	DR_DFP = 1,
+};
+
+/* PD power role */
+enum power_role {
+	PR_NONE = -1,
+	PR_SINK = 0,
+	PR_SRC = 1,
+};
+
+/* Reset signal types sent/received by pd_phy_signal() */
+enum pd_sig_type {
+	HARD_RESET_SIG = 0,
+	CABLE_RESET_SIG,
+};
+
+/* Start-of-packet framing types */
+enum pd_sop_type {
+	SOP_MSG = 0,
+	SOPI_MSG,
+	SOPII_MSG,
+};
+
+/* Spec revision encoding as written to the MSG_CONFIG register */
+enum pd_spec_rev {
+	USBPD_REV_20 = 1,
+	USBPD_REV_30 = 2,
+};
+
+/* enable msg and signal to be received by phy */
+/* NOTE: qpnp-pdphy.c defines identical macros; keep the two in sync */
+#define FRAME_FILTER_EN_SOP		BIT(0)
+#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
+
+/* Parameters and callbacks supplied by the policy engine to pd_phy_open() */
+struct pd_phy_params {
+	void		(*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+	void		(*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
+					u8 *buf, size_t len);
+	void		(*shutdown_cb)(struct usbpd *pd);
+	enum data_role	data_role;
+	enum power_role power_role;
+	u8		frame_filter_val;
+};
+
+#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
+int pd_phy_open(struct pd_phy_params *params);
+int pd_phy_signal(enum pd_sig_type sig);
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+ enum pd_sop_type sop);
+int pd_phy_update_roles(enum data_role dr, enum power_role pr);
+void pd_phy_close(void);
+#else
+static inline int pd_phy_open(struct pd_phy_params *params)
+{
+ return -ENODEV;
+}
+
+static inline int pd_phy_signal(enum pd_sig_type type)
+{
+ return -ENODEV;
+}
+
+static inline int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+ enum pd_sop_type sop)
+{
+ return -ENODEV;
+}
+
+static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+ return -ENODEV;
+}
+
+static inline void pd_phy_close(void)
+{
+}
+#endif
+#endif /* _USBPD_H */
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index bdb9578cc296..e358fc8086f7 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -174,6 +174,46 @@ config USB_QCOM_8X16_PHY
To compile this driver as a module, choose M here: the
module will be called phy-qcom-8x16-usb.
+config USB_MSM_HSPHY
+ tristate "MSM HSUSB PHY Driver"
+ depends on ARCH_QCOM
+ select USB_PHY
+ help
+ Enable this to support the High-speed USB transceiver on MSM chips.
+ This driver supports the PHY which uses the QSCRATCH-based register
+ set for its control sequences, normally paired with newer DWC3-based
+ SuperSpeed controllers.
+
+config USB_MSM_SSPHY
+ tristate "MSM SSUSB PHY Driver"
+ depends on ARCH_QCOM
+ select USB_PHY
+ help
+ Enable this to support the SuperSpeed USB transceiver on MSM chips.
+ This driver supports the PHY which uses the QSCRATCH-based register
+ set for its control sequences, normally paired with newer DWC3-based
+ SuperSpeed controllers.
+
+config USB_MSM_SSPHY_QMP
+	tristate "MSM SSUSB QMP PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the QMP SuperSpeed USB transceiver on MSM
+	  chips. This driver supports the QMP PHY, normally paired with newer
+	  DWC3-based SuperSpeed controllers.
+
+config MSM_QUSB_PHY
+ tristate "MSM QUSB2 PHY Driver"
+ depends on ARCH_QCOM
+ select USB_PHY
+ help
+ Enable this to support the QUSB2 PHY on MSM chips. This driver supports
+ the high-speed PHY which is usually paired with either the ChipIdea or
+ Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the
+ PHY with a dedicated register I/O memory region.
+
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index f7543f3b9943..170f3ce3e881 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -23,6 +23,10 @@ obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o
+obj-$(CONFIG_USB_MSM_HSPHY) += phy-msm-hsusb.o
+obj-$(CONFIG_USB_MSM_SSPHY) += phy-msm-ssusb.o
+obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o
+obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o phy-msm-qusb-v2.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o
diff --git a/drivers/usb/phy/phy-msm-hsusb.c b/drivers/usb/phy/phy-msm-hsusb.c
new file mode 100644
index 000000000000..56832adf8716
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-hsusb.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+
+static int override_phy_init;
+module_param(override_phy_init, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(override_phy_init, "Override HSPHY Init Seq");
+
+
+#define PORT_OFFSET(i) ((i == 0) ? 0x0 : ((i == 1) ? 0x6c : 0x88))
+
+/* QSCRATCH register settings differ based on MSM core ver */
+#define MSM_CORE_VER_120 0x10020061
+#define MSM_CORE_VER_160 0x10060000
+#define MSM_CORE_VER_161 0x10060001
+
+/* QSCRATCH register offsets */
+#define GENERAL_CFG_REG (0x08)
+#define HS_PHY_CTRL_REG(i) (0x10 + PORT_OFFSET(i))
+#define PARAMETER_OVERRIDE_X_REG(i) (0x14 + PORT_OFFSET(i))
+#define ALT_INTERRUPT_EN_REG(i) (0x20 + PORT_OFFSET(i))
+#define HS_PHY_IRQ_STAT_REG(i) (0x24 + PORT_OFFSET(i))
+#define HS_PHY_CTRL_COMMON_REG (0xEC) /* ver >= MSM_CORE_VER_120 */
+
+/* GENERAL_CFG_REG bits */
+#define SEC_UTMI_FREE_CLK_GFM_SEL1 (0x80)
+
+/* HS_PHY_CTRL_REG bits */
+#define RETENABLEN BIT(1)
+#define FSEL_MASK (0x7 << 4)
+#define FSEL_DEFAULT (0x3 << 4)
+#define CLAMP_EN_N BIT(7)
+#define OTGSESSVLD_HV_CLAMP_EN_N BIT(8)
+#define ID_HV_CLAMP_EN_N BIT(9)
+#define COMMONONN BIT(11)
+#define OTGDISABLE0 BIT(12)
+#define VBUSVLDEXT0 BIT(13)
+#define VBUSVLDEXTSEL0 BIT(14)
+#define OTGSESSVLDHV_INTEN BIT(15)
+#define IDHV_INTEN BIT(16)
+#define DPSEHV_CLAMP_EN_N BIT(17)
+#define UTMI_OTG_VBUS_VALID BIT(20)
+#define USB2_UTMI_CLK_EN BIT(21)
+#define USB2_SUSPEND_N BIT(22)
+#define USB2_SUSPEND_N_SEL BIT(23)
+#define DMSEHV_CLAMP_EN_N BIT(24)
+#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(26)
+/* Following exist only when core_ver >= MSM_CORE_VER_120 */
+#define FREECLK_DIS_WHEN_SUSP BIT(27)
+#define SW_SESSVLD_SEL BIT(28)
+#define FREECLOCK_SEL BIT(29)
+
+/* HS_PHY_CTRL_COMMON_REG bits used when core_ver >= MSM_CORE_VER_120 */
+#define COMMON_PLLITUNE_1 BIT(18)
+#define COMMON_PLLBTUNE BIT(15)
+#define COMMON_CLKCORE BIT(14)
+#define COMMON_VBUSVLDEXTSEL0 BIT(12)
+#define COMMON_OTGDISABLE0 BIT(11)
+#define COMMON_OTGTUNE0_MASK (0x7 << 8)
+#define COMMON_OTGTUNE0_DEFAULT (0x4 << 8)
+#define COMMON_COMMONONN BIT(7)
+#define COMMON_FSEL (0x7 << 4)
+#define COMMON_RETENABLEN BIT(3)
+
+/* ALT_INTERRUPT_EN/HS_PHY_IRQ_STAT bits */
+#define ACAINTEN BIT(0)
+#define DMINTEN BIT(1)
+#define DCDINTEN BIT(1)
+#define DPINTEN BIT(3)
+#define CHGDETINTEN BIT(4)
+#define RIDFLOATNINTEN BIT(5)
+#define DPSEHV_INTEN BIT(6)
+#define DMSEHV_INTEN BIT(7)
+#define DPSEHV_HI_INTEN BIT(8)
+#define DPSEHV_LO_INTEN BIT(9)
+#define DMSEHV_HI_INTEN BIT(10)
+#define DMSEHV_LO_INTEN BIT(11)
+#define LINESTATE_INTEN BIT(12)
+#define DPDMHV_INT_MASK (0xFC0)
+#define ALT_INTERRUPT_MASK (0x1FFF)
+
+#define TCSR_USB30_CONTROL BIT(8)
+#define TCSR_HSPHY_ARES BIT(11)
+
+#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
+#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
+#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
+#define USB_HSPHY_3P3_VOL_FSHOST 3150000 /* uV */
+
+#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
+
+/*
+ * struct msm_hsphy - per-instance state for one QSCRATCH-controlled HS PHY.
+ *
+ * vdd_levels[] holds the {none, low, high} voltage corners read from the
+ * "qcom,vdd-voltage-level" DT property.
+ *
+ * NOTE(review): num_ports is declared int but probe fills it with
+ * of_property_read_u32(), which expects a u32 * — confirm and consider
+ * making the field u32.
+ */
+struct msm_hsphy {
+	struct usb_phy phy;
+	void __iomem *base;		/* QSCRATCH register space */
+	void __iomem *tcsr;		/* optional TCSR mux/reset register */
+	int hsphy_init_seq;		/* PARAMETER_OVERRIDE_X value from DT */
+	bool set_pllbtune;
+	u32 core_ver;			/* read from QSCRATCH offset 0 at init */
+
+	struct clk *sleep_clk;
+	bool sleep_clk_reset;		/* reset PHY via sleep clk, not TCSR */
+
+	struct regulator *vdd;
+	struct regulator *vdda33;
+	struct regulator *vdda18;
+	int vdd_levels[3]; /* none, low, high */
+	u32 lpm_flags;			/* PHY_RETENTIONED / PHY_PWR_COLLAPSED */
+	bool suspended;
+	bool vdda_force_on;		/* keep LDOs on across suspend (DT flag) */
+
+	/* Using external VBUS/ID notification */
+	bool ext_vbus_id;
+	int num_ports;
+	bool cable_connected;
+};
+
+/* global reference counter between all HSPHY instances */
+static atomic_t hsphy_active_count;
+
+/*
+ * msm_hsusb_config_vdd() - program the digital VDD voltage window.
+ * @high: nonzero selects vdd_levels[1] ("low" operating corner) as the
+ *        minimum, zero selects vdd_levels[0] ("none"); the maximum is
+ *        always vdd_levels[2].
+ *
+ * Returns 0 on success or the regulator_set_voltage() error code.
+ */
+static int msm_hsusb_config_vdd(struct msm_hsphy *phy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
+				phy->vdd_levels[2]);
+	if (ret) {
+		dev_err(phy->phy.dev, "unable to set voltage for hsusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(phy->phy.dev, "%s: min_vol:%d max_vol:%d\n", __func__,
+		phy->vdd_levels[min], phy->vdd_levels[2]);
+
+	return ret;
+}
+
+/*
+ * msm_hsusb_ldo_enable() - switch the 1.8 V and 3.3 V analog supplies
+ * between high-power mode (on != 0) and fully off (on == 0).
+ *
+ * The error labels double as the power-down sequence: for on == 0 the
+ * function jumps straight to disable_regulators and deliberately falls
+ * through every label below it, unwinding the enable, voltage request
+ * and load vote for both supplies in reverse order of bring-up. On a
+ * bring-up failure the same labels unwind only what was already done.
+ *
+ * Returns 0 on success or a negative regulator API error.
+ */
+static int msm_hsusb_ldo_enable(struct msm_hsphy *phy, int on)
+{
+	int rc = 0;
+
+	dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+
+	if (!on)
+		goto disable_regulators;
+
+
+	rc = regulator_set_load(phy->vdda18, USB_HSPHY_1P8_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "Unable to set HPM of vdda18\n");
+		return rc;
+	}
+
+	rc = regulator_set_voltage(phy->vdda18, USB_HSPHY_1P8_VOL_MIN,
+						USB_HSPHY_1P8_VOL_MAX);
+	if (rc) {
+		dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
+		goto put_vdda18_lpm;
+	}
+
+	rc = regulator_enable(phy->vdda18);
+	if (rc) {
+		dev_err(phy->phy.dev, "Unable to enable vdda18\n");
+		goto unset_vdda18;
+	}
+
+	rc = regulator_set_load(phy->vdda33, USB_HSPHY_3P3_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "Unable to set HPM of vdda33\n");
+		goto disable_vdda18;
+	}
+
+	rc = regulator_set_voltage(phy->vdda33, USB_HSPHY_3P3_VOL_MIN,
+						USB_HSPHY_3P3_VOL_MAX);
+	if (rc) {
+		dev_err(phy->phy.dev, "unable to set voltage for vdda33\n");
+		goto put_vdda33_lpm;
+	}
+
+	rc = regulator_enable(phy->vdda33);
+	if (rc) {
+		dev_err(phy->phy.dev, "Unable to enable vdda33\n");
+		goto unset_vdda33;
+	}
+
+	return 0;
+
+/* teardown path (on == 0): intentionally falls through all labels below */
+disable_regulators:
+	rc = regulator_disable(phy->vdda33);
+	if (rc)
+		dev_err(phy->phy.dev, "Unable to disable vdda33\n");
+
+unset_vdda33:
+	rc = regulator_set_voltage(phy->vdda33, 0, USB_HSPHY_3P3_VOL_MAX);
+	if (rc)
+		dev_err(phy->phy.dev, "unable to set voltage for vdda33\n");
+
+put_vdda33_lpm:
+	rc = regulator_set_load(phy->vdda33, 0);
+	if (rc < 0)
+		dev_err(phy->phy.dev, "Unable to set LPM of vdda33\n");
+
+disable_vdda18:
+	rc = regulator_disable(phy->vdda18);
+	if (rc)
+		dev_err(phy->phy.dev, "Unable to disable vdda18\n");
+
+unset_vdda18:
+	rc = regulator_set_voltage(phy->vdda18, 0, USB_HSPHY_1P8_VOL_MAX);
+	if (rc)
+		dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
+
+put_vdda18_lpm:
+	rc = regulator_set_load(phy->vdda18, 0);
+	if (rc < 0)
+		dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n");
+
+	/* a positive set_load() result is not an error */
+	return rc < 0 ? rc : 0;
+}
+
+/*
+ * msm_usb_write_readback() - read-modify-write a masked field of a
+ * QSCRATCH register, then read it back and log an error if the field
+ * did not take the new value.
+ *
+ * Fix: take void __iomem * instead of a plain void *. Every caller
+ * passes an ioremapped address (phy->base) and readl_relaxed()/
+ * writel_relaxed() operate on the __iomem address space; the old
+ * prototype silently dropped the annotation (sparse would flag it).
+ */
+static void msm_usb_write_readback(void __iomem *base, u32 offset,
+					const u32 mask, u32 val)
+{
+	u32 write_val, tmp = readl_relaxed(base + offset);
+
+	tmp &= ~mask;		/* retain other bits */
+	write_val = tmp | val;
+
+	writel_relaxed(write_val, base + offset);
+
+	/* Read back to see if val was written */
+	tmp = readl_relaxed(base + offset);
+	tmp &= mask;		/* clear other bits */
+
+	if (tmp != val)
+		pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
+			__func__, val, offset);
+}
+
+/*
+ * msm_hsphy_reset() - hard-reset the HS PHY, either by pulsing the TCSR
+ * ARES bit (when a tcsr mapping exists) or by asserting/deasserting the
+ * sleep-clock reset control.
+ *
+ * The reset is skipped when more than one PHY instance is active, since
+ * resetting shared hardware would disturb the other in-use port(s).
+ * NOTE(review): the atomic_read() check is only advisory — the count can
+ * change between the read and the reset; confirm callers serialize this.
+ *
+ * Returns 0 on success or a clk_reset() error.
+ */
+static int msm_hsphy_reset(struct usb_phy *uphy)
+{
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+	u32 val;
+	int ret;
+
+	/* skip reset if there are other active PHY instances */
+	ret = atomic_read(&hsphy_active_count);
+	if (ret > 1) {
+		dev_dbg(uphy->dev, "skipping reset, inuse count=%d\n", ret);
+		return 0;
+	}
+
+	if (phy->tcsr) {
+		val = readl_relaxed(phy->tcsr);
+
+		/* Assert/deassert TCSR Reset */
+		writel_relaxed((val | TCSR_HSPHY_ARES), phy->tcsr);
+		usleep_range(1000, 1200);
+		writel_relaxed((val & ~TCSR_HSPHY_ARES), phy->tcsr);
+	} else if (phy->sleep_clk_reset) {
+		/* Reset PHY using sleep clock */
+		ret = clk_reset(phy->sleep_clk, CLK_RESET_ASSERT);
+		if (ret) {
+			dev_err(uphy->dev, "hsphy_sleep_clk assert failed\n");
+			return ret;
+		}
+
+		usleep_range(1000, 1200);
+		ret = clk_reset(phy->sleep_clk, CLK_RESET_DEASSERT);
+		if (ret) {
+			dev_err(uphy->dev, "hsphy_sleep_clk reset deassert failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * msm_hsphy_init() - reset the PHY and run the QSCRATCH bring-up
+ * sequence: enable the UTMI clock and HV clamps, disable retention,
+ * apply the core-version-specific common-block settings, and program
+ * the optional parameter-override (tuning) value from DT or the
+ * override_phy_init module parameter.
+ *
+ * Fix: the msm_hsphy_reset() result was ignored. The sleep-clock reset
+ * path can fail, and continuing to program a PHY that was never reset
+ * is unreliable — propagate the error instead.
+ */
+static int msm_hsphy_init(struct usb_phy *uphy)
+{
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+	u32 val;
+	int ret;
+
+	ret = msm_hsphy_reset(uphy);
+	if (ret)
+		return ret;
+
+	/* different sequences based on core version */
+	phy->core_ver = readl_relaxed(phy->base);
+
+	/*
+	 * HSPHY Initialization: Enable UTMI clock and clamp enable HVINTs,
+	 * and disable RETENTION (power-on default is ENABLED)
+	 */
+	val = readl_relaxed(phy->base + HS_PHY_CTRL_REG(0));
+	val |= (USB2_UTMI_CLK_EN | CLAMP_MPM_DPSE_DMSE_EN_N | RETENABLEN);
+
+	if (uphy->flags & ENABLE_SECONDARY_PHY) {
+		val &= ~(USB2_UTMI_CLK_EN | FREECLOCK_SEL);
+		val |= FREECLK_DIS_WHEN_SUSP;
+	}
+
+	writel_relaxed(val, phy->base + HS_PHY_CTRL_REG(0));
+	usleep_range(2000, 2200);
+
+	if (uphy->flags & ENABLE_SECONDARY_PHY)
+		msm_usb_write_readback(phy->base, GENERAL_CFG_REG,
+					SEC_UTMI_FREE_CLK_GFM_SEL1,
+					SEC_UTMI_FREE_CLK_GFM_SEL1);
+
+	if (phy->core_ver >= MSM_CORE_VER_120) {
+		if (phy->set_pllbtune) {
+			val = readl_relaxed(phy->base + HS_PHY_CTRL_COMMON_REG);
+			val |= COMMON_PLLBTUNE | COMMON_CLKCORE;
+			val &= ~COMMON_FSEL;
+			writel_relaxed(val, phy->base + HS_PHY_CTRL_COMMON_REG);
+		} else {
+			writel_relaxed(COMMON_OTGDISABLE0 |
+				COMMON_OTGTUNE0_DEFAULT |
+				COMMON_COMMONONN | FSEL_DEFAULT |
+				COMMON_RETENABLEN,
+				phy->base + HS_PHY_CTRL_COMMON_REG);
+		}
+	}
+
+	/*
+	 * write HSPHY init value to QSCRATCH reg to set HSPHY parameters like
+	 * VBUS valid threshold, disconnect valid threshold, DC voltage level,
+	 * preempasis and rise/fall time.
+	 */
+	if (override_phy_init)
+		phy->hsphy_init_seq = override_phy_init;
+	if (phy->hsphy_init_seq)
+		msm_usb_write_readback(phy->base,
+					PARAMETER_OVERRIDE_X_REG(0), 0x03FFFFFF,
+					phy->hsphy_init_seq & 0x03FFFFFF);
+
+	return 0;
+}
+
+/*
+ * msm_hsphy_set_suspend() - put every port of the PHY into or out of
+ * low-power mode.
+ *
+ * Suspend: latch-clear the IRQ status, arm wakeup interrupts (DP/DM HV
+ * in host mode; OTGSESSVLD/ID HV unless an external VBUS/ID source is
+ * used), put the PHY in suspend with retention enabled when no charger
+ * is attached, and — when disconnected in device mode — drop the analog
+ * supplies and VDD to their floor. Resume undoes all of the above and
+ * re-applies the parameter-override tuning value.
+ *
+ * hsphy_active_count tracks how many instances are awake so that a
+ * shared reset is only issued when the last one suspends.
+ * Returns 0; the call is a no-op if already in the requested state.
+ */
+static int msm_hsphy_set_suspend(struct usb_phy *uphy, int suspend)
+{
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+	bool host = uphy->flags & PHY_HOST_MODE;
+	bool chg_connected = uphy->flags & PHY_CHARGER_CONNECTED;
+	int i, count;
+
+	if (!!suspend == phy->suspended) {
+		dev_dbg(uphy->dev, "%s\n", suspend ? "already suspended"
+			: "already resumed");
+		return 0;
+	}
+
+	if (suspend) {
+		for (i = 0; i < phy->num_ports; i++) {
+			/* Clear interrupt latch register */
+			writel_relaxed(ALT_INTERRUPT_MASK,
+				phy->base + HS_PHY_IRQ_STAT_REG(i));
+
+			if (host) {
+				/* Enable DP and DM HV interrupts */
+				if (phy->core_ver >= MSM_CORE_VER_120)
+					msm_usb_write_readback(phy->base,
+							ALT_INTERRUPT_EN_REG(i),
+							(LINESTATE_INTEN |
+							DPINTEN | DMINTEN),
+							(LINESTATE_INTEN |
+							DPINTEN | DMINTEN));
+				else
+					msm_usb_write_readback(phy->base,
+							ALT_INTERRUPT_EN_REG(i),
+							DPDMHV_INT_MASK,
+							DPDMHV_INT_MASK);
+
+				udelay(5);
+			} else {
+				/*
+				 * Device mode: set OTGDISABLE0=1 and
+				 * USB2_SUSPEND_N_SEL=1, USB2_SUSPEND_N=0
+				 */
+				if (phy->core_ver >= MSM_CORE_VER_120)
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							COMMON_OTGDISABLE0,
+							COMMON_OTGDISABLE0);
+				else
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_REG(i),
+							OTGDISABLE0, OTGDISABLE0);
+
+				msm_usb_write_readback(phy->base,
+						HS_PHY_CTRL_REG(i),
+						(USB2_SUSPEND_N_SEL | USB2_SUSPEND_N),
+						USB2_SUSPEND_N_SEL);
+				/*
+				 * Enable PHY retention.
+				 * RETENABLEN bit is not available on few
+				 * platforms (those use PLLITUNE instead).
+				 */
+				if (!chg_connected) {
+					if (phy->set_pllbtune)
+						msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							COMMON_PLLITUNE_1,
+							COMMON_PLLITUNE_1);
+					else
+						msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_REG(i),
+							RETENABLEN, 0);
+					phy->lpm_flags |= PHY_RETENTIONED;
+				}
+			}
+
+			if (!phy->ext_vbus_id)
+				/* Enable PHY-based IDHV and
+				 * OTGSESSVLD HV interrupts
+				 */
+				msm_usb_write_readback(phy->base,
+						HS_PHY_CTRL_REG(i),
+						(OTGSESSVLDHV_INTEN | IDHV_INTEN),
+						(OTGSESSVLDHV_INTEN | IDHV_INTEN));
+		}
+		/* can turn off regulators if disconnected in device mode */
+		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
+			if (phy->ext_vbus_id) {
+				msm_hsusb_ldo_enable(phy, 0);
+				phy->lpm_flags |= PHY_PWR_COLLAPSED;
+			}
+			msm_hsusb_config_vdd(phy, 0);
+		}
+
+		count = atomic_dec_return(&hsphy_active_count);
+		if (count < 0) {
+			dev_WARN(uphy->dev, "hsphy_active_count=%d, something wrong?\n",
+					count);
+			atomic_set(&hsphy_active_count, 0);
+		}
+	} else {
+		atomic_inc(&hsphy_active_count);
+		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
+			msm_hsusb_config_vdd(phy, 1);
+			if (phy->ext_vbus_id) {
+				msm_hsusb_ldo_enable(phy, 1);
+				phy->lpm_flags &= ~PHY_PWR_COLLAPSED;
+			}
+			phy->lpm_flags &= ~PHY_RETENTIONED;
+		}
+
+		if (phy->core_ver >= MSM_CORE_VER_120) {
+			if (phy->set_pllbtune) {
+				msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							FSEL_MASK, 0);
+			} else {
+				msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							FSEL_MASK, FSEL_DEFAULT);
+			}
+		}
+		for (i = 0; i < phy->num_ports; i++) {
+			if (!phy->ext_vbus_id)
+				/* Disable HV interrupts */
+				msm_usb_write_readback(phy->base,
+						HS_PHY_CTRL_REG(i),
+						(OTGSESSVLDHV_INTEN | IDHV_INTEN),
+						0);
+			if (host) {
+				/* Clear interrupt latch register */
+				writel_relaxed(ALT_INTERRUPT_MASK,
+					phy->base + HS_PHY_IRQ_STAT_REG(i));
+				/* Disable DP and DM HV interrupt */
+				if (phy->core_ver >= MSM_CORE_VER_120)
+					msm_usb_write_readback(phy->base,
+							ALT_INTERRUPT_EN_REG(i),
+							LINESTATE_INTEN, 0);
+				else
+					msm_usb_write_readback(phy->base,
+							ALT_INTERRUPT_EN_REG(i),
+							DPDMHV_INT_MASK, 0);
+			} else {
+				/* Disable PHY retention */
+				if (phy->set_pllbtune)
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							COMMON_PLLITUNE_1, 0);
+				else
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_REG(i),
+							RETENABLEN, RETENABLEN);
+
+				/* Bring PHY out of suspend */
+				msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_REG(i),
+							USB2_SUSPEND_N_SEL, 0);
+
+				if (phy->core_ver >= MSM_CORE_VER_120)
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_COMMON_REG,
+							COMMON_OTGDISABLE0,
+							0);
+				else
+					msm_usb_write_readback(phy->base,
+							HS_PHY_CTRL_REG(i),
+							OTGDISABLE0, 0);
+			}
+		}
+		/*
+		 * write HSPHY init value to QSCRATCH reg to set HSPHY
+		 * parameters like VBUS valid threshold, disconnect valid
+		 * threshold, DC voltage level,preempasis and rise/fall time
+		 */
+		if (override_phy_init)
+			phy->hsphy_init_seq = override_phy_init;
+		if (phy->hsphy_init_seq)
+			msm_usb_write_readback(phy->base,
+					PARAMETER_OVERRIDE_X_REG(0),
+					0x03FFFFFF,
+					phy->hsphy_init_seq & 0x03FFFFFF);
+	}
+
+	phy->suspended = !!suspend; /* double-NOT coerces to bool value */
+	return 0;
+}
+
+/*
+ * msm_hsphy_notify_connect() - cable-attach notification.
+ *
+ * Host mode: on core versions 1.60/1.61 raise vdda33 to 3.15 V, which
+ * some SNPS picophy revisions need for reliable full-speed host
+ * operation at sub-zero temperature (failure is logged, not propagated
+ * — best effort, so the return stays 0).
+ * Device mode with PHY_VBUS_VALID_OVERRIDE: force the VBUS-valid state
+ * into the PHY and controller through QSCRATCH.
+ */
+static int msm_hsphy_notify_connect(struct usb_phy *uphy,
+				enum usb_device_speed speed)
+{
+	int rc = 0;
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+
+	phy->cable_connected = true;
+
+	if (uphy->flags & PHY_HOST_MODE) {
+		if (phy->core_ver == MSM_CORE_VER_160 ||
+			phy->core_ver == MSM_CORE_VER_161) {
+			/* Some snps usb2 picophy revisions require 3.15 V to
+			 * operate correctly during full speed host mode at
+			 * sub zero temperature.
+			 */
+			rc = regulator_set_voltage(phy->vdda33,
+					USB_HSPHY_3P3_VOL_FSHOST,
+					USB_HSPHY_3P3_VOL_MAX);
+			if (rc)
+				dev_err(phy->phy.dev,
+					"unable to set voltage for vdda33\n");
+		}
+		return 0;
+	}
+
+	if (!(uphy->flags & PHY_VBUS_VALID_OVERRIDE))
+		return 0;
+
+	/* Set External VBUS Valid Select. Set once, can be left on */
+	if (phy->core_ver >= MSM_CORE_VER_120) {
+		msm_usb_write_readback(phy->base, HS_PHY_CTRL_COMMON_REG,
+					COMMON_VBUSVLDEXTSEL0,
+					COMMON_VBUSVLDEXTSEL0);
+	} else {
+		msm_usb_write_readback(phy->base,
+					HS_PHY_CTRL_REG(0),
+					VBUSVLDEXTSEL0, VBUSVLDEXTSEL0);
+	}
+
+	/* Enable D+ pull-up resistor */
+	msm_usb_write_readback(phy->base,
+				HS_PHY_CTRL_REG(0),
+				VBUSVLDEXT0, VBUSVLDEXT0);
+
+	/* Set OTG VBUS Valid from HSPHY to controller */
+	msm_usb_write_readback(phy->base, HS_PHY_CTRL_REG(0),
+				UTMI_OTG_VBUS_VALID,
+				UTMI_OTG_VBUS_VALID);
+
+	/* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */
+	if (phy->core_ver >= MSM_CORE_VER_120)
+		msm_usb_write_readback(phy->base, HS_PHY_CTRL_REG(0),
+					SW_SESSVLD_SEL, SW_SESSVLD_SEL);
+
+	return 0;
+}
+
+/*
+ * msm_hsphy_notify_disconnect() - cable-detach notification; mirrors
+ * msm_hsphy_notify_connect().
+ *
+ * Host mode: restore the normal vdda33 minimum on core 1.60/1.61
+ * (best effort — errors are logged, return stays 0). Device mode with
+ * PHY_VBUS_VALID_OVERRIDE: clear the forced VBUS-valid state and the
+ * D+ pull-up.
+ */
+static int msm_hsphy_notify_disconnect(struct usb_phy *uphy,
+				enum usb_device_speed speed)
+{
+	int rc = 0;
+	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
+
+	phy->cable_connected = false;
+
+	if (uphy->flags & PHY_HOST_MODE) {
+		if (phy->core_ver == MSM_CORE_VER_160 ||
+			phy->core_ver == MSM_CORE_VER_161) {
+			rc = regulator_set_voltage(phy->vdda33,
+					USB_HSPHY_3P3_VOL_MIN,
+					USB_HSPHY_3P3_VOL_MAX);
+			if (rc)
+				dev_err(phy->phy.dev,
+					"unable to set voltage for vdda33\n");
+		}
+		return 0;
+	}
+
+	if (!(uphy->flags & PHY_VBUS_VALID_OVERRIDE))
+		return 0;
+
+	/* Clear OTG VBUS Valid to Controller */
+	msm_usb_write_readback(phy->base, HS_PHY_CTRL_REG(0),
+				UTMI_OTG_VBUS_VALID, 0);
+
+	/* Disable D+ pull-up resistor */
+	msm_usb_write_readback(phy->base,
+				HS_PHY_CTRL_REG(0), VBUSVLDEXT0, 0);
+
+	/* Indicate value is no longer driven by UTMI_OTG_VBUS_VALID bit */
+	if (phy->core_ver >= MSM_CORE_VER_120)
+		msm_usb_write_readback(phy->base, HS_PHY_CTRL_REG(0),
+					SW_SESSVLD_SEL, 0);
+
+	return 0;
+}
+
+/*
+ * msm_hsphy_probe() - map register regions, parse DT, bring up the
+ * supplies and sleep clock, and register the PHY with the USB core.
+ *
+ * Fixes:
+ *  - The num_ports > 3 rejection jumped to disable_clk with ret still 0,
+ *    so probe released every resource and then reported SUCCESS; set
+ *    ret = -EINVAL before bailing out.
+ *  - clk_prepare_enable() can fail and was ignored; check it.
+ *  - The tcsr ioremap failure used a bare "return -ENODEV", bypassing
+ *    the function's common exit path; route it through err_ret like
+ *    every other error (same effect — everything so far is devm-managed).
+ *  - Typo in the num_ports error message ("more that").
+ */
+static int msm_hsphy_probe(struct platform_device *pdev)
+{
+	struct msm_hsphy *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		ret = -ENOMEM;
+		goto err_ret;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	if (!res) {
+		dev_err(dev, "missing memory base resource\n");
+		ret = -ENODEV;
+		goto err_ret;
+	}
+
+	phy->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!phy->base) {
+		dev_err(dev, "ioremap failed\n");
+		ret = -ENODEV;
+		goto err_ret;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr");
+	if (res) {
+		phy->tcsr = devm_ioremap_nocache(dev, res->start,
+						resource_size(res));
+		if (!phy->tcsr) {
+			dev_err(dev, "tcsr ioremap failed\n");
+			ret = -ENODEV;
+			goto err_ret;
+		}
+
+		/* switch MUX to let SNPS controller use the primary HSPHY */
+		writel_relaxed(readl_relaxed(phy->tcsr) | TCSR_USB30_CONTROL,
+				phy->tcsr);
+	}
+
+	if (of_get_property(dev->of_node, "qcom,primary-phy", NULL)) {
+		dev_dbg(dev, "secondary HSPHY\n");
+		phy->phy.flags |= ENABLE_SECONDARY_PHY;
+	}
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					(u32 *) phy->vdd_levels,
+					ARRAY_SIZE(phy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		goto err_ret;
+	}
+
+	phy->ext_vbus_id = of_property_read_bool(dev->of_node,
+						"qcom,ext-vbus-id");
+	phy->phy.dev = dev;
+
+	phy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(phy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		ret = PTR_ERR(phy->vdd);
+		goto err_ret;
+	}
+
+	phy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(phy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		ret = PTR_ERR(phy->vdda33);
+		goto err_ret;
+	}
+
+	phy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(phy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		ret = PTR_ERR(phy->vdda18);
+		goto err_ret;
+	}
+
+	ret = msm_hsusb_config_vdd(phy, 1);
+	if (ret) {
+		dev_err(dev, "hsusb vdd_dig configuration failed\n");
+		goto err_ret;
+	}
+
+	ret = regulator_enable(phy->vdd);
+	if (ret) {
+		dev_err(dev, "unable to enable the hsusb vdd_dig\n");
+		goto unconfig_hs_vdd;
+	}
+
+	ret = msm_hsusb_ldo_enable(phy, 1);
+	if (ret) {
+		dev_err(dev, "hsusb vreg enable failed\n");
+		goto disable_hs_vdd;
+	}
+
+	phy->sleep_clk = devm_clk_get(&pdev->dev, "phy_sleep_clk");
+	if (IS_ERR(phy->sleep_clk)) {
+		dev_err(&pdev->dev, "failed to get phy_sleep_clk\n");
+		ret = PTR_ERR(phy->sleep_clk);
+		goto disable_hs_ldo;
+	}
+	ret = clk_prepare_enable(phy->sleep_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable phy_sleep_clk\n");
+		goto disable_hs_ldo;
+	}
+	phy->sleep_clk_reset = of_property_read_bool(dev->of_node,
+						"qcom,sleep-clk-reset");
+
+	if (of_property_read_u32(dev->of_node, "qcom,hsphy-init",
+					&phy->hsphy_init_seq))
+		dev_dbg(dev, "unable to read hsphy init seq\n");
+	else if (!phy->hsphy_init_seq)
+		dev_warn(dev, "hsphy init seq cannot be 0. Using POR value\n");
+
+	if (of_property_read_u32(dev->of_node, "qcom,num-ports",
+					&phy->num_ports))
+		phy->num_ports = 1;
+	else if (phy->num_ports > 3) {
+		dev_err(dev, "number of ports more than 3 is not supported\n");
+		ret = -EINVAL;
+		goto disable_clk;
+	}
+
+	phy->set_pllbtune = of_property_read_bool(dev->of_node,
+						"qcom,set-pllbtune");
+
+	/*
+	 * If this workaround flag is enabled, the HW requires the 1.8 and 3.x
+	 * regulators to be kept ON when entering suspend. The easiest way to
+	 * do that is to call regulator_enable() an additional time here,
+	 * since it will keep the regulators' reference counts nonzero.
+	 */
+	phy->vdda_force_on = of_property_read_bool(dev->of_node,
+						"qcom,vdda-force-on");
+	if (phy->vdda_force_on) {
+		ret = msm_hsusb_ldo_enable(phy, 1);
+		if (ret)
+			goto disable_clk;
+	}
+
+	platform_set_drvdata(pdev, phy);
+
+	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
+		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
+
+	phy->phy.init = msm_hsphy_init;
+	phy->phy.set_suspend = msm_hsphy_set_suspend;
+	phy->phy.notify_connect = msm_hsphy_notify_connect;
+	phy->phy.notify_disconnect = msm_hsphy_notify_disconnect;
+	phy->phy.reset = msm_hsphy_reset;
+	/*FIXME: this conflicts with dwc3_otg */
+	/*phy->phy.type = USB_PHY_TYPE_USB2; */
+
+	ret = usb_add_phy_dev(&phy->phy);
+	if (ret)
+		goto disable_clk;
+
+	atomic_inc(&hsphy_active_count);
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(phy->sleep_clk);
+disable_hs_ldo:
+	msm_hsusb_ldo_enable(phy, 0);
+disable_hs_vdd:
+	regulator_disable(phy->vdd);
+unconfig_hs_vdd:
+	msm_hsusb_config_vdd(phy, 0);
+err_ret:
+	return ret;
+}
+
+/*
+ * msm_hsphy_remove() - unregister the PHY and release resources in
+ * reverse probe order.
+ *
+ * Fix: the original ended with kfree(phy), but phy was allocated with
+ * devm_kzalloc() in probe — devres frees it automatically when the
+ * device detaches, so the explicit kfree() was a double free.
+ */
+static int msm_hsphy_remove(struct platform_device *pdev)
+{
+	struct msm_hsphy *phy = platform_get_drvdata(pdev);
+
+	if (!phy)
+		return 0;
+
+	usb_remove_phy(&phy->phy);
+	clk_disable_unprepare(phy->sleep_clk);
+
+	/* Undo the additional regulator enable */
+	if (phy->vdda_force_on)
+		msm_hsusb_ldo_enable(phy, 0);
+	msm_hsusb_ldo_enable(phy, 0);
+	regulator_disable(phy->vdd);
+	msm_hsusb_config_vdd(phy, 0);
+	/* only awake instances are counted in hsphy_active_count */
+	if (!phy->suspended)
+		atomic_dec(&hsphy_active_count);
+
+	return 0;
+}
+
+/* Device-tree match table and platform-driver registration boilerplate. */
+static const struct of_device_id msm_usb_id_table[] = {
+	{
+		.compatible = "qcom,usb-hsphy",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+
+static struct platform_driver msm_hsphy_driver = {
+	.probe		= msm_hsphy_probe,
+	.remove		= msm_hsphy_remove,
+	.driver = {
+		.name	= "msm-usb-hsphy",
+		.of_match_table = of_match_ptr(msm_usb_id_table),
+	},
+};
+
+module_platform_driver(msm_hsphy_driver);
+
+MODULE_DESCRIPTION("MSM USB HS PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
new file mode 100644
index 000000000000..e5f38e42e165
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -0,0 +1,1141 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PWR_CTRL1 0x210
+#define PWR_CTRL1_POWR_DOWN BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0
+#define CORE_READY_STATUS BIT(0)
+
+/* Get TUNE value from efuse bit-mask */
+#define TUNE_VAL_MASK(val, pos, mask) ((val >> pos) & mask)
+
+#define QUSB2PHY_INTR_CTRL 0x22C
+#define DMSE_INTR_HIGH_SEL BIT(4)
+#define DPSE_INTR_HIGH_SEL BIT(3)
+#define CHG_DET_INTR_EN BIT(2)
+#define DMSE_INTR_EN BIT(1)
+#define DPSE_INTR_EN BIT(0)
+
+#define QUSB2PHY_INTR_STAT 0x230
+#define DMSE_INTERRUPT BIT(1)
+#define DPSE_INTERRUPT BIT(0)
+
+#define QUSB2PHY_PORT_TUNE1 0x23c
+#define QUSB2PHY_TEST1 0x24C
+
+#define QUSB2PHY_1P2_VOL_MIN 1200000 /* uV */
+#define QUSB2PHY_1P2_VOL_MAX 1200000 /* uV */
+#define QUSB2PHY_1P2_HPM_LOAD 23000
+
+#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */
+
+#define LINESTATE_DP BIT(0)
+#define LINESTATE_DM BIT(1)
+
+#define QUSB2PHY_PLL_ANALOG_CONTROLS_ONE 0x0
+#define QUSB2PHY_PLL_ANALOG_CONTROLS_TWO 0x4
+
+unsigned int phy_tune1;
+module_param(phy_tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
+
+/*
+ * struct qusb_phy - per-instance state for one QUSB2 (v2) PHY.
+ *
+ * power_enabled_ref reference-counts qusb_phy_enable_power() so shared
+ * regulators are only dropped when the last user turns them off; lock
+ * guards that refcount and the power sequencing.
+ */
+struct qusb_phy {
+	struct usb_phy phy;
+	struct mutex lock;
+	void __iomem *base;
+	void __iomem *efuse_reg;		/* fused TUNE1 value, optional */
+	void __iomem *tcsr_clamp_dig_n;
+
+	struct clk *ref_clk_src;
+	struct clk *ref_clk;
+	struct clk *cfg_ahb_clk;
+	struct reset_control *phy_reset;
+
+	struct regulator *vdd;
+	struct regulator *vdda33;
+	struct regulator *vdda18;
+	struct regulator *vdda12;
+	int vdd_levels[3]; /* none, low, high */
+	int vdda33_levels[3];
+	int init_seq_len;
+	int *qusb_phy_init_seq;			/* device-mode init sequence */
+	int host_init_seq_len;
+	int *qusb_phy_host_init_seq;		/* host-mode init sequence */
+
+	u32 tune_val;				/* TUNE1 derived from efuse */
+	int efuse_bit_pos;
+	int efuse_num_of_bits;
+
+	int power_enabled_ref;
+	bool clocks_enabled;
+	bool cable_connected;
+	bool suspended;
+	bool rm_pulldown;			/* D+/D- pulldowns removed */
+
+	struct regulator_desc dpdm_rdesc;
+	struct regulator_dev *dpdm_rdev;
+
+	/* emulation targets specific */
+	void __iomem *emu_phy_base;
+	bool emulation;
+	int *emu_init_seq;
+	int emu_init_seq_len;
+	int *phy_pll_reset_seq;
+	int phy_pll_reset_seq_len;
+	int *emu_dcm_reset_seq;
+	int emu_dcm_reset_seq_len;
+};
+
+/*
+ * qusb_phy_enable_clocks() - gate all PHY clocks on or off;
+ * clocks_enabled makes repeated calls in the same direction a no-op.
+ *
+ * NOTE(review): the disable order (ref_clk, ref_clk_src, cfg_ahb_clk)
+ * is not the strict reverse of the enable order (ref_clk_src, ref_clk,
+ * cfg_ahb_clk) — confirm this is intentional before changing it.
+ */
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+			__func__, qphy->clocks_enabled, on);
+
+	if (!qphy->clocks_enabled && on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = true;
+	}
+
+	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = false;
+	}
+
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+		qphy->clocks_enabled);
+}
+
+/*
+ * qusb_phy_config_vdd() - program the digital VDD voltage window.
+ * @high: nonzero selects vdd_levels[1] ("low" corner) as the minimum,
+ *        zero selects vdd_levels[0] ("none"); maximum is always
+ *        vdd_levels[2].
+ *
+ * Returns 0 on success or the regulator_set_voltage() error code.
+ */
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
+
+/*
+ * qusb_phy_enable_power() - reference-counted on/off control of the
+ * four PHY supplies (vdd, vdda12, vdda18, vdda33).
+ *
+ * The regulators are physically enabled only on the 0 -> 1 transition
+ * of power_enabled_ref and disabled on 1 -> 0; all other calls just
+ * adjust the count. The off path jumps to disable_vdda33 and
+ * deliberately falls through every label below it, unwinding the
+ * supplies in reverse order of bring-up; the same labels serve as the
+ * partial-unwind path for bring-up failures (in which case the ref is
+ * re-decremented at err_vdd). The whole sequence runs under qphy->lock.
+ *
+ * Returns 0 on success or a negative regulator API error.
+ */
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+	int ret = 0;
+
+	mutex_lock(&qphy->lock);
+
+	dev_dbg(qphy->phy.dev,
+		"%s:req to turn %s regulators. power_enabled_ref:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled_ref);
+
+	if (on && ++qphy->power_enabled_ref > 1) {
+		dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+		goto done;
+	}
+
+	if (!on) {
+		/* on is false (0) here, so this tests power_enabled_ref == 0 */
+		if (on == qphy->power_enabled_ref) {
+			dev_dbg(qphy->phy.dev,
+				"PHYs' regulators are already off\n");
+			goto done;
+		}
+
+		qphy->power_enabled_ref--;
+		if (!qphy->power_enabled_ref)
+			goto disable_vdda33;
+
+		dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+		goto done;
+	}
+
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
+	}
+
+	ret = regulator_set_load(qphy->vdda12, QUSB2PHY_1P2_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda12:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda12, QUSB2PHY_1P2_VOL_MIN,
+						QUSB2PHY_1P2_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda12:%d\n", ret);
+		goto put_vdda12_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda12);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda12:%d\n", ret);
+		goto unset_vdda12;
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdda12;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, qphy->vdda33_levels[0],
+						qphy->vdda33_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+	mutex_unlock(&qphy->lock);
+	return ret;
+
+/* teardown path: intentionally falls through all labels below */
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, qphy->vdda33_levels[2]);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdda12:
+	ret = regulator_disable(qphy->vdda12);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda12:%d\n", ret);
+unset_vdda12:
+	ret = regulator_set_voltage(qphy->vdda12, 0, QUSB2PHY_1P2_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda12:%d\n", ret);
+put_vdda12_lpm:
+	ret = regulator_set_load(qphy->vdda12, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda12\n");
+
+disable_vdd:
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+							ret);
+
+unconfig_vdd:
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+							ret);
+err_vdd:
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+	/* in case of error in turning on regulators */
+	if (qphy->power_enabled_ref)
+		qphy->power_enabled_ref--;
+done:
+	mutex_unlock(&qphy->lock);
+	return ret;
+}
+
+/*
+ * Float or restore the D+/D- pull-downs on behalf of the charger stack.
+ *
+ * DPF_DMF powers the PHY rails (floats the lines) the first time it is
+ * requested; DPR_DMR powers them back down. qphy->rm_pulldown tracks the
+ * current state so repeated requests are no-ops. Returns 0 or a negative
+ * errno (-EINVAL for an unknown request).
+ */
+static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret = 0;
+
+ dev_dbg(phy->dev, "%s value:%d rm_pulldown:%d\n",
+ __func__, value, qphy->rm_pulldown);
+
+ if (value == POWER_SUPPLY_DP_DM_DPF_DMF) {
+ dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
+ /* power up the rails only on the first float request */
+ if (!qphy->rm_pulldown) {
+ ret = qusb_phy_enable_power(qphy, true);
+ if (ret >= 0) {
+ qphy->rm_pulldown = true;
+ dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
+ qphy->rm_pulldown);
+ }
+ }
+ } else if (value == POWER_SUPPLY_DP_DM_DPR_DMR) {
+ dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
+ /* drop the rails only if we previously floated the lines */
+ if (qphy->rm_pulldown) {
+ ret = qusb_phy_enable_power(qphy, false);
+ if (ret >= 0) {
+ qphy->rm_pulldown = false;
+ dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
+ qphy->rm_pulldown);
+ }
+ }
+ } else {
+ ret = -EINVAL;
+ dev_err(phy->dev, "Invalid power supply property(%d)\n", value);
+ }
+
+ return ret;
+}
+
+/*
+ * Read the factory-fused TUNE1 override from the efuse register and cache
+ * the resulting TUNE1 byte in qphy->tune_val for qusb_phy_init().
+ *
+ * When the fused field is non-zero it replaces the high nibble of the
+ * current QUSB2PHY_PORT_TUNE1 value; the low nibble is preserved.
+ */
+static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
+{
+ u8 reg;
+ u32 bit_mask = 1;
+
+ pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+ qphy->efuse_num_of_bits,
+ qphy->efuse_bit_pos);
+
+ /* get bit mask based on number of bits to use with efuse reg */
+ bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
+
+ /*
+ * if efuse reg is updated (i.e non-zero) then use it to program
+ * tune parameters
+ */
+ qphy->tune_val = readl_relaxed(qphy->efuse_reg);
+ pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
+ __func__, bit_mask, qphy->tune_val);
+
+ /* extract the fused field, then splice it into the high nibble */
+ qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
+ qphy->efuse_bit_pos, bit_mask);
+ reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+ if (qphy->tune_val) {
+ reg = reg & 0x0f;
+ reg |= (qphy->tune_val << 4);
+ }
+ qphy->tune_val = reg;
+}
+
+/*
+ * Play back a register programming sequence against @base.
+ *
+ * @seq holds @cnt entries laid out as {value0, offset0, value1, offset1, ...}
+ * (pairs, so @cnt must be even); @delay is an optional per-write pause in
+ * microseconds. Writes are relaxed: callers order them with an explicit
+ * barrier afterwards where it matters.
+ */
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+ unsigned long delay)
+{
+ int i;
+
+ pr_debug("Seq count:%d\n", cnt);
+ for (i = 0; i < cnt; i = i+2) {
+ pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+ writel_relaxed(seq[i], base + seq[i+1]);
+ if (delay)
+ usleep_range(delay, (delay + 2000));
+ }
+}
+
+/*
+ * Re-initialize the PHY for host mode: pulse the PHY reset line, replay
+ * the DT-provided host init sequence, then verify PLL lock.
+ *
+ * Called from qusb_phy_notify_connect() when a host-mode init sequence
+ * was supplied in DT. A failed PLL lock is reported loudly (WARN_ON) but
+ * not propagated — the function returns void.
+ */
+static void qusb_phy_host_init(struct usb_phy *phy)
+{
+ u8 reg;
+ int ret;
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ dev_dbg(phy->dev, "%s\n", __func__);
+
+ /* Perform phy reset */
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+ qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
+ qphy->host_init_seq_len, 0);
+
+ /* Ensure above write is completed before turning ON ref clk */
+ wmb();
+
+ /* Require to get phy pll lock successfully */
+ usleep_range(150, 160);
+
+ reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+ dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+ if (!(reg & CORE_READY_STATUS)) {
+ dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Full PHY bring-up (usb_phy .init callback).
+ *
+ * Sequence: raise vdda33 to its operating level, enable clocks, pulse the
+ * PHY reset, then either run the emulation-platform sequences or program
+ * the real silicon (power down PHY, replay init sequence, apply TUNE1
+ * overrides from efuse and/or the phy_tune1 modparam, power PHY back up)
+ * and finally verify PLL lock. Returns 0 on success; a failed PLL lock is
+ * only WARNed about, matching qusb_phy_host_init().
+ */
+static int qusb_phy_init(struct usb_phy *phy)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret;
+ u8 reg;
+
+ dev_dbg(phy->dev, "%s\n", __func__);
+
+ /* bump up vdda33 voltage to operating level*/
+ ret = regulator_set_voltage(qphy->vdda33, qphy->vdda33_levels[1],
+ qphy->vdda33_levels[2]);
+ if (ret) {
+ dev_err(qphy->phy.dev,
+ "Unable to set voltage for vdda33:%d\n", ret);
+ return ret;
+ }
+
+ qusb_phy_enable_clocks(qphy, true);
+
+ /* Perform phy reset */
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+ /* Emulation (RUMI) platforms use their own DT-provided sequences */
+ if (qphy->emulation) {
+ if (qphy->emu_init_seq)
+ qusb_phy_write_seq(qphy->emu_phy_base,
+ qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
+
+ if (qphy->qusb_phy_init_seq)
+ qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+ qphy->init_seq_len, 0);
+
+ /* Wait for 5ms as per QUSB2 RUMI sequence */
+ usleep_range(5000, 7000);
+
+ if (qphy->phy_pll_reset_seq)
+ qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+ qphy->phy_pll_reset_seq_len, 10000);
+
+ if (qphy->emu_dcm_reset_seq)
+ qusb_phy_write_seq(qphy->emu_phy_base,
+ qphy->emu_dcm_reset_seq,
+ qphy->emu_dcm_reset_seq_len, 10000);
+
+ return 0;
+ }
+
+ /* Disable the PHY */
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+ if (qphy->qusb_phy_init_seq)
+ qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+ qphy->init_seq_len, 0);
+ /* efuse-fused TUNE1 override, read once and cached in tune_val */
+ if (qphy->efuse_reg) {
+ if (!qphy->tune_val)
+ qusb_phy_get_tune1_param(qphy);
+
+ pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
+ qphy->tune_val);
+ writel_relaxed(qphy->tune_val,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+ }
+
+ /* If phy_tune1 modparam set, override tune1 value */
+ if (phy_tune1) {
+ pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
+ __func__, phy_tune1);
+ writel_relaxed(phy_tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+ }
+
+ /* ensure above writes are completed before re-enabling PHY */
+ wmb();
+
+ /* Enable the PHY */
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+ ~PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+ /* Ensure above write is completed before turning ON ref clk */
+ wmb();
+
+ /* Require to get phy pll lock successfully */
+ usleep_range(150, 160);
+
+ reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+ dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+ if (!(reg & CORE_READY_STATUS)) {
+ dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+ WARN_ON(1);
+ }
+ return 0;
+}
+
+/*
+ * usb_phy .shutdown callback: power down the PHY core.
+ *
+ * Clocks are briefly enabled so the POWR_DOWN register write can land,
+ * then turned back off. Regulators are left as-is here; they are managed
+ * separately via qusb_phy_enable_power().
+ */
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ dev_dbg(phy->dev, "%s\n", __func__);
+
+ qusb_phy_enable_clocks(qphy, true);
+
+ /* Disable the PHY */
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+ /* Makes sure that above write goes through */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+}
+
+/*
+ * Derive the current D+/D- linestate from the PHY mode flags.
+ * Returns LINESTATE_DP for HS/FS mode, LINESTATE_DM for LS mode and 0
+ * when no cable is connected (or neither mode flag is set).
+ */
+static u32 qusb_phy_get_linestate(struct qusb_phy *qphy)
+{
+ if (!qphy->cable_connected)
+ return 0;
+
+ if (qphy->phy.flags & PHY_HSFS_MODE)
+ return LINESTATE_DP;
+
+ if (qphy->phy.flags & PHY_LS_MODE)
+ return LINESTATE_DM;
+
+ return 0;
+}
+
+/**
+ * Performs QUSB2 PHY suspend/resume functionality.
+ *
+ * @phy - usb phy pointer.
+ * @suspend - to enable suspend or not. 1 - suspend, 0 - resume
+ *
+ * Four paths: bus suspend / bus resume (cable or host mode present) keep
+ * power on and arm/disarm linestate wakeup interrupts; cable disconnect /
+ * connect additionally assert/de-assert the TCSR digital clamp and drop /
+ * restore regulators.
+ *
+ * NOTE(review): analog_ctrl_two is function-static, so it is shared if
+ * multiple PHY instances exist, and reads as 0 if resume ever runs before
+ * the first suspend — confirm both are impossible in practice.
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ u32 linestate = 0, intr_mask = 0;
+ static u8 analog_ctrl_two;
+ int ret;
+
+ if (qphy->suspended && suspend) {
+ dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+ __func__);
+ return 0;
+ }
+
+ if (suspend) {
+ /* Bus suspend case */
+ if (qphy->cable_connected ||
+ (qphy->phy.flags & PHY_HOST_MODE)) {
+
+ /* store clock settings like cmos/cml */
+ analog_ctrl_two =
+ readl_relaxed(qphy->base +
+ QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+ /* use CSR & switch to SE clk */
+ writel_relaxed(0xb,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+ /* enable clock bypass */
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+ /* Disable all interrupts */
+ writel_relaxed(0x00,
+ qphy->base + QUSB2PHY_INTR_CTRL);
+
+ linestate = qusb_phy_get_linestate(qphy);
+ /*
+ * D+/D- interrupts are level-triggered, but we are
+ * only interested if the line state changes, so enable
+ * the high/low trigger based on current state. In
+ * other words, enable the triggers _opposite_ of what
+ * the current D+/D- levels are.
+ * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+ * configure the mask to trigger on D+ low OR D- high
+ */
+ intr_mask = DMSE_INTERRUPT | DPSE_INTERRUPT;
+ if (!(linestate & LINESTATE_DP)) /* D+ low */
+ intr_mask |= DPSE_INTR_HIGH_SEL;
+ if (!(linestate & LINESTATE_DM)) /* D- low */
+ intr_mask |= DMSE_INTR_HIGH_SEL;
+
+ writel_relaxed(intr_mask,
+ qphy->base + QUSB2PHY_INTR_CTRL);
+
+ /* non-idle linestate: arm auto-resume via TEST1 pulse */
+ if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+
+ /* enable phy auto-resume */
+ writel_relaxed(0x91,
+ qphy->base + QUSB2PHY_TEST1);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_TEST1);
+ }
+
+ dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+ __func__, intr_mask);
+
+ /* Makes sure that above write goes through */
+ wmb();
+ qusb_phy_enable_clocks(qphy, false);
+ } else { /* Cable disconnect case */
+
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n",
+ __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+ __func__);
+
+ writel_relaxed(0x1b,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+ /* enable clock bypass */
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+ /*
+ * clamp needs asserted before
+ * power/clocks can be turned off
+ */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+ qusb_phy_enable_power(qphy, false);
+ }
+ qphy->suspended = true;
+ } else {
+ /* Bus resume case */
+ if (qphy->cable_connected ||
+ (qphy->phy.flags & PHY_HOST_MODE)) {
+ qusb_phy_enable_clocks(qphy, true);
+
+ /* restore the default clock settings */
+ writel_relaxed(analog_ctrl_two,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_TWO);
+
+ /* disable clock bypass */
+ writel_relaxed(0x80,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+ /* Clear all interrupts on resume */
+ writel_relaxed(0x00,
+ qphy->base + QUSB2PHY_INTR_CTRL);
+
+ /* Makes sure that above write goes through */
+ wmb();
+ } else { /* Cable connect case */
+ writel_relaxed(0x1, qphy->tcsr_clamp_dig_n);
+
+ /*
+ * clamp needs de-asserted before
+ * power/clocks can be turned on
+ */
+ wmb();
+
+ qusb_phy_enable_power(qphy, true);
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n",
+ __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+ __func__);
+
+ qusb_phy_enable_clocks(qphy, true);
+ }
+ qphy->suspended = false;
+ }
+
+ return 0;
+}
+
+/*
+ * usb_phy .notify_connect callback: record cable presence and, in host
+ * mode with a DT-provided host init sequence, re-run host PHY init.
+ */
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ qphy->cable_connected = true;
+
+ if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE)
+ qusb_phy_host_init(phy);
+
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
+ return 0;
+}
+
+/*
+ * usb_phy .notify_disconnect callback: record that the cable is gone.
+ *
+ * Fix: the debug message previously said "connect notification" on this
+ * disconnect path (copy-paste from qusb_phy_notify_connect), making logs
+ * ambiguous; it now reports "disconnect notification".
+ */
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ qphy->cable_connected = false;
+
+ dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+ qphy->cable_connected);
+ return 0;
+}
+
+/* Regulator .enable hook: float D+/D- by powering up the PHY rails. */
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+ struct qusb_phy *chip = rdev_get_drvdata(rdev);
+ struct usb_phy *uphy = &chip->phy;
+
+ dev_dbg(uphy->dev, "%s\n", __func__);
+ return qusb_phy_update_dpdm(uphy, POWER_SUPPLY_DP_DM_DPF_DMF);
+}
+
+/* Regulator .disable hook: restore the D+/D- pull-downs. */
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+ struct qusb_phy *chip = rdev_get_drvdata(rdev);
+ struct usb_phy *uphy = &chip->phy;
+
+ dev_dbg(uphy->dev, "%s\n", __func__);
+ return qusb_phy_update_dpdm(uphy, POWER_SUPPLY_DP_DM_DPR_DMR);
+}
+
+/* Regulator .is_enabled hook: report whether the pull-downs are removed. */
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct qusb_phy *chip = rdev_get_drvdata(rdev);
+
+ dev_dbg(chip->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+ chip->rm_pulldown);
+ return chip->rm_pulldown;
+}
+
+/* D+/D- pull-down control exposed to the charger as a regulator */
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+ .enable = qusb_phy_dpdm_regulator_enable,
+ .disable = qusb_phy_dpdm_regulator_disable,
+ .is_enabled = qusb_phy_dpdm_regulator_is_enabled,
+};
+
+/*
+ * Register the dpdm pseudo-regulator that lets the charger driver toggle
+ * the D+/D- pull-downs. Returns 0 on success or a negative errno.
+ */
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+ struct device *dev = qphy->phy.dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ /* consumers may only switch this regulator on/off */
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+
+ qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+ qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+ qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+ qphy->dpdm_rdesc.owner = THIS_MODULE;
+
+ cfg.of_node = dev->of_node;
+ cfg.dev = dev;
+ cfg.driver_data = qphy;
+ cfg.init_data = init_data;
+
+ qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+ if (IS_ERR(qphy->dpdm_rdev))
+ return PTR_ERR(qphy->dpdm_rdev);
+
+ return 0;
+}
+
+/*
+ * Platform probe: map register spaces, acquire clocks/reset/regulators,
+ * parse the DT programming sequences and register the usb_phy plus the
+ * dpdm pseudo-regulator.
+ *
+ * Fix: tcsr_clamp_dig_n is an optional resource (only mapped when the
+ * "tcsr_clamp_dig_n_1p8" entry exists), but the final clamp write
+ * dereferenced it unconditionally — a NULL pointer write when the DT
+ * omits the resource. The write is now guarded.
+ */
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+ struct qusb_phy *qphy;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret = 0, size = 0;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->phy.dev = dev;
+
+ /* required: main PHY CSR space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "qusb_phy_base");
+ qphy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->base))
+ return PTR_ERR(qphy->base);
+
+ /* optional: emulation-platform PHY space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "emu_phy_base");
+ if (res) {
+ qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->emu_phy_base)) {
+ dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+ qphy->emu_phy_base = NULL;
+ }
+ }
+
+ /* optional: TCSR digital clamp control; stays NULL if absent */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "tcsr_clamp_dig_n_1p8");
+ if (res) {
+ qphy->tcsr_clamp_dig_n = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
+ dev_dbg(dev, "couldn't ioremap tcsr_clamp_dig_n\n");
+ return PTR_ERR(qphy->tcsr_clamp_dig_n);
+ }
+ }
+
+ /* optional: efuse with fused TUNE1 override; bit-pos/num-bits must
+ * accompany it
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "efuse_addr");
+ if (res) {
+ qphy->efuse_reg = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,efuse-bit-pos",
+ &qphy->efuse_bit_pos);
+ if (!ret) {
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,efuse-num-bits",
+ &qphy->efuse_num_of_bits);
+ }
+
+ if (ret) {
+ dev_err(dev,
+ "DT Value for efuse is invalid.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+ if (IS_ERR(qphy->ref_clk_src))
+ dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+ qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(qphy->ref_clk))
+ dev_dbg(dev, "clk get failed for ref_clk\n");
+ else
+ clk_set_rate(qphy->ref_clk, 19200000);
+
+ /* cfg_ahb_clk is only mandatory when listed in clock-names */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "cfg_ahb_clk") >= 0) {
+ qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+ if (IS_ERR(qphy->cfg_ahb_clk)) {
+ ret = PTR_ERR(qphy->cfg_ahb_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "clk get failed for cfg_ahb_clk ret %d\n", ret);
+ return ret;
+ }
+ }
+
+ qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+ if (IS_ERR(qphy->phy_reset))
+ return PTR_ERR(qphy->phy_reset);
+
+ qphy->emulation = of_property_read_bool(dev->of_node,
+ "qcom,emulation");
+
+ /* optional {value, offset} sequences; length must be even */
+ of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+ if (size) {
+ qphy->emu_init_seq = devm_kzalloc(dev,
+ size, GFP_KERNEL);
+ if (qphy->emu_init_seq) {
+ qphy->emu_init_seq_len =
+ (size / sizeof(*qphy->emu_init_seq));
+ if (qphy->emu_init_seq_len % 2) {
+ dev_err(dev, "invalid emu_init_seq_len\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,emu-init-seq",
+ qphy->emu_init_seq,
+ qphy->emu_init_seq_len);
+ } else {
+ dev_dbg(dev,
+ "error allocating memory for emu_init_seq\n");
+ }
+ }
+
+ size = 0;
+ of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+ if (size) {
+ qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+ size, GFP_KERNEL);
+ if (qphy->phy_pll_reset_seq) {
+ qphy->phy_pll_reset_seq_len =
+ (size / sizeof(*qphy->phy_pll_reset_seq));
+ if (qphy->phy_pll_reset_seq_len % 2) {
+ dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,phy-pll-reset-seq",
+ qphy->phy_pll_reset_seq,
+ qphy->phy_pll_reset_seq_len);
+ } else {
+ dev_dbg(dev,
+ "error allocating memory for phy_pll_reset_seq\n");
+ }
+ }
+
+ size = 0;
+ of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+ if (size) {
+ qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+ size, GFP_KERNEL);
+ if (qphy->emu_dcm_reset_seq) {
+ qphy->emu_dcm_reset_seq_len =
+ (size / sizeof(*qphy->emu_dcm_reset_seq));
+ if (qphy->emu_dcm_reset_seq_len % 2) {
+ dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,emu-dcm-reset-seq",
+ qphy->emu_dcm_reset_seq,
+ qphy->emu_dcm_reset_seq_len);
+ } else {
+ dev_dbg(dev,
+ "error allocating memory for emu_dcm_reset_seq\n");
+ }
+ }
+
+ size = 0;
+ of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+ if (size) {
+ qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+ size, GFP_KERNEL);
+ if (qphy->qusb_phy_init_seq) {
+ qphy->init_seq_len =
+ (size / sizeof(*qphy->qusb_phy_init_seq));
+ if (qphy->init_seq_len % 2) {
+ dev_err(dev, "invalid init_seq_len\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,qusb-phy-init-seq",
+ qphy->qusb_phy_init_seq,
+ qphy->init_seq_len);
+ } else {
+ dev_err(dev,
+ "error allocating memory for phy_init_seq\n");
+ }
+ }
+
+ qphy->host_init_seq_len = of_property_count_elems_of_size(dev->of_node,
+ "qcom,qusb-phy-host-init-seq",
+ sizeof(*qphy->qusb_phy_host_init_seq));
+ if (qphy->host_init_seq_len > 0) {
+ qphy->qusb_phy_host_init_seq = devm_kcalloc(dev,
+ qphy->host_init_seq_len,
+ sizeof(*qphy->qusb_phy_host_init_seq),
+ GFP_KERNEL);
+ if (qphy->qusb_phy_host_init_seq)
+ of_property_read_u32_array(dev->of_node,
+ "qcom,qusb-phy-host-init-seq",
+ qphy->qusb_phy_host_init_seq,
+ qphy->host_init_seq_len);
+ else
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+ (u32 *) qphy->vdd_levels,
+ ARRAY_SIZE(qphy->vdd_levels));
+ if (ret) {
+ dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+ return ret;
+ }
+
+ /* vdda33 levels default to 3P3 min/min/max when the DT omits them */
+ ret = of_property_read_u32_array(dev->of_node,
+ "qcom,vdda33-voltage-level",
+ (u32 *) qphy->vdda33_levels,
+ ARRAY_SIZE(qphy->vdda33_levels));
+ if (ret == -EINVAL) {
+ qphy->vdda33_levels[0] = QUSB2PHY_3P3_VOL_MIN;
+ qphy->vdda33_levels[1] = QUSB2PHY_3P3_VOL_MIN;
+ qphy->vdda33_levels[2] = QUSB2PHY_3P3_VOL_MAX;
+ } else if (ret) {
+ dev_err(dev, "error reading qcom,vdda33-voltage-level property\n");
+ return ret;
+ }
+
+ qphy->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(qphy->vdd)) {
+ dev_err(dev, "unable to get vdd supply\n");
+ return PTR_ERR(qphy->vdd);
+ }
+
+ qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+ if (IS_ERR(qphy->vdda33)) {
+ dev_err(dev, "unable to get vdda33 supply\n");
+ return PTR_ERR(qphy->vdda33);
+ }
+
+ qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+ if (IS_ERR(qphy->vdda18)) {
+ dev_err(dev, "unable to get vdda18 supply\n");
+ return PTR_ERR(qphy->vdda18);
+ }
+
+ qphy->vdda12 = devm_regulator_get(dev, "vdda12");
+ if (IS_ERR(qphy->vdda12)) {
+ dev_err(dev, "unable to get vdda12 supply\n");
+ return PTR_ERR(qphy->vdda12);
+ }
+
+ mutex_init(&qphy->lock);
+
+ platform_set_drvdata(pdev, qphy);
+
+ qphy->phy.label = "msm-qusb-phy-v2";
+ qphy->phy.init = qusb_phy_init;
+ qphy->phy.set_suspend = qusb_phy_set_suspend;
+ qphy->phy.shutdown = qusb_phy_shutdown;
+ qphy->phy.type = USB_PHY_TYPE_USB2;
+ qphy->phy.notify_connect = qusb_phy_notify_connect;
+ qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
+
+ ret = usb_add_phy_dev(&qphy->phy);
+ if (ret)
+ return ret;
+
+ ret = qusb_phy_regulator_init(qphy);
+ if (ret)
+ usb_remove_phy(&qphy->phy);
+
+ /*
+ * Assert clamp dig n to reduce leakage on 1p8 upon boot up
+ * (0x0 asserts the clamp, matching qusb_phy_set_suspend()).
+ * The mapping is optional, so guard against a NULL pointer.
+ */
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+ return ret;
+}
+
+/*
+ * Driver unbind: unregister the PHY, stop any still-running clocks
+ * (torn down in reverse order of enable) and drop the regulators.
+ */
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+ struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&qphy->phy);
+
+ if (qphy->clocks_enabled) {
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+ clk_disable_unprepare(qphy->ref_clk);
+ clk_disable_unprepare(qphy->ref_clk_src);
+ qphy->clocks_enabled = false;
+ }
+
+ qusb_phy_enable_power(qphy, false);
+
+ return 0;
+}
+
+/* Devicetree match table for the v2 PHY */
+static const struct of_device_id qusb_phy_id_table[] = {
+ { .compatible = "qcom,qusb2phy-v2", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+ .probe = qusb_phy_probe,
+ .remove = qusb_phy_remove,
+ .driver = {
+ .name = "msm-qusb-phy-v2",
+ .of_match_table = of_match_ptr(qusb_phy_id_table),
+ },
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY v2 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
new file mode 100644
index 000000000000..1b09c028d098
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -0,0 +1,1543 @@
+/*
+ * Copyright (c) 2014-2017,2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
+
+/* ---- QUSB2 PHY register map: offsets with their bit fields below ---- */
+
+#define QUSB2PHY_PLL_PWR_CTL 0x18
+#define REF_BUF_EN BIT(0)
+#define REXT_EN BIT(1)
+#define PLL_BYPASSNL BIT(2)
+#define REXT_TRIM_0 BIT(4)
+
+#define QUSB2PHY_PLL_AUTOPGM_CTL1 0x1C
+#define PLL_RESET_N_CNT_5 0x5
+#define PLL_RESET_N BIT(4)
+#define PLL_AUTOPGM_EN BIT(7)
+
+#define QUSB2PHY_PLL_STATUS 0x38
+#define QUSB2PHY_PLL_LOCK BIT(5)
+
+#define QUSB2PHY_PORT_QC1 0x70
+#define VDM_SRC_EN BIT(4)
+#define IDP_SRC_EN BIT(3)
+#define VDP_SRC_EN BIT(2)
+
+#define QUSB2PHY_PORT_QC2 0x74
+#define RDM_UP_EN BIT(1)
+#define RDP_UP_EN BIT(3)
+#define RPUM_LOW_EN BIT(4)
+#define RPUP_LOW_EN BIT(5)
+
+#define QUSB2PHY_PORT_POWERDOWN 0xB4
+#define CLAMP_N_EN BIT(5)
+#define FREEZIO_N BIT(1)
+#define POWER_DOWN BIT(0)
+
+#define QUSB2PHY_PORT_TEST_CTRL 0xB8
+
+#define QUSB2PHY_PWR_CTRL1 0x210
+#define PWR_CTRL1_CLAMP_N_EN BIT(1)
+#define PWR_CTRL1_POWR_DOWN BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0
+#define CORE_READY_STATUS BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL1 0xC0
+#define SUSPEND_N BIT(5)
+#define TERM_SELECT BIT(4)
+#define XCVR_SELECT_FS BIT(2)
+#define OP_MODE_NON_DRIVE BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL2 0xC4
+#define UTMI_ULPI_SEL BIT(7)
+#define UTMI_TEST_MUX_SEL BIT(6)
+
+#define QUSB2PHY_PLL_TEST 0x04
+#define CLK_REF_SEL BIT(7)
+
+#define QUSB2PHY_PORT_TUNE1 0x80
+#define QUSB2PHY_PORT_TUNE2 0x84
+#define QUSB2PHY_PORT_TUNE3 0x88
+#define QUSB2PHY_PORT_TUNE4 0x8C
+#define QUSB2PHY_PORT_TUNE5 0x90
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask) ((val >> pos) & mask)
+
+#define QUSB2PHY_PORT_INTR_CTRL 0xBC
+#define CHG_DET_INTR_EN BIT(4)
+#define DMSE_INTR_HIGH_SEL BIT(3)
+#define DMSE_INTR_EN BIT(2)
+#define DPSE_INTR_HIGH_SEL BIT(1)
+#define DPSE_INTR_EN BIT(0)
+
+#define QUSB2PHY_PORT_INT_STATUS 0xF0
+#define QUSB2PHY_PORT_UTMI_STATUS 0xF4
+#define LINESTATE_DP BIT(0)
+#define LINESTATE_DM BIT(1)
+
+
+/* ---- Regulator operating points ---- */
+
+#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */
+
+#define QUSB2PHY_REFCLK_ENABLE BIT(0)
+
+/*
+ * Debug overrides for the PORT_TUNE1..5 registers, writable at runtime
+ * via /sys/module parameters. A value of 0 means "no override".
+ * NOTE(review): these globals are not static — confirm no other
+ * translation unit references them before narrowing their linkage.
+ */
+unsigned int tune1;
+module_param(tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
+unsigned int tune2;
+module_param(tune2, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+
+unsigned int tune3;
+module_param(tune3, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+unsigned int tune4;
+module_param(tune4, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+unsigned int tune5;
+module_param(tune5, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
+/* Per-instance state for the (v1) QUSB2 PHY driver */
+struct qusb_phy {
+ struct usb_phy phy;
+ /* register mappings; tune2_efuse_reg/ref_clk_base/tcsr_clamp_dig_n
+ * are optional and may be NULL
+ */
+ void __iomem *base;
+ void __iomem *tune2_efuse_reg;
+ void __iomem *ref_clk_base;
+ void __iomem *tcsr_clamp_dig_n;
+
+ struct clk *ref_clk_src;
+ struct clk *ref_clk;
+ struct clk *cfg_ahb_clk;
+ struct reset_control *phy_reset;
+
+ struct regulator *vdd;
+ struct regulator *vdda33;
+ struct regulator *vdda18;
+ int vdd_levels[3]; /* none, low, high */
+ int init_seq_len;
+ int *qusb_phy_init_seq;
+ u32 major_rev;
+
+ /* fused TUNE2 override parameters */
+ u32 tune2_val;
+ int tune2_efuse_bit_pos;
+ int tune2_efuse_num_of_bits;
+ int tune2_efuse_correction;
+
+ /* cached on/off state to make enable/disable paths idempotent */
+ bool power_enabled;
+ bool clocks_enabled;
+ bool cable_connected;
+ bool suspended;
+ bool ulpi_mode;
+ bool rm_pulldown;
+ bool is_se_clk;
+
+ /* dpdm pseudo-regulator exposed to the charger driver */
+ struct regulator_desc dpdm_rdesc;
+ struct regulator_dev *dpdm_rdev;
+
+ bool dpdm_pulsing_enabled;
+ struct power_supply *dpdm_psy;
+ struct power_supply_desc dpdm_psy_desc;
+
+ /* emulation targets specific */
+ void __iomem *emu_phy_base;
+ bool emulation;
+ int *emu_init_seq;
+ int emu_init_seq_len;
+ int *phy_pll_reset_seq;
+ int phy_pll_reset_seq_len;
+ int *emu_dcm_reset_seq;
+ int emu_dcm_reset_seq_len;
+ bool put_into_high_z_state;
+ struct mutex phy_lock; /* serializes dpdm state changes */
+ spinlock_t pulse_lock;
+};
+
+/* properties published by the dpdm power supply */
+static enum power_supply_property dpdm_props[] = {
+ POWER_SUPPLY_PROP_DP_DM,
+};
+
+/*
+ * Turn the PHY clocks on or off, tracking state in qphy->clocks_enabled
+ * so redundant calls are no-ops.
+ *
+ * Fix: the disable path previously ran ref_clk -> ref_clk_src ->
+ * cfg_ahb_clk, which is neither the enable order nor its reverse; clocks
+ * are now torn down in strict reverse order of bring-up (matching the
+ * teardown in the v2 driver's remove path).
+ */
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+ dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+ __func__, qphy->clocks_enabled, on);
+
+ if (!qphy->clocks_enabled && on) {
+ clk_prepare_enable(qphy->ref_clk_src);
+ clk_prepare_enable(qphy->ref_clk);
+ clk_prepare_enable(qphy->cfg_ahb_clk);
+ qphy->clocks_enabled = true;
+ }
+
+ if (qphy->clocks_enabled && !on) {
+ /* reverse order of enable */
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+ clk_disable_unprepare(qphy->ref_clk);
+ clk_disable_unprepare(qphy->ref_clk_src);
+ qphy->clocks_enabled = false;
+ }
+
+ dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+ qphy->clocks_enabled);
+}
+
+/*
+ * Program the vdd corner: @high selects vdd_levels[1] (low operating
+ * level) as the floor, otherwise vdd_levels[0] (none); the ceiling is
+ * always vdd_levels[2]. Returns 0 or the regulator core's error.
+ */
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+ int floor = high ? 1 : 0;
+ int ret;
+
+ ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[floor],
+ qphy->vdd_levels[2]);
+ if (ret != 0) {
+ dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+ return ret;
+ }
+
+ dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+ qphy->vdd_levels[floor], qphy->vdd_levels[2]);
+ return 0;
+}
+
+/*
+ * Bring the PHY regulators (vdd, vdda18, vdda33) up or down as a unit.
+ *
+ * Rails come up in order vdd -> vdda18 -> vdda33, each with its HPM load
+ * and voltage set first. The labels below form a single unwind ladder in
+ * reverse acquisition order: on an enable failure we fall in part-way
+ * down; a disable request (!on) jumps to the top label and walks the
+ * whole ladder. Idempotent via qphy->power_enabled. Note that on the
+ * error/disable path @ret is overwritten by each cleanup call, so the
+ * original failure code is not preserved in the return value.
+ */
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+ int ret = 0;
+
+ dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
+ __func__, on ? "on" : "off", qphy->power_enabled);
+
+ if (qphy->power_enabled == on) {
+ dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
+ return 0;
+ }
+
+ if (!on)
+ goto disable_vdda33;
+
+ ret = qusb_phy_config_vdd(qphy, true);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+ ret);
+ goto err_vdd;
+ }
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+ goto unconfig_vdd;
+ }
+
+ ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+ if (ret < 0) {
+ dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+ goto disable_vdd;
+ }
+
+ ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+ QUSB2PHY_1P8_VOL_MAX);
+ if (ret) {
+ dev_err(qphy->phy.dev,
+ "Unable to set voltage for vdda18:%d\n", ret);
+ goto put_vdda18_lpm;
+ }
+
+ ret = regulator_enable(qphy->vdda18);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+ goto unset_vdda18;
+ }
+
+ ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+ if (ret < 0) {
+ dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+ goto disable_vdda18;
+ }
+
+ ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+ QUSB2PHY_3P3_VOL_MAX);
+ if (ret) {
+ dev_err(qphy->phy.dev,
+ "Unable to set voltage for vdda33:%d\n", ret);
+ goto put_vdda33_lpm;
+ }
+
+ ret = regulator_enable(qphy->vdda33);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+ goto unset_vdd33;
+ }
+
+ qphy->power_enabled = true;
+
+ pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+ return ret;
+
+/* unwind ladder: entered at the top for a disable request, or part-way
+ * down when an enable step above fails
+ */
+disable_vdda33:
+ ret = regulator_disable(qphy->vdda33);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+ ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+ if (ret)
+ dev_err(qphy->phy.dev,
+ "Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+ ret = regulator_set_load(qphy->vdda33, 0);
+ if (ret < 0)
+ dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+ ret = regulator_disable(qphy->vdda18);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+ ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+ if (ret)
+ dev_err(qphy->phy.dev,
+ "Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+ ret = regulator_set_load(qphy->vdda18, 0);
+ if (ret < 0)
+ dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+ ret = regulator_disable(qphy->vdd);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+ ret);
+
+unconfig_vdd:
+ ret = qusb_phy_config_vdd(qphy, false);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+ ret);
+err_vdd:
+ qphy->power_enabled = false;
+ dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+ return ret;
+}
+
+#define PHY_PULSE_TIME_USEC 250
+/*
+ * Drive D+/D- line changes requested through the "dpdm" power supply /
+ * regulator interface (charger detection and HVDCP pulsing support).
+ *
+ * @phy:   usb phy pointer.
+ * @value: one of the POWER_SUPPLY_DP_DM_* requests.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret = 0;
+	unsigned long flags;
+	u32 reg;
+
+	dev_dbg(phy->dev, "%s value:%d rm_pulldown:%d\n",
+				__func__, value, qphy->rm_pulldown);
+
+	switch (value) {
+	case POWER_SUPPLY_DP_DM_DPF_DMF:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
+		mutex_lock(&qphy->phy_lock);
+		if (!qphy->rm_pulldown) {
+			ret = qusb_phy_enable_power(qphy, true);
+			if (ret >= 0) {
+				qphy->rm_pulldown = true;
+				dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+
+			/*
+			 * NOTE(review): the high-z sequence below still runs
+			 * even if qusb_phy_enable_power() failed above —
+			 * presumably intentional best-effort; confirm.
+			 */
+			if (qphy->put_into_high_z_state) {
+				if (qphy->tcsr_clamp_dig_n)
+					writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
+
+				qusb_phy_enable_clocks(qphy, true);
+
+				dev_dbg(phy->dev, "RESET QUSB PHY\n");
+				ret = reset_control_assert(qphy->phy_reset);
+				if (ret)
+					dev_err(phy->dev, "phyassert failed\n");
+				usleep_range(100, 150);
+				ret = reset_control_deassert(qphy->phy_reset);
+				if (ret)
+					dev_err(phy->dev, "deassert failed\n");
+
+				/*
+				 * Phy in non-driving mode leaves Dp and Dm
+				 * lines in high-Z state. Controller power
+				 * collapse is not switching phy to non-driving
+				 * mode causing charger detection failure. Bring
+				 * phy to non-driving mode by overriding
+				 * controller output via UTMI interface.
+				 */
+				writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+					OP_MODE_NON_DRIVE,
+					qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+				writel_relaxed(UTMI_ULPI_SEL |
+					UTMI_TEST_MUX_SEL,
+					qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+				/* Disable PHY */
+				writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+					POWER_DOWN,
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+				/* Make sure that above write is completed */
+				wmb();
+
+				if (qphy->suspended)
+					qusb_phy_enable_clocks(qphy, false);
+			}
+		}
+
+		/* Clear QC1 and QC2 registers when rm_pulldown = 1 */
+		if (qphy->dpdm_pulsing_enabled && qphy->rm_pulldown) {
+			dev_dbg(phy->dev, "clearing qc1 and qc2 registers.\n");
+			ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+			if (ret) {
+				/* do not leak phy_lock on the error path */
+				mutex_unlock(&qphy->phy_lock);
+				goto clk_error;
+			}
+
+			/* Clear qc1 and qc2 registers */
+			writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC1);
+			writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC2);
+			/* to make sure above write goes through */
+			mb();
+			clk_disable_unprepare(qphy->cfg_ahb_clk);
+		}
+		mutex_unlock(&qphy->phy_lock);
+
+		break;
+
+	case POWER_SUPPLY_DP_DM_DPR_DMR:
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
+		mutex_lock(&qphy->phy_lock);
+		if (qphy->rm_pulldown) {
+			dev_dbg(phy->dev, "clearing qc1 and qc2 registers.\n");
+			if (qphy->dpdm_pulsing_enabled) {
+				ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+				if (ret) {
+					/* avoid exiting with phy_lock held */
+					mutex_unlock(&qphy->phy_lock);
+					goto clk_error;
+				}
+
+				/* Clear qc1 and qc2 registers */
+				writel_relaxed(0x00,
+						qphy->base + QUSB2PHY_PORT_QC1);
+				writel_relaxed(0x00,
+						qphy->base + QUSB2PHY_PORT_QC2);
+				/* to make sure above write goes through */
+				mb();
+				clk_disable_unprepare(qphy->cfg_ahb_clk);
+			}
+
+			if (!qphy->cable_connected) {
+				if (qphy->tcsr_clamp_dig_n)
+					writel_relaxed(0x0,
+						qphy->tcsr_clamp_dig_n);
+				dev_dbg(phy->dev, "turn off for HVDCP case\n");
+				ret = qusb_phy_enable_power(qphy, false);
+			}
+			if (ret >= 0) {
+				qphy->rm_pulldown = false;
+				dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
+						qphy->rm_pulldown);
+			}
+		}
+		mutex_unlock(&qphy->phy_lock);
+		break;
+
+	case POWER_SUPPLY_DP_DM_DP0P6_DMF:
+		if (!qphy->dpdm_pulsing_enabled)
+			break;
+
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DP0P6_DMF\n");
+		ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (ret)
+			goto clk_error;
+
+		/* Set DP to 0.6v and DM to High Z state */
+		writel_relaxed(VDP_SRC_EN, qphy->base + QUSB2PHY_PORT_QC1);
+		/* complete above write */
+		mb();
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		break;
+
+	case POWER_SUPPLY_DP_DM_DP0P6_DM3P3:
+		if (!qphy->dpdm_pulsing_enabled)
+			break;
+
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DP0PHVDCP_36_DM3P3\n");
+		ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (ret)
+			goto clk_error;
+
+		/* Set DP to 0.6v */
+		writel_relaxed(VDP_SRC_EN, qphy->base + QUSB2PHY_PORT_QC1);
+		/* Set DM to 3.075v */
+		writel_relaxed(RPUM_LOW_EN | RDM_UP_EN,
+				qphy->base + QUSB2PHY_PORT_QC2);
+		/* complete above write */
+		mb();
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		break;
+
+	case POWER_SUPPLY_DP_DM_DP_PULSE:
+		if (!qphy->dpdm_pulsing_enabled)
+			break;
+
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DP_PULSE\n");
+		ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (ret)
+			goto clk_error;
+
+		spin_lock_irqsave(&qphy->pulse_lock, flags);
+		/*Set DP to 3.075v, sleep for .25 ms */
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC2);
+		reg |= (RDP_UP_EN | RPUP_LOW_EN);
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC2);
+
+		/* complete above write */
+		mb();
+
+		/*
+		 * It is recommended to wait here to get voltage change on
+		 * DP/DM line.
+		 */
+		udelay(PHY_PULSE_TIME_USEC);
+
+		/* Set DP to 0.6v, sleep 2-3ms */
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC1);
+		reg |= VDP_SRC_EN;
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC1);
+
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC2);
+		reg &= ~(RDP_UP_EN | RPUP_LOW_EN);
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC2);
+		/* complete above write */
+		mb();
+		spin_unlock_irqrestore(&qphy->pulse_lock, flags);
+		/*
+		 * It is recommended to wait here to get voltage change on
+		 * DP/DM line.
+		 */
+		usleep_range(2000, 3000);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		break;
+
+	case POWER_SUPPLY_DP_DM_DM_PULSE:
+		if (!qphy->dpdm_pulsing_enabled)
+			break;
+
+		dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DM_PULSE\n");
+		ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (ret)
+			goto clk_error;
+
+		spin_lock_irqsave(&qphy->pulse_lock, flags);
+		/* Set DM to 0.6v, sleep .25 ms */
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC1);
+		reg |= VDM_SRC_EN;
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC1);
+
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC2);
+		reg &= ~(RDM_UP_EN | RPUM_LOW_EN);
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC2);
+
+		/* complete above write */
+		mb();
+
+		/*
+		 * It is recommended to wait here to get voltage change on
+		 * DP/DM line.
+		 */
+		udelay(PHY_PULSE_TIME_USEC);
+
+		/* DM to 3.075v, sleep 2-3ms */
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC2);
+		reg |= (RPUM_LOW_EN | RDM_UP_EN);
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC2);
+
+		reg = readl_relaxed(qphy->base + QUSB2PHY_PORT_QC1);
+		reg &= ~VDM_SRC_EN;
+		writel_relaxed(reg, qphy->base + QUSB2PHY_PORT_QC1);
+
+		/* complete above write */
+		mb();
+		spin_unlock_irqrestore(&qphy->pulse_lock, flags);
+
+		/*
+		 * It is recommended to wait here to get voltage change on
+		 * DP/DM line.
+		 */
+		usleep_range(2000, 3000);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(phy->dev, "Invalid power supply property(%d)\n", value);
+		break;
+	}
+
+clk_error:
+	return ret;
+}
+
+/* The dpdm power supply exposes no readable properties; always -EINVAL. */
+static int qusb_phy_get_property_usb(struct power_supply *psy,
+			enum power_supply_property psp,
+			union power_supply_propval *val)
+{
+	return -EINVAL;
+}
+
+/*
+ * power_supply set_property hook for the dpdm supply; only
+ * POWER_SUPPLY_PROP_DP_DM is supported and is forwarded to
+ * qusb_phy_update_dpdm().
+ */
+static int qusb_phy_set_property_usb(struct power_supply *psy,
+				enum power_supply_property prop,
+				const union power_supply_propval *val)
+{
+	struct qusb_phy *qphy = power_supply_get_drvdata(psy);
+	int ret;
+
+	/* reject everything except the DP/DM control property */
+	if (prop != POWER_SUPPLY_PROP_DP_DM)
+		return -EINVAL;
+
+	ret = qusb_phy_update_dpdm(&qphy->phy, val->intval);
+	if (ret)
+		dev_dbg(qphy->phy.dev, "error in dpdm update: %d\n",
+						ret);
+
+	return ret;
+}
+
+/*
+ * Derive the TUNE2 register value from the eFUSE-trimmed high nibble,
+ * optionally applying a DT-provided correction, and cache the result in
+ * qphy->tune2_val (consumed once by qusb_phy_init()).
+ * NOTE(review): TUNE2_HIGH_NIBBLE_VAL is defined outside this view —
+ * presumed to extract bit_mask bits at tune2_efuse_bit_pos; confirm.
+ */
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+	u8 num_of_bits;
+	u32 bit_mask = 1;
+	u8 reg_val;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->tune2_efuse_num_of_bits,
+				qphy->tune2_efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	if (qphy->tune2_efuse_num_of_bits) {
+		num_of_bits = qphy->tune2_efuse_num_of_bits;
+		bit_mask = (bit_mask << num_of_bits) - 1;
+	}
+
+	/*
+	 * Read EFUSE register having TUNE2 parameter's high nibble.
+	 * If efuse register shows value as 0x0, then use previous value
+	 * as it is. Otherwise use efuse register based value for this purpose.
+	 */
+	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
+				__func__, bit_mask, qphy->tune2_val);
+
+	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+				qphy->tune2_efuse_bit_pos, bit_mask);
+
+	/* Update higher nibble of TUNE2 value for better rise/fall times */
+	if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+		if (qphy->tune2_efuse_correction > 5 ||
+				qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range : %d\n",
+					qphy->tune2_efuse_correction);
+		else
+			qphy->tune2_val = qphy->tune2_val +
+						qphy->tune2_efuse_correction;
+	}
+
+	/* merge the efuse nibble into the upper half of the current TUNE2 */
+	reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+	if (qphy->tune2_val) {
+		reg_val  &= 0x0f;
+		reg_val |= (qphy->tune2_val << 4);
+	}
+
+	qphy->tune2_val = reg_val;
+}
+
+/*
+ * Play back a (value, register-offset) pair sequence into the PHY's
+ * register space, optionally sleeping between writes. cnt is the total
+ * number of u32 entries (pairs * 2).
+ */
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int idx = 0;
+
+	pr_debug("Seq count:%d\n", cnt);
+	while (idx < cnt) {
+		u32 val = seq[idx];
+		u32 off = seq[idx + 1];
+
+		pr_debug("write 0x%02x to 0x%02x\n", val, off);
+		writel_relaxed(val, base + off);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+		idx += 2;
+	}
+}
+
+/*
+ * Full PHY bring-up: enable regulators and clocks, reset the PHY, play
+ * the DT-provided init sequence, program TUNE1-5 (efuse and/or module
+ * params), re-enable the PHY and verify PLL lock.
+ * Returns 0 on success, -ETIMEDOUT if the PLL fails to lock, or the
+ * error from qusb_phy_enable_power(). The register write ordering and
+ * the wmb()/delay pairs below follow the hardware programming sequence;
+ * do not reorder.
+ */
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret, reset_val = 0;
+	u8 reg;
+	bool pll_lock_fail = false;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	ret = qusb_phy_enable_power(qphy, true);
+	if (ret)
+		return ret;
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/*
+	 * ref clock is enabled by default after power on reset. Linux clock
+	 * driver will disable this clock as part of late init if peripheral
+	 * driver(s) does not explicitly votes for it. Linux clock driver also
+	 * does not disable the clock until late init even if peripheral
+	 * driver explicitly requests it and cannot defer the probe until late
+	 * init. Hence, Explicitly disable the clock using register write to
+	 * allow QUSB PHY PLL to lock properly.
+	 */
+	if (qphy->ref_clk_base) {
+		writel_relaxed((readl_relaxed(qphy->ref_clk_base) &
+				~QUSB2PHY_REFCLK_ENABLE),
+				qphy->ref_clk_base);
+		/* Make sure that above write complete to get ref clk OFF */
+		wmb();
+	}
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	/* Emulation (RUMI) platforms use their own init/reset sequences */
+	if (qphy->emulation) {
+		if (qphy->emu_init_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+				qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
+
+		if (qphy->qusb_phy_init_seq)
+			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+					qphy->init_seq_len, 0);
+
+		/* Wait for 5ms as per QUSB2 RUMI sequence */
+		usleep_range(5000, 7000);
+
+		if (qphy->phy_pll_reset_seq)
+			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+					qphy->phy_pll_reset_seq_len, 10000);
+
+		if (qphy->emu_dcm_reset_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+					qphy->emu_dcm_reset_seq,
+					qphy->emu_dcm_reset_seq_len, 10000);
+
+		return 0;
+	}
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* configure for ULPI mode if requested */
+	if (qphy->ulpi_mode)
+		writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	/* save reset value to override based on clk scheme */
+	if (qphy->ref_clk_base)
+		reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+
+	/*
+	 * Check for EFUSE value only if tune2_efuse_reg is available
+	 * and try to read EFUSE value only once i.e. not every USB
+	 * cable connect case.
+	 */
+	if (qphy->tune2_efuse_reg && !tune2) {
+		if (!qphy->tune2_val)
+			qusb_phy_get_tune2_param(qphy);
+
+		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+				qphy->tune2_val);
+		writel_relaxed(qphy->tune2_val,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* If tune modparam set, override tune value */
+
+	pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
+		__func__, tune1, tune2, tune3, tune4, tune5);
+	if (tune1)
+		writel_relaxed(tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+
+	if (tune2)
+		writel_relaxed(tune2,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+
+	if (tune3)
+		writel_relaxed(tune3,
+				qphy->base + QUSB2PHY_PORT_TUNE3);
+
+	if (tune4)
+		writel_relaxed(tune4,
+				qphy->base + QUSB2PHY_PORT_TUNE4);
+
+	if (tune5)
+		writel_relaxed(tune5,
+				qphy->base + QUSB2PHY_PORT_TUNE5);
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+				~PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Require to get phy pll lock successfully */
+	usleep_range(150, 160);
+
+	/* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */
+	if (qphy->ref_clk_base) {
+		if (!qphy->is_se_clk) {
+			reset_val &= ~CLK_REF_SEL;
+			writel_relaxed((readl_relaxed(qphy->ref_clk_base) |
+					QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		} else {
+			reset_val |= CLK_REF_SEL;
+			writel_relaxed(reset_val,
+					qphy->base + QUSB2PHY_PLL_TEST);
+		}
+
+		/* Make sure above write is completed to get PLL source clock */
+		wmb();
+
+		/* Required to get PHY PLL lock successfully */
+		usleep_range(50000, 51000);
+	}
+
+	/* PLL-lock status lives in different registers per PHY revision */
+	if (qphy->major_rev < 2) {
+		reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg);
+		if (!(reg & QUSB2PHY_PLL_LOCK))
+			pll_lock_fail = true;
+	} else {
+		reg = readb_relaxed(qphy->base +
+				QUSB2PHY_PLL_COMMON_STATUS_ONE);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+		if (!(reg & CORE_READY_STATUS))
+			pll_lock_fail = true;
+	}
+
+	if (pll_lock_fail) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Power down the PHY core: briefly enable clocks so the register write
+ * lands, assert the revision-appropriate power-down bits, then gate the
+ * clocks again. Regulators are left untouched here.
+ */
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+	/* ensure the power-down write reaches the PHY before gating clocks */
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+
+/**
+ * Returns DP/DM linestate with Idp_src enabled to detect if lines are floating
+ *
+ * @phy - usb phy pointer.
+ *
+ * Temporarily places the PHY into a charger-detection test mode, samples
+ * the interrupt status, then restores default register values.
+ */
+static int qusb_phy_linestate_with_idp_src(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u8 int_status, ret;
+
+	/* Disable/powerdown the PHY */
+	writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+			qphy->base + QUSB2PHY_PORT_POWERDOWN);
+
+	/* Put PHY in non-driving mode */
+	writel_relaxed(TERM_SELECT | XCVR_SELECT_FS | OP_MODE_NON_DRIVE |
+		SUSPEND_N,	qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+
+	/* Switch PHY to utmi register mode */
+	writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
+			qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	writel_relaxed(PLL_RESET_N_CNT_5,
+			qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+
+	/* Enable PHY */
+	writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+			qphy->base + QUSB2PHY_PORT_POWERDOWN);
+
+	writel_relaxed(REF_BUF_EN | REXT_EN | PLL_BYPASSNL | REXT_TRIM_0,
+			qphy->base + QUSB2PHY_PLL_PWR_CTL);
+
+	usleep_range(5, 1000);
+
+	writel_relaxed(PLL_RESET_N | PLL_RESET_N_CNT_5,
+			qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+	usleep_range(50, 1000);
+
+	writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC1);
+	writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC2);
+
+	/* Enable all chg_det events from PHY */
+	writel_relaxed(0x1F, qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+	/* Enable Idp_src */
+	writel_relaxed(IDP_SRC_EN, qphy->base + QUSB2PHY_PORT_QC1);
+
+	/* allow the current source to settle before sampling the lines */
+	usleep_range(1000, 2000);
+	int_status = readl_relaxed(qphy->base + QUSB2PHY_PORT_INT_STATUS);
+
+	/* Exit chg_det mode, set PHY regs to default values */
+	writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+			qphy->base + QUSB2PHY_PORT_POWERDOWN); /* 23 */
+
+	writel_relaxed(PLL_AUTOPGM_EN | PLL_RESET_N | PLL_RESET_N_CNT_5,
+			qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+
+	writel_relaxed(UTMI_ULPI_SEL, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	writel_relaxed(TERM_SELECT, qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+
+	writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+			qphy->base + QUSB2PHY_PORT_POWERDOWN);
+
+	/* keep only the DP (bit0) and DM (bit2) status bits */
+	int_status = int_status & 0x5;
+
+	/*
+	 * int_status's Bit(0) is DP and Bit(2) is DM.
+	 * Caller expects bit(1) as DP and bit(0) DM i.e. usual linestate format
+	 */
+	ret = (int_status >> 2) | ((int_status & 0x1) << 1);
+	pr_debug("%s: int_status:%x, dpdm:%x\n", __func__, int_status, ret);
+
+	return ret;
+}
+
+/**
+ * Performs QUSB2 PHY suspend/resume functionality.
+ *
+ * @phy - usb phy pointer.
+ * @suspend - to enable suspend or not. 1 - suspend, 0 - resume
+ *
+ * Bus suspend (cable connected / host mode) keeps the PHY powered and
+ * arms D+/D- wakeup interrupts; the disconnect path fully powers down
+ * the PHY. Always returns 0.
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+
+	if (qphy->suspended && suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			/* Clear all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			linestate = readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_UTMI_STATUS);
+
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			/* non-SE0 linestate: arm the PHY's auto-resume logic */
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+				/* enable phy auto-resume */
+				writel_relaxed(0x0C,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x04,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+			}
+
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Makes sure that above write goes through */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Disconnect case */
+			mutex_lock(&qphy->phy_lock);
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
+			/* Do not disable power rails if there is vote for it */
+			if (!qphy->rm_pulldown)
+				qusb_phy_enable_power(qphy, false);
+			else
+				dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+			mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state to true so next USB
+			 * cable connect, DPF_DMF request performs PHY
+			 * reset and put it into high-z state. For bootup
+			 * with or without USB cable, it doesn't require
+			 * to put QUSB PHY into high-z state.
+			 */
+			qphy->put_into_high_z_state = true;
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			qusb_phy_enable_clocks(qphy, true);
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+		} else {
+			/* resume-from-disconnect: restore power then clamp */
+			qusb_phy_enable_power(qphy, true);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+/* Record cable attach; consumed by the suspend/dpdm paths above. */
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+/* Record cable detach; consumed by the suspend/dpdm paths above. */
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	/* fixed copy-paste: this is the disconnect notification */
+	dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+/* Regulator enable hook: maps to a DPF_DMF (float D+/D-) request. */
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPF_DMF);
+}
+
+/* Regulator disable hook: maps to a DPR_DMR (restore pulldowns) request. */
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	return qusb_phy_update_dpdm(&qphy->phy, POWER_SUPPLY_DP_DM_DPR_DMR);
+}
+
+/* rm_pulldown doubles as the regulator's enabled state. */
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+					qphy->rm_pulldown);
+	return qphy->rm_pulldown;
+}
+
+/* Ops table is never modified at runtime; const lets it live in .rodata
+ * (struct regulator_desc.ops takes a const pointer).
+ */
+static const struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
+/*
+ * Register the "dpdm" regulator so client drivers can request D+/D-
+ * pulldown removal via the regulator framework. The regulator name is
+ * taken from the device-tree node name. Returns 0 or a negative errno.
+ */
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	/* only on/off control is exposed; no voltage operations */
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	if (IS_ERR(qphy->dpdm_rdev))
+		return PTR_ERR(qphy->dpdm_rdev);
+
+	return 0;
+}
+
+/*
+ * Probe: map register regions, acquire clocks/resets/regulators, parse
+ * DT init sequences and tuning data, then register the usb_phy, the
+ * optional "dpdm" power supply and the dpdm regulator.
+ *
+ * Fixes vs. previous revision: devm_ioremap_nocache() returns NULL on
+ * failure (never an ERR_PTR), so the former IS_ERR() checks on
+ * ref_clk_base and tcsr_clamp_dig_n were dead code and mapping failures
+ * went undetected; they are now real NULL checks.
+ */
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+	const char *phy_type;
+	bool hold_phy_reset;
+	struct power_supply_config dpdm_cfg = {};
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	/* optional: only present on emulation platforms */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"emu_phy_base");
+	if (res) {
+		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->emu_phy_base)) {
+			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+			qphy->emu_phy_base = NULL;
+		}
+	}
+
+	/* optional: eFUSE-based TUNE2 trimming */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"tune2_efuse_addr");
+	if (res) {
+		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-bit-pos",
+					&qphy->tune2_efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-num-bits",
+						&qphy->tune2_efuse_num_of_bits);
+			}
+			of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-correction",
+					&qphy->tune2_efuse_correction);
+
+			if (ret) {
+				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* optional: direct control over the PHY reference clock enable */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ref_clk_addr");
+	if (res) {
+		qphy->ref_clk_base = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		/* devm_ioremap_nocache() returns NULL on failure */
+		if (!qphy->ref_clk_base) {
+			dev_dbg(dev, "ref_clk_address is not available.\n");
+			return -ENOMEM;
+		}
+
+		ret = of_property_read_string(dev->of_node,
+					"qcom,phy-clk-scheme", &phy_type);
+		if (ret) {
+			dev_err(dev, "error need qusb_phy_clk_scheme.\n");
+			return ret;
+		}
+
+		if (!strcasecmp(phy_type, "cml")) {
+			qphy->is_se_clk = false;
+		} else if (!strcasecmp(phy_type, "cmos")) {
+			qphy->is_se_clk = true;
+		} else {
+			dev_err(dev, "error invalid qusb_phy_clk_scheme\n");
+			return -EINVAL;
+		}
+	}
+
+	/* optional: TCSR digital clamp control */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		/* NULL (not ERR_PTR) indicates mapping failure; treat clamp
+		 * as unavailable in that case.
+		 */
+		if (!qphy->tcsr_clamp_dig_n)
+			dev_err(dev, "err reading tcsr_clamp_dig_n\n");
+	}
+
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(qphy->ref_clk))
+		dev_dbg(dev, "clk get failed for ref_clk\n");
+	else
+		clk_set_rate(qphy->ref_clk, 19200000);
+
+	qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+	if (IS_ERR(qphy->cfg_ahb_clk))
+		return PTR_ERR(qphy->cfg_ahb_clk);
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->emulation = of_property_read_bool(dev->of_node,
+					"qcom,emulation");
+
+	/* DT sequences are (value, offset) pairs, hence even lengths */
+	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+	if (size) {
+		qphy->emu_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_init_seq) {
+			qphy->emu_init_seq_len =
+				(size / sizeof(*qphy->emu_init_seq));
+			if (qphy->emu_init_seq_len % 2) {
+				dev_err(dev, "invalid emu_init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-init-seq",
+				qphy->emu_init_seq,
+				qphy->emu_init_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_init_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+	if (size) {
+		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->phy_pll_reset_seq) {
+			qphy->phy_pll_reset_seq_len =
+				(size / sizeof(*qphy->phy_pll_reset_seq));
+			if (qphy->phy_pll_reset_seq_len % 2) {
+				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,phy-pll-reset-seq",
+				qphy->phy_pll_reset_seq,
+				qphy->phy_pll_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for phy_pll_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+	if (size) {
+		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_dcm_reset_seq) {
+			qphy->emu_dcm_reset_seq_len =
+				(size / sizeof(*qphy->emu_dcm_reset_seq));
+			if (qphy->emu_dcm_reset_seq_len % 2) {
+				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-dcm-reset-seq",
+				qphy->emu_dcm_reset_seq,
+				qphy->emu_dcm_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_dcm_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev, "error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->ulpi_mode = false;
+	ret = of_property_read_string(dev->of_node, "phy_type", &phy_type);
+
+	if (!ret) {
+		if (!strcasecmp(phy_type, "ulpi"))
+			qphy->ulpi_mode = true;
+	} else {
+		dev_err(dev, "error reading phy_type property\n");
+		return ret;
+	}
+
+	hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset");
+
+	/* use default major revision as 2 */
+	qphy->major_rev = 2;
+	ret = of_property_read_u32(dev->of_node, "qcom,major-rev",
+						&qphy->major_rev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	mutex_init(&qphy->phy_lock);
+	spin_lock_init(&qphy->pulse_lock);
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+	qphy->phy.dpdm_with_idp_src	= qusb_phy_linestate_with_idp_src;
+
+	/*
+	 * On some platforms multiple QUSB PHYs are available. If QUSB PHY is
+	 * not used, there is leakage current seen with QUSB PHY related voltage
+	 * rail. Hence keep QUSB PHY into reset state explicitly here.
+	 */
+	if (hold_phy_reset) {
+		ret = reset_control_assert(qphy->phy_reset);
+		if (ret)
+			dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+	}
+
+	qphy->dpdm_pulsing_enabled = of_property_read_bool(dev->of_node,
+					"qcom,enable-dpdm-pulsing");
+
+	/* HVDCP pulsing support needs the "dpdm" power supply interface */
+	if (qphy->dpdm_pulsing_enabled) {
+		qphy->dpdm_psy_desc.name = "dpdm";
+		qphy->dpdm_psy_desc.type = POWER_SUPPLY_TYPE_USB;
+		qphy->dpdm_psy_desc.properties = dpdm_props;
+		qphy->dpdm_psy_desc.num_properties = ARRAY_SIZE(dpdm_props);
+		qphy->dpdm_psy_desc.set_property = qusb_phy_set_property_usb;
+		qphy->dpdm_psy_desc.get_property = qusb_phy_get_property_usb;
+
+		dpdm_cfg.drv_data = qphy;
+		dpdm_cfg.of_node = dev->of_node;
+		qphy->dpdm_psy = power_supply_register(&pdev->dev,
+					&qphy->dpdm_psy_desc, &dpdm_cfg);
+		if (IS_ERR(qphy->dpdm_psy)) {
+			dev_err(&pdev->dev, "%s:dpdm power_supply_register failed\n",
+					__func__);
+			return PTR_ERR(qphy->dpdm_psy);
+		}
+	}
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		goto unregister_psy;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		goto remove_phy;
+
+	/* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+	return ret;
+
+remove_phy:
+	usb_remove_phy(&qphy->phy);
+unregister_psy:
+	if (qphy->dpdm_psy)
+		power_supply_unregister(qphy->dpdm_psy);
+
+	return ret;
+}
+
+/*
+ * Remove: tear down in reverse of probe — unregister the power supply
+ * and usb_phy, gate any still-running clocks, then drop the regulators.
+ * devm-managed resources are released by the driver core.
+ */
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	if (qphy->dpdm_psy)
+		power_supply_unregister(qphy->dpdm_psy);
+	usb_remove_phy(&qphy->phy);
+
+	if (qphy->clocks_enabled) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		qphy->clocks_enabled = false;
+	}
+
+	qusb_phy_enable_power(qphy, false);
+
+	return 0;
+}
+
+/* Device-tree match table and platform driver registration */
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
new file mode 100644
index 000000000000..8cfbb1c100fe
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
+
+enum ldo_levels {
+ VOLTAGE_LEVEL_NONE = 0,
+ VOLTAGE_LEVEL_MIN,
+ VOLTAGE_LEVEL_MAX,
+};
+
+#define INIT_MAX_TIME_USEC 1000
+
+/* default CORE voltage and load values */
+#define USB_SSPHY_1P2_VOL_MIN 1200000 /* uV */
+#define USB_SSPHY_1P2_VOL_MAX 1200000 /* uV */
+#define USB_SSPHY_HPM_LOAD 23000 /* uA */
+
+#define USB_SSPHY_LOAD_DEFAULT -1
+
+/* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+
+/* PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+
+/* 0 - selects Lane A. 1 - selects Lane B */
+#define SW_PORTSELECT BIT(0)
+/* port select mux: 1 - sw control. 0 - HW control*/
+#define SW_PORTSELECT_MX BIT(1)
+
+enum qmp_phy_rev_reg {
+ USB3_PHY_PCS_STATUS,
+ USB3_PHY_AUTONOMOUS_MODE_CTRL,
+ USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
+ USB3_PHY_POWER_DOWN_CONTROL,
+ USB3_PHY_SW_RESET,
+ USB3_PHY_START,
+ USB3_PHY_PCS_MISC_TYPEC_CTRL,
+ USB3_PHY_REG_MAX,
+};
+
+/* reg values to write: tables are terminated by an entry with offset == -1 */
+struct qmp_reg_val {
+	u32 offset;	/* register offset from the QMP PHY base */
+	u32 val;	/* value written to that register */
+	u32 delay;	/* optional post-write delay in usec (0 = none) */
+};
+
+struct msm_ssphy_qmp {
+ struct usb_phy phy;
+ void __iomem *base;
+ void __iomem *vls_clamp_reg;
+ void __iomem *tcsr_usb3_dp_phymode;
+
+ struct regulator *vdd;
+ int vdd_levels[3]; /* none, low, high */
+ struct regulator *core_ldo;
+ int core_voltage_levels[3];
+ struct regulator *fpc_redrive_ldo;
+ int redrive_voltage_levels[3];
+ int redrive_load;
+ struct clk *ref_clk_src;
+ struct clk *ref_clk;
+ struct clk *aux_clk;
+ struct clk *cfg_ahb_clk;
+ struct clk *pipe_clk;
+ bool power_enabled;
+ struct reset_control *phy_reset;
+ struct reset_control *phy_phy_reset;
+
+ bool clk_enabled;
+ bool cable_connected;
+ bool in_suspend;
+ bool emulation;
+ unsigned int *phy_reg; /* revision based offset */
+ unsigned int *qmp_phy_init_seq;
+ int init_seq_len;
+ unsigned int *qmp_phy_reg_offset;
+ int reg_offset_cnt;
+};
+
+static const struct of_device_id msm_usb_id_table[] = {
+ {
+ .compatible = "qcom,usb-ssphy-qmp",
+ },
+ {
+ .compatible = "qcom,usb-ssphy-qmp-v1",
+ },
+ {
+ .compatible = "qcom,usb-ssphy-qmp-v2",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+
+/* Human-readable cable state, used only in log messages. */
+static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
+{
+	if (phy->cable_connected)
+		return "connected";
+
+	return "disconnected";
+}
+
+/*
+ * Clear any pending LFPS / RX-terminations wakeup interrupt by pulsing
+ * the IRQ_CLEAR register: write 1, fence, then write 0.
+ */
+static void msm_ssusb_qmp_clr_lfps_rxterm_int(struct msm_ssphy_qmp *phy)
+{
+	writeb_relaxed(1, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+	/* flush the previous write before next write */
+	wmb();
+	writeb_relaxed(0, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+}
+
+/*
+ * Arm (enable != 0) or disarm the PHY's autonomous wakeup detection.
+ *
+ * When arming: clear stale LFPS/rxterm interrupts, select the detector
+ * events appropriate for the current link mode (LFPS + rx-term in SS
+ * mode, rx-term event only otherwise), then clamp the level shifter so
+ * detection works while the rest of the PHY sleeps.  Disarming reverses
+ * that order: unclamp first, then clear the detector enables.
+ */
+static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
+		int enable)
+{
+	u8 val;
+	unsigned int autonomous_mode_offset =
+			phy->phy_reg[USB3_PHY_AUTONOMOUS_MODE_CTRL];
+
+	dev_dbg(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
+			get_cable_status_str(phy));
+
+	if (enable) {
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+		val = readb_relaxed(phy->base + autonomous_mode_offset);
+		val |= ARCVR_DTCT_EN;
+		if (phy->phy.flags & DEVICE_IN_SS_MODE) {
+			val |= ALFPS_DTCT_EN;
+			val &= ~ARCVR_DTCT_EVENT_SEL;
+		} else {
+			val &= ~ALFPS_DTCT_EN;
+			val |= ARCVR_DTCT_EVENT_SEL;
+		}
+
+		writeb_relaxed(val, phy->base + autonomous_mode_offset);
+		/* clamp phy level shifter to perform autonomous detection */
+		writel_relaxed(0x1, phy->vls_clamp_reg);
+	} else {
+		writel_relaxed(0x0, phy->vls_clamp_reg);
+		writeb_relaxed(0, phy->base + autonomous_mode_offset);
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+	}
+}
+
+/*
+ * Program a single LDO and turn it on: optional load vote (skipped for
+ * load <= 0), then the min/max voltage window, then regulator_enable().
+ * Returns 0 on success or a negative errno from the first failing step.
+ */
+static int msm_ldo_enable(struct msm_ssphy_qmp *phy,
+		struct regulator *ldo, int *voltage_levels, int load)
+{
+	int rc;
+
+	dev_dbg(phy->phy.dev,
+			"ldo: min_vol:%duV max_vol:%duV\n",
+			voltage_levels[VOLTAGE_LEVEL_MIN],
+			voltage_levels[VOLTAGE_LEVEL_MAX]);
+
+	if (load > 0) {
+		rc = regulator_set_load(ldo, load);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = regulator_set_voltage(ldo, voltage_levels[VOLTAGE_LEVEL_MIN],
+			voltage_levels[VOLTAGE_LEVEL_MAX]);
+	if (rc)
+		return rc;
+
+	return regulator_enable(ldo);
+}
+
+/*
+ * Enable (on != 0) or disable (on == 0) all SSPHY supplies.
+ *
+ * The cleanup labels below intentionally fall through: the disable path
+ * enters at disable_regulators and switches off core_ldo, vdd and (when
+ * present) the FPC re-drive LDO in that order, while each enable-path
+ * failure jumps lower down and unwinds only the steps already taken.
+ *
+ * NOTE(review): power_enabled is latched before any regulator call, so
+ * a failed enable leaves the flag claiming "on" — confirm callers
+ * tolerate this (a retry would be short-circuited by the early return).
+ */
+static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
+{
+	int min, rc = 0;
+
+	dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+
+	/* No-op if the requested state is already in effect. */
+	if (phy->power_enabled == on) {
+		dev_dbg(phy->phy.dev, "PHYs' regulators status %d\n",
+			phy->power_enabled);
+		return 0;
+	}
+
+	phy->power_enabled = on;
+
+	min = on ? 1 : 0; /* low or none? */
+
+	if (!on)
+		goto disable_regulators;
+
+	/* Optional external re-drive LDO comes up first. */
+	if (phy->fpc_redrive_ldo) {
+		rc = msm_ldo_enable(phy, phy->fpc_redrive_ldo,
+				phy->redrive_voltage_levels,
+				phy->redrive_load);
+		if (rc < 0) {
+			dev_err(phy->phy.dev,
+				"enable phy->fpc_redrive_ldo failed\n");
+			return rc;
+		}
+	}
+
+	rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels,
+			USB_SSPHY_LOAD_DEFAULT);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->vdd failed\n");
+		goto disable_fpc_redrive;
+	}
+
+	rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels,
+			USB_SSPHY_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->core_ldo failed\n");
+		goto disable_vdd;
+	}
+
+	return 0;
+
+disable_regulators:
+	rc = regulator_disable(phy->core_ldo);
+	if (rc)
+		dev_err(phy->phy.dev, "disable phy->core_ldo failed\n");
+
+disable_vdd:
+	rc = regulator_disable(phy->vdd);
+	if (rc)
+		dev_err(phy->phy.dev, "disable phy->vdd failed\n");
+
+disable_fpc_redrive:
+	if (phy->fpc_redrive_ldo) {
+		rc = regulator_disable(phy->fpc_redrive_ldo);
+		if (rc)
+			dev_err(phy->phy.dev,
+				"disable phy->fpc_redrive_ldo failed\n");
+	}
+
+	return rc < 0 ? rc : 0;
+}
+
+/*
+ * Write a table of {offset, val, delay} tuples (terminated by an entry
+ * whose offset is -1) into the PHY register space, honouring each
+ * entry's optional post-write delay.
+ */
+static int configure_phy_regs(struct usb_phy *uphy,
+		const struct qmp_reg_val *reg)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	if (!reg) {
+		dev_err(uphy->dev, "NULL PHY configuration\n");
+		return -EINVAL;
+	}
+
+	for (; reg->offset != -1; reg++) {
+		writel_relaxed(reg->val, phy->base + reg->offset);
+		if (reg->delay)
+			usleep_range(reg->delay, reg->delay + 10);
+	}
+
+	return 0;
+}
+
+/*
+ * SSPHY Initialization: power the rails, ungate the clocks, run the DT
+ * supplied register init sequence, pick the Type-C lane, start the PHY
+ * and poll PCS_STATUS until PHYSTATUS deasserts (or time out).
+ *
+ * Fix vs. original: dropped the stray null statement (`};`) that
+ * followed the timeout-check block.
+ */
+static int msm_ssphy_qmp_init(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret, val;
+	unsigned init_timeout_usec = INIT_MAX_TIME_USEC;
+	const struct qmp_reg_val *reg = NULL;
+
+	dev_dbg(uphy->dev, "Initializing QMP phy\n");
+
+	/* Emulation platforms have no real PHY to program. */
+	if (phy->emulation)
+		return 0;
+
+	ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+	if (ret) {
+		dev_err(phy->phy.dev,
+		"msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+		ret);
+		return ret;
+	}
+
+	if (!phy->clk_enabled) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+		clk_prepare_enable(phy->aux_clk);
+		clk_prepare_enable(phy->cfg_ahb_clk);
+		clk_set_rate(phy->pipe_clk, 125000000);
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	writel_relaxed(0x01,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+	/* Make sure that above write completed to get PHY into POWER DOWN */
+	mb();
+
+	reg = (struct qmp_reg_val *)phy->qmp_phy_init_seq;
+
+	/* Main configuration */
+	ret = configure_phy_regs(uphy, reg);
+	if (ret) {
+		dev_err(uphy->dev, "Failed the main PHY configuration\n");
+		return ret;
+	}
+
+	/* perform lane selection */
+	val = -EINVAL;
+	if (phy->phy.flags & PHY_LANE_A)
+		val = SW_PORTSELECT_MX;
+
+	if (phy->phy.flags & PHY_LANE_B)
+		val = SW_PORTSELECT | SW_PORTSELECT_MX;
+
+	/* Only program the mux when a lane was actually requested. */
+	if (val > 0)
+		writel_relaxed(val,
+			phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+
+	writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
+	writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
+
+	/* Make sure above write completed to bring PHY out of reset */
+	mb();
+
+	/* Wait for PHY initialization to be done */
+	do {
+		if (readl_relaxed(phy->base +
+			phy->phy_reg[USB3_PHY_PCS_STATUS]) & PHYSTATUS)
+			usleep_range(1, 2);
+		else
+			break;
+	} while (--init_timeout_usec);
+
+	if (!init_timeout_usec) {
+		dev_err(uphy->dev, "QMP PHY initialization timeout\n");
+		dev_err(uphy->dev, "USB3_PHY_PCS_STATUS:%x\n",
+				readl_relaxed(phy->base +
+					phy->phy_reg[USB3_PHY_PCS_STATUS]));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Hard-reset the QMP PHY: assert both reset lines, select USB3 (not DP)
+ * mode in TCSR when that register exists, then deassert in reverse
+ * order.  On a failed assert the already-asserted line is deasserted
+ * before returning.
+ *
+ * NOTE(review): the exit path clears in_suspend even on failure —
+ * presumably so a later set_suspend() reprograms the PHY; confirm.
+ */
+static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret;
+
+	dev_dbg(uphy->dev, "Resetting QMP phy\n");
+
+	/* Assert USB3 PHY reset */
+	ret = reset_control_assert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset assert failed\n");
+		goto exit;
+	}
+
+	/* Assert USB3 PHY CSR reset */
+	ret = reset_control_assert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset assert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* select usb3 phy mode */
+	if (phy->tcsr_usb3_dp_phymode)
+		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
+	/* Deassert USB3 PHY CSR reset */
+	ret = reset_control_deassert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset deassert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* Deassert USB3 PHY reset */
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+		goto exit;
+	}
+
+	return 0;
+
+deassert_phy_phy_reset:
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret)
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+exit:
+	phy->in_suspend = false;
+
+	return ret;
+}
+
+/*
+ * Gate the PHY supplies around suspend/resume.  Regulators are touched
+ * only in device mode with no cable attached (host mode and an active
+ * cable both keep power for wakeup detection).
+ */
+static int msm_ssphy_power_enable(struct msm_ssphy_qmp *phy, bool on)
+{
+	bool host = phy->phy.flags & PHY_HOST_MODE;
+	int ret = 0;
+
+	/*
+	 * Turn off the phy's LDOs when cable is disconnected for device mode
+	 * with external vbus_id indication.
+	 */
+	if (!host && !phy->cable_connected) {
+		if (on) {
+			ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+			if (ret)
+				dev_err(phy->phy.dev,
+				"msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+				ret);
+		} else {
+			ret = msm_ssusb_qmp_ldo_enable(phy, 0);
+			if (ret)
+				dev_err(phy->phy.dev,
+				"msm_ssusb_qmp_ldo_enable(0) failed, ret=%d\n",
+				ret);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * Performs QMP PHY suspend/resume functionality.
+ *
+ * @uphy - usb phy pointer.
+ * @suspend - to enable suspend or not. 1 - suspend, 0 - resume
+ *
+ * Suspend with a cable attached arms autonomous wakeup detection and
+ * keeps the PHY powered; with no cable the PHY macro is powered down.
+ * Clocks are gated in both cases.  Resume reverses the same sequence.
+ * Requests matching the current in_suspend state are ignored.
+ */
+static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP PHY set_suspend for %s called with cable %s\n",
+			(suspend ? "suspend" : "resume"),
+			get_cable_status_str(phy));
+
+	/* Suspend/resume requests are idempotent. */
+	if (phy->in_suspend == suspend) {
+		dev_dbg(uphy->dev, "%s: USB PHY is already %s.\n",
+			__func__, (suspend ? "suspended" : "resumed"));
+		return 0;
+	}
+
+	if (suspend) {
+		if (phy->cable_connected) {
+			if (phy->vls_clamp_reg)
+				msm_ssusb_qmp_enable_autonomous(phy, 1);
+		} else {
+			/* No cable: power the PHY macro down instead. */
+			writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		}
+
+		/* Make sure above write completed with PHY */
+		wmb();
+
+		if (phy->clk_enabled) {
+			clk_disable_unprepare(phy->cfg_ahb_clk);
+			clk_disable_unprepare(phy->aux_clk);
+			clk_disable_unprepare(phy->pipe_clk);
+			if (phy->ref_clk)
+				clk_disable_unprepare(phy->ref_clk);
+			if (phy->ref_clk_src)
+				clk_disable_unprepare(phy->ref_clk_src);
+			phy->clk_enabled = false;
+		}
+		phy->in_suspend = true;
+		msm_ssphy_power_enable(phy, 0);
+		dev_dbg(uphy->dev, "QMP PHY is suspend\n");
+	} else {
+		msm_ssphy_power_enable(phy, 1);
+		clk_prepare_enable(phy->pipe_clk);
+		if (!phy->clk_enabled) {
+			if (phy->ref_clk_src)
+				clk_prepare_enable(phy->ref_clk_src);
+			if (phy->ref_clk)
+				clk_prepare_enable(phy->ref_clk);
+			clk_prepare_enable(phy->aux_clk);
+			clk_prepare_enable(phy->cfg_ahb_clk);
+			phy->clk_enabled = true;
+		}
+		if (!phy->cable_connected) {
+			/* Re-power the PHY macro that suspend turned off. */
+			writel_relaxed(0x01,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		} else {
+			if (phy->vls_clamp_reg)
+				msm_ssusb_qmp_enable_autonomous(phy, 0);
+		}
+
+		/* Make sure that above write completed with PHY */
+		wmb();
+
+		phy->in_suspend = false;
+		dev_dbg(uphy->dev, "QMP PHY is resumed\n");
+	}
+
+	return 0;
+}
+
+/* Record cable attach so suspend keeps power and arms wakeup detection. */
+static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP phy connect notification\n");
+	phy->cable_connected = true;
+	dev_dbg(uphy->dev, "cable_connected=%d\n", phy->cable_connected);
+	return 0;
+}
+
+/*
+ * Record cable detach.  The PHY macro is powered down immediately; the
+ * read-back of POWER_DOWN_CONTROL forces the write to complete before
+ * the flag is cleared.
+ */
+static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	writel_relaxed(0x00,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
+	dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
+	phy->cable_connected = false;
+	return 0;
+}
+
+/*
+ * msm_ssphy_qmp_probe() - parse DT resources and register the QMP PHY.
+ *
+ * Acquires clocks, resets, register regions, the PHY init sequence and
+ * regulator handles from the device tree, then registers the USB PHY.
+ * All allocations are devm-managed.
+ *
+ * Fix vs. original: 'size' is now reset to 0 before looking up
+ * "qcom,qmp-phy-init-seq".  of_get_property() leaves its length output
+ * untouched when the property is absent, so 'size' still held the
+ * qmp-phy-reg-offset byte count and the code wrongly took the
+ * "property present" path, leaving an all-zero init sequence instead of
+ * failing with -EINVAL.
+ */
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->aux_clk = devm_clk_get(dev, "aux_clk");
+	if (IS_ERR(phy->aux_clk)) {
+		ret = PTR_ERR(phy->aux_clk);
+		phy->aux_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get aux_clk\n");
+		goto err;
+	}
+
+	/* Run the aux clock at its highest supported rate. */
+	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
+
+	/* cfg_ahb_clk is optional; only fetch it when DT declares it. */
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+		if (IS_ERR(phy->cfg_ahb_clk)) {
+			ret = PTR_ERR(phy->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get cfg_ahb_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+	phy->pipe_clk = devm_clk_get(dev, "pipe_clk");
+	if (IS_ERR(phy->pipe_clk)) {
+		ret = PTR_ERR(phy->pipe_clk);
+		phy->pipe_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get pipe_clk\n");
+		goto err;
+	}
+
+	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(phy->phy_reset)) {
+		ret = PTR_ERR(phy->phy_reset);
+		dev_dbg(dev, "failed to get phy_reset\n");
+		goto err;
+	}
+
+	phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
+	if (IS_ERR(phy->phy_phy_reset)) {
+		ret = PTR_ERR(phy->phy_phy_reset);
+		dev_dbg(dev, "failed to get phy_phy_reset\n");
+		goto err;
+	}
+
+	/* Mandatory per-revision register offset table. */
+	of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
+	if (size) {
+		phy->qmp_phy_reg_offset = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (phy->qmp_phy_reg_offset) {
+			phy->reg_offset_cnt =
+				(size / sizeof(*phy->qmp_phy_reg_offset));
+			if (phy->reg_offset_cnt > USB3_PHY_REG_MAX) {
+				dev_err(dev, "invalid reg offset count\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+					"qcom,qmp-phy-reg-offset",
+					phy->qmp_phy_reg_offset,
+					phy->reg_offset_cnt);
+		} else {
+			dev_err(dev, "err mem alloc for qmp_phy_reg_offset\n");
+			return -ENOMEM;
+		}
+		phy->phy_reg = phy->qmp_phy_reg_offset;
+	} else {
+		dev_err(dev, "err provide qcom,qmp-phy-reg-offset\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"qmp_phy_base");
+	if (!res) {
+		dev_err(dev, "failed getting qmp_phy_base\n");
+		return -ENODEV;
+	}
+	phy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(phy->base)) {
+		ret = PTR_ERR(phy->base);
+		goto err;
+	}
+
+	/* Optional level-shifter clamp register for autonomous wakeup. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"vls_clamp_reg");
+	if (!res) {
+		dev_dbg(dev, "vls_clamp_reg not passed\n");
+	} else {
+		phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
+		if (IS_ERR(phy->vls_clamp_reg)) {
+			dev_err(dev, "couldn't find vls_clamp_reg address.\n");
+			return PTR_ERR(phy->vls_clamp_reg);
+		}
+	}
+
+	/* Optional TCSR register selecting USB3 vs DP PHY mode. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"tcsr_usb3_dp_phymode");
+	if (res) {
+		phy->tcsr_usb3_dp_phymode = devm_ioremap_resource(dev, res);
+		if (IS_ERR(phy->tcsr_usb3_dp_phymode)) {
+			dev_err(dev, "err getting tcsr_usb3_dp_phymode addr\n");
+			return PTR_ERR(phy->tcsr_usb3_dp_phymode);
+		}
+	}
+
+	phy->emulation = of_property_read_bool(dev->of_node,
+						"qcom,emulation");
+	if (!phy->emulation) {
+		/*
+		 * Reset 'size' before the lookup: of_get_property() does not
+		 * write it when the property is missing, and it still holds
+		 * the qmp-phy-reg-offset length from above.
+		 */
+		size = 0;
+		of_get_property(dev->of_node, "qcom,qmp-phy-init-seq", &size);
+		if (size) {
+			if (size % sizeof(*phy->qmp_phy_init_seq)) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+			phy->qmp_phy_init_seq = devm_kzalloc(dev,
+							size, GFP_KERNEL);
+			if (phy->qmp_phy_init_seq) {
+				phy->init_seq_len =
+					(size / sizeof(*phy->qmp_phy_init_seq));
+
+				of_property_read_u32_array(dev->of_node,
+						"qcom,qmp-phy-init-seq",
+						phy->qmp_phy_init_seq,
+						phy->init_seq_len);
+			} else {
+				dev_err(dev, "error allocating memory for phy_init_seq\n");
+				return -EINVAL;
+			}
+		} else {
+			dev_err(dev, "error need qmp-phy-init-seq\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Set default core voltage values */
+	phy->core_voltage_levels[VOLTAGE_LEVEL_NONE] = 0;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+
+	if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
+		len == sizeof(phy->core_voltage_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,core-voltage-level",
+				(u32 *)phy->core_voltage_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,core-voltage-level property\n");
+			goto err;
+		}
+	}
+
+	/* vdd corner votes are mandatory. */
+	if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) &&
+		len == sizeof(phy->vdd_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,vdd-voltage-level",
+				(u32 *) phy->vdd_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,vdd-voltage-level property\n");
+			goto err;
+		}
+	} else {
+		ret = -EINVAL;
+		dev_err(dev, "error invalid inputs for vdd-voltage-level\n");
+		goto err;
+	}
+
+	phy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(phy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		ret = PTR_ERR(phy->vdd);
+		goto err;
+	}
+
+	phy->core_ldo = devm_regulator_get(dev, "core");
+	if (IS_ERR(phy->core_ldo)) {
+		dev_err(dev, "unable to get core ldo supply\n");
+		ret = PTR_ERR(phy->core_ldo);
+		goto err;
+	}
+
+	/* Optional FPC re-drive LDO; its DT properties become mandatory
+	 * once the regulator itself is present.
+	 */
+	phy->fpc_redrive_ldo = devm_regulator_get_optional(dev, "fpc-redrive");
+	if (IS_ERR(phy->fpc_redrive_ldo)) {
+		phy->fpc_redrive_ldo = NULL;
+		dev_dbg(dev, "no FPC re-drive ldo regulator\n");
+	} else {
+		if (of_get_property(dev->of_node,
+				"qcom,redrive-voltage-level", &len) &&
+				len == sizeof(phy->redrive_voltage_levels)) {
+			ret = of_property_read_u32_array(dev->of_node,
+					"qcom,redrive-voltage-level",
+					(u32 *) phy->redrive_voltage_levels,
+					len / sizeof(u32));
+			if (ret) {
+				dev_err(dev,
+					"err qcom,redrive-voltage-level\n");
+				goto err;
+			}
+		} else {
+			ret = -EINVAL;
+			dev_err(dev, "err inputs for redrive-voltage-level\n");
+			goto err;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,redrive-load",
+				&phy->redrive_load);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to read redrive load\n");
+			goto err;
+		}
+
+		dev_dbg(dev, "Get FPC re-drive ldo regulator\n");
+	}
+
+	/* ref clocks are optional on some platforms. */
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	platform_set_drvdata(pdev, phy);
+
+	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
+		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
+
+	phy->phy.dev			= dev;
+	phy->phy.init			= msm_ssphy_qmp_init;
+	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
+	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
+	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
+	phy->phy.reset			= msm_ssphy_qmp_reset;
+	phy->phy.type			= USB_PHY_TYPE_USB3;
+
+	ret = usb_add_phy_dev(&phy->phy);
+
+err:
+	return ret;
+}
+
+/*
+ * msm_ssphy_qmp_remove() - tear down the QMP PHY.
+ *
+ * Fixes vs. original: @phy is devm_kzalloc()'d in probe, so the
+ * explicit kfree() was a double free; and clocks are now only gated
+ * when the clk_enabled bookkeeping says they are still running,
+ * avoiding enable-count underflow when the PHY was already suspended.
+ */
+static int msm_ssphy_qmp_remove(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy = platform_get_drvdata(pdev);
+
+	if (!phy)
+		return 0;
+
+	usb_remove_phy(&phy->phy);
+
+	/* Gate clocks only if init/resume left them enabled. */
+	if (phy->clk_enabled) {
+		clk_disable_unprepare(phy->aux_clk);
+		clk_disable_unprepare(phy->cfg_ahb_clk);
+		clk_disable_unprepare(phy->pipe_clk);
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+		phy->clk_enabled = false;
+	}
+
+	msm_ssusb_qmp_ldo_enable(phy, 0);
+
+	/* phy is devm-managed; no explicit free here. */
+	return 0;
+}
+
+static struct platform_driver msm_ssphy_qmp_driver = {
+ .probe = msm_ssphy_qmp_probe,
+ .remove = msm_ssphy_qmp_remove,
+ .driver = {
+ .name = "msm-usb-ssphy-qmp",
+ .of_match_table = of_match_ptr(msm_usb_id_table),
+ },
+};
+
+module_platform_driver(msm_ssphy_qmp_driver);
+
+MODULE_DESCRIPTION("MSM USB SS QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-ssusb.c b/drivers/usb/phy/phy-msm-ssusb.c
new file mode 100644
index 000000000000..5ea19deb8219
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-ssusb.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/msm_hsusb.h>
+
+static int ss_phy_override_deemphasis;
+module_param(ss_phy_override_deemphasis, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ss_phy_override_deemphasis, "Override SSPHY demphasis value");
+
+/* QSCRATCH SSPHY control registers */
+#define SS_PHY_CTRL_REG 0x30
+#define SS_PHY_PARAM_CTRL_1 0x34
+#define SS_PHY_PARAM_CTRL_2 0x38
+#define SS_CR_PROTOCOL_DATA_IN_REG 0x3C
+#define SS_CR_PROTOCOL_DATA_OUT_REG 0x40
+#define SS_CR_PROTOCOL_CAP_ADDR_REG 0x44
+#define SS_CR_PROTOCOL_CAP_DATA_REG 0x48
+#define SS_CR_PROTOCOL_READ_REG 0x4C
+#define SS_CR_PROTOCOL_WRITE_REG 0x50
+
+/* SS_PHY_CTRL_REG bits */
+#define SS_PHY_RESET BIT(7)
+#define REF_SS_PHY_EN BIT(8)
+#define LANE0_PWR_PRESENT BIT(24)
+#define TEST_POWERDOWN BIT(26)
+#define REF_USE_PAD BIT(28)
+
+#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
+
+struct msm_ssphy {
+ struct usb_phy phy;
+ void __iomem *base;
+ struct clk *core_clk; /* USB3 master clock */
+ struct clk *com_reset_clk; /* PHY common block reset */
+ struct clk *reset_clk; /* SS PHY reset */
+ struct regulator *vdd;
+ struct regulator *vdda18;
+ atomic_t active_count; /* num of active instances */
+ bool suspended;
+ int vdd_levels[3]; /* none, low, high */
+ int deemphasis_val;
+};
+
+/*
+ * Vote the SSPHY vdd rail: minimum corner is "low" (high != 0) or
+ * "none" (high == 0); the maximum corner is always vdd_levels[2].
+ */
+static int msm_ssusb_config_vdd(struct msm_ssphy *phy, int high)
+{
+	int ret;
+	int min = high ? 1 : 0; /* low or none? */
+
+	ret = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
+				    phy->vdd_levels[2]);
+	if (ret) {
+		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(phy->phy.dev, "%s: min_vol:%d max_vol:%d\n", __func__,
+		phy->vdd_levels[min], phy->vdd_levels[2]);
+
+	return ret;
+}
+
+/*
+ * Switch the 1.8 V SSPHY analog supply between HPM-on and off.
+ *
+ * The labels below intentionally fall through: the disable path enters
+ * at disable_regulators and unwinds enable -> voltage -> load in order,
+ * while each enable-path failure enters lower down and unwinds only the
+ * steps already taken.
+ */
+static int msm_ssusb_ldo_enable(struct msm_ssphy *phy, int on)
+{
+	int rc = 0;
+
+	dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+
+	if (!on)
+		goto disable_regulators;
+
+
+	rc = regulator_set_load(phy->vdda18, USB_SSPHY_1P8_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "Unable to set HPM of vdda18\n");
+		return rc;
+	}
+
+	rc = regulator_set_voltage(phy->vdda18, USB_SSPHY_1P8_VOL_MIN,
+						USB_SSPHY_1P8_VOL_MAX);
+	if (rc) {
+		dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
+		goto put_vdda18_lpm;
+	}
+
+	rc = regulator_enable(phy->vdda18);
+	if (rc) {
+		dev_err(phy->phy.dev, "Unable to enable vdda18\n");
+		goto unset_vdda18;
+	}
+
+	return 0;
+
+disable_regulators:
+	rc = regulator_disable(phy->vdda18);
+	if (rc)
+		dev_err(phy->phy.dev, "Unable to disable vdda18\n");
+
+unset_vdda18:
+	rc = regulator_set_voltage(phy->vdda18, 0, USB_SSPHY_1P8_VOL_MAX);
+	if (rc)
+		dev_err(phy->phy.dev, "unable to set voltage for vdda18\n");
+
+put_vdda18_lpm:
+	rc = regulator_set_load(phy->vdda18, 0);
+	if (rc < 0)
+		dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n");
+
+	return rc < 0 ? rc : 0;
+}
+
+/*
+ * Read-modify-write the @mask bits at @offset to @val, then read back
+ * and log an error if the bits did not take effect.
+ */
+static void msm_usb_write_readback(void *base, u32 offset,
+					const u32 mask, u32 val)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(base + offset);
+	tmp &= ~mask;		/* retain other bits */
+	writel_relaxed(tmp | val, base + offset);
+
+	/* Read back to see if val was written */
+	tmp = readl_relaxed(base + offset) & mask;
+	if (tmp != val)
+		pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
+			__func__, val, offset);
+}
+
+/**
+ * Write SSPHY register with debug info.
+ *
+ * @base - base virtual address.
+ * @addr - SSPHY address to write.
+ * @val - value to write.
+ *
+ * Uses the CR protocol interface: each step latches by writing 1 to the
+ * handshake register and busy-waiting until the hardware clears it.
+ */
+static void msm_ssusb_write_phycreg(void *base, u32 addr, u32 val)
+{
+	/* latch the target register address */
+	writel_relaxed(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+	writel_relaxed(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+	while (readl_relaxed(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+		cpu_relax();
+
+	/* latch the data to write */
+	writel_relaxed(val, base + SS_CR_PROTOCOL_DATA_IN_REG);
+	writel_relaxed(0x1, base + SS_CR_PROTOCOL_CAP_DATA_REG);
+	while (readl_relaxed(base + SS_CR_PROTOCOL_CAP_DATA_REG))
+		cpu_relax();
+
+	/* commit the write */
+	writel_relaxed(0x1, base + SS_CR_PROTOCOL_WRITE_REG);
+	while (readl_relaxed(base + SS_CR_PROTOCOL_WRITE_REG))
+		cpu_relax();
+}
+
+/**
+ * Read SSPHY register with debug info.
+ *
+ * @base - base virtual address.
+ * @addr - SSPHY address to read.
+ *
+ * Uses the same CR protocol handshake as the write path; note the
+ * double-read workaround below.
+ */
+static u32 msm_ssusb_read_phycreg(void *base, u32 addr)
+{
+	bool first_read = true;
+
+	/* latch the target register address */
+	writel_relaxed(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+	writel_relaxed(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+	while (readl_relaxed(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+		cpu_relax();
+
+	/*
+	 * Due to hardware bug, first read of SSPHY register might be
+	 * incorrect. Hence as workaround, SW should perform SSPHY register
+	 * read twice, but use only second read and ignore first read.
+	 */
+retry:
+	writel_relaxed(0x1, base + SS_CR_PROTOCOL_READ_REG);
+	while (readl_relaxed(base + SS_CR_PROTOCOL_READ_REG))
+		cpu_relax();
+
+	if (first_read) {
+		/* discard the first (possibly bogus) read, then retry */
+		readl_relaxed(base + SS_CR_PROTOCOL_DATA_OUT_REG);
+		first_read = false;
+		goto retry;
+	}
+
+	return readl_relaxed(base + SS_CR_PROTOCOL_DATA_OUT_REG);
+}
+
+/*
+ * Program SSPHY tuning registers (TX amplitude, de-emphasis, RX EQ)
+ * through the CR register interface, plus the QSCRATCH parameter
+ * register.  De-emphasis can be overridden at runtime via the
+ * ss_phy_override_deemphasis module parameter.  Always returns 0.
+ */
+static int msm_ssphy_set_params(struct usb_phy *uphy)
+{
+	struct msm_ssphy *phy = container_of(uphy, struct msm_ssphy, phy);
+	u32 data = 0;
+
+	/*
+	 * WORKAROUND: There is SSPHY suspend bug due to which USB enumerates
+	 * in HS mode instead of SS mode. Workaround it by asserting
+	 * LANE0.TX_ALT_BLOCK.EN_ALT_BUS to enable TX to use alt bus mode
+	 */
+	data = msm_ssusb_read_phycreg(phy->base, 0x102D);
+	data |= (1 << 7);
+	msm_ssusb_write_phycreg(phy->base, 0x102D, data);
+
+	data = msm_ssusb_read_phycreg(phy->base, 0x1010);
+	data &= ~0xFF0;
+	data |= 0x20;
+	msm_ssusb_write_phycreg(phy->base, 0x1010, data);
+
+	/*
+	 * Fix RX Equalization setting as follows
+	 * LANE0.RX_OVRD_IN_HI. RX_EQ_EN set to 0
+	 * LANE0.RX_OVRD_IN_HI.RX_EQ_EN_OVRD set to 1
+	 * LANE0.RX_OVRD_IN_HI.RX_EQ set to 3
+	 * LANE0.RX_OVRD_IN_HI.RX_EQ_OVRD set to 1
+	 */
+	data = msm_ssusb_read_phycreg(phy->base, 0x1006);
+	data &= ~(1 << 6);
+	data |= (1 << 7);
+	data &= ~(0x7 << 8);
+	data |= (0x3 << 8);
+	data |= (0x1 << 11);
+	msm_ssusb_write_phycreg(phy->base, 0x1006, data);
+
+	/*
+	 * Set EQ and TX launch amplitudes as follows
+	 * LANE0.TX_OVRD_DRV_LO.PREEMPH set to 22
+	 * LANE0.TX_OVRD_DRV_LO.AMPLITUDE set to 127
+	 * LANE0.TX_OVRD_DRV_LO.EN set to 1.
+	 */
+	data = msm_ssusb_read_phycreg(phy->base, 0x1002);
+	data &= ~0x3F80;
+	if (ss_phy_override_deemphasis)
+		phy->deemphasis_val = ss_phy_override_deemphasis;
+	if (phy->deemphasis_val)
+		data |= (phy->deemphasis_val << 7);
+	else
+		data |= (0x16 << 7);
+	data &= ~0x7F;
+	data |= (0x7F | (1 << 14));
+	msm_ssusb_write_phycreg(phy->base, 0x1002, data);
+
+	/*
+	 * Set the QSCRATCH SS_PHY_PARAM_CTRL1 parameters as follows
+	 * TX_FULL_SWING [26:20] amplitude to 127
+	 * TX_DEEMPH_3_5DB [13:8] to 22
+	 * LOS_BIAS [2:0] to 0x5
+	 */
+	msm_usb_write_readback(phy->base, SS_PHY_PARAM_CTRL_1,
+				0x07f03f07, 0x07f01605);
+
+	return 0;
+}
+
+/*
+ * SSPHY Initialization: reset the PHY (via reset clocks when available,
+ * otherwise the SS_PHY_RESET bit), switch the reference clock to the
+ * pads, enable it for HS mode, and re-apply the tuning parameters that
+ * the reset wiped.  core_clk is held only for the duration of the
+ * QSCRATCH accesses.
+ */
+static int msm_ssphy_init(struct usb_phy *uphy)
+{
+	struct msm_ssphy *phy = container_of(uphy, struct msm_ssphy, phy);
+	u32 val;
+
+	/* Ensure clock is on before accessing QSCRATCH registers */
+	clk_prepare_enable(phy->core_clk);
+
+	/* read initial value */
+	val = readl_relaxed(phy->base + SS_PHY_CTRL_REG);
+
+	/* Use clk reset, if available; otherwise use SS_PHY_RESET bit */
+	if (phy->com_reset_clk) {
+		clk_reset(phy->com_reset_clk, CLK_RESET_ASSERT);
+		clk_reset(phy->reset_clk, CLK_RESET_ASSERT);
+		udelay(10); /* 10us required before de-asserting */
+		clk_reset(phy->com_reset_clk, CLK_RESET_DEASSERT);
+		clk_reset(phy->reset_clk, CLK_RESET_DEASSERT);
+	} else {
+		writel_relaxed(val | SS_PHY_RESET, phy->base + SS_PHY_CTRL_REG);
+		udelay(10); /* 10us required before de-asserting */
+		writel_relaxed(val, phy->base + SS_PHY_CTRL_REG);
+	}
+
+	/* Use ref_clk from pads and set its parameters */
+	val |= REF_USE_PAD;
+	writel_relaxed(val, phy->base + SS_PHY_CTRL_REG);
+	msleep(30);
+
+	/* Ref clock must be stable now, enable ref clock for HS mode */
+	val |= LANE0_PWR_PRESENT | REF_SS_PHY_EN;
+	writel_relaxed(val, phy->base + SS_PHY_CTRL_REG);
+	usleep_range(2000, 2200);
+
+	/*
+	 * Reinitialize SSPHY parameters as SS_PHY RESET will reset
+	 * the internal registers to default values.
+	 */
+	msm_ssphy_set_params(uphy);
+
+	clk_disable_unprepare(phy->core_clk);
+
+	return 0;
+}
+
+/*
+ * Suspend (suspend != 0) or resume the SSPHY, reference-counted through
+ * active_count so that multiple users can share the PHY: only the last
+ * suspender and the first resumer actually touch the hardware.  Suspend
+ * drops the pad reference clock, enters retention (TEST_POWERDOWN) and
+ * removes the regulator votes; resume reverses the order and also
+ * re-runs the reset sequence (skipped for a secondary PHY).
+ */
+static int msm_ssphy_set_suspend(struct usb_phy *uphy, int suspend)
+{
+	struct msm_ssphy *phy = container_of(uphy, struct msm_ssphy, phy);
+	void __iomem *base = phy->base;
+	int count;
+
+	/* Ensure clock is on before accessing QSCRATCH registers */
+	clk_prepare_enable(phy->core_clk);
+
+	if (suspend) {
+		count = atomic_dec_return(&phy->active_count);
+		if (count > 0 || phy->suspended) {
+			dev_dbg(uphy->dev, "Skipping suspend, active_count=%d phy->suspended=%d\n",
+				count, phy->suspended);
+			goto done;
+		}
+
+		/* Unbalanced suspend; clamp the count back to zero. */
+		if (count < 0) {
+			dev_WARN(uphy->dev, "Suspended too many times! active_count=%d\n",
+				 count);
+			atomic_set(&phy->active_count, 0);
+		}
+
+		/* Clear REF_SS_PHY_EN */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, REF_SS_PHY_EN, 0);
+		/* Clear REF_USE_PAD */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, REF_USE_PAD, 0);
+		/* Set TEST_POWERDOWN (enables PHY retention) */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, TEST_POWERDOWN,
+				       TEST_POWERDOWN);
+		if (phy->com_reset_clk &&
+			!(phy->phy.flags & ENABLE_SECONDARY_PHY)) {
+			/* leave these asserted until resuming */
+			clk_reset(phy->com_reset_clk, CLK_RESET_ASSERT);
+			clk_reset(phy->reset_clk, CLK_RESET_ASSERT);
+		}
+
+		msm_ssusb_ldo_enable(phy, 0);
+		msm_ssusb_config_vdd(phy, 0);
+		phy->suspended = true;
+	} else {
+		count = atomic_inc_return(&phy->active_count);
+		if (count > 1 || !phy->suspended) {
+			dev_dbg(uphy->dev, "Skipping resume, active_count=%d phy->suspended=%d\n",
+				count, phy->suspended);
+			goto done;
+		}
+
+		phy->suspended = false;
+		msm_ssusb_config_vdd(phy, 1);
+		msm_ssusb_ldo_enable(phy, 1);
+
+		if (phy->phy.flags & ENABLE_SECONDARY_PHY) {
+			dev_err(uphy->dev, "secondary PHY, skipping reset\n");
+			goto done;
+		}
+
+		if (phy->com_reset_clk) {
+			clk_reset(phy->com_reset_clk, CLK_RESET_DEASSERT);
+			clk_reset(phy->reset_clk, CLK_RESET_DEASSERT);
+		} else {
+			/* Assert SS PHY RESET */
+			msm_usb_write_readback(base, SS_PHY_CTRL_REG,
+					       SS_PHY_RESET, SS_PHY_RESET);
+		}
+
+		/* Set REF_USE_PAD */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, REF_USE_PAD,
+				       REF_USE_PAD);
+		/* Set REF_SS_PHY_EN */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, REF_SS_PHY_EN,
+				       REF_SS_PHY_EN);
+		/* Clear TEST_POWERDOWN */
+		msm_usb_write_readback(base, SS_PHY_CTRL_REG, TEST_POWERDOWN,
+				       0);
+		if (!phy->com_reset_clk) {
+			udelay(10); /* 10us required before de-asserting */
+			msm_usb_write_readback(base, SS_PHY_CTRL_REG,
+					       SS_PHY_RESET, 0);
+		}
+	}
+
+done:
+	clk_disable_unprepare(phy->core_clk);
+	return 0;
+}
+
+/*
+ * msm_ssphy_notify_connect() - react to a device-mode connect event
+ *
+ * In peripheral mode with VBUS-valid override in effect, flag lane 0
+ * power-present to the SS PHY.  Host mode needs no action.  Returns 0.
+ */
+static int msm_ssphy_notify_connect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy *ssphy = container_of(uphy, struct msm_ssphy, phy);
+	u32 flags = uphy->flags;
+
+	if (!(flags & PHY_HOST_MODE) && (flags & PHY_VBUS_VALID_OVERRIDE)) {
+		/* Indicate power present to SS phy */
+		msm_usb_write_readback(ssphy->base, SS_PHY_CTRL_REG,
+				       LANE0_PWR_PRESENT, LANE0_PWR_PRESENT);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_ssphy_notify_disconnect() - react to a device-mode disconnect event
+ *
+ * Mirror of msm_ssphy_notify_connect(): in peripheral mode with
+ * VBUS-valid override in effect, drop the lane 0 power-present
+ * indication.  Host mode needs no action.  Returns 0.
+ */
+static int msm_ssphy_notify_disconnect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy *ssphy = container_of(uphy, struct msm_ssphy, phy);
+	u32 flags = uphy->flags;
+
+	if (!(flags & PHY_HOST_MODE) && (flags & PHY_VBUS_VALID_OVERRIDE)) {
+		/* Clear power indication to SS phy */
+		msm_usb_write_readback(ssphy->base, SS_PHY_CTRL_REG,
+				       LANE0_PWR_PRESENT, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_ssphy_probe() - bind the QSCRATCH-based SS PHY to a platform device
+ *
+ * Maps the PHY register space, acquires the clocks and regulators
+ * described in the devicetree node, brings the PHY rails up, and
+ * registers the usb_phy with the USB PHY framework.
+ *
+ * Returns 0 on success or a negative errno.  On failure, rails enabled
+ * here are rolled back explicitly; iomap, clock, and regulator handles
+ * are devm-managed and released automatically.
+ */
+static int msm_ssphy_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "missing memory base resource\n");
+		return -ENODEV;
+	}
+
+	phy->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!phy->base) {
+		dev_err(dev, "ioremap failed\n");
+		return -ENODEV;
+	}
+
+	/* core_clk is mandatory: QSCRATCH registers need it to be running */
+	phy->core_clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(phy->core_clk)) {
+		dev_err(dev, "unable to get core_clk\n");
+		return PTR_ERR(phy->core_clk);
+	}
+
+	/* Reset clocks are optional; init/resume fall back to SS_PHY_RESET */
+	phy->com_reset_clk = devm_clk_get(dev, "com_reset_clk");
+	if (IS_ERR(phy->com_reset_clk)) {
+		dev_dbg(dev, "com_reset_clk unavailable\n");
+		phy->com_reset_clk = NULL;
+	}
+
+	phy->reset_clk = devm_clk_get(dev, "reset_clk");
+	if (IS_ERR(phy->reset_clk)) {
+		dev_dbg(dev, "reset_clk unavailable\n");
+		phy->reset_clk = NULL;
+	}
+
+	/* qcom,primary-phy present: this instance is the secondary SS PHY */
+	if (of_get_property(dev->of_node, "qcom,primary-phy", NULL)) {
+		dev_dbg(dev, "secondary SSPHY\n");
+		phy->phy.flags |= ENABLE_SECONDARY_PHY;
+	}
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) phy->vdd_levels,
+					 ARRAY_SIZE(phy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	phy->phy.dev = dev;
+	phy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(phy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(phy->vdd);
+	}
+
+	phy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(phy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(phy->vdda18);
+	}
+
+	/* Power sequence: configure vdd, enable vdd, then the LDOs */
+	ret = msm_ssusb_config_vdd(phy, 1);
+	if (ret) {
+		dev_err(dev, "ssusb vdd_dig configuration failed\n");
+		return ret;
+	}
+
+	ret = regulator_enable(phy->vdd);
+	if (ret) {
+		dev_err(dev, "unable to enable the ssusb vdd_dig\n");
+		goto unconfig_ss_vdd;
+	}
+
+	ret = msm_ssusb_ldo_enable(phy, 1);
+	if (ret) {
+		dev_err(dev, "ssusb vreg enable failed\n");
+		goto disable_ss_vdd;
+	}
+
+	platform_set_drvdata(pdev, phy);
+
+	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
+		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
+
+	if (of_property_read_u32(dev->of_node, "qcom,deemphasis-value",
+				 &phy->deemphasis_val))
+		dev_dbg(dev, "unable to read ssphy deemphasis value\n");
+
+	phy->phy.init			= msm_ssphy_init;
+	phy->phy.set_suspend		= msm_ssphy_set_suspend;
+	phy->phy.notify_connect		= msm_ssphy_notify_connect;
+	phy->phy.notify_disconnect	= msm_ssphy_notify_disconnect;
+	phy->phy.type			= USB_PHY_TYPE_USB3;
+
+	ret = usb_add_phy_dev(&phy->phy);
+	if (ret)
+		goto disable_ss_ldo;
+
+	return 0;
+
+disable_ss_ldo:
+	msm_ssusb_ldo_enable(phy, 0);
+disable_ss_vdd:
+	regulator_disable(phy->vdd);
+unconfig_ss_vdd:
+	msm_ssusb_config_vdd(phy, 0);
+
+	return ret;
+}
+
+/*
+ * msm_ssphy_remove() - tear down the SS PHY on driver unbind
+ *
+ * Unregisters the usb_phy and powers down the rails brought up in
+ * probe.  The msm_ssphy structure itself was allocated with
+ * devm_kzalloc(), so it is freed by the driver core when the device
+ * detaches; calling kfree() on it here (as the original code did)
+ * would be a double free.  Always returns 0.
+ */
+static int msm_ssphy_remove(struct platform_device *pdev)
+{
+	struct msm_ssphy *phy = platform_get_drvdata(pdev);
+
+	if (!phy)
+		return 0;
+
+	usb_remove_phy(&phy->phy);
+	msm_ssusb_ldo_enable(phy, 0);
+	regulator_disable(phy->vdd);
+	msm_ssusb_config_vdd(phy, 0);
+
+	return 0;
+}
+
+/* Devicetree match table: binds this driver to "qcom,usb-ssphy" nodes */
+static const struct of_device_id msm_usb_id_table[] = {
+	{
+		.compatible = "qcom,usb-ssphy",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+
+/* Platform driver glue; module init/exit via module_platform_driver() */
+static struct platform_driver msm_ssphy_driver = {
+	.probe		= msm_ssphy_probe,
+	.remove		= msm_ssphy_remove,
+	.driver = {
+		.name	= "msm-usb-ssphy",
+		.of_match_table = of_match_ptr(msm_usb_id_table),
+	},
+};
+
+module_platform_driver(msm_ssphy_driver);
+
+MODULE_DESCRIPTION("MSM USB SS PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..f1360f20ffe4 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2011, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1727,7 +1727,7 @@ static int msm_otg_probe(struct platform_device *pdev)
writel(0x1, phy_select);
}
- dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
+ dev_info(&pdev->dev, "OTG regs = %pK\n", motg->regs);
motg->irq = platform_get_irq(pdev, 0);
if (motg->irq < 0) {