Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--  drivers/usb/host/xhci.c | 113
1 file changed, 99 insertions(+), 14 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9dbf1583bd7a..abe6d3c17047 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -114,10 +114,20 @@ int xhci_halt(struct xhci_hcd *xhci)
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (!ret) {
xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- } else
+ } else {
xhci_warn(xhci, "Host not halted after %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+ if (delayed_work_pending(&xhci->cmd_timer)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Cleanup command queue");
+ cancel_delayed_work(&xhci->cmd_timer);
+ xhci_cleanup_command_queue(xhci);
+ }
+
return ret;
}
@@ -128,7 +138,13 @@ static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ /*
+ * Disable IRQs to avoid xhci_irq flooding due to an unhandled port
+ * change event in halt state, as soon as xhci_start clears the halt bit.
+ */
+ disable_irq(hcd->irq);
temp = readl(&xhci->op_regs->command);
temp |= (CMD_RUN);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
@@ -149,6 +165,8 @@ static int xhci_start(struct xhci_hcd *xhci)
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ enable_irq(hcd->irq);
+
return ret;
}
@@ -647,7 +665,7 @@ int xhci_run(struct usb_hcd *hcd)
temp = readl(&xhci->ir_set->irq_pending);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
+ "// Enabling event ring interrupter %pK by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
@@ -745,6 +763,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
spin_lock_irq(&xhci->lock);
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
xhci_halt(xhci);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
@@ -935,7 +957,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- if (!hcd->state)
+ if (!hcd->state || xhci->suspended)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
@@ -1005,6 +1027,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
+ xhci->suspended = true;
return rc;
}
@@ -1024,7 +1047,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
- if (!hcd->state)
+ if (!hcd->state || !xhci->suspended)
return 0;
/* Wait a bit if either of the roothubs need to settle from the
@@ -1162,6 +1185,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ xhci->suspended = false;
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -1485,7 +1509,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
exit:
return ret;
dying:
- xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+ xhci_dbg(xhci, "Ep 0x%x: URB %pK submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
ret = -ESHUTDOWN;
@@ -1621,7 +1645,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
i = urb_priv->td_cnt;
if (i < urb_priv->length)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cancel URB %p, dev %s, ep 0x%x, "
+ "Cancel URB %pK, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
@@ -1689,7 +1713,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (xhci->xhc_state & XHCI_STATE_DYING)
return -ENODEV;
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1717,7 +1741,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ xhci_warn(xhci, "xHCI %s called with disabled ep %pK\n",
__func__, ep);
return 0;
}
@@ -1809,7 +1833,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* ignore this request.
*/
if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
- xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ xhci_warn(xhci, "xHCI %s called with enabled ep %pK\n",
__func__, ep);
return 0;
}
@@ -2790,7 +2814,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
(xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
@@ -2887,7 +2911,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
return;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ xhci_dbg(xhci, "%s called for udev %pK\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) {
@@ -2940,7 +2964,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit config ep after "
"reset ep command\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+ xhci_warn(xhci, "WARN deq seg = %pK, deq ptr = %pK\n",
deq_state->new_deq_seg,
deq_state->new_deq_ptr);
return;
@@ -3674,6 +3698,7 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
+ virt_dev->udev = NULL;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev->udev = NULL;
@@ -3967,7 +3992,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
- "Slot ID %d dcbaa entry @%p = %#016llx",
+ "Slot ID %d dcbaa entry @%pK = %#016llx",
udev->slot_id,
&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
(unsigned long long)
@@ -4998,6 +5023,61 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+ unsigned intr_num)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (intr_num >= xhci->max_interrupters) {
+ xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+ xhci->max_interrupters);
+ return 0;
+ }
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+ xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+ && xhci->sec_event_ring[intr_num]->first_seg)
+ return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+ return 0;
+}
+
+static dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+ return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+ return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+ int ret;
+ unsigned int ep_index;
+ struct xhci_virt_device *virt_dev;
+
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+ if (ret <= 0) {
+ xhci_err(xhci, "%s: invalid args\n", __func__);
+ return 0;
+ }
+
+ virt_dev = xhci->devs[udev->slot_id];
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ if (virt_dev->eps[ep_index].ring &&
+ virt_dev->eps[ep_index].ring->first_seg)
+ return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+ return 0;
+}
+
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5057,6 +5137,11 @@ static const struct hc_driver xhci_hc_driver = {
.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
.find_raw_port_number = xhci_find_raw_port_number,
+ .sec_event_ring_setup = xhci_sec_event_ring_setup,
+ .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
+ .get_sec_event_ring_dma_addr = xhci_get_sec_event_ring_dma_addr,
+ .get_xfer_ring_dma_addr = xhci_get_xfer_ring_dma_addr,
+ .get_dcba_dma_addr = xhci_get_dcba_dma_addr,
};
void xhci_init_driver(struct hc_driver *drv,
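The exports added above (xhci_get_sec_event_ring_dma_addr(), xhci_get_xfer_ring_dma_addr()) and the hc_driver hooks they are wired into let a client driver look up ring DMA addresses through the HCD. A minimal caller sketch, assuming the new hc_driver fields keep the signatures shown in this diff; the wrapper name example_get_xfer_ring_dma() is illustrative and not part of the patch:

/* Hypothetical consumer of the new op; not part of this patch. */
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static dma_addr_t example_get_xfer_ring_dma(struct usb_device *udev,
					    struct usb_host_endpoint *ep)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

	/* The op is optional; host controller drivers may leave it unset. */
	if (!hcd->driver->get_xfer_ring_dma_addr)
		return 0;

	/* Returns the transfer ring's first-segment DMA address, or 0. */
	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
}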