author     Sarah Sharp          2009-09-02 12:14:28 -0700
committer  Greg Kroah-Hartman   2009-09-23 06:46:18 -0700
commit     624defa12f304b4d11eda309bc207fa5a1900d0f (patch)
tree       fb350ade85d9f1703c28eae0b2683c0a70aec8a7 /drivers/usb/host/xhci-hcd.c
parent     2f697f6cbff155b3ce4053a50cdf00b5be4dda11 (diff)
USB: xhci: Support interrupt transfers.
Interrupt transfers are submitted to the xHCI hardware using the same TRB type as bulk transfers, so the bulk transfer enqueueing code is re-used to enqueue interrupt transfers.

Interrupt transfers differ from bulk transfers in one important way: when the interrupt endpoint is to be serviced, the xHC will consume (at most) one TD. A TD (composed of sg list entries) can take several service intervals to transmit. The important thing for device drivers to note is that if they use the scatter-gather interface to submit interrupt requests, they will not get data sent from two different scatter-gather lists in the same service interval.

For now, the xHCI driver uses the service interval from the endpoint's descriptor (bInterval); drivers will need a hook to poll at a more frequent interval. urb->interval is set to the interval that the xHCI hardware will actually use.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
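The diffstat below covers only xhci-hcd.c; the ring-side helper this hunk calls, xhci_queue_intr_tx(), lives in xhci-ring.c and is not shown on this page. The following is a rough sketch (not the commit's verbatim code) of how such a wrapper could reuse the bulk enqueue path and report the hardware's interval back through urb->interval; the xhci_get_ep_ctx() helper and the EP_INTERVAL_TO_UFRAMES() decode are assumptions here.

	/* Sketch only -- not the code from this commit.  Assumes xhci_get_ep_ctx()
	 * and an EP_INTERVAL_TO_UFRAMES() decode of the endpoint context exist. */
	int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			struct urb *urb, int slot_id, unsigned int ep_index)
	{
		struct xhci_ep_ctx *ep_ctx;
		int xhci_interval;	/* interval programmed into the xHC */
		int ep_interval;	/* interval the submitting driver asked for */

		ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
		xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
		ep_interval = urb->interval;

		/* Tell the driver which interval the hardware will really use. */
		if (xhci_interval != ep_interval)
			urb->interval = xhci_interval;

		/* Interrupt TDs use normal TRBs, so reuse the bulk enqueue path. */
		return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
	}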
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--   drivers/usb/host/xhci-hcd.c   5
1 file changed, 5 insertions, 0 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 1d4a1e3f9533..e478a63488fb 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -727,6 +727,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else {
 		ret = -EINVAL;
 	}
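For context (not part of this patch), a class driver submitting an interrupt URB reaches the new branch above via usb_submit_urb(); since xhci_urb_enqueue() runs during submission, urb->interval already reflects the controller's choice by the time usb_submit_urb() returns. The function and variable names below (submit_int_urb, my_complete, buf, len, ctx) are illustrative only.

	/* Illustrative only: submit_int_urb() and its arguments are placeholder
	 * names, not part of this commit.  Assumes <linux/usb.h>. */
	#include <linux/usb.h>

	static int submit_int_urb(struct usb_device *udev,
			struct usb_endpoint_descriptor *ep_desc,
			void *buf, int len, usb_complete_t my_complete, void *ctx)
	{
		struct urb *urb;
		int ret;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			return -ENOMEM;

		/* Request the interval from the endpoint descriptor (bInterval). */
		usb_fill_int_urb(urb, udev,
				usb_rcvintpipe(udev, ep_desc->bEndpointAddress),
				buf, len, my_complete, ctx, ep_desc->bInterval);

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (!ret)
			/* On xHCI this may differ from bInterval after this patch. */
			dev_dbg(&udev->dev, "interrupt interval in use: %d\n",
					urb->interval);
		return ret;
	}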