author     Greg Kroah-Hartman  2016-03-04 18:43:07 -0800
committer  Greg Kroah-Hartman  2016-03-04 18:43:07 -0800
commit     3d0712deb0a416021e55febc7ec7f6c24f460e06
tree       cf56aba54ac85516fc753b662a1880e8a50ee5d9
parent     83253b3b0286ab6111e0266da74fb10b7b92746b
parent     0561f77e2db9e72dc32e4f82b56fca8ba6b31171
Merge tag 'usb-for-v4.6' of http://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next
Felipe writes:
usb changes for v4.6 merge window
This is almost all under drivers/usb/dwc2/. Many
changes to the host-side implementation of dwc2 have
been made by Douglas Anderson.

We also have USB 3.1 support added to the Gadget
Framework and, because of that work, dwc3 gained
support for Synopsys' new DWC_usb31 IP core.

Other than these two important series, we also have
the usual collection of non-critical fixes,
Documentation updates, and minor changes all over
the place.
74 files changed, 5264 insertions, 3479 deletions
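One of the chipidea/OTG FSM changes below adds HNP polling: the current host periodically issues a one-byte GET_STATUS control request with the OTG status selector in wIndex, and hands the host role back when the peripheral reports the host-request flag. The following is a minimal sketch of that poll, not the merged code itself: otg_peer_wants_host() is a hypothetical helper name, the constants OTG_STS_SELECTOR, HOST_REQUEST_FLAG and the usb_control_msg() call mirror what the usb-otg-fsm.c hunk below does, and the buffer is assumed to be allocated by the caller from heap memory (as the diff does with devm_kzalloc) so it is safe to pass to a control transfer.

    #include <linux/usb.h>
    #include <linux/usb/ch9.h>
    #include <linux/usb/otg-fsm.h>	/* assumed home of OTG_STS_SELECTOR, HOST_REQUEST_FLAG */

    /*
     * Sketch only: ask the connected peripheral for its OTG host-request flag.
     * Returns true when the peripheral wants to take over the host role.
     */
    static bool otg_peer_wants_host(struct usb_device *udev, u8 *flag_buf)
    {
            int ret;

            /* One-byte GET_STATUS with wIndex = OTG status selector */
            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  USB_REQ_GET_STATUS,
                                  USB_DIR_IN | USB_RECIP_DEVICE,
                                  0, OTG_STS_SELECTOR, flag_buf, 1,
                                  USB_CTRL_GET_TIMEOUT);
            if (ret != 1)
                    return false;	/* peripheral does not answer HNP polling */

            return *flag_buf == HOST_REQUEST_FLAG;
    }

In the actual change below, otg_hnp_polling_work() schedules this kind of request from a delayed work every T_HOST_REQ_POLL milliseconds and, once the flag is seen, clears a_bus_req or b_bus_req and re-runs the OTG state machine so the bus is handed over.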
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 221368207ca4..20a68bf2b4e7 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -8,6 +8,8 @@ Required properties:
   - rockchip,rk3066-usb: The DWC2 USB controller instance in the rk3066 Soc;
   - "rockchip,rk3188-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3188 Soc;
   - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
+  - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
+  - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
   - snps,dwc2: A generic DWC2 USB controller with default parameters.
 - reg : Should contain 1 register range (address and length)
 - interrupts : Should contain 1 interrupt
diff --git a/Documentation/usb/chipidea.txt b/Documentation/usb/chipidea.txt
index 05f735a1b5a5..678741b0f213 100644
--- a/Documentation/usb/chipidea.txt
+++ b/Documentation/usb/chipidea.txt
@@ -26,16 +26,17 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
    On B-device:
    echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
 
-   if HNP polling is not supported, also need:
-   On A-device:
-   echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
-
    B-device should take host role and enumrate A-device.
 
 4) A-device switch back to host.
    On B-device:
    echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
+   or, by introducing HNP polling, B-Host can know when A-peripheral wish
+   to be host role, so this role switch also can be trigged in A-peripheral
+   side by answering the polling from B-Host, this can be done on A-device:
+   echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
+
    A-device should switch back to host and enumrate B-device.
 
 5) Remove B-device(unplug micro B plug) and insert again in 10 seconds,
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
index c5bae9c035d5..b7ddd27419c2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
@@ -395,204 +395,6 @@
 #define CRP_AD_CBE_BESL         20
 #define CRP_AD_CBE_WRITE        0x00010000
-
-/*
- * USB Device Controller
- *
- * These are used by the USB gadget driver, so they don't follow the
- * IXP4XX_ naming convetions.
- * - */ -# define IXP4XX_USB_REG(x) (*((volatile u32 *)(x))) - -/* UDC Undocumented - Reserved1 */ -#define UDC_RES1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0004) -/* UDC Undocumented - Reserved2 */ -#define UDC_RES2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0008) -/* UDC Undocumented - Reserved3 */ -#define UDC_RES3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x000C) -/* UDC Control Register */ -#define UDCCR IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0000) -/* UDC Endpoint 0 Control/Status Register */ -#define UDCCS0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0010) -/* UDC Endpoint 1 (IN) Control/Status Register */ -#define UDCCS1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0014) -/* UDC Endpoint 2 (OUT) Control/Status Register */ -#define UDCCS2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0018) -/* UDC Endpoint 3 (IN) Control/Status Register */ -#define UDCCS3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x001C) -/* UDC Endpoint 4 (OUT) Control/Status Register */ -#define UDCCS4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0020) -/* UDC Endpoint 5 (Interrupt) Control/Status Register */ -#define UDCCS5 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0024) -/* UDC Endpoint 6 (IN) Control/Status Register */ -#define UDCCS6 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0028) -/* UDC Endpoint 7 (OUT) Control/Status Register */ -#define UDCCS7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x002C) -/* UDC Endpoint 8 (IN) Control/Status Register */ -#define UDCCS8 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0030) -/* UDC Endpoint 9 (OUT) Control/Status Register */ -#define UDCCS9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0034) -/* UDC Endpoint 10 (Interrupt) Control/Status Register */ -#define UDCCS10 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0038) -/* UDC Endpoint 11 (IN) Control/Status Register */ -#define UDCCS11 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x003C) -/* UDC Endpoint 12 (OUT) Control/Status Register */ -#define UDCCS12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0040) -/* UDC Endpoint 13 (IN) Control/Status Register */ -#define UDCCS13 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0044) -/* UDC Endpoint 14 (OUT) Control/Status Register */ -#define UDCCS14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0048) -/* UDC Endpoint 15 (Interrupt) Control/Status Register */ -#define UDCCS15 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x004C) -/* UDC Frame Number Register High */ -#define UFNRH IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0060) -/* UDC Frame Number Register Low */ -#define UFNRL IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0064) -/* UDC Byte Count Reg 2 */ -#define UBCR2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0068) -/* UDC Byte Count Reg 4 */ -#define UBCR4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x006c) -/* UDC Byte Count Reg 7 */ -#define UBCR7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0070) -/* UDC Byte Count Reg 9 */ -#define UBCR9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0074) -/* UDC Byte Count Reg 12 */ -#define UBCR12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0078) -/* UDC Byte Count Reg 14 */ -#define UBCR14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x007c) -/* UDC Endpoint 0 Data Register */ -#define UDDR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0080) -/* UDC Endpoint 1 Data Register */ -#define UDDR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0100) -/* UDC Endpoint 2 Data Register */ -#define UDDR2 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0180) -/* UDC Endpoint 3 Data Register */ -#define UDDR3 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0200) -/* UDC Endpoint 4 Data Register */ -#define UDDR4 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0400) -/* UDC Endpoint 5 Data Register */ -#define UDDR5 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00A0) -/* UDC 
Endpoint 6 Data Register */ -#define UDDR6 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0600) -/* UDC Endpoint 7 Data Register */ -#define UDDR7 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0680) -/* UDC Endpoint 8 Data Register */ -#define UDDR8 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0700) -/* UDC Endpoint 9 Data Register */ -#define UDDR9 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0900) -/* UDC Endpoint 10 Data Register */ -#define UDDR10 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00C0) -/* UDC Endpoint 11 Data Register */ -#define UDDR11 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0B00) -/* UDC Endpoint 12 Data Register */ -#define UDDR12 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0B80) -/* UDC Endpoint 13 Data Register */ -#define UDDR13 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0C00) -/* UDC Endpoint 14 Data Register */ -#define UDDR14 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0E00) -/* UDC Endpoint 15 Data Register */ -#define UDDR15 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x00E0) -/* UDC Interrupt Control Register 0 */ -#define UICR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0050) -/* UDC Interrupt Control Register 1 */ -#define UICR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0054) -/* UDC Status Interrupt Register 0 */ -#define USIR0 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x0058) -/* UDC Status Interrupt Register 1 */ -#define USIR1 IXP4XX_USB_REG(IXP4XX_USB_BASE_VIRT+0x005C) - -#define UDCCR_UDE (1 << 0) /* UDC enable */ -#define UDCCR_UDA (1 << 1) /* UDC active */ -#define UDCCR_RSM (1 << 2) /* Device resume */ -#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */ -#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */ -#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */ -#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */ -#define UDCCR_REM (1 << 7) /* Reset interrupt mask */ - -#define UDCCS0_OPR (1 << 0) /* OUT packet ready */ -#define UDCCS0_IPR (1 << 1) /* IN packet ready */ -#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */ -#define UDCCS0_SST (1 << 4) /* Sent stall */ -#define UDCCS0_FST (1 << 5) /* Force stall */ -#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */ -#define UDCCS0_SA (1 << 7) /* Setup active */ - -#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_BI_SST (1 << 4) /* Sent stall */ -#define UDCCS_BI_FST (1 << 5) /* Force stall */ -#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */ - -#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */ -#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */ -#define UDCCS_BO_DME (1 << 3) /* DMA enable */ -#define UDCCS_BO_SST (1 << 4) /* Sent stall */ -#define UDCCS_BO_FST (1 << 5) /* Force stall */ -#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */ -#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */ - -#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */ - -#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */ -#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */ -#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */ -#define UDCCS_IO_DME (1 << 3) /* DMA enable */ -#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty 
*/ -#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */ - -#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_INT_SST (1 << 4) /* Sent stall */ -#define UDCCS_INT_FST (1 << 5) /* Force stall */ -#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */ - -#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */ -#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */ -#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */ -#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */ -#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */ -#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */ -#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */ -#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */ - -#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */ -#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */ -#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */ -#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */ -#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */ -#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */ -#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */ -#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */ - -#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */ -#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */ -#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */ -#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */ -#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */ -#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */ -#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */ -#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */ - -#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */ -#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */ -#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */ -#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */ -#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */ -#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */ -#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */ -#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */ - #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ /* "fuse" bits of IXP_EXP_CFG2 */ diff --git a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h index 1b80a4805a60..e69de29bb2d1 100644 --- a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h +++ b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h @@ -1,163 +0,0 @@ -#ifndef _ASM_ARCH_PXA25X_UDC_H -#define _ASM_ARCH_PXA25X_UDC_H - -#ifdef _ASM_ARCH_PXA27X_UDC_H -#error "You can't include both PXA25x and PXA27x UDC support" -#endif - -#define UDC_RES1 __REG(0x40600004) /* UDC Undocumented - Reserved1 */ -#define UDC_RES2 __REG(0x40600008) /* UDC Undocumented - Reserved2 */ -#define UDC_RES3 __REG(0x4060000C) /* UDC Undocumented - Reserved3 */ - -#define UDCCR __REG(0x40600000) /* UDC Control Register */ -#define UDCCR_UDE (1 << 0) /* UDC enable */ -#define UDCCR_UDA (1 << 1) /* UDC active */ -#define UDCCR_RSM (1 << 2) /* Device resume */ -#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */ -#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */ -#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */ -#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */ -#define UDCCR_REM (1 << 7) /* Reset interrupt mask */ - -#define UDCCS0 __REG(0x40600010) /* UDC 
Endpoint 0 Control/Status Register */ -#define UDCCS0_OPR (1 << 0) /* OUT packet ready */ -#define UDCCS0_IPR (1 << 1) /* IN packet ready */ -#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */ -#define UDCCS0_SST (1 << 4) /* Sent stall */ -#define UDCCS0_FST (1 << 5) /* Force stall */ -#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */ -#define UDCCS0_SA (1 << 7) /* Setup active */ - -/* Bulk IN - Endpoint 1,6,11 */ -#define UDCCS1 __REG(0x40600014) /* UDC Endpoint 1 (IN) Control/Status Register */ -#define UDCCS6 __REG(0x40600028) /* UDC Endpoint 6 (IN) Control/Status Register */ -#define UDCCS11 __REG(0x4060003C) /* UDC Endpoint 11 (IN) Control/Status Register */ - -#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_BI_SST (1 << 4) /* Sent stall */ -#define UDCCS_BI_FST (1 << 5) /* Force stall */ -#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */ - -/* Bulk OUT - Endpoint 2,7,12 */ -#define UDCCS2 __REG(0x40600018) /* UDC Endpoint 2 (OUT) Control/Status Register */ -#define UDCCS7 __REG(0x4060002C) /* UDC Endpoint 7 (OUT) Control/Status Register */ -#define UDCCS12 __REG(0x40600040) /* UDC Endpoint 12 (OUT) Control/Status Register */ - -#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */ -#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */ -#define UDCCS_BO_DME (1 << 3) /* DMA enable */ -#define UDCCS_BO_SST (1 << 4) /* Sent stall */ -#define UDCCS_BO_FST (1 << 5) /* Force stall */ -#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */ -#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */ - -/* Isochronous IN - Endpoint 3,8,13 */ -#define UDCCS3 __REG(0x4060001C) /* UDC Endpoint 3 (IN) Control/Status Register */ -#define UDCCS8 __REG(0x40600030) /* UDC Endpoint 8 (IN) Control/Status Register */ -#define UDCCS13 __REG(0x40600044) /* UDC Endpoint 13 (IN) Control/Status Register */ - -#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */ -#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */ - -/* Isochronous OUT - Endpoint 4,9,14 */ -#define UDCCS4 __REG(0x40600020) /* UDC Endpoint 4 (OUT) Control/Status Register */ -#define UDCCS9 __REG(0x40600034) /* UDC Endpoint 9 (OUT) Control/Status Register */ -#define UDCCS14 __REG(0x40600048) /* UDC Endpoint 14 (OUT) Control/Status Register */ - -#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */ -#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */ -#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */ -#define UDCCS_IO_DME (1 << 3) /* DMA enable */ -#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */ -#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */ - -/* Interrupt IN - Endpoint 5,10,15 */ -#define UDCCS5 __REG(0x40600024) /* UDC Endpoint 5 (Interrupt) Control/Status Register */ -#define UDCCS10 __REG(0x40600038) /* UDC Endpoint 10 (Interrupt) Control/Status Register */ -#define UDCCS15 __REG(0x4060004C) /* UDC Endpoint 15 (Interrupt) Control/Status Register */ - -#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */ -#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */ -#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */ -#define 
UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */ -#define UDCCS_INT_SST (1 << 4) /* Sent stall */ -#define UDCCS_INT_FST (1 << 5) /* Force stall */ -#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */ - -#define UFNRH __REG(0x40600060) /* UDC Frame Number Register High */ -#define UFNRL __REG(0x40600064) /* UDC Frame Number Register Low */ -#define UBCR2 __REG(0x40600068) /* UDC Byte Count Reg 2 */ -#define UBCR4 __REG(0x4060006c) /* UDC Byte Count Reg 4 */ -#define UBCR7 __REG(0x40600070) /* UDC Byte Count Reg 7 */ -#define UBCR9 __REG(0x40600074) /* UDC Byte Count Reg 9 */ -#define UBCR12 __REG(0x40600078) /* UDC Byte Count Reg 12 */ -#define UBCR14 __REG(0x4060007c) /* UDC Byte Count Reg 14 */ -#define UDDR0 __REG(0x40600080) /* UDC Endpoint 0 Data Register */ -#define UDDR1 __REG(0x40600100) /* UDC Endpoint 1 Data Register */ -#define UDDR2 __REG(0x40600180) /* UDC Endpoint 2 Data Register */ -#define UDDR3 __REG(0x40600200) /* UDC Endpoint 3 Data Register */ -#define UDDR4 __REG(0x40600400) /* UDC Endpoint 4 Data Register */ -#define UDDR5 __REG(0x406000A0) /* UDC Endpoint 5 Data Register */ -#define UDDR6 __REG(0x40600600) /* UDC Endpoint 6 Data Register */ -#define UDDR7 __REG(0x40600680) /* UDC Endpoint 7 Data Register */ -#define UDDR8 __REG(0x40600700) /* UDC Endpoint 8 Data Register */ -#define UDDR9 __REG(0x40600900) /* UDC Endpoint 9 Data Register */ -#define UDDR10 __REG(0x406000C0) /* UDC Endpoint 10 Data Register */ -#define UDDR11 __REG(0x40600B00) /* UDC Endpoint 11 Data Register */ -#define UDDR12 __REG(0x40600B80) /* UDC Endpoint 12 Data Register */ -#define UDDR13 __REG(0x40600C00) /* UDC Endpoint 13 Data Register */ -#define UDDR14 __REG(0x40600E00) /* UDC Endpoint 14 Data Register */ -#define UDDR15 __REG(0x406000E0) /* UDC Endpoint 15 Data Register */ - -#define UICR0 __REG(0x40600050) /* UDC Interrupt Control Register 0 */ - -#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */ -#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */ -#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */ -#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */ -#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */ -#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */ -#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */ -#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */ - -#define UICR1 __REG(0x40600054) /* UDC Interrupt Control Register 1 */ - -#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */ -#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */ -#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */ -#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */ -#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */ -#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */ -#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */ -#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */ - -#define USIR0 __REG(0x40600058) /* UDC Status Interrupt Register 0 */ - -#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */ -#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */ -#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */ -#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */ -#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */ -#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */ -#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */ -#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */ - -#define USIR1 __REG(0x4060005C) /* UDC Status Interrupt Register 1 */ - -#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */ -#define USIR1_IR9 (1 << 1) /* Interrupt 
request ep 9 */
-#define USIR1_IR10	(1 << 2)	/* Interrupt request ep 10 */
-#define USIR1_IR11	(1 << 3)	/* Interrupt request ep 11 */
-#define USIR1_IR12	(1 << 4)	/* Interrupt request ep 12 */
-#define USIR1_IR13	(1 << 5)	/* Interrupt request ep 13 */
-#define USIR1_IR14	(1 << 6)	/* Interrupt request ep 14 */
-#define USIR1_IR15	(1 << 7)	/* Interrupt request ep 15 */
-
-#endif
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d5c57f1e98fd..dca78565eb55 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_USB_U132_HCD)	+= host/
 obj-$(CONFIG_USB_R8A66597_HCD)	+= host/
 obj-$(CONFIG_USB_HWA_HCD)	+= host/
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
-obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
+obj-$(CONFIG_USB_FSL_USB2)	+= host/
 obj-$(CONFIG_USB_FOTG210_HCD)	+= host/
 obj-$(CONFIG_USB_MAX3421_HCD)	+= host/
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index ba90dc66703d..de8e22ec3902 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -66,6 +66,11 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
 			return count;
 		}
 		ci->fsm.a_bus_req = 1;
+		if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
+			ci->gadget.host_request_flag = 1;
+			mutex_unlock(&ci->fsm.lock);
+			return count;
+		}
 	}
 
 	ci_otg_queue_work(ci);
@@ -144,8 +149,14 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr,
 	mutex_lock(&ci->fsm.lock);
 	if (buf[0] == '0')
 		ci->fsm.b_bus_req = 0;
-	else if (buf[0] == '1')
+	else if (buf[0] == '1') {
 		ci->fsm.b_bus_req = 1;
+		if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
+			ci->gadget.host_request_flag = 1;
+			mutex_unlock(&ci->fsm.lock);
+			return count;
+		}
+	}
 
 	ci_otg_queue_work(ci);
 	mutex_unlock(&ci->fsm.lock);
@@ -198,6 +209,7 @@ static unsigned otg_timer_ms[] = {
 	TA_AIDL_BDIS,
 	TB_ASE0_BRST,
 	TA_BIDL_ADIS,
+	TB_AIDL_BDIS,
 	TB_SE0_SRP,
 	TB_SRP_FAIL,
 	0,
@@ -309,6 +321,12 @@ static int a_bidl_adis_tmout(struct ci_hdrc *ci)
 	return 0;
 }
 
+static int b_aidl_bdis_tmout(struct ci_hdrc *ci)
+{
+	ci->fsm.a_bus_suspend = 1;
+	return 0;
+}
+
 static int b_se0_srp_tmout(struct ci_hdrc *ci)
 {
 	ci->fsm.b_se0_srp = 1;
@@ -353,6 +371,7 @@ static int (*otg_timer_handlers[])(struct ci_hdrc *) = {
 	a_aidl_bdis_tmout,	/* A_AIDL_BDIS */
 	b_ase0_brst_tmout,	/* B_ASE0_BRST */
 	a_bidl_adis_tmout,	/* A_BIDL_ADIS */
+	b_aidl_bdis_tmout,	/* B_AIDL_BDIS */
 	b_se0_srp_tmout,	/* B_SE0_SRP */
 	b_srp_fail_tmout,	/* B_SRP_FAIL */
 	NULL,			/* A_WAIT_ENUM */
@@ -644,9 +663,9 @@ static void ci_otg_fsm_event(struct ci_hdrc *ci)
 		break;
 	case OTG_STATE_B_PERIPHERAL:
 		if ((intr_sts & USBi_SLI) && port_conn && otg_bsess_vld) {
-			fsm->a_bus_suspend = 1;
-			ci_otg_queue_work(ci);
+			ci_otg_add_timer(ci, B_AIDL_BDIS);
 		} else if (intr_sts & USBi_PCI) {
+			ci_otg_del_timer(ci, B_AIDL_BDIS);
 			if (fsm->a_bus_suspend == 1)
 				fsm->a_bus_suspend = 0;
 		}
@@ -786,6 +805,10 @@ int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci)
 	ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0;
 	ci->fsm.otg->state = OTG_STATE_UNDEFINED;
 	ci->fsm.ops = &ci_otg_ops;
+	ci->gadget.hnp_polling_support = 1;
+	ci->fsm.host_req_flag = devm_kzalloc(ci->dev, 1, GFP_KERNEL);
+	if (!ci->fsm.host_req_flag)
+		return -ENOMEM;
 
 	mutex_init(&ci->fsm.lock);
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 262d6ef8df7c..6366fe398ba6 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -62,6 +62,8 @@
 /* SSEND time before SRP */
 #define TB_SSEND_SRP         (1500)	/* minimum 1.5 sec, section:5.1.2 */
 
+#define TB_AIDL_BDIS         (20)	/* 4ms ~ 150ms, section 5.2.1 */
+
 #if IS_ENABLED(CONFIG_USB_OTG_FSM)
 
 int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 00250ab38ddb..065f5d97aa67 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1067,7 +1067,8 @@ __acquires(ci->lock)
 		}
 		break;
 	case USB_REQ_GET_STATUS:
-		if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+		if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
+		    le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
 		    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
 		    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
 			goto delegate;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 61d538aa2346..504708f59b93 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -78,6 +78,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
 		fsm->b_srp_done = 0;
 		break;
 	case OTG_STATE_B_PERIPHERAL:
+		if (fsm->otg->gadget)
+			fsm->otg->gadget->host_request_flag = 0;
 		break;
 	case OTG_STATE_B_WAIT_ACON:
 		otg_del_timer(fsm, B_ASE0_BRST);
@@ -107,6 +109,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
 	case OTG_STATE_A_PERIPHERAL:
 		otg_del_timer(fsm, A_BIDL_ADIS);
 		fsm->a_bidl_adis_tmout = 0;
+		if (fsm->otg->gadget)
+			fsm->otg->gadget->host_request_flag = 0;
 		break;
 	case OTG_STATE_A_WAIT_VFALL:
 		otg_del_timer(fsm, A_WAIT_VFALL);
@@ -120,6 +124,87 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
 	}
 }
 
+static void otg_hnp_polling_work(struct work_struct *work)
+{
+	struct otg_fsm *fsm = container_of(to_delayed_work(work),
+				struct otg_fsm, hnp_polling_work);
+	struct usb_device *udev;
+	enum usb_otg_state state = fsm->otg->state;
+	u8 flag;
+	int retval;
+
+	if (state != OTG_STATE_A_HOST && state != OTG_STATE_B_HOST)
+		return;
+
+	udev = usb_hub_find_child(fsm->otg->host->root_hub, 1);
+	if (!udev) {
+		dev_err(fsm->otg->host->controller,
+			"no usb dev connected, can't start HNP polling\n");
+		return;
+	}
+
+	*fsm->host_req_flag = 0;
+	/* Get host request flag from connected USB device */
+	retval = usb_control_msg(udev,
+				usb_rcvctrlpipe(udev, 0),
+				USB_REQ_GET_STATUS,
+				USB_DIR_IN | USB_RECIP_DEVICE,
+				0,
+				OTG_STS_SELECTOR,
+				fsm->host_req_flag,
+				1,
+				USB_CTRL_GET_TIMEOUT);
+	if (retval != 1) {
+		dev_err(&udev->dev, "Get one byte OTG status failed\n");
+		return;
+	}
+
+	flag = *fsm->host_req_flag;
+	if (flag == 0) {
+		/* Continue HNP polling */
+		schedule_delayed_work(&fsm->hnp_polling_work,
+					msecs_to_jiffies(T_HOST_REQ_POLL));
+		return;
+	} else if (flag != HOST_REQUEST_FLAG) {
+		dev_err(&udev->dev, "host request flag %d is invalid\n", flag);
+		return;
+	}
+
+	/* Host request flag is set */
+	if (state == OTG_STATE_A_HOST) {
+		/* Set b_hnp_enable */
+		if (!fsm->otg->host->b_hnp_enable) {
+			retval = usb_control_msg(udev,
+				usb_sndctrlpipe(udev, 0),
+				USB_REQ_SET_FEATURE, 0,
+				USB_DEVICE_B_HNP_ENABLE,
+				0, NULL, 0,
+				USB_CTRL_SET_TIMEOUT);
+			if (retval >= 0)
+				fsm->otg->host->b_hnp_enable = 1;
+		}
+		fsm->a_bus_req = 0;
+	} else if (state == OTG_STATE_B_HOST) {
+		fsm->b_bus_req = 0;
+	}
+
+	otg_statemachine(fsm);
+}
+
+static void otg_start_hnp_polling(struct otg_fsm *fsm)
+{
+	/*
+	 * The memory of host_req_flag should be allocated by
+	 * controller driver, otherwise, hnp polling is not started.
+	 */
+	if (!fsm->host_req_flag)
+		return;
+
+	INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+	schedule_delayed_work(&fsm->hnp_polling_work,
+				msecs_to_jiffies(T_HOST_REQ_POLL));
+}
+
 /* Called when entering a state */
 static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
 {
@@ -169,6 +254,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
 		otg_set_protocol(fsm, PROTO_HOST);
 		usb_bus_start_enum(fsm->otg->host,
 				fsm->otg->host->otg_port);
+		otg_start_hnp_polling(fsm);
 		break;
 	case OTG_STATE_A_IDLE:
 		otg_drv_vbus(fsm, 0);
@@ -203,6 +289,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
 		 */
 		if (!fsm->a_bus_req || fsm->a_suspend_req_inf)
 			otg_add_timer(fsm, A_WAIT_ENUM);
+		otg_start_hnp_polling(fsm);
 		break;
 	case OTG_STATE_A_SUSPEND:
 		otg_drv_vbus(fsm, 1);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 46c4ba75dc2a..4135a5ff67ca 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -56,189 +56,6 @@
 #include "core.h"
 #include "hcd.h"
 
-#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
-/**
- * dwc2_backup_host_registers() - Backup controller host registers.
- * When suspending usb bus, registers needs to be backuped
- * if controller power is disabled once suspended.
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
-{
-	struct dwc2_hregs_backup *hr;
-	int i;
-
-	dev_dbg(hsotg->dev, "%s\n", __func__);
-
-	/* Backup Host regs */
-	hr = &hsotg->hr_backup;
-	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
-	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
-	for (i = 0; i < hsotg->core_params->host_channels; ++i)
-		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
-
-	hr->hprt0 = dwc2_read_hprt0(hsotg);
-	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
-	hr->valid = true;
-
-	return 0;
-}
-
-/**
- * dwc2_restore_host_registers() - Restore controller host registers.
- * When resuming usb bus, device registers needs to be restored
- * if controller power were disabled.
- * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hregs_backup *hr; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Restore host regs */ - hr = &hsotg->hr_backup; - if (!hr->valid) { - dev_err(hsotg->dev, "%s: no host registers to restore\n", - __func__); - return -EINVAL; - } - hr->valid = false; - - dwc2_writel(hr->hcfg, hsotg->regs + HCFG); - dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK); - - for (i = 0; i < hsotg->core_params->host_channels; ++i) - dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i)); - - dwc2_writel(hr->hprt0, hsotg->regs + HPRT0); - dwc2_writel(hr->hfir, hsotg->regs + HFIR); - hsotg->frame_number = 0; - - return 0; -} -#else -static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) -{ return 0; } - -static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) -{ return 0; } -#endif - -#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ - IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) -/** - * dwc2_backup_device_registers() - Backup controller device registers. - * When suspending usb bus, registers needs to be backuped - * if controller power is disabled once suspended. - * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_dregs_backup *dr; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Backup dev regs */ - dr = &hsotg->dr_backup; - - dr->dcfg = dwc2_readl(hsotg->regs + DCFG); - dr->dctl = dwc2_readl(hsotg->regs + DCTL); - dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); - dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK); - dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK); - - for (i = 0; i < hsotg->num_of_eps; i++) { - /* Backup IN EPs */ - dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i)); - - /* Ensure DATA PID is correctly configured */ - if (dr->diepctl[i] & DXEPCTL_DPID) - dr->diepctl[i] |= DXEPCTL_SETD1PID; - else - dr->diepctl[i] |= DXEPCTL_SETD0PID; - - dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i)); - dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i)); - - /* Backup OUT EPs */ - dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i)); - - /* Ensure DATA PID is correctly configured */ - if (dr->doepctl[i] & DXEPCTL_DPID) - dr->doepctl[i] |= DXEPCTL_SETD1PID; - else - dr->doepctl[i] |= DXEPCTL_SETD0PID; - - dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i)); - dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i)); - } - dr->valid = true; - return 0; -} - -/** - * dwc2_restore_device_registers() - Restore controller device registers. - * When resuming usb bus, device registers needs to be restored - * if controller power were disabled. 
- * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_dregs_backup *dr; - u32 dctl; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Restore dev regs */ - dr = &hsotg->dr_backup; - if (!dr->valid) { - dev_err(hsotg->dev, "%s: no device registers to restore\n", - __func__); - return -EINVAL; - } - dr->valid = false; - - dwc2_writel(dr->dcfg, hsotg->regs + DCFG); - dwc2_writel(dr->dctl, hsotg->regs + DCTL); - dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK); - dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK); - dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK); - - for (i = 0; i < hsotg->num_of_eps; i++) { - /* Restore IN EPs */ - dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i)); - dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i)); - dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i)); - - /* Restore OUT EPs */ - dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i)); - dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i)); - dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i)); - } - - /* Set the Power-On Programming done bit */ - dctl = dwc2_readl(hsotg->regs + DCTL); - dctl |= DCTL_PWRONPRGDONE; - dwc2_writel(dctl, hsotg->regs + DCTL); - - return 0; -} -#else -static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) -{ return 0; } - -static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) -{ return 0; } -#endif - /** * dwc2_backup_global_registers() - Backup global controller registers. * When suspending usb bus, registers needs to be backuped @@ -421,62 +238,6 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) return ret; } -/** - * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, - * used in both device and host modes - * - * @hsotg: Programming view of the DWC_otg controller - */ -static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk; - - /* Clear any pending OTG Interrupts */ - dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); - - /* Clear any pending interrupts */ - dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); - - /* Enable the interrupts in the GINTMSK */ - intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; - - if (hsotg->core_params->dma_enable <= 0) - intmsk |= GINTSTS_RXFLVL; - if (hsotg->core_params->external_id_pin_ctl <= 0) - intmsk |= GINTSTS_CONIDSTSCHNG; - - intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | - GINTSTS_SESSREQINT; - - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/* - * Initializes the FSLSPClkSel field of the HCFG register depending on the - * PHY type - */ -static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) -{ - u32 hcfg, val; - - if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) || - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* Full speed PHY */ - val = HCFG_FSLSPCLKSEL_48_MHZ; - } else { - /* High speed PHY running at full speed or high speed */ - val = HCFG_FSLSPCLKSEL_30_60_MHZ; - } - - dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg &= ~HCFG_FSLSPCLKSEL_MASK; - hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; - dwc2_writel(hcfg, hsotg->regs + HCFG); -} - /* * Do core a soft reset of the core. Be careful with this because it * resets all the internal state machines of the core. 
@@ -646,1644 +407,6 @@ int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg) return 0; } -static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg, i2cctl; - int retval = 0; - - /* - * core_init() is now called on every switch so only call the - * following for the first time through - */ - if (select_phy) { - dev_dbg(hsotg->dev, "FS PHY selected\n"); - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - if (!(usbcfg & GUSBCFG_PHYSEL)) { - usbcfg |= GUSBCFG_PHYSEL; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Reset after a PHY select */ - retval = dwc2_core_reset_and_force_dr_mode(hsotg); - - if (retval) { - dev_err(hsotg->dev, - "%s: Reset failed, aborting", __func__); - return retval; - } - } - } - - /* - * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also - * do this on HNP Dev/Host mode switches (done in dev_init and - * host_init). - */ - if (dwc2_is_host_mode(hsotg)) - dwc2_init_fs_ls_pclk_sel(hsotg); - - if (hsotg->core_params->i2c_enable > 0) { - dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); - - /* Program GUSBCFG.OtgUtmiFsSel to I2C */ - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Program GI2CCTL.I2CEn */ - i2cctl = dwc2_readl(hsotg->regs + GI2CCTL); - i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; - i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; - i2cctl &= ~GI2CCTL_I2CEN; - dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); - i2cctl |= GI2CCTL_I2CEN; - dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); - } - - return retval; -} - -static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg, usbcfg_old; - int retval = 0; - - if (!select_phy) - return 0; - - usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG); - - /* - * HS PHY parameters. These parameters are preserved during soft reset - * so only program the first time. Do a soft reset immediately after - * setting phyif. 
- */ - switch (hsotg->core_params->phy_type) { - case DWC2_PHY_TYPE_PARAM_ULPI: - /* ULPI interface */ - dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); - usbcfg |= GUSBCFG_ULPI_UTMI_SEL; - usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); - if (hsotg->core_params->phy_ulpi_ddr > 0) - usbcfg |= GUSBCFG_DDRSEL; - break; - case DWC2_PHY_TYPE_PARAM_UTMI: - /* UTMI+ interface */ - dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); - usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); - if (hsotg->core_params->phy_utmi_width == 16) - usbcfg |= GUSBCFG_PHYIF16; - break; - default: - dev_err(hsotg->dev, "FS PHY selected at HS!\n"); - break; - } - - if (usbcfg != usbcfg_old) { - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Reset after setting the PHY parameters */ - retval = dwc2_core_reset_and_force_dr_mode(hsotg); - if (retval) { - dev_err(hsotg->dev, - "%s: Reset failed, aborting", __func__); - return retval; - } - } - - return retval; -} - -static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg; - int retval = 0; - - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* If FS mode with FS PHY */ - retval = dwc2_fs_phy_init(hsotg, select_phy); - if (retval) - return retval; - } else { - /* High speed PHY */ - retval = dwc2_hs_phy_init(hsotg, select_phy); - if (retval) - return retval; - } - - if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) { - dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_ULPI_FS_LS; - usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - } else { - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg &= ~GUSBCFG_ULPI_FS_LS; - usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - } - - return retval; -} - -static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) -{ - u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); - - switch (hsotg->hw_params.arch) { - case GHWCFG2_EXT_DMA_ARCH: - dev_err(hsotg->dev, "External DMA Mode not supported\n"); - return -EINVAL; - - case GHWCFG2_INT_DMA_ARCH: - dev_dbg(hsotg->dev, "Internal DMA Mode\n"); - if (hsotg->core_params->ahbcfg != -1) { - ahbcfg &= GAHBCFG_CTRL_MASK; - ahbcfg |= hsotg->core_params->ahbcfg & - ~GAHBCFG_CTRL_MASK; - } - break; - - case GHWCFG2_SLAVE_ONLY_ARCH: - default: - dev_dbg(hsotg->dev, "Slave Only Mode\n"); - break; - } - - dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", - hsotg->core_params->dma_enable, - hsotg->core_params->dma_desc_enable); - - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) - dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); - else - dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); - } else { - dev_dbg(hsotg->dev, "Using Slave mode\n"); - hsotg->core_params->dma_desc_enable = 0; - } - - if (hsotg->core_params->dma_enable > 0) - ahbcfg |= GAHBCFG_DMA_EN; - - dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); - - return 0; -} - -static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) -{ - u32 usbcfg; - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); - - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - if (hsotg->core_params->otg_cap == - DWC2_CAP_PARAM_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_HNPCAP; - if (hsotg->core_params->otg_cap != - 
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_SRPCAP; - break; - - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - if (hsotg->core_params->otg_cap != - DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_SRPCAP; - break; - - case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: - case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: - default: - break; - } - - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); -} - -/** - * dwc2_core_init() - Initializes the DWC_otg controller registers and - * prepares the core for device mode or host mode operation - * - * @hsotg: Programming view of the DWC_otg controller - * @initial_setup: If true then this is the first init for this instance. - */ -int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) -{ - u32 usbcfg, otgctl; - int retval; - - dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - - /* Set ULPI External VBUS bit if needed */ - usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; - if (hsotg->core_params->phy_ulpi_ext_vbus == - DWC2_PHY_ULPI_EXTERNAL_VBUS) - usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; - - /* Set external TS Dline pulsing bit if needed */ - usbcfg &= ~GUSBCFG_TERMSELDLPULSE; - if (hsotg->core_params->ts_dline > 0) - usbcfg |= GUSBCFG_TERMSELDLPULSE; - - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* - * Reset the Controller - * - * We only need to reset the controller if this is a re-init. - * For the first init we know for sure that earlier code reset us (it - * needed to in order to properly detect various parameters). - */ - if (!initial_setup) { - retval = dwc2_core_reset_and_force_dr_mode(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", - __func__); - return retval; - } - } - - /* - * This needs to happen in FS mode before any other programming occurs - */ - retval = dwc2_phy_init(hsotg, initial_setup); - if (retval) - return retval; - - /* Program the GAHBCFG Register */ - retval = dwc2_gahbcfg_init(hsotg); - if (retval) - return retval; - - /* Program the GUSBCFG register */ - dwc2_gusbcfg_init(hsotg); - - /* Program the GOTGCTL register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_OTGVER; - if (hsotg->core_params->otg_ver > 0) - otgctl |= GOTGCTL_OTGVER; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); - - /* Clear the SRP success bit for FS-I2c */ - hsotg->srp_success = 0; - - /* Enable common interrupts */ - dwc2_enable_common_interrupts(hsotg); - - /* - * Do device or host initialization based on mode during PCD and - * HCD initialization - */ - if (dwc2_is_host_mode(hsotg)) { - dev_dbg(hsotg->dev, "Host Mode\n"); - hsotg->op_state = OTG_STATE_A_HOST; - } else { - dev_dbg(hsotg->dev, "Device Mode\n"); - hsotg->op_state = OTG_STATE_B_PERIPHERAL; - } - - return 0; -} - -/** - * dwc2_enable_host_interrupts() - Enables the Host mode interrupts - * - * @hsotg: Programming view of DWC_otg controller - */ -void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk; - - dev_dbg(hsotg->dev, "%s()\n", __func__); - - /* Disable all interrupts */ - dwc2_writel(0, hsotg->regs + GINTMSK); - dwc2_writel(0, hsotg->regs + HAINTMSK); - - /* Enable the common interrupts */ - dwc2_enable_common_interrupts(hsotg); - - /* Enable host mode interrupts without disturbing common interrupts */ - intmsk = dwc2_readl(hsotg->regs + GINTMSK); - intmsk |= 
GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/** - * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts - * - * @hsotg: Programming view of DWC_otg controller - */ -void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK); - - /* Disable host mode interrupts without disturbing common interrupts */ - intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | - GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/* - * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size - * For system that have a total fifo depth that is smaller than the default - * RX + TX fifo size. - * - * @hsotg: Programming view of DWC_otg controller - */ -static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) -{ - struct dwc2_core_params *params = hsotg->core_params; - struct dwc2_hw_params *hw = &hsotg->hw_params; - u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; - - total_fifo_size = hw->total_fifo_size; - rxfsiz = params->host_rx_fifo_size; - nptxfsiz = params->host_nperio_tx_fifo_size; - ptxfsiz = params->host_perio_tx_fifo_size; - - /* - * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth - * allocation with support for high bandwidth endpoints. Synopsys - * defines MPS(Max Packet size) for a periodic EP=1024, and for - * non-periodic as 512. - */ - if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { - /* - * For Buffer DMA mode/Scatter Gather DMA mode - * 2 * ((Largest Packet size / 4) + 1 + 1) + n - * with n = number of host channel. - * 2 * ((1024/4) + 2) = 516 - */ - rxfsiz = 516 + hw->host_channels; - - /* - * min non-periodic tx fifo depth - * 2 * (largest non-periodic USB packet used / 4) - * 2 * (512/4) = 256 - */ - nptxfsiz = 256; - - /* - * min periodic tx fifo depth - * (largest packet size*MC)/4 - * (1024 * 3)/4 = 768 - */ - ptxfsiz = 768; - - params->host_rx_fifo_size = rxfsiz; - params->host_nperio_tx_fifo_size = nptxfsiz; - params->host_perio_tx_fifo_size = ptxfsiz; - } - - /* - * If the summation of RX, NPTX and PTX fifo sizes is still - * bigger than the total_fifo_size, then we have a problem. - * - * We won't be able to allocate as many endpoints. Right now, - * we're just printing an error message, but ideally this FIFO - * allocation algorithm would be improved in the future. - * - * FIXME improve this FIFO allocation algorithm. 
- */ - if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) - dev_err(hsotg->dev, "invalid fifo sizes\n"); -} - -static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) -{ - struct dwc2_core_params *params = hsotg->core_params; - u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; - - if (!params->enable_dynamic_fifo) - return; - - dwc2_calculate_dynamic_fifo(hsotg); - - /* Rx FIFO */ - grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); - dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); - grxfsiz &= ~GRXFSIZ_DEPTH_MASK; - grxfsiz |= params->host_rx_fifo_size << - GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; - dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ); - dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GRXFSIZ)); - - /* Non-periodic Tx FIFO */ - dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GNPTXFSIZ)); - nptxfsiz = params->host_nperio_tx_fifo_size << - FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; - nptxfsiz |= params->host_rx_fifo_size << - FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; - dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); - dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GNPTXFSIZ)); - - /* Periodic Tx FIFO */ - dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + HPTXFSIZ)); - hptxfsiz = params->host_perio_tx_fifo_size << - FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; - hptxfsiz |= (params->host_rx_fifo_size + - params->host_nperio_tx_fifo_size) << - FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; - dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + HPTXFSIZ)); - - if (hsotg->core_params->en_multiple_tx_fifo > 0 && - hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { - /* - * Global DFIFOCFG calculation for Host mode - - * include RxFIFO, NPTXFIFO and HPTXFIFO - */ - dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG); - dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; - dfifocfg |= (params->host_rx_fifo_size + - params->host_nperio_tx_fifo_size + - params->host_perio_tx_fifo_size) << - GDFIFOCFG_EPINFOBASE_SHIFT & - GDFIFOCFG_EPINFOBASE_MASK; - dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG); - } -} - -/** - * dwc2_core_host_init() - Initializes the DWC_otg controller registers for - * Host mode - * - * @hsotg: Programming view of DWC_otg controller - * - * This function flushes the Tx and Rx FIFOs and flushes any entries in the - * request queues. Host channels are reset to ensure that they are ready for - * performing transfers. - */ -void dwc2_core_host_init(struct dwc2_hsotg *hsotg) -{ - u32 hcfg, hfir, otgctl; - - dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); - - /* Restart the Phy Clock */ - dwc2_writel(0, hsotg->regs + PCGCTL); - - /* Initialize Host Configuration Register */ - dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg |= HCFG_FSLSSUPP; - dwc2_writel(hcfg, hsotg->regs + HCFG); - } - - /* - * This bit allows dynamic reloading of the HFIR register during - * runtime. This bit needs to be programmed during initial configuration - * and its value must not be changed during runtime. 
- */ - if (hsotg->core_params->reload_ctl > 0) { - hfir = dwc2_readl(hsotg->regs + HFIR); - hfir |= HFIR_RLDCTRL; - dwc2_writel(hfir, hsotg->regs + HFIR); - } - - if (hsotg->core_params->dma_desc_enable > 0) { - u32 op_mode = hsotg->hw_params.op_mode; - if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || - !hsotg->hw_params.dma_desc_enable || - op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || - op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || - op_mode == GHWCFG2_OP_MODE_UNDEFINED) { - dev_err(hsotg->dev, - "Hardware does not support descriptor DMA mode -\n"); - dev_err(hsotg->dev, - "falling back to buffer DMA mode.\n"); - hsotg->core_params->dma_desc_enable = 0; - } else { - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg |= HCFG_DESCDMA; - dwc2_writel(hcfg, hsotg->regs + HCFG); - } - } - - /* Configure data FIFO sizes */ - dwc2_config_fifos(hsotg); - - /* TODO - check this */ - /* Clear Host Set HNP Enable in the OTG Control Register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_HSTSETHNPEN; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - - /* Make sure the FIFOs are flushed */ - dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); - dwc2_flush_rx_fifo(hsotg); - - /* Clear Host Set HNP Enable in the OTG Control Register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_HSTSETHNPEN; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - - if (hsotg->core_params->dma_desc_enable <= 0) { - int num_channels, i; - u32 hcchar; - - /* Flush out any leftover queued requests */ - num_channels = hsotg->core_params->host_channels; - for (i = 0; i < num_channels; i++) { - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - hcchar &= ~HCCHAR_CHENA; - hcchar |= HCCHAR_CHDIS; - hcchar &= ~HCCHAR_EPDIR; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); - } - - /* Halt all channels to put them into a known state */ - for (i = 0; i < num_channels; i++) { - int count = 0; - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; - hcchar &= ~HCCHAR_EPDIR; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); - dev_dbg(hsotg->dev, "%s: Halt channel %d\n", - __func__, i); - do { - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - if (++count > 1000) { - dev_err(hsotg->dev, - "Unable to clear enable on channel %d\n", - i); - break; - } - udelay(1); - } while (hcchar & HCCHAR_CHENA); - } - } - - /* Turn on the vbus power */ - dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); - if (hsotg->op_state == OTG_STATE_A_HOST) { - u32 hprt0 = dwc2_read_hprt0(hsotg); - - dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", - !!(hprt0 & HPRT0_PWR)); - if (!(hprt0 & HPRT0_PWR)) { - hprt0 |= HPRT0_PWR; - dwc2_writel(hprt0, hsotg->regs + HPRT0); - } - } - - dwc2_enable_host_interrupts(hsotg); -} - -static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 hcintmsk = HCINTMSK_CHHLTD; - - switch (chan->ep_type) { - case USB_ENDPOINT_XFER_CONTROL: - case USB_ENDPOINT_XFER_BULK: - dev_vdbg(hsotg->dev, "control/bulk\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_STALL; - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_DATATGLERR; - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_BBLERR; - } else { - hcintmsk |= HCINTMSK_NAK; - hcintmsk |= HCINTMSK_NYET; - if (chan->do_ping) - hcintmsk |= HCINTMSK_ACK; - } - - if (chan->do_split) { - hcintmsk |= HCINTMSK_NAK; - if (chan->complete_split) - hcintmsk |= HCINTMSK_NYET; - else - hcintmsk |= HCINTMSK_ACK; - } - - if (chan->error_state) - hcintmsk |= HCINTMSK_ACK; - break; - - case USB_ENDPOINT_XFER_INT: - if (dbg_perio()) - dev_vdbg(hsotg->dev, "intr\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_NAK; - hcintmsk |= HCINTMSK_STALL; - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_DATATGLERR; - hcintmsk |= HCINTMSK_FRMOVRUN; - - if (chan->ep_is_in) - hcintmsk |= HCINTMSK_BBLERR; - if (chan->error_state) - hcintmsk |= HCINTMSK_ACK; - if (chan->do_split) { - if (chan->complete_split) - hcintmsk |= HCINTMSK_NYET; - else - hcintmsk |= HCINTMSK_ACK; - } - break; - - case USB_ENDPOINT_XFER_ISOC: - if (dbg_perio()) - dev_vdbg(hsotg->dev, "isoc\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_FRMOVRUN; - hcintmsk |= HCINTMSK_ACK; - - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_BBLERR; - } - break; - default: - dev_err(hsotg->dev, "## Unknown EP type ##\n"); - break; - } - - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); -} - -static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 hcintmsk = HCINTMSK_CHHLTD; - - /* - * For Descriptor DMA mode core halts the channel on AHB error. - * Interrupt is not required. 
- */ - if (hsotg->core_params->dma_desc_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA disabled\n"); - hcintmsk |= HCINTMSK_AHBERR; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA enabled\n"); - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - hcintmsk |= HCINTMSK_XFERCOMPL; - } - - if (chan->error_state && !chan->do_split && - chan->ep_type != USB_ENDPOINT_XFER_ISOC) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "setting ACK\n"); - hcintmsk |= HCINTMSK_ACK; - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_DATATGLERR; - if (chan->ep_type != USB_ENDPOINT_XFER_INT) - hcintmsk |= HCINTMSK_NAK; - } - } - - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); -} - -static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 intmsk; - - if (hsotg->core_params->dma_enable > 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA enabled\n"); - dwc2_hc_enable_dma_ints(hsotg, chan); - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA disabled\n"); - dwc2_hc_enable_slave_ints(hsotg, chan); - } - - /* Enable the top level host channel interrupt */ - intmsk = dwc2_readl(hsotg->regs + HAINTMSK); - intmsk |= 1 << chan->hc_num; - dwc2_writel(intmsk, hsotg->regs + HAINTMSK); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); - - /* Make sure host channel interrupts are enabled */ - intmsk = dwc2_readl(hsotg->regs + GINTMSK); - intmsk |= GINTSTS_HCHINT; - dwc2_writel(intmsk, hsotg->regs + GINTMSK); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); -} - -/** - * dwc2_hc_init() - Prepares a host channel for transferring packets to/from - * a specific endpoint - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * The HCCHARn register is set up with the characteristics specified in chan. - * Host channel interrupts that may need to be serviced while this transfer is - * in progress are enabled. 
- */ -void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u8 hc_num = chan->hc_num; - u32 hcintmsk; - u32 hcchar; - u32 hcsplt = 0; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - /* Clear old interrupt conditions for this host channel */ - hcintmsk = 0xffffffff; - hcintmsk &= ~HCINTMSK_RESERVED14_31; - dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num)); - - /* Enable channel interrupts required for this transfer */ - dwc2_hc_enable_ints(hsotg, chan); - - /* - * Program the HCCHARn register with the endpoint characteristics for - * the current transfer - */ - hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; - hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; - if (chan->ep_is_in) - hcchar |= HCCHAR_EPDIR; - if (chan->speed == USB_SPEED_LOW) - hcchar |= HCCHAR_LSPDDEV; - hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; - hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num)); - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", - hc_num, hcchar); - - dev_vdbg(hsotg->dev, "%s: Channel %d\n", - __func__, hc_num); - dev_vdbg(hsotg->dev, " Dev Addr: %d\n", - chan->dev_addr); - dev_vdbg(hsotg->dev, " Ep Num: %d\n", - chan->ep_num); - dev_vdbg(hsotg->dev, " Is In: %d\n", - chan->ep_is_in); - dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", - chan->speed == USB_SPEED_LOW); - dev_vdbg(hsotg->dev, " Ep Type: %d\n", - chan->ep_type); - dev_vdbg(hsotg->dev, " Max Pkt: %d\n", - chan->max_packet); - } - - /* Program the HCSPLT register for SPLITs */ - if (chan->do_split) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, - "Programming HC %d with split --> %s\n", - hc_num, - chan->complete_split ? "CSPLIT" : "SSPLIT"); - if (chan->complete_split) - hcsplt |= HCSPLT_COMPSPLT; - hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & - HCSPLT_XACTPOS_MASK; - hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & - HCSPLT_HUBADDR_MASK; - hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & - HCSPLT_PRTADDR_MASK; - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, " comp split %d\n", - chan->complete_split); - dev_vdbg(hsotg->dev, " xact pos %d\n", - chan->xact_pos); - dev_vdbg(hsotg->dev, " hub addr %d\n", - chan->hub_addr); - dev_vdbg(hsotg->dev, " hub port %d\n", - chan->hub_port); - dev_vdbg(hsotg->dev, " is_in %d\n", - chan->ep_is_in); - dev_vdbg(hsotg->dev, " Max Pkt %d\n", - chan->max_packet); - dev_vdbg(hsotg->dev, " xferlen %d\n", - chan->xfer_len); - } - } - - dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); -} - -/** - * dwc2_hc_halt() - Attempts to halt a host channel - * - * @hsotg: Controller register interface - * @chan: Host channel to halt - * @halt_status: Reason for halting the channel - * - * This function should only be called in Slave mode or to abort a transfer in - * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the - * controller halts the channel when the transfer is complete or a condition - * occurs that requires application intervention. - * - * In slave mode, checks for a free request queue entry, then sets the Channel - * Enable and Channel Disable bits of the Host Channel Characteristics - * register of the specified channel to intiate the halt. If there is no free - * request queue entry, sets only the Channel Disable bit of the HCCHARn - * register to flush requests for this channel. 
In the latter case, sets a - * flag to indicate that the host channel needs to be halted when a request - * queue slot is open. - * - * In DMA mode, always sets the Channel Enable and Channel Disable bits of the - * HCCHARn register. The controller ensures there is space in the request - * queue before submitting the halt request. - * - * Some time may elapse before the core flushes any posted requests for this - * host channel and halts. The Channel Halted interrupt handler completes the - * deactivation of the host channel. - */ -void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, - enum dwc2_halt_status halt_status) -{ - u32 nptxsts, hptxsts, hcchar; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) - dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); - - if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || - halt_status == DWC2_HC_XFER_AHB_ERR) { - /* - * Disable all channel interrupts except Ch Halted. The QTD - * and QH state associated with this transfer has been cleared - * (in the case of URB_DEQUEUE), so the channel needs to be - * shut down carefully to prevent crashes. - */ - u32 hcintmsk = HCINTMSK_CHHLTD; - - dev_vdbg(hsotg->dev, "dequeue/error\n"); - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - - /* - * Make sure no other interrupts besides halt are currently - * pending. Handling another interrupt could cause a crash due - * to the QTD and QH state. - */ - dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); - - /* - * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR - * even if the channel was already halted for some other - * reason - */ - chan->halt_status = halt_status; - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - if (!(hcchar & HCCHAR_CHENA)) { - /* - * The channel is either already halted or it hasn't - * started yet. In DMA mode, the transfer may halt if - * it finishes normally or a condition occurs that - * requires driver intervention. Don't want to halt - * the channel again. In either Slave or DMA mode, - * it's possible that the transfer has been assigned - * to a channel, but not started yet when an URB is - * dequeued. Don't want to halt a channel that hasn't - * started yet. - */ - return; - } - } - if (chan->halt_pending) { - /* - * A halt has already been issued for this channel. This might - * happen when a transfer is aborted by a higher level in - * the stack. 
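For orientation, the dequeue case handled above is reached by a call of this shape from a higher layer (hypothetical caller, shown only to make the flow concrete):

	/* Hypothetical caller: an URB is torn down while still on a channel */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);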
- */ - dev_vdbg(hsotg->dev, - "*** %s: Channel %d, chan->halt_pending already set ***\n", - __func__, chan->hc_num); - return; - } - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - - /* No need to set the bit in DDMA for disabling the channel */ - /* TODO check it everywhere channel is disabled */ - if (hsotg->core_params->dma_desc_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA disabled\n"); - hcchar |= HCCHAR_CHENA; - } else { - if (dbg_hc(chan)) - dev_dbg(hsotg->dev, "desc DMA enabled\n"); - } - hcchar |= HCCHAR_CHDIS; - - if (hsotg->core_params->dma_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA not enabled\n"); - hcchar |= HCCHAR_CHENA; - - /* Check for space in the request queue to issue the halt */ - if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || - chan->ep_type == USB_ENDPOINT_XFER_BULK) { - dev_vdbg(hsotg->dev, "control/bulk\n"); - nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS); - if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { - dev_vdbg(hsotg->dev, "Disabling channel\n"); - hcchar &= ~HCCHAR_CHENA; - } - } else { - if (dbg_perio()) - dev_vdbg(hsotg->dev, "isoc/intr\n"); - hptxsts = dwc2_readl(hsotg->regs + HPTXSTS); - if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || - hsotg->queuing_high_bandwidth) { - if (dbg_perio()) - dev_vdbg(hsotg->dev, "Disabling channel\n"); - hcchar &= ~HCCHAR_CHENA; - } - } - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA enabled\n"); - } - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - chan->halt_status = halt_status; - - if (hcchar & HCCHAR_CHENA) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Channel enabled\n"); - chan->halt_pending = 1; - chan->halt_on_queue = 0; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Channel disabled\n"); - chan->halt_on_queue = 1; - } - - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", - hcchar); - dev_vdbg(hsotg->dev, " halt_pending: %d\n", - chan->halt_pending); - dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", - chan->halt_on_queue); - dev_vdbg(hsotg->dev, " halt_status: %d\n", - chan->halt_status); - } -} - -/** - * dwc2_hc_cleanup() - Clears the transfer state for a host channel - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Identifies the host channel to clean up - * - * This function is normally called after a transfer is done and the host - * channel is being released - */ -void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u32 hcintmsk; - - chan->xfer_started = 0; - - /* - * Clear channel interrupt enables and any unhandled channel interrupt - * conditions - */ - dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); - hcintmsk = 0xffffffff; - hcintmsk &= ~HCINTMSK_RESERVED14_31; - dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); -} - -/** - * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in - * which frame a periodic transfer should occur - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Identifies the host channel to set up and its properties - * @hcchar: Current value of the HCCHAR register for the specified host channel - * - * This function has no effect on non-periodic transfers - */ -static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan, u32 *hcchar) -{ - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* 1 if _next_ frame is odd, 0 if it's even */ - if 
(!(dwc2_hcd_get_frame_number(hsotg) & 0x1)) - *hcchar |= HCCHAR_ODDFRM; - } -} - -static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) -{ - /* Set up the initial PID for the transfer */ - if (chan->speed == USB_SPEED_HIGH) { - if (chan->ep_is_in) { - if (chan->multi_count == 1) - chan->data_pid_start = DWC2_HC_PID_DATA0; - else if (chan->multi_count == 2) - chan->data_pid_start = DWC2_HC_PID_DATA1; - else - chan->data_pid_start = DWC2_HC_PID_DATA2; - } else { - if (chan->multi_count == 1) - chan->data_pid_start = DWC2_HC_PID_DATA0; - else - chan->data_pid_start = DWC2_HC_PID_MDATA; - } - } else { - chan->data_pid_start = DWC2_HC_PID_DATA0; - } -} - -/** - * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with - * the Host Channel - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * This function should only be called in Slave mode. For a channel associated - * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel - * associated with a periodic EP, the periodic Tx FIFO is written. - * - * Upon return the xfer_buf and xfer_count fields in chan are incremented by - * the number of bytes written to the Tx FIFO. - */ -static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 i; - u32 remaining_count; - u32 byte_count; - u32 dword_count; - u32 __iomem *data_fifo; - u32 *data_buf = (u32 *)chan->xfer_buf; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); - - remaining_count = chan->xfer_len - chan->xfer_count; - if (remaining_count > chan->max_packet) - byte_count = chan->max_packet; - else - byte_count = remaining_count; - - dword_count = (byte_count + 3) / 4; - - if (((unsigned long)data_buf & 0x3) == 0) { - /* xfer_buf is DWORD aligned */ - for (i = 0; i < dword_count; i++, data_buf++) - dwc2_writel(*data_buf, data_fifo); - } else { - /* xfer_buf is not DWORD aligned */ - for (i = 0; i < dword_count; i++, data_buf++) { - u32 data = data_buf[0] | data_buf[1] << 8 | - data_buf[2] << 16 | data_buf[3] << 24; - dwc2_writel(data, data_fifo); - } - } - - chan->xfer_count += byte_count; - chan->xfer_buf += byte_count; -} - -/** - * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host - * channel and starts the transfer - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel. The xfer_len value - * may be reduced to accommodate the max widths of the XferSize and - * PktCnt fields in the HCTSIZn register. The multi_count value may be - * changed to reflect the final xfer_len value. - * - * This function may be called in either Slave mode or DMA mode. In Slave mode, - * the caller must ensure that there is sufficient space in the request queue - * and Tx Data FIFO. - * - * For an OUT transfer in Slave mode, it loads a data packet into the - * appropriate FIFO. If necessary, additional data packets are loaded in the - * Host ISR. - * - * For an IN transfer in Slave mode, a data packet is requested. The data - * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, - * additional data packets are requested in the Host ISR. - * - * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ - * register along with a packet count of 1 and the channel is enabled. This - * causes a single PING transaction to occur. 
Other fields in HCTSIZ are - * simply set to 0 since no data transfer occurs in this case. - * - * For a PING transfer in DMA mode, the HCTSIZ register is initialized with - * all the information required to perform the subsequent data transfer. In - * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the - * controller performs the entire PING protocol, then starts the data - * transfer. - */ -void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; - u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; - u32 hcchar; - u32 hctsiz = 0; - u16 num_packets; - u32 ec_mc; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - if (chan->do_ping) { - if (hsotg->core_params->dma_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "ping, no DMA\n"); - dwc2_hc_do_ping(hsotg, chan); - chan->xfer_started = 1; - return; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "ping, DMA\n"); - hctsiz |= TSIZ_DOPNG; - } - } - - if (chan->do_split) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "split\n"); - num_packets = 1; - - if (chan->complete_split && !chan->ep_is_in) - /* - * For CSPLIT OUT Transfer, set the size to 0 so the - * core doesn't expect any data written to the FIFO - */ - chan->xfer_len = 0; - else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) - chan->xfer_len = chan->max_packet; - else if (!chan->ep_is_in && chan->xfer_len > 188) - chan->xfer_len = 188; - - hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & - TSIZ_XFERSIZE_MASK; - - /* For split set ec_mc for immediate retries */ - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) - ec_mc = 3; - else - ec_mc = 1; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "no split\n"); - /* - * Ensure that the transfer length and packet count will fit - * in the widths allocated for them in the HCTSIZn register - */ - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* - * Make sure the transfer size is no larger than one - * (micro)frame's worth of data. (A check was done - * when the periodic transfer was accepted to ensure - * that a (micro)frame's worth of data can be - * programmed into a channel.) 
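To make the clamping and packet-count math that follows concrete, here is a worked example with hypothetical numbers for a high-speed interrupt IN endpoint (max_packet = 1024, multi_count = 3, 4000 bytes requested):

	/* Worked example, values chosen only for illustration */
	max_periodic_len = multi_count * max_packet;		/* 3 * 1024 = 3072 */
	xfer_len = min(xfer_len, max_periodic_len);		/* 4000 -> 3072 */
	num_packets = DIV_ROUND_UP(xfer_len, max_packet);	/* 3 */
	/* IN transfers always program an integral number of max packets */
	xfer_len = num_packets * max_packet;			/* 3 * 1024 = 3072 */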
- */ - u32 max_periodic_len = - chan->multi_count * chan->max_packet; - - if (chan->xfer_len > max_periodic_len) - chan->xfer_len = max_periodic_len; - } else if (chan->xfer_len > max_hc_xfer_size) { - /* - * Make sure that xfer_len is a multiple of max packet - * size - */ - chan->xfer_len = - max_hc_xfer_size - chan->max_packet + 1; - } - - if (chan->xfer_len > 0) { - num_packets = (chan->xfer_len + chan->max_packet - 1) / - chan->max_packet; - if (num_packets > max_hc_pkt_count) { - num_packets = max_hc_pkt_count; - chan->xfer_len = num_packets * chan->max_packet; - } - } else { - /* Need 1 packet for transfer length of 0 */ - num_packets = 1; - } - - if (chan->ep_is_in) - /* - * Always program an integral # of max packets for IN - * transfers - */ - chan->xfer_len = num_packets * chan->max_packet; - - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) - /* - * Make sure that the multi_count field matches the - * actual transfer length - */ - chan->multi_count = num_packets; - - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - dwc2_set_pid_isoc(chan); - - hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & - TSIZ_XFERSIZE_MASK; - - /* The ec_mc gets the multi_count for non-split */ - ec_mc = chan->multi_count; - } - - chan->start_pkt_count = num_packets; - hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; - hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & - TSIZ_SC_MC_PID_MASK; - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", - hctsiz, chan->hc_num); - - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " Xfer Size: %d\n", - (hctsiz & TSIZ_XFERSIZE_MASK) >> - TSIZ_XFERSIZE_SHIFT); - dev_vdbg(hsotg->dev, " Num Pkts: %d\n", - (hctsiz & TSIZ_PKTCNT_MASK) >> - TSIZ_PKTCNT_SHIFT); - dev_vdbg(hsotg->dev, " Start PID: %d\n", - (hctsiz & TSIZ_SC_MC_PID_MASK) >> - TSIZ_SC_MC_PID_SHIFT); - } - - if (hsotg->core_params->dma_enable > 0) { - dma_addr_t dma_addr; - - if (chan->align_buf) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "align_buf\n"); - dma_addr = chan->align_buf; - } else { - dma_addr = chan->xfer_dma; - } - dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", - (unsigned long)dma_addr, chan->hc_num); - } - - /* Start the split */ - if (chan->do_split) { - u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num)); - - hcsplt |= HCSPLT_SPLTENA; - dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); - } - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar &= ~HCCHAR_MULTICNT_MASK; - hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; - dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); - - if (hcchar & HCCHAR_CHDIS) - dev_warn(hsotg->dev, - "%s: chdis set, channel %d, hcchar 0x%08x\n", - __func__, chan->hc_num, hcchar); - - /* Set host channel enable after all other setup is complete */ - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", - (hcchar & HCCHAR_MULTICNT_MASK) >> - HCCHAR_MULTICNT_SHIFT); - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, - chan->hc_num); - - chan->xfer_started = 1; - chan->requests++; - - if (hsotg->core_params->dma_enable <= 0 && - !chan->ep_is_in && chan->xfer_len > 0) - /* Load OUT packet into the 
appropriate Tx FIFO */ - dwc2_hc_write_packet(hsotg, chan); -} - -/** - * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a - * host channel and starts the transfer in Descriptor DMA mode - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. - * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field - * with micro-frame bitmap. - * - * Initializes HCDMA register with descriptor list address and CTD value then - * starts the transfer via enabling the channel. - */ -void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 hcchar; - u32 hctsiz = 0; - - if (chan->do_ping) - hctsiz |= TSIZ_DOPNG; - - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - dwc2_set_pid_isoc(chan); - - /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ - hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & - TSIZ_SC_MC_PID_MASK; - - /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ - hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; - - /* Non-zero only for high-speed interrupt endpoints */ - hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; - - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " Start PID: %d\n", - chan->data_pid_start); - dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); - } - - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - - dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, - chan->desc_list_sz, DMA_TO_DEVICE); - - dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num)); - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", - &chan->desc_list_addr, chan->hc_num); - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar &= ~HCCHAR_MULTICNT_MASK; - hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & - HCCHAR_MULTICNT_MASK; - - if (hcchar & HCCHAR_CHDIS) - dev_warn(hsotg->dev, - "%s: chdis set, channel %d, hcchar 0x%08x\n", - __func__, chan->hc_num, hcchar); - - /* Set host channel enable after all other setup is complete */ - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", - (hcchar & HCCHAR_MULTICNT_MASK) >> - HCCHAR_MULTICNT_SHIFT); - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, - chan->hc_num); - - chan->xfer_started = 1; - chan->requests++; -} - -/** - * dwc2_hc_continue_transfer() - Continues a data transfer that was started by - * a previous call to dwc2_hc_start_transfer() - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * The caller must ensure there is sufficient space in the request queue and Tx - * Data FIFO. This function should only be called in Slave mode. In DMA mode, - * the controller acts autonomously to complete transfers programmed to a host - * channel. - * - * For an OUT transfer, a new data packet is loaded into the appropriate FIFO - * if there is any data remaining to be queued. For an IN transfer, another - * data packet is always requested. For the SETUP phase of a control transfer, - * this function does nothing. 
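As a rough usage note (assumed Slave-mode caller, not part of this patch), the channel interrupt path can simply re-arm the transfer with this helper until it reports that nothing more needs to be queued:

	/* Hypothetical fragment from a Slave-mode channel interrupt handler */
	if (!dwc2_hc_continue_transfer(hsotg, chan))
		dev_vdbg(hsotg->dev, "no further requests for this transfer\n");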
- * - * Return: 1 if a new request is queued, 0 if no more requests are required - * for this transfer - */ -int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - - if (chan->do_split) - /* SPLITs always queue just once per channel */ - return 0; - - if (chan->data_pid_start == DWC2_HC_PID_SETUP) - /* SETUPs are queued only once since they can't be NAK'd */ - return 0; - - if (chan->ep_is_in) { - /* - * Always queue another request for other IN transfers. If - * back-to-back INs are issued and NAKs are received for both, - * the driver may still be processing the first NAK when the - * second NAK is received. When the interrupt handler clears - * the NAK interrupt for the first NAK, the second NAK will - * not be seen. So we can't depend on the NAK interrupt - * handler to requeue a NAK'd request. Instead, IN requests - * are issued each time this function is called. When the - * transfer completes, the extra requests for the channel will - * be flushed. - */ - u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - - dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", - hcchar); - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - chan->requests++; - return 1; - } - - /* OUT transfers */ - - if (chan->xfer_count < chan->xfer_len) { - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - u32 hcchar = dwc2_readl(hsotg->regs + - HCCHAR(chan->hc_num)); - - dwc2_hc_set_even_odd_frame(hsotg, chan, - &hcchar); - } - - /* Load OUT packet into the appropriate Tx FIFO */ - dwc2_hc_write_packet(hsotg, chan); - chan->requests++; - return 1; - } - - return 0; -} - -/** - * dwc2_hc_do_ping() - Starts a PING transfer - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * This function should only be called in Slave mode. The Do Ping bit is set in - * the HCTSIZ register, then the channel is enabled. 
- */ -void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u32 hcchar; - u32 hctsiz; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - - - hctsiz = TSIZ_DOPNG; - hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); -} - -/** - * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for - * the HFIR register according to PHY type and speed - * - * @hsotg: Programming view of DWC_otg controller - * - * NOTE: The caller can modify the value of the HFIR register only after the - * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) - * has been set - */ -u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) -{ - u32 usbcfg; - u32 hprt0; - int clock = 60; /* default value */ - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - hprt0 = dwc2_readl(hsotg->regs + HPRT0); - - if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && - !(usbcfg & GUSBCFG_PHYIF16)) - clock = 60; - if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == - GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) - clock = 48; - if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) - clock = 30; - if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) - clock = 60; - if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) - clock = 48; - if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) - clock = 48; - if ((usbcfg & GUSBCFG_PHYSEL) && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) - clock = 48; - - if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) - /* High speed case */ - return 125 * clock; - else - /* FS/LS case */ - return 1000 * clock; -} - -/** - * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination - * buffer - * - * @core_if: Programming view of DWC_otg controller - * @dest: Destination buffer for the packet - * @bytes: Number of bytes to copy to the destination - */ -void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) -{ - u32 __iomem *fifo = hsotg->regs + HCFIFO(0); - u32 *data_buf = (u32 *)dest; - int word_count = (bytes + 3) / 4; - int i; - - /* - * Todo: Account for the case where dest is not dword aligned. This - * requires reading data from the FIFO into a u32 temp buffer, then - * moving it into the data buffer. - */ - - dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); - - for (i = 0; i < word_count; i++, data_buf++) - *data_buf = dwc2_readl(fifo); -} - /** * dwc2_dump_host_registers() - Prints the host registers * @@ -3355,13 +1478,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; hw->max_transfer_size = (1 << (width + 11)) - 1; - /* - * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates - * coherent buffers with this size, and if it's too large we can - * exhaust the coherent DMA pool. 
- */ - if (hw->max_transfer_size > 65535) - hw->max_transfer_size = 65535; width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; hw->max_packet_count = (1 << (width + 4)) - 1; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 7fb6434f4639..3c58d633ce80 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -44,6 +44,26 @@ #include <linux/usb/phy.h> #include "hw.h" +/* + * Suggested defines for tracers: + * - no_printk: Disable tracing + * - pr_info: Print this info to the console + * - trace_printk: Print this info to trace buffer (good for verbose logging) + */ + +#define DWC2_TRACE_SCHEDULER no_printk +#define DWC2_TRACE_SCHEDULER_VB no_printk + +/* Detailed scheduler tracing, but won't overwhelm console */ +#define dwc2_sch_dbg(hsotg, fmt, ...) \ + DWC2_TRACE_SCHEDULER(pr_fmt("%s: SCH: " fmt), \ + dev_name(hsotg->dev), ##__VA_ARGS__) + +/* Verbose scheduler tracing */ +#define dwc2_sch_vdbg(hsotg, fmt, ...) \ + DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ + dev_name(hsotg->dev), ##__VA_ARGS__) + static inline u32 dwc2_readl(const void __iomem *addr) { u32 value = __raw_readl(addr); @@ -572,6 +592,84 @@ struct dwc2_hregs_backup { bool valid; }; +/* + * Constants related to high speed periodic scheduling + * + * We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a + * reservation point of view it's assumed that the schedule goes right back to + * the beginning after the end of the schedule. + * + * What does that mean for scheduling things with a long interval? It means + * we'll reserve time for them in every possible microframe that they could + * ever be scheduled in. ...but we'll still only actually schedule them as + * often as they were requested. + * + * We keep our schedule in a "bitmap" structure. This simplifies having + * to keep track of and merge intervals: we just let the bitmap code do most + * of the heavy lifting. In a way scheduling is much like memory allocation. + * + * We schedule 100us per uframe or 80% of 125us (the maximum amount you're + * supposed to schedule for periodic transfers). That's according to spec. + * + * Note that though we only schedule 80% of each microframe, the bitmap that we + * keep the schedule in is tightly packed (AKA it doesn't have 100us worth of + * space for each uFrame). + * + * Requirements: + * - DWC2_HS_SCHEDULE_UFRAMES must even divide 0x4000 (HFNUM_MAX_FRNUM + 1) + * - DWC2_HS_SCHEDULE_UFRAMES must be 8 times DWC2_LS_SCHEDULE_FRAMES (probably + * could be any multiple of 8 times DWC2_LS_SCHEDULE_FRAMES, but there might + * be bugs). The 8 comes from the USB spec: number of microframes per frame. + */ +#define DWC2_US_PER_UFRAME 125 +#define DWC2_HS_PERIODIC_US_PER_UFRAME 100 + +#define DWC2_HS_SCHEDULE_UFRAMES 8 +#define DWC2_HS_SCHEDULE_US (DWC2_HS_SCHEDULE_UFRAMES * \ + DWC2_HS_PERIODIC_US_PER_UFRAME) + +/* + * Constants related to low speed scheduling + * + * For high speed we schedule every 1us. For low speed that's a bit overkill, + * so we make up a unit called a "slice" that's worth 25us. There are 40 + * slices in a full frame and we can schedule 36 of those (90%) for periodic + * transfers. + * + * Our low speed schedule can be as short as 1 frame or could be longer. When + * we only schedule 1 frame it means that we'll need to reserve a time every + * frame even for things that only transfer very rarely, so something that runs + * every 2048 frames will get time reserved in every frame. 
Our low speed + * schedule can be longer and we'll be able to handle more overlap, but that + * will come at increased memory cost and increased time to schedule. + * + * Note: one other advantage of a short low speed schedule is that if we mess + * up and miss scheduling we can jump in and use any of the slots that we + * happened to reserve. + * + * With 25 us per slice and 1 frame in the schedule, we only need 4 bytes for + * the schedule. There will be one schedule per TT. + * + * Requirements: + * - DWC2_US_PER_SLICE must evenly divide DWC2_LS_PERIODIC_US_PER_FRAME. + */ +#define DWC2_US_PER_SLICE 25 +#define DWC2_SLICES_PER_UFRAME (DWC2_US_PER_UFRAME / DWC2_US_PER_SLICE) + +#define DWC2_ROUND_US_TO_SLICE(us) \ + (DIV_ROUND_UP((us), DWC2_US_PER_SLICE) * \ + DWC2_US_PER_SLICE) + +#define DWC2_LS_PERIODIC_US_PER_FRAME \ + 900 +#define DWC2_LS_PERIODIC_SLICES_PER_FRAME \ + (DWC2_LS_PERIODIC_US_PER_FRAME / \ + DWC2_US_PER_SLICE) + +#define DWC2_LS_SCHEDULE_FRAMES 1 +#define DWC2_LS_SCHEDULE_SLICES (DWC2_LS_SCHEDULE_FRAMES * \ + DWC2_LS_PERIODIC_SLICES_PER_FRAME) + /** * struct dwc2_hsotg - Holds the state of the driver, including the non-periodic * and periodic schedules @@ -657,11 +755,14 @@ struct dwc2_hregs_backup { * periodic_sched_ready because it must be rescheduled for * the next frame. Otherwise, the item moves to * periodic_sched_inactive. + * @split_order: List keeping track of channels doing splits, in order. * @periodic_usecs: Total bandwidth claimed so far for periodic transfers. * This value is in microseconds per (micro)frame. The * assumption is that all periodic transfers may occur in * the same (micro)frame. - * @frame_usecs: Internal variable used by the microframe scheduler + * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the + * host is in high speed mode; low speed schedules are + * stored elsewhere since we need one per TT. * @frame_number: Frame number read from the core at SOF. The value ranges * from 0 to HFNUM_MAX_FRNUM. * @periodic_qh_count: Count of periodic QHs, if using several eps. Used for @@ -780,16 +881,19 @@ struct dwc2_hsotg { struct list_head periodic_sched_ready; struct list_head periodic_sched_assigned; struct list_head periodic_sched_queued; + struct list_head split_order; u16 periodic_usecs; - u16 frame_usecs[8]; + unsigned long hs_periodic_bitmap[ + DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)]; u16 frame_number; u16 periodic_qh_count; bool bus_suspended; bool new_connection; + u16 last_frame_num; + #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS #define FRAME_NUM_ARRAY_SIZE 1000 - u16 last_frame_num; u16 *frame_num_array; u16 *last_frame_num_array; int frame_num_idx; @@ -885,34 +989,11 @@ enum dwc2_halt_status { */ extern int dwc2_core_reset(struct dwc2_hsotg *hsotg); extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); -extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg); extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); -/* - * Host core Functions. - * The following functions support managing the DWC_otg controller in host - * mode. 
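Expanding the scheduling constants added to core.h above (assuming 64-bit longs, purely for illustration):

	/*
	 * High speed: DWC2_HS_SCHEDULE_US = 8 uframes * 100 us = 800, so the
	 * hs_periodic_bitmap needs DIV_ROUND_UP(800, 64) = 13 longs, one bit
	 * per schedulable microsecond of the repeating 8-uframe window.
	 *
	 * Low speed: DWC2_LS_SCHEDULE_SLICES = 1 frame * (900 us / 25 us) = 36
	 * slices, so a per-TT schedule fits in a single unsigned long.
	 */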
- */ -extern void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan); -extern void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, - enum dwc2_halt_status halt_status); -extern void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg); -extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg); - -extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg); extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); /* @@ -924,7 +1005,6 @@ extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes); extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); -extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup); extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); @@ -1191,6 +1271,8 @@ extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); +int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg); #else static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2) { return 0; } @@ -1208,22 +1290,37 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) +{ return 0; } +static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) +{ return 0; } #endif #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); +extern int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us); extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg); +int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg); +int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg); #else static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) { return 0; } +static inline int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, + int us) +{ return 0; } static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {} static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {} static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) { return 0; } +static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) +{ return 0; } +static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) +{ return 0; } + #endif #endif /* __DWC2_CORE_H__ */ diff --git a/drivers/usb/dwc2/gadget.c 
b/drivers/usb/dwc2/gadget.c index 422ab7da4eb5..e9940dd004e4 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3668,3 +3668,105 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg) return 0; } + +/** + * dwc2_backup_device_registers() - Backup controller device registers. + * When suspending usb bus, registers needs to be backuped + * if controller power is disabled once suspended. + * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_dregs_backup *dr; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Backup dev regs */ + dr = &hsotg->dr_backup; + + dr->dcfg = dwc2_readl(hsotg->regs + DCFG); + dr->dctl = dwc2_readl(hsotg->regs + DCTL); + dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); + dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK); + dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK); + + for (i = 0; i < hsotg->num_of_eps; i++) { + /* Backup IN EPs */ + dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i)); + + /* Ensure DATA PID is correctly configured */ + if (dr->diepctl[i] & DXEPCTL_DPID) + dr->diepctl[i] |= DXEPCTL_SETD1PID; + else + dr->diepctl[i] |= DXEPCTL_SETD0PID; + + dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i)); + dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i)); + + /* Backup OUT EPs */ + dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i)); + + /* Ensure DATA PID is correctly configured */ + if (dr->doepctl[i] & DXEPCTL_DPID) + dr->doepctl[i] |= DXEPCTL_SETD1PID; + else + dr->doepctl[i] |= DXEPCTL_SETD0PID; + + dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i)); + dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i)); + } + dr->valid = true; + return 0; +} + +/** + * dwc2_restore_device_registers() - Restore controller device registers. + * When resuming usb bus, device registers needs to be restored + * if controller power were disabled. 
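The backup/restore pair added in this hunk is meant to bracket a suspend where controller power is actually removed. A hypothetical caller sketch (the surrounding suspend/resume plumbing is assumed and not part of this patch):

	/* Hypothetical usage sketch, not part of this patch */
	static int example_bus_suspend(struct dwc2_hsotg *hsotg)
	{
		/* save DCFG/DCTL, interrupt masks and per-EP state */
		return dwc2_backup_device_registers(hsotg);
	}

	static int example_bus_resume(struct dwc2_hsotg *hsotg)
	{
		/* re-program the saved registers; the helper also sets
		 * DCTL.PWRONPRGDONE once everything is written back */
		return dwc2_restore_device_registers(hsotg);
	}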
+ * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_dregs_backup *dr; + u32 dctl; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Restore dev regs */ + dr = &hsotg->dr_backup; + if (!dr->valid) { + dev_err(hsotg->dev, "%s: no device registers to restore\n", + __func__); + return -EINVAL; + } + dr->valid = false; + + dwc2_writel(dr->dcfg, hsotg->regs + DCFG); + dwc2_writel(dr->dctl, hsotg->regs + DCTL); + dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK); + dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK); + dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK); + + for (i = 0; i < hsotg->num_of_eps; i++) { + /* Restore IN EPs */ + dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i)); + dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i)); + dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i)); + + /* Restore OUT EPs */ + dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i)); + dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i)); + dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i)); + } + + /* Set the Power-On Programming done bit */ + dctl = dwc2_readl(hsotg->regs + DCTL); + dctl |= DCTL_PWRONPRGDONE; + dwc2_writel(dctl, hsotg->regs + DCTL); + + return 0; +} diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 8847c72e55f6..1f6255131857 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -54,6 +54,535 @@ #include "core.h" #include "hcd.h" +/* + * ========================================================================= + * Host Core Layer Functions + * ========================================================================= + */ + +/** + * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, + * used in both device and host modes + * + * @hsotg: Programming view of the DWC_otg controller + */ +static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk; + + /* Clear any pending OTG Interrupts */ + dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); + + /* Clear any pending interrupts */ + dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); + + /* Enable the interrupts in the GINTMSK */ + intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; + + if (hsotg->core_params->dma_enable <= 0) + intmsk |= GINTSTS_RXFLVL; + if (hsotg->core_params->external_id_pin_ctl <= 0) + intmsk |= GINTSTS_CONIDSTSCHNG; + + intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | + GINTSTS_SESSREQINT; + + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/* + * Initializes the FSLSPClkSel field of the HCFG register depending on the + * PHY type + */ +static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) +{ + u32 hcfg, val; + + if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && + hsotg->core_params->ulpi_fs_ls > 0) || + hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* Full speed PHY */ + val = HCFG_FSLSPCLKSEL_48_MHZ; + } else { + /* High speed PHY running at full speed or high speed */ + val = HCFG_FSLSPCLKSEL_30_60_MHZ; + } + + dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg &= ~HCFG_FSLSPCLKSEL_MASK; + hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; + dwc2_writel(hcfg, hsotg->regs + HCFG); +} + +static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg, i2cctl; + int retval = 0; + + /* + * core_init() is now called on every switch so only call the + * following for the first 
time through + */ + if (select_phy) { + dev_dbg(hsotg->dev, "FS PHY selected\n"); + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + if (!(usbcfg & GUSBCFG_PHYSEL)) { + usbcfg |= GUSBCFG_PHYSEL; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Reset after a PHY select */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } + } + } + + /* + * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also + * do this on HNP Dev/Host mode switches (done in dev_init and + * host_init). + */ + if (dwc2_is_host_mode(hsotg)) + dwc2_init_fs_ls_pclk_sel(hsotg); + + if (hsotg->core_params->i2c_enable > 0) { + dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); + + /* Program GUSBCFG.OtgUtmiFsSel to I2C */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Program GI2CCTL.I2CEn */ + i2cctl = dwc2_readl(hsotg->regs + GI2CCTL); + i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; + i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; + i2cctl &= ~GI2CCTL_I2CEN; + dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); + i2cctl |= GI2CCTL_I2CEN; + dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); + } + + return retval; +} + +static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg, usbcfg_old; + int retval = 0; + + if (!select_phy) + return 0; + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg_old = usbcfg; + + /* + * HS PHY parameters. These parameters are preserved during soft reset + * so only program the first time. Do a soft reset immediately after + * setting phyif. + */ + switch (hsotg->core_params->phy_type) { + case DWC2_PHY_TYPE_PARAM_ULPI: + /* ULPI interface */ + dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); + usbcfg |= GUSBCFG_ULPI_UTMI_SEL; + usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); + if (hsotg->core_params->phy_ulpi_ddr > 0) + usbcfg |= GUSBCFG_DDRSEL; + break; + case DWC2_PHY_TYPE_PARAM_UTMI: + /* UTMI+ interface */ + dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); + usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); + if (hsotg->core_params->phy_utmi_width == 16) + usbcfg |= GUSBCFG_PHYIF16; + break; + default: + dev_err(hsotg->dev, "FS PHY selected at HS!\n"); + break; + } + + if (usbcfg != usbcfg_old) { + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Reset after setting the PHY parameters */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } + } + + return retval; +} + +static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg; + int retval = 0; + + if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && + hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* If FS mode with FS PHY */ + retval = dwc2_fs_phy_init(hsotg, select_phy); + if (retval) + return retval; + } else { + /* High speed PHY */ + retval = dwc2_hs_phy_init(hsotg, select_phy); + if (retval) + return retval; + } + + if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && + hsotg->core_params->ulpi_fs_ls > 0) { + dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_ULPI_FS_LS; + usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + } else { + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~GUSBCFG_ULPI_FS_LS; + usbcfg &= 
~GUSBCFG_ULPI_CLK_SUSP_M; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + } + + return retval; +} + +static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) +{ + u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); + + switch (hsotg->hw_params.arch) { + case GHWCFG2_EXT_DMA_ARCH: + dev_err(hsotg->dev, "External DMA Mode not supported\n"); + return -EINVAL; + + case GHWCFG2_INT_DMA_ARCH: + dev_dbg(hsotg->dev, "Internal DMA Mode\n"); + if (hsotg->core_params->ahbcfg != -1) { + ahbcfg &= GAHBCFG_CTRL_MASK; + ahbcfg |= hsotg->core_params->ahbcfg & + ~GAHBCFG_CTRL_MASK; + } + break; + + case GHWCFG2_SLAVE_ONLY_ARCH: + default: + dev_dbg(hsotg->dev, "Slave Only Mode\n"); + break; + } + + dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", + hsotg->core_params->dma_enable, + hsotg->core_params->dma_desc_enable); + + if (hsotg->core_params->dma_enable > 0) { + if (hsotg->core_params->dma_desc_enable > 0) + dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); + else + dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); + } else { + dev_dbg(hsotg->dev, "Using Slave mode\n"); + hsotg->core_params->dma_desc_enable = 0; + } + + if (hsotg->core_params->dma_enable > 0) + ahbcfg |= GAHBCFG_DMA_EN; + + dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); + + return 0; +} + +static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) +{ + u32 usbcfg; + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); + + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + if (hsotg->core_params->otg_cap == + DWC2_CAP_PARAM_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_HNPCAP; + if (hsotg->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_SRPCAP; + break; + + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + if (hsotg->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_SRPCAP; + break; + + case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: + default: + break; + } + + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); +} + +/** + * dwc2_enable_host_interrupts() - Enables the Host mode interrupts + * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk; + + dev_dbg(hsotg->dev, "%s()\n", __func__); + + /* Disable all interrupts */ + dwc2_writel(0, hsotg->regs + GINTMSK); + dwc2_writel(0, hsotg->regs + HAINTMSK); + + /* Enable the common interrupts */ + dwc2_enable_common_interrupts(hsotg); + + /* Enable host mode interrupts without disturbing common interrupts */ + intmsk = dwc2_readl(hsotg->regs + GINTMSK); + intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/** + * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts + * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK); + + /* Disable host mode interrupts without disturbing common interrupts */ + intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | + GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/* + * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size + * For system that have a total fifo depth that is smaller than the default + * RX + TX fifo size. 
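For a feel of the numbers the "Method 2" minimum-depth calculation in the body below produces, assume a core with 16 host channels (value chosen only for illustration):

	/* Worked example, hypothetical 16-channel core */
	rxfsiz   = 2 * ((1024 / 4) + 2) + 16;	/* 516 + 16 = 532 words */
	nptxfsiz = 2 * (512 / 4);		/* 256 words */
	ptxfsiz  = (1024 * 3) / 4;		/* 768 words */
	/* minimum total = 532 + 256 + 768 = 1556 words, applied only when
	 * the configured defaults do not fit in hw->total_fifo_size */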
+ * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; + + total_fifo_size = hw->total_fifo_size; + rxfsiz = params->host_rx_fifo_size; + nptxfsiz = params->host_nperio_tx_fifo_size; + ptxfsiz = params->host_perio_tx_fifo_size; + + /* + * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth + * allocation with support for high bandwidth endpoints. Synopsys + * defines MPS(Max Packet size) for a periodic EP=1024, and for + * non-periodic as 512. + */ + if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { + /* + * For Buffer DMA mode/Scatter Gather DMA mode + * 2 * ((Largest Packet size / 4) + 1 + 1) + n + * with n = number of host channel. + * 2 * ((1024/4) + 2) = 516 + */ + rxfsiz = 516 + hw->host_channels; + + /* + * min non-periodic tx fifo depth + * 2 * (largest non-periodic USB packet used / 4) + * 2 * (512/4) = 256 + */ + nptxfsiz = 256; + + /* + * min periodic tx fifo depth + * (largest packet size*MC)/4 + * (1024 * 3)/4 = 768 + */ + ptxfsiz = 768; + + params->host_rx_fifo_size = rxfsiz; + params->host_nperio_tx_fifo_size = nptxfsiz; + params->host_perio_tx_fifo_size = ptxfsiz; + } + + /* + * If the summation of RX, NPTX and PTX fifo sizes is still + * bigger than the total_fifo_size, then we have a problem. + * + * We won't be able to allocate as many endpoints. Right now, + * we're just printing an error message, but ideally this FIFO + * allocation algorithm would be improved in the future. + * + * FIXME improve this FIFO allocation algorithm. + */ + if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) + dev_err(hsotg->dev, "invalid fifo sizes\n"); +} + +static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *params = hsotg->core_params; + u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; + + if (!params->enable_dynamic_fifo) + return; + + dwc2_calculate_dynamic_fifo(hsotg); + + /* Rx FIFO */ + grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); + dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); + grxfsiz &= ~GRXFSIZ_DEPTH_MASK; + grxfsiz |= params->host_rx_fifo_size << + GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; + dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ); + dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GRXFSIZ)); + + /* Non-periodic Tx FIFO */ + dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GNPTXFSIZ)); + nptxfsiz = params->host_nperio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + nptxfsiz |= params->host_rx_fifo_size << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GNPTXFSIZ)); + + /* Periodic Tx FIFO */ + dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + HPTXFSIZ)); + hptxfsiz = params->host_perio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + hptxfsiz |= (params->host_rx_fifo_size + + params->host_nperio_tx_fifo_size) << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + HPTXFSIZ)); + + if (hsotg->core_params->en_multiple_tx_fifo > 0 && + hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { + /* + * Global DFIFOCFG calculation for Host mode - + * 
include RxFIFO, NPTXFIFO and HPTXFIFO + */ + dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG); + dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; + dfifocfg |= (params->host_rx_fifo_size + + params->host_nperio_tx_fifo_size + + params->host_perio_tx_fifo_size) << + GDFIFOCFG_EPINFOBASE_SHIFT & + GDFIFOCFG_EPINFOBASE_MASK; + dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG); + } +} + +/** + * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for + * the HFIR register according to PHY type and speed + * + * @hsotg: Programming view of DWC_otg controller + * + * NOTE: The caller can modify the value of the HFIR register only after the + * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) + * has been set + */ +u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) +{ + u32 usbcfg; + u32 hprt0; + int clock = 60; /* default value */ + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + hprt0 = dwc2_readl(hsotg->regs + HPRT0); + + if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && + !(usbcfg & GUSBCFG_PHYIF16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == + GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) + clock = 48; + if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) + clock = 30; + if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSEL) && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) + clock = 48; + + if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) + /* High speed case */ + return 125 * clock - 1; + + /* FS/LS case */ + return 1000 * clock - 1; +} + +/** + * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination + * buffer + * + * @core_if: Programming view of DWC_otg controller + * @dest: Destination buffer for the packet + * @bytes: Number of bytes to copy to the destination + */ +void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) +{ + u32 __iomem *fifo = hsotg->regs + HCFIFO(0); + u32 *data_buf = (u32 *)dest; + int word_count = (bytes + 3) / 4; + int i; + + /* + * Todo: Account for the case where dest is not dword aligned. This + * requires reading data from the FIFO into a u32 temp buffer, then + * moving it into the data buffer. 
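Stepping back to dwc2_calc_frame_interval() a few lines up, some illustrative HFIR values it now yields:

	/* Illustrative results of dwc2_calc_frame_interval() */
	/* HS port, UTMI+ 16-bit PHY at 30 MHz:  125 * 30 - 1 = 3749  */
	/* HS port, 60 MHz PHY clock:            125 * 60 - 1 = 7499  */
	/* FS/LS port, dedicated 48 MHz FS PHY: 1000 * 48 - 1 = 47999 */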
+ */ + + dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); + + for (i = 0; i < word_count; i++, data_buf++) + *data_buf = dwc2_readl(fifo); +} + /** * dwc2_dump_channel_info() - Prints the state of a host channel * @@ -77,7 +606,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, u32 hc_dma; int i; - if (chan == NULL) + if (!chan) return; hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); @@ -120,6 +649,1056 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, } /* + * ========================================================================= + * Low Level Host Channel Access Functions + * ========================================================================= + */ + +static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcintmsk = HCINTMSK_CHHLTD; + + switch (chan->ep_type) { + case USB_ENDPOINT_XFER_CONTROL: + case USB_ENDPOINT_XFER_BULK: + dev_vdbg(hsotg->dev, "control/bulk\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_STALL; + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_DATATGLERR; + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_BBLERR; + } else { + hcintmsk |= HCINTMSK_NAK; + hcintmsk |= HCINTMSK_NYET; + if (chan->do_ping) + hcintmsk |= HCINTMSK_ACK; + } + + if (chan->do_split) { + hcintmsk |= HCINTMSK_NAK; + if (chan->complete_split) + hcintmsk |= HCINTMSK_NYET; + else + hcintmsk |= HCINTMSK_ACK; + } + + if (chan->error_state) + hcintmsk |= HCINTMSK_ACK; + break; + + case USB_ENDPOINT_XFER_INT: + if (dbg_perio()) + dev_vdbg(hsotg->dev, "intr\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_NAK; + hcintmsk |= HCINTMSK_STALL; + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_DATATGLERR; + hcintmsk |= HCINTMSK_FRMOVRUN; + + if (chan->ep_is_in) + hcintmsk |= HCINTMSK_BBLERR; + if (chan->error_state) + hcintmsk |= HCINTMSK_ACK; + if (chan->do_split) { + if (chan->complete_split) + hcintmsk |= HCINTMSK_NYET; + else + hcintmsk |= HCINTMSK_ACK; + } + break; + + case USB_ENDPOINT_XFER_ISOC: + if (dbg_perio()) + dev_vdbg(hsotg->dev, "isoc\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_FRMOVRUN; + hcintmsk |= HCINTMSK_ACK; + + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_BBLERR; + } + break; + default: + dev_err(hsotg->dev, "## Unknown EP type ##\n"); + break; + } + + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); +} + +static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcintmsk = HCINTMSK_CHHLTD; + + /* + * For Descriptor DMA mode core halts the channel on AHB error. + * Interrupt is not required. 
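The Todo in dwc2_read_packet() above concerns destination buffers that are not dword aligned. One possible shape for that bounce copy, sketched as plain C with a stubbed FIFO pop; read_fifo_word() is invented, and in the driver the word would come from dwc2_readl():

#include <stdint.h>
#include <string.h>

/* Stand-in for a 32-bit FIFO pop; the driver would read HCFIFO here. */
static uint32_t read_fifo_word(void)
{
    static uint32_t counter;
    return counter++;
}

/* Copy 'bytes' from the FIFO into 'dest' even when dest is not 4-byte aligned. */
static void read_packet_unaligned(uint8_t *dest, uint16_t bytes)
{
    while (bytes) {
        uint32_t word = read_fifo_word();
        size_t chunk = bytes < 4 ? bytes : 4;

        /* memcpy() avoids any unaligned 32-bit store on dest. */
        memcpy(dest, &word, chunk);
        dest += chunk;
        bytes -= chunk;
    }
}

int main(void)
{
    uint8_t dest[7];    /* deliberately not a multiple of 4 */

    read_packet_unaligned(dest, sizeof(dest));
    return 0;
}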
+ */ + if (hsotg->core_params->dma_desc_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA disabled\n"); + hcintmsk |= HCINTMSK_AHBERR; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA enabled\n"); + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + hcintmsk |= HCINTMSK_XFERCOMPL; + } + + if (chan->error_state && !chan->do_split && + chan->ep_type != USB_ENDPOINT_XFER_ISOC) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "setting ACK\n"); + hcintmsk |= HCINTMSK_ACK; + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_DATATGLERR; + if (chan->ep_type != USB_ENDPOINT_XFER_INT) + hcintmsk |= HCINTMSK_NAK; + } + } + + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); +} + +static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 intmsk; + + if (hsotg->core_params->dma_enable > 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA enabled\n"); + dwc2_hc_enable_dma_ints(hsotg, chan); + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA disabled\n"); + dwc2_hc_enable_slave_ints(hsotg, chan); + } + + /* Enable the top level host channel interrupt */ + intmsk = dwc2_readl(hsotg->regs + HAINTMSK); + intmsk |= 1 << chan->hc_num; + dwc2_writel(intmsk, hsotg->regs + HAINTMSK); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); + + /* Make sure host channel interrupts are enabled */ + intmsk = dwc2_readl(hsotg->regs + GINTMSK); + intmsk |= GINTSTS_HCHINT; + dwc2_writel(intmsk, hsotg->regs + GINTMSK); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); +} + +/** + * dwc2_hc_init() - Prepares a host channel for transferring packets to/from + * a specific endpoint + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * The HCCHARn register is set up with the characteristics specified in chan. + * Host channel interrupts that may need to be serviced while this transfer is + * in progress are enabled. 
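dwc2_hc_enable_ints() above stacks three levels of enables: the per-channel HCINTMSK, this channel's bit in HAINTMSK, and the global host-channel bit in GINTMSK. A toy sketch of that chain against a fake three-register file; the indices and bit values are illustrative, not the real register map:

#include <stdio.h>

/* Toy register file: 0 = HCINTMSK(ch), 1 = HAINTMSK, 2 = GINTMSK. */
static unsigned int regs[3];
#define FAKE_CHHLTD (1u << 1)
#define FAKE_HCHINT (1u << 25)

static void enable_channel_ints(unsigned int ch, unsigned int hcintmsk)
{
    regs[0] = hcintmsk;        /* per-channel condition mask */
    regs[1] |= 1u << ch;       /* route this channel into HAINT */
    regs[2] |= FAKE_HCHINT;    /* let host channel interrupts reach the CPU */
}

int main(void)
{
    enable_channel_ints(3, FAKE_CHHLTD);
    printf("hcintmsk=%08x haintmsk=%08x gintmsk=%08x\n",
           regs[0], regs[1], regs[2]);
    return 0;
}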
+ */ +static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) +{ + u8 hc_num = chan->hc_num; + u32 hcintmsk; + u32 hcchar; + u32 hcsplt = 0; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + /* Clear old interrupt conditions for this host channel */ + hcintmsk = 0xffffffff; + hcintmsk &= ~HCINTMSK_RESERVED14_31; + dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num)); + + /* Enable channel interrupts required for this transfer */ + dwc2_hc_enable_ints(hsotg, chan); + + /* + * Program the HCCHARn register with the endpoint characteristics for + * the current transfer + */ + hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; + hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; + if (chan->ep_is_in) + hcchar |= HCCHAR_EPDIR; + if (chan->speed == USB_SPEED_LOW) + hcchar |= HCCHAR_LSPDDEV; + hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; + hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num)); + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", + hc_num, hcchar); + + dev_vdbg(hsotg->dev, "%s: Channel %d\n", + __func__, hc_num); + dev_vdbg(hsotg->dev, " Dev Addr: %d\n", + chan->dev_addr); + dev_vdbg(hsotg->dev, " Ep Num: %d\n", + chan->ep_num); + dev_vdbg(hsotg->dev, " Is In: %d\n", + chan->ep_is_in); + dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", + chan->speed == USB_SPEED_LOW); + dev_vdbg(hsotg->dev, " Ep Type: %d\n", + chan->ep_type); + dev_vdbg(hsotg->dev, " Max Pkt: %d\n", + chan->max_packet); + } + + /* Program the HCSPLT register for SPLITs */ + if (chan->do_split) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, + "Programming HC %d with split --> %s\n", + hc_num, + chan->complete_split ? "CSPLIT" : "SSPLIT"); + if (chan->complete_split) + hcsplt |= HCSPLT_COMPSPLT; + hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & + HCSPLT_XACTPOS_MASK; + hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & + HCSPLT_HUBADDR_MASK; + hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & + HCSPLT_PRTADDR_MASK; + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, " comp split %d\n", + chan->complete_split); + dev_vdbg(hsotg->dev, " xact pos %d\n", + chan->xact_pos); + dev_vdbg(hsotg->dev, " hub addr %d\n", + chan->hub_addr); + dev_vdbg(hsotg->dev, " hub port %d\n", + chan->hub_port); + dev_vdbg(hsotg->dev, " is_in %d\n", + chan->ep_is_in); + dev_vdbg(hsotg->dev, " Max Pkt %d\n", + chan->max_packet); + dev_vdbg(hsotg->dev, " xferlen %d\n", + chan->xfer_len); + } + } + + dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); +} + +/** + * dwc2_hc_halt() - Attempts to halt a host channel + * + * @hsotg: Controller register interface + * @chan: Host channel to halt + * @halt_status: Reason for halting the channel + * + * This function should only be called in Slave mode or to abort a transfer in + * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the + * controller halts the channel when the transfer is complete or a condition + * occurs that requires application intervention. + * + * In slave mode, checks for a free request queue entry, then sets the Channel + * Enable and Channel Disable bits of the Host Channel Characteristics + * register of the specified channel to intiate the halt. If there is no free + * request queue entry, sets only the Channel Disable bit of the HCCHARn + * register to flush requests for this channel. 
In the latter case, sets a + * flag to indicate that the host channel needs to be halted when a request + * queue slot is open. + * + * In DMA mode, always sets the Channel Enable and Channel Disable bits of the + * HCCHARn register. The controller ensures there is space in the request + * queue before submitting the halt request. + * + * Some time may elapse before the core flushes any posted requests for this + * host channel and halts. The Channel Halted interrupt handler completes the + * deactivation of the host channel. + */ +void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, + enum dwc2_halt_status halt_status) +{ + u32 nptxsts, hptxsts, hcchar; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) + dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); + + if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || + halt_status == DWC2_HC_XFER_AHB_ERR) { + /* + * Disable all channel interrupts except Ch Halted. The QTD + * and QH state associated with this transfer has been cleared + * (in the case of URB_DEQUEUE), so the channel needs to be + * shut down carefully to prevent crashes. + */ + u32 hcintmsk = HCINTMSK_CHHLTD; + + dev_vdbg(hsotg->dev, "dequeue/error\n"); + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + + /* + * Make sure no other interrupts besides halt are currently + * pending. Handling another interrupt could cause a crash due + * to the QTD and QH state. + */ + dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); + + /* + * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR + * even if the channel was already halted for some other + * reason + */ + chan->halt_status = halt_status; + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + if (!(hcchar & HCCHAR_CHENA)) { + /* + * The channel is either already halted or it hasn't + * started yet. In DMA mode, the transfer may halt if + * it finishes normally or a condition occurs that + * requires driver intervention. Don't want to halt + * the channel again. In either Slave or DMA mode, + * it's possible that the transfer has been assigned + * to a channel, but not started yet when an URB is + * dequeued. Don't want to halt a channel that hasn't + * started yet. + */ + return; + } + } + if (chan->halt_pending) { + /* + * A halt has already been issued for this channel. This might + * happen when a transfer is aborted by a higher level in + * the stack. 
+ */ + dev_vdbg(hsotg->dev, + "*** %s: Channel %d, chan->halt_pending already set ***\n", + __func__, chan->hc_num); + return; + } + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + + /* No need to set the bit in DDMA for disabling the channel */ + /* TODO check it everywhere channel is disabled */ + if (hsotg->core_params->dma_desc_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA disabled\n"); + hcchar |= HCCHAR_CHENA; + } else { + if (dbg_hc(chan)) + dev_dbg(hsotg->dev, "desc DMA enabled\n"); + } + hcchar |= HCCHAR_CHDIS; + + if (hsotg->core_params->dma_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA not enabled\n"); + hcchar |= HCCHAR_CHENA; + + /* Check for space in the request queue to issue the halt */ + if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || + chan->ep_type == USB_ENDPOINT_XFER_BULK) { + dev_vdbg(hsotg->dev, "control/bulk\n"); + nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS); + if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { + dev_vdbg(hsotg->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } else { + if (dbg_perio()) + dev_vdbg(hsotg->dev, "isoc/intr\n"); + hptxsts = dwc2_readl(hsotg->regs + HPTXSTS); + if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || + hsotg->queuing_high_bandwidth) { + if (dbg_perio()) + dev_vdbg(hsotg->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA enabled\n"); + } + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + chan->halt_status = halt_status; + + if (hcchar & HCCHAR_CHENA) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Channel enabled\n"); + chan->halt_pending = 1; + chan->halt_on_queue = 0; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Channel disabled\n"); + chan->halt_on_queue = 1; + } + + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", + hcchar); + dev_vdbg(hsotg->dev, " halt_pending: %d\n", + chan->halt_pending); + dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", + chan->halt_on_queue); + dev_vdbg(hsotg->dev, " halt_status: %d\n", + chan->halt_status); + } +} + +/** + * dwc2_hc_cleanup() - Clears the transfer state for a host channel + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Identifies the host channel to clean up + * + * This function is normally called after a transfer is done and the host + * channel is being released + */ +void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) +{ + u32 hcintmsk; + + chan->xfer_started = 0; + + list_del_init(&chan->split_order_list_entry); + + /* + * Clear channel interrupt enables and any unhandled channel interrupt + * conditions + */ + dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); + hcintmsk = 0xffffffff; + hcintmsk &= ~HCINTMSK_RESERVED14_31; + dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); +} + +/** + * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in + * which frame a periodic transfer should occur + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Identifies the host channel to set up and its properties + * @hcchar: Current value of the HCCHAR register for the specified host channel + * + * This function has no effect on non-periodic transfers + */ +static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan, u32 *hcchar) +{ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + int host_speed; + 
int xfer_ns; + int xfer_us; + int bytes_in_fifo; + u16 fifo_space; + u16 frame_number; + u16 wire_frame; + + /* + * Try to figure out if we're an even or odd frame. If we set + * even and the current frame number is even the the transfer + * will happen immediately. Similar if both are odd. If one is + * even and the other is odd then the transfer will happen when + * the frame number ticks. + * + * There's a bit of a balancing act to get this right. + * Sometimes we may want to send data in the current frame (AK + * right away). We might want to do this if the frame number + * _just_ ticked, but we might also want to do this in order + * to continue a split transaction that happened late in a + * microframe (so we didn't know to queue the next transfer + * until the frame number had ticked). The problem is that we + * need a lot of knowledge to know if there's actually still + * time to send things or if it would be better to wait until + * the next frame. + * + * We can look at how much time is left in the current frame + * and make a guess about whether we'll have time to transfer. + * We'll do that. + */ + + /* Get speed host is running at */ + host_speed = (chan->speed != USB_SPEED_HIGH && + !chan->do_split) ? chan->speed : USB_SPEED_HIGH; + + /* See how many bytes are in the periodic FIFO right now */ + fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) & + TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT; + bytes_in_fifo = sizeof(u32) * + (hsotg->core_params->host_perio_tx_fifo_size - + fifo_space); + + /* + * Roughly estimate bus time for everything in the periodic + * queue + our new transfer. This is "rough" because we're + * using a function that makes takes into account IN/OUT + * and INT/ISO and we're just slamming in one value for all + * transfers. This should be an over-estimate and that should + * be OK, but we can probably tighten it. + */ + xfer_ns = usb_calc_bus_time(host_speed, false, false, + chan->xfer_len + bytes_in_fifo); + xfer_us = NS_TO_US(xfer_ns); + + /* See what frame number we'll be at by the time we finish */ + frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us); + + /* This is when we were scheduled to be on the wire */ + wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1); + + /* + * If we'd finish _after_ the frame we're scheduled in then + * it's hopeless. Just schedule right away and hope for the + * best. Note that it _might_ be wise to call back into the + * scheduler to pick a better frame, but this is better than + * nothing. + */ + if (dwc2_frame_num_gt(frame_number, wire_frame)) { + dwc2_sch_vdbg(hsotg, + "QH=%p EO MISS fr=%04x=>%04x (%+d)\n", + chan->qh, wire_frame, frame_number, + dwc2_frame_num_dec(frame_number, + wire_frame)); + wire_frame = frame_number; + + /* + * We picked a different frame number; communicate this + * back to the scheduler so it doesn't try to schedule + * another in the same frame. + * + * Remember that next_active_frame is 1 before the wire + * frame. 
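The long comment above amounts to: estimate how long everything already in the periodic FIFO plus the new transfer will keep the bus busy, work out which (micro)frame that estimate finishes in, and give up on the scheduled frame if it would already be missed. A heavily simplified, self-contained version of that decision, using a made-up per-byte bus cost in place of usb_calc_bus_time() and invented numbers:

#include <stdint.h>
#include <stdio.h>

#define FRAME_MASK 0x3fff   /* frame numbers wrap at 14 bits, as in HFNUM */

/* Crude stand-in for usb_calc_bus_time(): a made-up ~60 ns per byte. */
static unsigned int bus_time_us(unsigned int bytes)
{
    return (bytes * 60 + 999) / 1000;
}

/* Pick the frame to transmit in, given where we are now, the scheduled wire
 * frame, bytes already queued in the periodic TX FIFO, the new transfer
 * length, and the microseconds left in the current (micro)frame. */
static uint16_t pick_wire_frame(uint16_t cur_frame, uint16_t wire_frame,
                                unsigned int fifo_bytes, unsigned int xfer_len,
                                unsigned int us_left_in_frame)
{
    unsigned int us = bus_time_us(fifo_bytes + xfer_len);
    uint16_t finish_frame = cur_frame;

    if (us > us_left_in_frame)      /* we spill past the current (micro)frame */
        finish_frame = (cur_frame + 1 +
                        (us - us_left_in_frame) / 125) & FRAME_MASK;

    /* Finishing after the scheduled frame means it is already missed: go now. */
    if (finish_frame != wire_frame &&
        (((finish_frame - wire_frame) & FRAME_MASK) < FRAME_MASK / 2))
        return finish_frame;

    return wire_frame;
}

int main(void)
{
    /* 512 bytes queued, a 3072-byte transfer, 40 us left in microframe 0x100,
     * scheduled for 0x101: the estimate lands in 0x102, so send right away. */
    printf("target %#x\n",
           (unsigned)pick_wire_frame(0x100, 0x101, 512, 3072, 40));
    return 0;
}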
+ */ + chan->qh->next_active_frame = + dwc2_frame_num_dec(frame_number, 1); + } + + if (wire_frame & 1) + *hcchar |= HCCHAR_ODDFRM; + else + *hcchar &= ~HCCHAR_ODDFRM; + } +} + +static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) +{ + /* Set up the initial PID for the transfer */ + if (chan->speed == USB_SPEED_HIGH) { + if (chan->ep_is_in) { + if (chan->multi_count == 1) + chan->data_pid_start = DWC2_HC_PID_DATA0; + else if (chan->multi_count == 2) + chan->data_pid_start = DWC2_HC_PID_DATA1; + else + chan->data_pid_start = DWC2_HC_PID_DATA2; + } else { + if (chan->multi_count == 1) + chan->data_pid_start = DWC2_HC_PID_DATA0; + else + chan->data_pid_start = DWC2_HC_PID_MDATA; + } + } else { + chan->data_pid_start = DWC2_HC_PID_DATA0; + } +} + +/** + * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with + * the Host Channel + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. For a channel associated + * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel + * associated with a periodic EP, the periodic Tx FIFO is written. + * + * Upon return the xfer_buf and xfer_count fields in chan are incremented by + * the number of bytes written to the Tx FIFO. + */ +static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 i; + u32 remaining_count; + u32 byte_count; + u32 dword_count; + u32 __iomem *data_fifo; + u32 *data_buf = (u32 *)chan->xfer_buf; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); + + remaining_count = chan->xfer_len - chan->xfer_count; + if (remaining_count > chan->max_packet) + byte_count = chan->max_packet; + else + byte_count = remaining_count; + + dword_count = (byte_count + 3) / 4; + + if (((unsigned long)data_buf & 0x3) == 0) { + /* xfer_buf is DWORD aligned */ + for (i = 0; i < dword_count; i++, data_buf++) + dwc2_writel(*data_buf, data_fifo); + } else { + /* xfer_buf is not DWORD aligned */ + for (i = 0; i < dword_count; i++, data_buf++) { + u32 data = data_buf[0] | data_buf[1] << 8 | + data_buf[2] << 16 | data_buf[3] << 24; + dwc2_writel(data, data_fifo); + } + } + + chan->xfer_count += byte_count; + chan->xfer_buf += byte_count; +} + +/** + * dwc2_hc_do_ping() - Starts a PING transfer + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. The Do Ping bit is set in + * the HCTSIZ register, then the channel is enabled. + */ +static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcchar; + u32 hctsiz; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + + hctsiz = TSIZ_DOPNG; + hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); +} + +/** + * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host + * channel and starts the transfer + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel. 
The xfer_len value + * may be reduced to accommodate the max widths of the XferSize and + * PktCnt fields in the HCTSIZn register. The multi_count value may be + * changed to reflect the final xfer_len value. + * + * This function may be called in either Slave mode or DMA mode. In Slave mode, + * the caller must ensure that there is sufficient space in the request queue + * and Tx Data FIFO. + * + * For an OUT transfer in Slave mode, it loads a data packet into the + * appropriate FIFO. If necessary, additional data packets are loaded in the + * Host ISR. + * + * For an IN transfer in Slave mode, a data packet is requested. The data + * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, + * additional data packets are requested in the Host ISR. + * + * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ + * register along with a packet count of 1 and the channel is enabled. This + * causes a single PING transaction to occur. Other fields in HCTSIZ are + * simply set to 0 since no data transfer occurs in this case. + * + * For a PING transfer in DMA mode, the HCTSIZ register is initialized with + * all the information required to perform the subsequent data transfer. In + * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the + * controller performs the entire PING protocol, then starts the data + * transfer. + */ +static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; + u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; + u32 hcchar; + u32 hctsiz = 0; + u16 num_packets; + u32 ec_mc; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + if (chan->do_ping) { + if (hsotg->core_params->dma_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "ping, no DMA\n"); + dwc2_hc_do_ping(hsotg, chan); + chan->xfer_started = 1; + return; + } + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "ping, DMA\n"); + + hctsiz |= TSIZ_DOPNG; + } + + if (chan->do_split) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "split\n"); + num_packets = 1; + + if (chan->complete_split && !chan->ep_is_in) + /* + * For CSPLIT OUT Transfer, set the size to 0 so the + * core doesn't expect any data written to the FIFO + */ + chan->xfer_len = 0; + else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) + chan->xfer_len = chan->max_packet; + else if (!chan->ep_is_in && chan->xfer_len > 188) + chan->xfer_len = 188; + + hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + + /* For split set ec_mc for immediate retries */ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + ec_mc = 3; + else + ec_mc = 1; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "no split\n"); + /* + * Ensure that the transfer length and packet count will fit + * in the widths allocated for them in the HCTSIZn register + */ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + /* + * Make sure the transfer size is no larger than one + * (micro)frame's worth of data. (A check was done + * when the periodic transfer was accepted to ensure + * that a (micro)frame's worth of data can be + * programmed into a channel.) 
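For splits, the code above always programs a single packet and clamps xfer_len: zero for a CSPLIT OUT, one max-size packet for IN or oversized OUT, and at most 188 bytes for a start-split OUT (roughly what one microframe of full-speed traffic can carry). The clamp on its own, as an illustrative helper with invented names:

#include <stdio.h>
#include <stdint.h>

static uint32_t split_xfer_len(uint32_t xfer_len, uint32_t max_packet,
                               int is_in, int complete_split)
{
    if (complete_split && !is_in)
        return 0;               /* CSPLIT OUT carries no data */
    if (is_in || xfer_len > max_packet)
        return max_packet;      /* one packet per start split */
    if (xfer_len > 188)
        return 188;             /* SSPLIT OUT per-microframe budget */
    return xfer_len;
}

int main(void)
{
    /* CSPLIT OUT, an IN with a 64-byte max packet, and a 500-byte FS isoc SSPLIT OUT. */
    printf("%u %u %u\n",
           (unsigned)split_xfer_len(64, 64, 0, 1),
           (unsigned)split_xfer_len(512, 64, 1, 0),
           (unsigned)split_xfer_len(500, 1023, 0, 0));
    return 0;   /* prints: 0 64 188 */
}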
+ */ + u32 max_periodic_len = + chan->multi_count * chan->max_packet; + + if (chan->xfer_len > max_periodic_len) + chan->xfer_len = max_periodic_len; + } else if (chan->xfer_len > max_hc_xfer_size) { + /* + * Make sure that xfer_len is a multiple of max packet + * size + */ + chan->xfer_len = + max_hc_xfer_size - chan->max_packet + 1; + } + + if (chan->xfer_len > 0) { + num_packets = (chan->xfer_len + chan->max_packet - 1) / + chan->max_packet; + if (num_packets > max_hc_pkt_count) { + num_packets = max_hc_pkt_count; + chan->xfer_len = num_packets * chan->max_packet; + } + } else { + /* Need 1 packet for transfer length of 0 */ + num_packets = 1; + } + + if (chan->ep_is_in) + /* + * Always program an integral # of max packets for IN + * transfers + */ + chan->xfer_len = num_packets * chan->max_packet; + + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + /* + * Make sure that the multi_count field matches the + * actual transfer length + */ + chan->multi_count = num_packets; + + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + dwc2_set_pid_isoc(chan); + + hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + + /* The ec_mc gets the multi_count for non-split */ + ec_mc = chan->multi_count; + } + + chan->start_pkt_count = num_packets; + hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; + hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", + hctsiz, chan->hc_num); + + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " Xfer Size: %d\n", + (hctsiz & TSIZ_XFERSIZE_MASK) >> + TSIZ_XFERSIZE_SHIFT); + dev_vdbg(hsotg->dev, " Num Pkts: %d\n", + (hctsiz & TSIZ_PKTCNT_MASK) >> + TSIZ_PKTCNT_SHIFT); + dev_vdbg(hsotg->dev, " Start PID: %d\n", + (hctsiz & TSIZ_SC_MC_PID_MASK) >> + TSIZ_SC_MC_PID_SHIFT); + } + + if (hsotg->core_params->dma_enable > 0) { + dwc2_writel((u32)chan->xfer_dma, + hsotg->regs + HCDMA(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", + (unsigned long)chan->xfer_dma, chan->hc_num); + } + + /* Start the split */ + if (chan->do_split) { + u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num)); + + hcsplt |= HCSPLT_SPLTENA; + dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); + } + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; + dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hsotg->dev, + "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, chan->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", + (hcchar & HCCHAR_MULTICNT_MASK) >> + HCCHAR_MULTICNT_SHIFT); + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, + chan->hc_num); + + chan->xfer_started = 1; + chan->requests++; + + if (hsotg->core_params->dma_enable <= 0 && + !chan->ep_is_in && chan->xfer_len > 0) + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hsotg, chan); +} + +/** + * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a + * host channel and starts the 
transfer in Descriptor DMA mode + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. + * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field + * with micro-frame bitmap. + * + * Initializes HCDMA register with descriptor list address and CTD value then + * starts the transfer via enabling the channel. + */ +void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcchar; + u32 hctsiz = 0; + + if (chan->do_ping) + hctsiz |= TSIZ_DOPNG; + + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + dwc2_set_pid_isoc(chan); + + /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ + hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + + /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ + hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; + + /* Non-zero only for high-speed interrupt endpoints */ + hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; + + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " Start PID: %d\n", + chan->data_pid_start); + dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); + } + + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + + dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, + chan->desc_list_sz, DMA_TO_DEVICE); + + dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num)); + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", + &chan->desc_list_addr, chan->hc_num); + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK; + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hsotg->dev, + "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, chan->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", + (hcchar & HCCHAR_MULTICNT_MASK) >> + HCCHAR_MULTICNT_SHIFT); + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, + chan->hc_num); + + chan->xfer_started = 1; + chan->requests++; +} + +/** + * dwc2_hc_continue_transfer() - Continues a data transfer that was started by + * a previous call to dwc2_hc_start_transfer() + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * The caller must ensure there is sufficient space in the request queue and Tx + * Data FIFO. This function should only be called in Slave mode. In DMA mode, + * the controller acts autonomously to complete transfers programmed to a host + * channel. + * + * For an OUT transfer, a new data packet is loaded into the appropriate FIFO + * if there is any data remaining to be queued. For an IN transfer, another + * data packet is always requested. For the SETUP phase of a control transfer, + * this function does nothing. 
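In dwc2_hc_start_transfer() above, the non-split path sizes the transfer to fit the XferSize and PktCnt fields: clamp the length, derive the packet count, cap it, and round IN transfers up to whole packets. A standalone sketch of that arithmetic; the periodic multi_count * max_packet clamp is left out for brevity and the names are invented:

#include <stdint.h>
#include <stdio.h>

/* Size a non-split, non-periodic transfer for the HCTSIZ fields. */
static void size_transfer(uint32_t *xfer_len, uint16_t *num_packets,
                          uint32_t max_packet, uint32_t max_xfer_size,
                          uint16_t max_pkt_count, int is_in)
{
    if (*xfer_len > max_xfer_size)
        *xfer_len = max_xfer_size - max_packet + 1;

    if (*xfer_len > 0) {
        *num_packets = (*xfer_len + max_packet - 1) / max_packet;
        if (*num_packets > max_pkt_count) {
            *num_packets = max_pkt_count;
            *xfer_len = *num_packets * max_packet;
        }
    } else {
        *num_packets = 1;   /* a zero-length transfer still needs one packet */
    }

    if (is_in)              /* IN always programs whole packets */
        *xfer_len = *num_packets * max_packet;
}

int main(void)
{
    uint32_t len = 3000;
    uint16_t pkts;

    size_transfer(&len, &pkts, 512, 65535, 511, 1);
    printf("len=%u packets=%u\n", (unsigned)len, (unsigned)pkts);
    return 0;   /* prints: len=3072 packets=6 */
}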
+ * + * Return: 1 if a new request is queued, 0 if no more requests are required + * for this transfer + */ +static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + + if (chan->do_split) + /* SPLITs always queue just once per channel */ + return 0; + + if (chan->data_pid_start == DWC2_HC_PID_SETUP) + /* SETUPs are queued only once since they can't be NAK'd */ + return 0; + + if (chan->ep_is_in) { + /* + * Always queue another request for other IN transfers. If + * back-to-back INs are issued and NAKs are received for both, + * the driver may still be processing the first NAK when the + * second NAK is received. When the interrupt handler clears + * the NAK interrupt for the first NAK, the second NAK will + * not be seen. So we can't depend on the NAK interrupt + * handler to requeue a NAK'd request. Instead, IN requests + * are issued each time this function is called. When the + * transfer completes, the extra requests for the channel will + * be flushed. + */ + u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + + dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", + hcchar); + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + chan->requests++; + return 1; + } + + /* OUT transfers */ + + if (chan->xfer_count < chan->xfer_len) { + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + u32 hcchar = dwc2_readl(hsotg->regs + + HCCHAR(chan->hc_num)); + + dwc2_hc_set_even_odd_frame(hsotg, chan, + &hcchar); + } + + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hsotg, chan); + chan->requests++; + return 1; + } + + return 0; +} + +/* + * ========================================================================= + * HCD + * ========================================================================= + */ + +/* * Processes all the URBs in a single list of QHs. Completes them with * -ETIMEDOUT and frees the QTD. * @@ -164,6 +1743,9 @@ static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg, qtd_list_entry) dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); + if (qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hcd_qh_free(hsotg, qh); spin_lock_irqsave(&hsotg->lock, flags); @@ -554,7 +2136,12 @@ static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); ep->hcpriv = NULL; + + if (qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); + dwc2_hcd_qh_free(hsotg, qh); return 0; @@ -580,6 +2167,224 @@ static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg, return 0; } +/** + * dwc2_core_init() - Initializes the DWC_otg controller registers and + * prepares the core for device mode or host mode operation + * + * @hsotg: Programming view of the DWC_otg controller + * @initial_setup: If true then this is the first init for this instance. 
+ */ +static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) +{ + u32 usbcfg, otgctl; + int retval; + + dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + + /* Set ULPI External VBUS bit if needed */ + usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; + if (hsotg->core_params->phy_ulpi_ext_vbus == + DWC2_PHY_ULPI_EXTERNAL_VBUS) + usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; + + /* Set external TS Dline pulsing bit if needed */ + usbcfg &= ~GUSBCFG_TERMSELDLPULSE; + if (hsotg->core_params->ts_dline > 0) + usbcfg |= GUSBCFG_TERMSELDLPULSE; + + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* + * Reset the Controller + * + * We only need to reset the controller if this is a re-init. + * For the first init we know for sure that earlier code reset us (it + * needed to in order to properly detect various parameters). + */ + if (!initial_setup) { + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", + __func__); + return retval; + } + } + + /* + * This needs to happen in FS mode before any other programming occurs + */ + retval = dwc2_phy_init(hsotg, initial_setup); + if (retval) + return retval; + + /* Program the GAHBCFG Register */ + retval = dwc2_gahbcfg_init(hsotg); + if (retval) + return retval; + + /* Program the GUSBCFG register */ + dwc2_gusbcfg_init(hsotg); + + /* Program the GOTGCTL register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_OTGVER; + if (hsotg->core_params->otg_ver > 0) + otgctl |= GOTGCTL_OTGVER; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); + + /* Clear the SRP success bit for FS-I2c */ + hsotg->srp_success = 0; + + /* Enable common interrupts */ + dwc2_enable_common_interrupts(hsotg); + + /* + * Do device or host initialization based on mode during PCD and + * HCD initialization + */ + if (dwc2_is_host_mode(hsotg)) { + dev_dbg(hsotg->dev, "Host Mode\n"); + hsotg->op_state = OTG_STATE_A_HOST; + } else { + dev_dbg(hsotg->dev, "Device Mode\n"); + hsotg->op_state = OTG_STATE_B_PERIPHERAL; + } + + return 0; +} + +/** + * dwc2_core_host_init() - Initializes the DWC_otg controller registers for + * Host mode + * + * @hsotg: Programming view of DWC_otg controller + * + * This function flushes the Tx and Rx FIFOs and flushes any entries in the + * request queues. Host channels are reset to ensure that they are ready for + * performing transfers. + */ +static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) +{ + u32 hcfg, hfir, otgctl; + + dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + + /* Restart the Phy Clock */ + dwc2_writel(0, hsotg->regs + PCGCTL); + + /* Initialize Host Configuration Register */ + dwc2_init_fs_ls_pclk_sel(hsotg); + if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_FSLSSUPP; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + + /* + * This bit allows dynamic reloading of the HFIR register during + * runtime. This bit needs to be programmed during initial configuration + * and its value must not be changed during runtime. 
+ */ + if (hsotg->core_params->reload_ctl > 0) { + hfir = dwc2_readl(hsotg->regs + HFIR); + hfir |= HFIR_RLDCTRL; + dwc2_writel(hfir, hsotg->regs + HFIR); + } + + if (hsotg->core_params->dma_desc_enable > 0) { + u32 op_mode = hsotg->hw_params.op_mode; + + if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || + !hsotg->hw_params.dma_desc_enable || + op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || + op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || + op_mode == GHWCFG2_OP_MODE_UNDEFINED) { + dev_err(hsotg->dev, + "Hardware does not support descriptor DMA mode -\n"); + dev_err(hsotg->dev, + "falling back to buffer DMA mode.\n"); + hsotg->core_params->dma_desc_enable = 0; + } else { + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + } + + /* Configure data FIFO sizes */ + dwc2_config_fifos(hsotg); + + /* TODO - check this */ + /* Clear Host Set HNP Enable in the OTG Control Register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_HSTSETHNPEN; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + + /* Make sure the FIFOs are flushed */ + dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); + dwc2_flush_rx_fifo(hsotg); + + /* Clear Host Set HNP Enable in the OTG Control Register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_HSTSETHNPEN; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + + if (hsotg->core_params->dma_desc_enable <= 0) { + int num_channels, i; + u32 hcchar; + + /* Flush out any leftover queued requests */ + num_channels = hsotg->core_params->host_channels; + for (i = 0; i < num_channels; i++) { + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + hcchar &= ~HCCHAR_CHENA; + hcchar |= HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); + } + + /* Halt all channels to put them into a known state */ + for (i = 0; i < num_channels; i++) { + int count = 0; + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); + dev_dbg(hsotg->dev, "%s: Halt channel %d\n", + __func__, i); + do { + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + if (++count > 1000) { + dev_err(hsotg->dev, + "Unable to clear enable on channel %d\n", + i); + break; + } + udelay(1); + } while (hcchar & HCCHAR_CHENA); + } + } + + /* Turn on the vbus power */ + dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); + if (hsotg->op_state == OTG_STATE_A_HOST) { + u32 hprt0 = dwc2_read_hprt0(hsotg); + + dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", + !!(hprt0 & HPRT0_PWR)); + if (!(hprt0 & HPRT0_PWR)) { + hprt0 |= HPRT0_PWR; + dwc2_writel(hprt0, hsotg->regs + HPRT0); + } + } + + dwc2_enable_host_interrupts(hsotg); +} + /* * Initializes dynamic portions of the DWC_otg HCD state * @@ -635,9 +2440,9 @@ static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg, chan->hub_port = (u8)hub_port; } -static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan, - struct dwc2_qtd *qtd, void *bufptr) +static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan, + struct dwc2_qtd *qtd) { struct dwc2_hcd_urb *urb = qtd->urb; struct dwc2_hcd_iso_packet_desc *frame_desc; @@ -657,7 +2462,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, else chan->xfer_buf = urb->setup_packet; chan->xfer_len = 8; - bufptr = NULL; break; case DWC2_CONTROL_DATA: @@ -684,7 +2488,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->xfer_dma = hsotg->status_buf_dma; else chan->xfer_buf = hsotg->status_buf; - bufptr = NULL; break; } break; @@ -717,14 +2520,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->xfer_len = frame_desc->length - qtd->isoc_split_offset; - /* For non-dword aligned buffers */ - if (hsotg->core_params->dma_enable > 0 && - (chan->xfer_dma & 0x3)) - bufptr = (u8 *)urb->buf + frame_desc->offset + - qtd->isoc_split_offset; - else - bufptr = NULL; - if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) { if (chan->xfer_len <= 188) chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL; @@ -733,63 +2528,93 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, } break; } +} + +#define DWC2_USB_DMA_ALIGN 4 + +struct dma_aligned_buffer { + void *kmalloc_ptr; + void *old_xfer_buffer; + u8 data[0]; +}; + +static void dwc2_free_dma_aligned_buffer(struct urb *urb) +{ + struct dma_aligned_buffer *temp; + + if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) + return; - return bufptr; + temp = container_of(urb->transfer_buffer, + struct dma_aligned_buffer, data); + + if (usb_urb_dir_in(urb)) + memcpy(temp->old_xfer_buffer, temp->data, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->old_xfer_buffer; + kfree(temp->kmalloc_ptr); + + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } -static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, - struct dwc2_host_chan *chan, - struct dwc2_hcd_urb *urb, void *bufptr) +static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { - u32 buf_size; - struct urb *usb_urb; - struct usb_hcd *hcd; + struct dma_aligned_buffer *temp, *kmalloc_ptr; + size_t kmalloc_size; - if (!qh->dw_align_buf) { - if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) - buf_size = hsotg->core_params->max_transfer_size; - else - /* 3072 = 3 max-size Isoc packets */ - buf_size = 3072; + if (urb->num_sgs || urb->sg || + urb->transfer_buffer_length == 0 || + !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) + return 0; - qh->dw_align_buf = kmalloc(buf_size, GFP_ATOMIC | GFP_DMA); - if (!qh->dw_align_buf) - return -ENOMEM; - qh->dw_align_buf_size = buf_size; - } + /* Allocate a buffer with enough padding for alignment */ + kmalloc_size = urb->transfer_buffer_length + + sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; - if (chan->xfer_len) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - usb_urb = urb->priv; + kmalloc_ptr = 
kmalloc(kmalloc_size, mem_flags); + if (!kmalloc_ptr) + return -ENOMEM; - if (usb_urb) { - if (usb_urb->transfer_flags & - (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG | - URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) { - hcd = dwc2_hsotg_to_hcd(hsotg); - usb_hcd_unmap_urb_for_dma(hcd, usb_urb); - } - if (!chan->ep_is_in) - memcpy(qh->dw_align_buf, bufptr, - chan->xfer_len); - } else { - dev_warn(hsotg->dev, "no URB in dwc2_urb\n"); - } - } + /* Position our struct dma_aligned_buffer such that data is aligned */ + temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; + temp->kmalloc_ptr = kmalloc_ptr; + temp->old_xfer_buffer = urb->transfer_buffer; + if (usb_urb_dir_out(urb)) + memcpy(temp->data, urb->transfer_buffer, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->data; - qh->dw_align_buf_dma = dma_map_single(hsotg->dev, - qh->dw_align_buf, qh->dw_align_buf_size, - chan->ep_is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { - dev_err(hsotg->dev, "can't map align_buf\n"); - chan->align_buf = 0; - return -EINVAL; - } + urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; - chan->align_buf = qh->dw_align_buf_dma; return 0; } +static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + int ret; + + /* We assume setup_dma is always aligned; warn if not */ + WARN_ON_ONCE(urb->setup_dma && + (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1))); + + ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags); + if (ret) + return ret; + + ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); + if (ret) + dwc2_free_dma_aligned_buffer(urb); + + return ret; +} + +static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + usb_hcd_unmap_urb_for_dma(hcd, urb); + dwc2_free_dma_aligned_buffer(urb); +} + /** * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host * channel and initializes the host channel to perform the transactions. 
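dwc2_alloc_dma_aligned_buffer()/dwc2_free_dma_aligned_buffer() above work by over-allocating, parking a small header just below an aligned data area, and temporarily swapping urb->transfer_buffer for that area. The pointer arithmetic is the interesting bit; here is a userspace sketch of the same layout with invented names and 4-byte alignment:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_TO 4u                 /* stands in for DWC2_USB_DMA_ALIGN */

struct aligned_buf {
    void *raw_ptr;                  /* what to pass to free() later */
    void *old_buffer;               /* caller's original, restored on free */
    uint8_t data[];                 /* aligned payload handed out instead */
};

/* Over-allocate, then park the header right below an ALIGN_TO-aligned data area. */
static void *alloc_aligned(void *old_buffer, size_t len)
{
    void *raw = malloc(sizeof(struct aligned_buf) + len + ALIGN_TO - 1);
    struct aligned_buf *hdr;
    uintptr_t aligned;

    if (!raw)
        return NULL;

    /* Same idea as PTR_ALIGN(kmalloc_ptr + 1, ALIGN) - 1 in the driver. */
    aligned = ((uintptr_t)raw + sizeof(*hdr) + ALIGN_TO - 1) &
              ~(uintptr_t)(ALIGN_TO - 1);
    hdr = (struct aligned_buf *)(aligned - sizeof(*hdr));
    hdr->raw_ptr = raw;
    hdr->old_buffer = old_buffer;
    return hdr->data;
}

static void *free_aligned(void *data)   /* returns the original buffer */
{
    struct aligned_buf *hdr = (struct aligned_buf *)
        ((uint8_t *)data - offsetof(struct aligned_buf, data));
    void *old = hdr->old_buffer;

    free(hdr->raw_ptr);
    return old;
}

int main(void)
{
    char original[16] = "urb payload";
    void *bounce = alloc_aligned(original, sizeof(original));

    memcpy(bounce, original, sizeof(original));   /* OUT direction: copy in */
    printf("aligned=%d\n", ((uintptr_t)bounce & (ALIGN_TO - 1)) == 0);
    free_aligned(bounce);
    return 0;
}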
The @@ -804,7 +2629,6 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) struct dwc2_host_chan *chan; struct dwc2_hcd_urb *urb; struct dwc2_qtd *qtd; - void *bufptr = NULL; if (dbg_qh(qh)) dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh); @@ -866,16 +2690,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) !dwc2_hcd_is_pipe_in(&urb->pipe_info)) urb->actual_length = urb->length; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->core_params->dma_enable > 0) chan->xfer_dma = urb->dma + urb->actual_length; - - /* For non-dword aligned case */ - if (hsotg->core_params->dma_desc_enable <= 0 && - (chan->xfer_dma & 0x3)) - bufptr = (u8 *)urb->buf + urb->actual_length; - } else { + else chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; - } chan->xfer_len = urb->length - urb->actual_length; chan->xfer_count = 0; @@ -887,27 +2705,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) chan->do_split = 0; /* Set the transfer attributes */ - bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr); - - /* Non DWORD-aligned buffer case */ - if (bufptr) { - dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); - if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) { - dev_err(hsotg->dev, - "%s: Failed to allocate memory to handle non-dword aligned buffer\n", - __func__); - /* Add channel back to free list */ - chan->align_buf = 0; - chan->multi_count = 0; - list_add_tail(&chan->hc_list_entry, - &hsotg->free_hc_list); - qtd->in_process = 0; - qh->channel = NULL; - return -ENOMEM; - } - } else { - chan->align_buf = 0; - } + dwc2_hc_init_xfer(hsotg, chan, qtd); if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) @@ -968,7 +2766,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * periodic assigned schedule */ qh_ptr = qh_ptr->next; - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_assigned); ret_val = DWC2_TRANSACTION_PERIODIC; } @@ -1001,8 +2800,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * non-periodic active schedule */ qh_ptr = qh_ptr->next; - list_move(&qh->qh_list_entry, - &hsotg->non_periodic_sched_active); + list_move_tail(&qh->qh_list_entry, + &hsotg->non_periodic_sched_active); if (ret_val == DWC2_TRANSACTION_NONE) ret_val = DWC2_TRANSACTION_NON_PERIODIC; @@ -1043,6 +2842,11 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, { int retval = 0; + if (chan->do_split) + /* Put ourselves on the list to keep order straight */ + list_move_tail(&chan->split_order_list_entry, + &hsotg->split_order); + if (hsotg->core_params->dma_enable > 0) { if (hsotg->core_params->dma_desc_enable > 0) { if (!chan->xfer_started || @@ -1102,10 +2906,14 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) u32 fspcavail; u32 gintmsk; int status; - int no_queue_space = 0; - int no_fifo_space = 0; + bool no_queue_space = false; + bool no_fifo_space = false; u32 qspcavail; + /* If empty list then just adjust interrupt enables */ + if (list_empty(&hsotg->periodic_sched_assigned)) + goto exit; + if (dbg_perio()) dev_vdbg(hsotg->dev, "Queue periodic transactions\n"); @@ -1175,50 +2983,40 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * Move the QH from the periodic assigned schedule to * the periodic queued schedule */ - list_move(&qh->qh_list_entry, - &hsotg->periodic_sched_queued); + list_move_tail(&qh->qh_list_entry, + 
&hsotg->periodic_sched_queued); /* done queuing high bandwidth */ hsotg->queuing_high_bandwidth = 0; } } - if (hsotg->core_params->dma_enable <= 0) { - tx_status = dwc2_readl(hsotg->regs + HPTXSTS); - qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> - TXSTS_QSPCAVAIL_SHIFT; - fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> - TXSTS_FSPCAVAIL_SHIFT; - if (dbg_perio()) { - dev_vdbg(hsotg->dev, - " P Tx Req Queue Space Avail (after queue): %d\n", - qspcavail); - dev_vdbg(hsotg->dev, - " P Tx FIFO Space Avail (after queue): %d\n", - fspcavail); - } - - if (!list_empty(&hsotg->periodic_sched_assigned) || - no_queue_space || no_fifo_space) { - /* - * May need to queue more transactions as the request - * queue or Tx FIFO empties. Enable the periodic Tx - * FIFO empty interrupt. (Always use the half-empty - * level to ensure that new requests are loaded as - * soon as possible.) - */ - gintmsk = dwc2_readl(hsotg->regs + GINTMSK); +exit: + if (no_queue_space || no_fifo_space || + (hsotg->core_params->dma_enable <= 0 && + !list_empty(&hsotg->periodic_sched_assigned))) { + /* + * May need to queue more transactions as the request + * queue or Tx FIFO empties. Enable the periodic Tx + * FIFO empty interrupt. (Always use the half-empty + * level to ensure that new requests are loaded as + * soon as possible.) + */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + if (!(gintmsk & GINTSTS_PTXFEMP)) { gintmsk |= GINTSTS_PTXFEMP; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); - } else { - /* - * Disable the Tx FIFO empty interrupt since there are - * no more transactions that need to be queued right - * now. This function is called from interrupt - * handlers to queue more transactions as transfer - * states change. - */ - gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + } + } else { + /* + * Disable the Tx FIFO empty interrupt since there are + * no more transactions that need to be queued right + * now. This function is called from interrupt + * handlers to queue more transactions as transfer + * states change. + */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + if (gintmsk & GINTSTS_PTXFEMP) { gintmsk &= ~GINTSTS_PTXFEMP; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); } @@ -1365,9 +3163,8 @@ void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "Queue Transactions\n"); #endif /* Process host channels associated with periodic transfers */ - if ((tr_type == DWC2_TRANSACTION_PERIODIC || - tr_type == DWC2_TRANSACTION_ALL) && - !list_empty(&hsotg->periodic_sched_assigned)) + if (tr_type == DWC2_TRANSACTION_PERIODIC || + tr_type == DWC2_TRANSACTION_ALL) dwc2_process_periodic_channels(hsotg); /* Process host channels associated with non-periodic transfers */ @@ -1947,6 +3744,35 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; } +int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us) +{ + u32 hprt = dwc2_readl(hsotg->regs + HPRT0); + u32 hfir = dwc2_readl(hsotg->regs + HFIR); + u32 hfnum = dwc2_readl(hsotg->regs + HFNUM); + unsigned int us_per_frame; + unsigned int frame_number; + unsigned int remaining; + unsigned int interval; + unsigned int phy_clks; + + /* High speed has 125 us per (micro) frame; others are 1 ms per */ + us_per_frame = (hprt & HPRT0_SPD_MASK) ? 
1000 : 125; + + /* Extract fields */ + frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; + remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT; + interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT; + + /* + * Number of phy clocks since the last tick of the frame number after + * "us" has passed. + */ + phy_clks = (interval - remaining) + + DIV_ROUND_UP(interval * us, us_per_frame); + + return dwc2_frame_num_inc(frame_number, phy_clks / interval); +} + int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg) { return hsotg->op_state == OTG_STATE_B_HOST; @@ -2223,6 +4049,90 @@ void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, *hub_port = urb->dev->ttport; } +/** + * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context + * + * This will get the dwc2_tt structure (and ttport) associated with the given + * context (which is really just a struct urb pointer). + * + * The first time this is called for a given TT we allocate memory for our + * structure. When everyone is done and has called dwc2_host_put_tt_info() + * then the refcount for the structure will go to 0 and we'll free it. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: The QH structure. + * @context: The priv pointer from a struct dwc2_hcd_urb. + * @mem_flags: Flags for allocating memory. + * @ttport: We'll return this device's port number here. That's used to + * reference into the bitmap if we're on a multi_tt hub. + * + * Return: a pointer to a struct dwc2_tt. Don't forget to call + * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure. + */ + +struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, + gfp_t mem_flags, int *ttport) +{ + struct urb *urb = context; + struct dwc2_tt *dwc_tt = NULL; + + if (urb->dev->tt) { + *ttport = urb->dev->ttport; + + dwc_tt = urb->dev->tt->hcpriv; + if (dwc_tt == NULL) { + size_t bitmap_size; + + /* + * For single_tt we need one schedule. For multi_tt + * we need one per port. + */ + bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP * + sizeof(dwc_tt->periodic_bitmaps[0]); + if (urb->dev->tt->multi) + bitmap_size *= urb->dev->tt->hub->maxchild; + + dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size, + mem_flags); + if (dwc_tt == NULL) + return NULL; + + dwc_tt->usb_tt = urb->dev->tt; + dwc_tt->usb_tt->hcpriv = dwc_tt; + } + + dwc_tt->refcount++; + } + + return dwc_tt; +} + +/** + * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info() + * + * Frees resources allocated by dwc2_host_get_tt_info() if all current holders + * of the structure are done. + * + * It's OK to call this with NULL. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @dwc_tt: The pointer returned by dwc2_host_get_tt_info. 
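dwc2_hcd_get_future_frame_number() above turns "us microseconds from now" into a frame number: PHY clocks already consumed in this frame, plus the clocks that correspond to us, divided back down by the frame interval. A standalone version with one worked, entirely hypothetical high-speed case, assuming the usual 14-bit HFNUM wrap:

#include <stdio.h>
#include <stdint.h>

#define FRAME_MASK 0x3fff   /* HFNUM frame numbers wrap at 14 bits */

static uint16_t future_frame_number(uint16_t frame_number, uint32_t remaining,
                                    uint32_t interval, uint32_t us,
                                    uint32_t us_per_frame)
{
    /* PHY clocks consumed so far this frame, plus clocks equivalent to 'us'. */
    uint32_t phy_clks = (interval - remaining) +
                        (interval * us + us_per_frame - 1) / us_per_frame;

    return (frame_number + phy_clks / interval) & FRAME_MASK;
}

int main(void)
{
    /* High speed: 125 us per microframe, HFIR interval of 7500 PHY clocks.
     * Halfway through microframe 0x3ffe, ask where we are 200 us from now:
     * 3750 + 12000 clocks = 2 intervals ahead, wrapping to frame 0. */
    printf("frame %#x\n",
           (unsigned)future_frame_number(0x3ffe, 3750, 7500, 200, 125));
    return 0;
}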
+ */ +void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt) +{ + /* Model kfree and make put of NULL a no-op */ + if (dwc_tt == NULL) + return; + + WARN_ON(dwc_tt->refcount < 1); + + dwc_tt->refcount--; + if (!dwc_tt->refcount) { + dwc_tt->usb_tt->hcpriv = NULL; + kfree(dwc_tt); + } +} + int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) { struct urb *urb = context; @@ -2334,9 +4244,7 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, kfree(qtd->urb); qtd->urb = NULL; - spin_unlock(&hsotg->lock); usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); - spin_lock(&hsotg->lock); } /* @@ -2789,6 +4697,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, fail3: dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); + if (qh_allocated && qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; fail2: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; @@ -2955,7 +4865,7 @@ static struct hc_driver dwc2_hc_driver = { .hcd_priv_size = sizeof(struct wrapper_priv_data), .irq = _dwc2_hcd_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, .start = _dwc2_hcd_start, .stop = _dwc2_hcd_stop, @@ -2971,6 +4881,9 @@ static struct hc_driver dwc2_hc_driver = { .bus_suspend = _dwc2_hcd_suspend, .bus_resume = _dwc2_hcd_resume, + + .map_urb_for_dma = dwc2_map_urb_for_dma, + .unmap_urb_for_dma = dwc2_unmap_urb_for_dma, }; /* @@ -3081,8 +4994,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); if (!hsotg->last_frame_num_array) goto error1; - hsotg->last_frame_num = HFNUM_MAX_FRNUM; #endif + hsotg->last_frame_num = HFNUM_MAX_FRNUM; /* Check if the bus driver or platform code has setup a dma_mask */ if (hsotg->core_params->dma_enable > 0 && @@ -3146,6 +5059,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); INIT_LIST_HEAD(&hsotg->periodic_sched_queued); + INIT_LIST_HEAD(&hsotg->split_order); + /* * Create a host channel descriptor for each host channel implemented * in the controller. Initialize the channel descriptor array. @@ -3159,12 +5074,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) if (channel == NULL) goto error3; channel->hc_num = i; + INIT_LIST_HEAD(&channel->split_order_list_entry); hsotg->hc_ptr_array[i] = channel; } - if (hsotg->core_params->uframe_sched > 0) - dwc2_hcd_init_usecs(hsotg); - /* Initialize hsotg start work */ INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); @@ -3317,3 +5230,67 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) kfree(hsotg->frame_num_array); #endif } + +/** + * dwc2_backup_host_registers() - Backup controller host registers. + * When suspending usb bus, registers needs to be backuped + * if controller power is disabled once suspended. + * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hregs_backup *hr; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Backup Host regs */ + hr = &hsotg->hr_backup; + hr->hcfg = dwc2_readl(hsotg->regs + HCFG); + hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK); + for (i = 0; i < hsotg->core_params->host_channels; ++i) + hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i)); + + hr->hprt0 = dwc2_read_hprt0(hsotg); + hr->hfir = dwc2_readl(hsotg->regs + HFIR); + hr->valid = true; + + return 0; +} + +/** + * dwc2_restore_host_registers() - Restore controller host registers. 
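dwc2_host_get_tt_info()/dwc2_host_put_tt_info() above are an allocate-on-first-use, refcounted object hung off the hub's usb_tt. Stripped of the USB specifics, the pattern looks like this; a userspace sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

struct shared_tt {
    int refcount;
    void **owner_slot;                  /* where the hub keeps its pointer */
    unsigned long periodic_bitmap[1];   /* per-port scheduling state lives here */
};

/* First caller allocates; later callers just bump the refcount. */
static struct shared_tt *tt_get(void **owner_slot)
{
    struct shared_tt *tt = *owner_slot;

    if (!tt) {
        tt = calloc(1, sizeof(*tt));
        if (!tt)
            return NULL;
        tt->owner_slot = owner_slot;
        *owner_slot = tt;
    }
    tt->refcount++;
    return tt;
}

/* Last put detaches from the owner and frees; NULL is a no-op, like kfree(). */
static void tt_put(struct shared_tt *tt)
{
    if (!tt)
        return;
    if (--tt->refcount == 0) {
        *tt->owner_slot = NULL;
        free(tt);
    }
}

int main(void)
{
    void *hub_priv = NULL;                    /* stands in for usb_tt->hcpriv */
    struct shared_tt *a = tt_get(&hub_priv);
    struct shared_tt *b = tt_get(&hub_priv);  /* same object, refcount now 2 */

    printf("shared=%d\n", a == b);
    tt_put(a);
    tt_put(b);                                /* freed here, hub_priv NULLed */
    printf("detached=%d\n", hub_priv == NULL);
    return 0;
}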
+ * When resuming usb bus, device registers needs to be restored + * if controller power were disabled. + * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hregs_backup *hr; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Restore host regs */ + hr = &hsotg->hr_backup; + if (!hr->valid) { + dev_err(hsotg->dev, "%s: no host registers to restore\n", + __func__); + return -EINVAL; + } + hr->valid = false; + + dwc2_writel(hr->hcfg, hsotg->regs + HCFG); + dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK); + + for (i = 0; i < hsotg->core_params->host_channels; ++i) + dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i)); + + dwc2_writel(hr->hprt0, hsotg->regs + HPRT0); + dwc2_writel(hr->hfir, hsotg->regs + HFIR); + hsotg->frame_number = 0; + + return 0; +} diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 8f0a29cefdf7..89fa26cb25f4 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -75,8 +75,6 @@ struct dwc2_qh; * (micro)frame * @xfer_buf: Pointer to current transfer buffer position * @xfer_dma: DMA address of xfer_buf - * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not - * DWORD aligned * @xfer_len: Total number of bytes to transfer * @xfer_count: Number of bytes transferred so far * @start_pkt_count: Packet count at start of transfer @@ -108,6 +106,7 @@ struct dwc2_qh; * @hc_list_entry: For linking to list of host channels * @desc_list_addr: Current QH's descriptor list DMA address * @desc_list_sz: Current QH's descriptor list size + * @split_order_list_entry: List entry for keeping track of the order of splits * * This structure represents the state of a single host channel when acting in * host mode. It contains the data items needed to transfer packets to an @@ -133,7 +132,6 @@ struct dwc2_host_chan { u8 *xfer_buf; dma_addr_t xfer_dma; - dma_addr_t align_buf; u32 xfer_len; u32 xfer_count; u16 start_pkt_count; @@ -161,6 +159,7 @@ struct dwc2_host_chan { struct list_head hc_list_entry; dma_addr_t desc_list_addr; u32 desc_list_sz; + struct list_head split_order_list_entry; }; struct dwc2_hcd_pipe_info { @@ -213,9 +212,47 @@ enum dwc2_transaction_type { DWC2_TRANSACTION_ALL, }; +/* The number of elements per LS bitmap (per port on multi_tt) */ +#define DWC2_ELEMENTS_PER_LS_BITMAP DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \ + BITS_PER_LONG) + +/** + * struct dwc2_tt - dwc2 data associated with a usb_tt + * + * @refcount: Number of Queue Heads (QHs) holding a reference. + * @usb_tt: Pointer back to the official usb_tt. + * @periodic_bitmaps: Bitmap for which parts of the 1ms frame are accounted + * for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP + * elements (so sizeof(long) times that in bytes). + * + * This structure is stored in the hcpriv of the official usb_tt. + */ +struct dwc2_tt { + int refcount; + struct usb_tt *usb_tt; + unsigned long periodic_bitmaps[]; +}; + +/** + * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus. + * + * @start_schedule_usecs: The start time on the main bus schedule. Note that + * the main bus schedule is tightly packed and this + * time should be interpreted as tightly packed (so + * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us + * instead of 125 us). + * @duration_us: How long this transfer goes. 
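A note on the "tightly packed" convention used by dwc2_hs_transfer_time above: converting a packed start time back to a (uFrame, offset) pair is just a divide and a modulo, which is what the schedule debug printing later in this patch does. A minimal sketch, assuming DWC2_HS_PERIODIC_US_PER_UFRAME is the 100 us (80% of 125 us) periodic budget mentioned elsewhere in the driver:

#include <stdio.h>

#define HS_PERIODIC_US_PER_UFRAME 100	/* assumed = DWC2_HS_PERIODIC_US_PER_UFRAME */

int main(void)
{
	unsigned int start_schedule_us = 353;	/* example packed start time */
	unsigned int uframe = start_schedule_us / HS_PERIODIC_US_PER_UFRAME;
	unsigned int rel_us = start_schedule_us % HS_PERIODIC_US_PER_UFRAME;

	/* 353 us into the packed schedule is 53 us into uFrame 3 */
	printf("uFrame %u + %u us\n", uframe, rel_us);
	return 0;
}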
+ */ + +struct dwc2_hs_transfer_time { + u32 start_schedule_us; + u16 duration_us; +}; + /** * struct dwc2_qh - Software queue head structure * + * @hsotg: The HCD state structure for the DWC OTG controller * @ep_type: Endpoint type. One of the following values: * - USB_ENDPOINT_XFER_CONTROL * - USB_ENDPOINT_XFER_BULK @@ -236,17 +273,35 @@ enum dwc2_transaction_type { * @do_split: Full/low speed endpoint on high-speed hub requires split * @td_first: Index of first activated isochronous transfer descriptor * @td_last: Index of last activated isochronous transfer descriptor - * @usecs: Bandwidth in microseconds per (micro)frame - * @interval: Interval between transfers in (micro)frames - * @sched_frame: (Micro)frame to initialize a periodic transfer. - * The transfer executes in the following (micro)frame. - * @frame_usecs: Internal variable used by the microframe scheduler - * @start_split_frame: (Micro)frame at which last start split was initialized + * @host_us: Bandwidth in microseconds per transfer as seen by host + * @device_us: Bandwidth in microseconds per transfer as seen by device + * @host_interval: Interval between transfers as seen by the host. If + * the host is high speed and the device is low speed this + * will be 8 times device interval. + * @device_interval: Interval between transfers as seen by the device. + * interval. + * @next_active_frame: (Micro)frame _before_ we next need to put something on + * the bus. We'll move the qh to active here. If the + * host is in high speed mode this will be a uframe. If + * the host is in low speed mode this will be a full frame. + * @start_active_frame: If we are partway through a split transfer, this will be + * what next_active_frame was when we started. Otherwise + * it should always be the same as next_active_frame. + * @num_hs_transfers: Number of transfers in hs_transfers. + * Normally this is 1 but can be more than one for splits. + * Always >= 1 unless the host is in low/full speed mode. + * @hs_transfers: Transfers that are scheduled as seen by the high speed + * bus. Not used if host is in low or full speed mode (but + * note that it IS USED if the device is low or full speed + * as long as the HOST is in high speed mode). + * @ls_start_schedule_slice: Start time (in slices) on the low speed bus + * schedule that's being used by this device. This + * will be on the periodic_bitmap in a + * "struct dwc2_tt". Not used if this device is high + * speed. Note that this is in "schedule slice" which + * is tightly packed. + * @ls_duration_us: Duration on the low speed bus schedule. * @ntd: Actual number of transfer descriptors in a list - * @dw_align_buf: Used instead of original buffer if its physical address - * is not dword-aligned - * @dw_align_buf_size: Size of dw_align_buf - * @dw_align_buf_dma: DMA address for dw_align_buf * @qtd_list: List of QTDs for this QH * @channel: Host channel currently processing transfers for this QH * @qh_list_entry: Entry for QH in either the periodic or non-periodic @@ -257,13 +312,20 @@ enum dwc2_transaction_type { * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer * descriptor and indicates original XferSize value for the * descriptor + * @unreserve_timer: Timer for releasing periodic reservation. + * @dwc2_tt: Pointer to our tt info (or NULL if no tt). + * @ttport: Port number within our tt. * @tt_buffer_dirty True if clear_tt_buffer_complete is pending + * @unreserve_pending: True if we planned to unreserve but haven't yet. 
+ * @schedule_low_speed: True if we have a low/full speed component (either the + * host is in low/full speed mode or do_split). * * A Queue Head (QH) holds the static characteristics of an endpoint and * maintains a list of transfers (QTDs) for that endpoint. A QH structure may * be entered in either the non-periodic or periodic schedule. */ struct dwc2_qh { + struct dwc2_hsotg *hsotg; u8 ep_type; u8 ep_is_in; u16 maxp; @@ -273,15 +335,16 @@ struct dwc2_qh { u8 do_split; u8 td_first; u8 td_last; - u16 usecs; - u16 interval; - u16 sched_frame; - u16 frame_usecs[8]; - u16 start_split_frame; + u16 host_us; + u16 device_us; + u16 host_interval; + u16 device_interval; + u16 next_active_frame; + u16 start_active_frame; + s16 num_hs_transfers; + struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; + u32 ls_start_schedule_slice; u16 ntd; - u8 *dw_align_buf; - int dw_align_buf_size; - dma_addr_t dw_align_buf_dma; struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; @@ -289,7 +352,12 @@ struct dwc2_qh { dma_addr_t desc_list_dma; u32 desc_list_sz; u32 *n_bytes; + struct timer_list unreserve_timer; + struct dwc2_tt *dwc_tt; + int ttport; unsigned tt_buffer_dirty:1; + unsigned unreserve_pending:1; + unsigned schedule_low_speed:1; }; /** @@ -362,6 +430,8 @@ struct hc_xfer_info { }; #endif +u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg); + /* Gets the struct usb_hcd that contains a struct dwc2_hsotg */ static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg) { @@ -383,6 +453,12 @@ static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr) dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum)); } +void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan); +void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, + enum dwc2_halt_status halt_status); +void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan); + /* * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they * are read as 1, they won't clear when written back. 
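The dwc2_read_hprt0() comment above is about write-1-to-clear semantics. Below is a small illustrative sketch of that read-modify-write pattern; the mask value and names are placeholders, not the driver's HPRT0 bit definitions.

#include <stdio.h>
#include <stdint.h>

#define PORT_W1C_BITS 0x0000002eu	/* placeholder mask of write-1-to-clear bits */

/* Return a value safe to modify and write back: W1C bits forced to 0 */
static uint32_t read_port_for_modify(uint32_t hw_value)
{
	return hw_value & ~PORT_W1C_BITS;
}

int main(void)
{
	uint32_t hprt0 = 0x0000040du;	/* pretend readout with some W1C bits set */
	uint32_t val = read_port_for_modify(hprt0);

	/* Writing a value derived from val won't acknowledge pending W1C bits */
	printf("raw %08x -> safe to write back %08x\n",
	       (unsigned)hprt0, (unsigned)val);
	return 0;
}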
@@ -456,7 +532,6 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, /* Schedule Queue Functions */ /* Implemented in hcd_queue.c */ -extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, gfp_t mem_flags); @@ -571,6 +646,11 @@ static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc) return (frame + inc) & HFNUM_MAX_FRNUM; } +static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec) +{ + return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM; +} + static inline u16 dwc2_full_frame_num(u16 frame) { return (frame & HFNUM_MAX_FRNUM) >> 3; @@ -648,7 +728,7 @@ static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg, return 0; } - return qh->usecs; + return qh->host_us; } extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, @@ -717,6 +797,12 @@ extern void dwc2_host_start(struct dwc2_hsotg *hsotg); extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg); extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, int *hub_port); +extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, + void *context, gfp_t mem_flags, + int *ttport); + +extern void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, + struct dwc2_tt *dwc_tt); extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, int status); @@ -739,7 +825,7 @@ do { \ _qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd, \ qtd_list_entry); \ if (usb_pipeint(_qtd_->urb->pipe) && \ - (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \ + (_qh_)->start_active_frame != 0 && !_qtd_->complete_split) { \ _hfnum_.d32 = dwc2_readl((_hcd_)->regs + HFNUM); \ switch (_hfnum_.b.frnum & 0x7) { \ case 7: \ diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index a41274aa52ad..0e1d42b5dec5 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -81,7 +81,7 @@ static u16 dwc2_max_desc_num(struct dwc2_qh *qh) static u16 dwc2_frame_incr_val(struct dwc2_qh *qh) { return qh->dev_speed == USB_SPEED_HIGH ? 
- (qh->interval + 8 - 1) / 8 : qh->interval; + (qh->host_interval + 8 - 1) / 8 : qh->host_interval; } static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, @@ -111,7 +111,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, dma_unmap_single(hsotg->dev, qh->desc_list_dma, qh->desc_list_sz, DMA_FROM_DEVICE); - kfree(qh->desc_list); + kmem_cache_free(desc_cache, qh->desc_list); qh->desc_list = NULL; return -ENOMEM; } @@ -252,7 +252,7 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, chan = qh->channel; inc = dwc2_frame_incr_val(qh); if (qh->ep_type == USB_ENDPOINT_XFER_ISOC) - i = dwc2_frame_list_idx(qh->sched_frame); + i = dwc2_frame_list_idx(qh->next_active_frame); else i = 0; @@ -278,13 +278,13 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, return; chan->schinfo = 0; - if (chan->speed == USB_SPEED_HIGH && qh->interval) { + if (chan->speed == USB_SPEED_HIGH && qh->host_interval) { j = 1; /* TODO - check this */ - inc = (8 + qh->interval - 1) / qh->interval; + inc = (8 + qh->host_interval - 1) / qh->host_interval; for (i = 0; i < inc; i++) { chan->schinfo |= j; - j = j << qh->interval; + j = j << qh->host_interval; } } else { chan->schinfo = 0xff; @@ -431,7 +431,10 @@ static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg, hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); - /* sched_frame is always frame number (not uFrame) both in FS and HS! */ + /* + * next_active_frame is always frame number (not uFrame) both in FS + * and HS! + */ /* * skip_frames is used to limit activated descriptors number @@ -514,13 +517,13 @@ static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg, */ fr_idx_tmp = dwc2_frame_list_idx(frame); fr_idx = (FRLISTEN_64_SIZE + - dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp) - % dwc2_frame_incr_val(qh); + dwc2_frame_list_idx(qh->next_active_frame) - + fr_idx_tmp) % dwc2_frame_incr_val(qh); fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE; } else { - qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh, + qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames); - fr_idx = dwc2_frame_list_idx(qh->sched_frame); + fr_idx = dwc2_frame_list_idx(qh->next_active_frame); } qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx); @@ -583,7 +586,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, u16 next_idx; idx = qh->td_last; - inc = qh->interval; + inc = qh->host_interval; hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); cur_idx = dwc2_frame_list_idx(hsotg->frame_number); next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed); @@ -605,11 +608,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, } } - if (qh->interval) { - ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) / - qh->interval; + if (qh->host_interval) { + ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) / + qh->host_interval; if (skip_frames && !qh->channel) - ntd_max -= skip_frames / qh->interval; + ntd_max -= skip_frames / qh->host_interval; } max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ? 
@@ -1029,7 +1032,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, idx); if (rc < 0) return; - idx = dwc2_desclist_idx_inc(idx, qh->interval, + idx = dwc2_desclist_idx_inc(idx, qh->host_interval, chan->speed); if (!rc) continue; @@ -1039,7 +1042,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, /* rc == DWC2_CMPL_STOP */ - if (qh->interval >= 32) + if (qh->host_interval >= 32) goto stop_scan; qh->td_first = idx; @@ -1242,8 +1245,10 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, for (i = 0; i < qtd_desc_count; i++) { if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, desc_num, halt_status, - &xfer_done)) + &xfer_done)) { + qtd = NULL; goto stop_scan; + } desc_num++; } @@ -1258,7 +1263,7 @@ stop_scan: if (halt_status == DWC2_HC_XFER_STALL) qh->data_toggle = DWC2_HC_PID_DATA0; else - dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); + dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL); } if (halt_status == DWC2_HC_XFER_COMPLETE) { @@ -1326,8 +1331,8 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, dwc2_hcd_qh_unlink(hsotg, qh); } else { /* Keep in assigned schedule to continue transfer */ - list_move(&qh->qh_list_entry, - &hsotg->periodic_sched_assigned); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_assigned); /* * If channel has been halted during giveback of urb * then prevent any new scheduling. diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index cadba8b13c48..906f223542ee 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -55,12 +55,16 @@ /* This function is for debug only */ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) { -#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS u16 curr_frame_number = hsotg->frame_number; + u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1); + + if (expected != curr_frame_number) + dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n", + expected, curr_frame_number); +#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { - if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) != - curr_frame_number) { + if (expected != curr_frame_number) { hsotg->frame_num_array[hsotg->frame_num_idx] = curr_frame_number; hsotg->last_frame_num_array[hsotg->frame_num_idx] = @@ -79,14 +83,15 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) } hsotg->dumped_frame_num_array = 1; } - hsotg->last_frame_num = curr_frame_number; #endif + hsotg->last_frame_num = curr_frame_number; } static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd) { + struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub; struct urb *usb_urb; if (!chan->qh) @@ -102,6 +107,15 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg, if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt) return; + /* + * The root hub doesn't really have a TT, but Linux thinks it + * does because how could you have a "high speed hub" that + * directly talks directly to low speed devices without a TT? + * It's all lies. Lies, I tell you. 
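The missed-SOF check above and the new dwc2_frame_num_dec() helper both rely on the frame counter wrapping at HFNUM_MAX_FRNUM. A standalone sketch of that arithmetic, assuming the usual 14-bit counter (HFNUM_MAX_FRNUM == 0x3fff is an assumption here, not taken from this hunk):

#include <stdio.h>
#include <stdint.h>

#define HFNUM_MAX_FRNUM 0x3fff	/* assumed value */

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}

static uint16_t frame_num_dec(uint16_t frame, uint16_t dec)
{
	return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
}

int main(void)
{
	uint16_t last = 0x3fff, curr = 0x0001;

	/* An SOF was missed: 0x3fff + 1 wraps to 0x0000, not 0x0001 */
	if (frame_num_inc(last, 1) != curr)
		printf("MISSED SOF %04x != %04x\n",
		       (unsigned)frame_num_inc(last, 1), (unsigned)curr);

	/* Decrement wraps the other way: dec(0x0000, 1) == 0x3fff */
	printf("dec(0x0000, 1) = %04x\n", (unsigned)frame_num_dec(0, 1));
	return 0;
}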
+ */ + if (usb_urb->dev->tt->hub == root_hub) + return; + if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) { chan->qh->tt_buffer_dirty = 1; if (usb_hub_clear_tt_buffer(usb_urb)) @@ -138,13 +152,19 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) while (qh_entry != &hsotg->periodic_sched_inactive) { qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry); qh_entry = qh_entry->next; - if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) + if (dwc2_frame_num_le(qh->next_active_frame, + hsotg->frame_number)) { + dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n", + qh, hsotg->frame_number, + qh->next_active_frame); + /* * Move QH to the ready list to be executed next * (micro)frame */ - list_move(&qh->qh_list_entry, + list_move_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); + } } tr_type = dwc2_hcd_select_transactions(hsotg); if (tr_type != DWC2_TRANSACTION_NONE) @@ -472,18 +492,6 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, xfer_length = urb->length - urb->actual_length; } - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && xfer_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + urb->actual_length, - chan->qh->dw_align_buf, xfer_length); - } - dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n", urb->actual_length, xfer_length); urb->actual_length += xfer_length; @@ -573,21 +581,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( frame_desc->status = 0; frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, halt_status, NULL); - - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && frame_desc->actual_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", - __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + frame_desc->offset + - qtd->isoc_split_offset, - chan->qh->dw_align_buf, - frame_desc->actual_length); - } break; case DWC2_HC_XFER_FRAME_OVERRUN: urb->error_count++; @@ -608,21 +601,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, halt_status, NULL); - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && frame_desc->actual_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", - __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + frame_desc->offset + - qtd->isoc_split_offset, - chan->qh->dw_align_buf, - frame_desc->actual_length); - } - /* Skip whole frame */ if (chan->qh->do_split && chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && @@ -688,8 +666,6 @@ static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, } no_qtd: - if (qh->channel) - qh->channel->align_buf = 0; qh->channel = NULL; dwc2_hcd_qh_deactivate(hsotg, qh, continue_split); } @@ -846,7 +822,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg, * halt to be queued when the periodic schedule is * processed. 
*/ - list_move(&chan->qh->qh_list_entry, + list_move_tail(&chan->qh->qh_list_entry, &hsotg->periodic_sched_assigned); /* @@ -954,14 +930,6 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, frame_desc->actual_length += len; - if (chan->align_buf) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, DMA_FROM_DEVICE); - memcpy(qtd->urb->buf + frame_desc->offset + - qtd->isoc_split_offset, chan->qh->dw_align_buf, len); - } - qtd->isoc_split_offset += len; if (frame_desc->actual_length >= frame_desc->length) { @@ -1184,19 +1152,6 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg, xfer_length = urb->length - urb->actual_length; } - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && xfer_length && chan->ep_is_in) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + urb->actual_length, - chan->qh->dw_align_buf, - xfer_length); - } - urb->actual_length += xfer_length; hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); @@ -1416,14 +1371,50 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - int frnum = dwc2_hcd_get_frame_number(hsotg); + struct dwc2_qh *qh = chan->qh; + bool past_end; + + if (hsotg->core_params->uframe_sched <= 0) { + int frnum = dwc2_hcd_get_frame_number(hsotg); + + /* Don't have num_hs_transfers; simple logic */ + past_end = dwc2_full_frame_num(frnum) != + dwc2_full_frame_num(qh->next_active_frame); + } else { + int end_frnum; - if (dwc2_full_frame_num(frnum) != - dwc2_full_frame_num(chan->qh->sched_frame)) { /* - * No longer in the same full speed frame. - * Treat this as a transaction error. - */ + * Figure out the end frame based on schedule. + * + * We don't want to go on trying again and again + * forever. Let's stop when we've done all the + * transfers that were scheduled. + * + * We're going to be comparing start_active_frame + * and next_active_frame, both of which are 1 + * before the time the packet goes on the wire, + * so that cancels out. Basically if had 1 + * transfer and we saw 1 NYET then we're done. + * We're getting a NYET here so if next >= + * (start + num_transfers) we're done. The + * complexity is that for all but ISOC_OUT we + * skip one slot. + */ + end_frnum = dwc2_frame_num_inc( + qh->start_active_frame, + qh->num_hs_transfers); + + if (qh->ep_type != USB_ENDPOINT_XFER_ISOC || + qh->ep_is_in) + end_frnum = + dwc2_frame_num_inc(end_frnum, 1); + + past_end = dwc2_frame_num_le( + end_frnum, qh->next_active_frame); + } + + if (past_end) { + /* Treat this as a transaction error. 
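For the uframe-scheduler branch of the NYET handler above, the "past the end" test can be restated as a small standalone function. dwc2_frame_num_le() is not part of this hunk, so the wrap-aware comparison below is an assumed stand-in rather than the driver's helper:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define HFNUM_MAX_FRNUM 0x3fff	/* assumed value */

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}

/* "frame1 <= frame2" with wrap-around; stand-in, not dwc2_frame_num_le() */
static bool frame_num_le(uint16_t frame1, uint16_t frame2)
{
	return ((uint16_t)(frame2 - frame1) & HFNUM_MAX_FRNUM) <=
		(HFNUM_MAX_FRNUM >> 1);
}

static bool nyet_past_end(uint16_t start_active_frame,
			  uint16_t next_active_frame,
			  int num_hs_transfers, bool isoc_out)
{
	uint16_t end_frnum = frame_num_inc(start_active_frame,
					   num_hs_transfers);

	if (!isoc_out)	/* all but ISOC OUT skip one slot */
		end_frnum = frame_num_inc(end_frnum, 1);

	return frame_num_le(end_frnum, next_active_frame);
}

int main(void)
{
	/* 3 scheduled transfers starting at 0x100; we are now at 0x104 */
	printf("past_end = %d\n", nyet_past_end(0x100, 0x104, 3, false));
	return 0;
}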
*/ #if 0 /* * Todo: Fix system performance so this can @@ -2008,6 +1999,16 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) } dwc2_writel(hcint, hsotg->regs + HCINT(chnum)); + + /* + * If we got an interrupt after someone called + * dwc2_hcd_endpoint_disable() we don't want to crash below + */ + if (!chan->qh) { + dev_warn(hsotg->dev, "Interrupt on disabled channel\n"); + return; + } + chan->hcint = hcint; hcint &= hcintmsk; @@ -2130,6 +2131,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) { u32 haint; int i; + struct dwc2_host_chan *chan, *chan_tmp; haint = dwc2_readl(hsotg->regs + HAINT); if (dbg_perio()) { @@ -2138,6 +2140,22 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint); } + /* + * According to USB 2.0 spec section 11.18.8, a host must + * issue complete-split transactions in a microframe for a + * set of full-/low-speed endpoints in the same relative + * order as the start-splits were issued in a microframe for. + */ + list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order, + split_order_list_entry) { + int hc_num = chan->hc_num; + + if (haint & (1 << hc_num)) { + dwc2_hc_n_intr(hsotg, hc_num); + haint &= ~(1 << hc_num); + } + } + for (i = 0; i < hsotg->core_params->host_channels; i++) { if (haint & (1 << i)) dwc2_hc_n_intr(hsotg, i); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 27d402f680a3..7f634fd771c7 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -38,6 +38,7 @@ * This file contains the functions to manage Queue Heads and Queue * Transfer Descriptors for Host mode */ +#include <linux/gcd.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> @@ -53,194 +54,8 @@ #include "core.h" #include "hcd.h" -/** - * dwc2_qh_init() - Initializes a QH structure - * - * @hsotg: The HCD state structure for the DWC OTG controller - * @qh: The QH to init - * @urb: Holds the information about the device/endpoint needed to initialize - * the QH - */ -#define SCHEDULE_SLOP 10 -static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, - struct dwc2_hcd_urb *urb) -{ - int dev_speed, hub_addr, hub_port; - char *speed, *type; - - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - /* Initialize QH */ - qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); - qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0; - - qh->data_toggle = DWC2_HC_PID_DATA0; - qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info); - INIT_LIST_HEAD(&qh->qtd_list); - INIT_LIST_HEAD(&qh->qh_list_entry); - - /* FS/LS Endpoint on HS Hub, NOT virtual root hub */ - dev_speed = dwc2_host_get_speed(hsotg, urb->priv); - - dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port); - - if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) && - hub_addr != 0 && hub_addr != 1) { - dev_vdbg(hsotg->dev, - "QH init: EP %d: TT found at hub addr %d, for port %d\n", - dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr, - hub_port); - qh->do_split = 1; - } - - if (qh->ep_type == USB_ENDPOINT_XFER_INT || - qh->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* Compute scheduling parameters once and save them */ - u32 hprt, prtspd; - - /* Todo: Account for split transfers in the bus time */ - int bytecount = - dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); - - qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ? 
- USB_SPEED_HIGH : dev_speed, qh->ep_is_in, - qh->ep_type == USB_ENDPOINT_XFER_ISOC, - bytecount)); - - /* Ensure frame_number corresponds to the reality */ - hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); - /* Start in a slightly future (micro)frame */ - qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number, - SCHEDULE_SLOP); - qh->interval = urb->interval; -#if 0 - /* Increase interrupt polling rate for debugging */ - if (qh->ep_type == USB_ENDPOINT_XFER_INT) - qh->interval = 8; -#endif - hprt = dwc2_readl(hsotg->regs + HPRT0); - prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; - if (prtspd == HPRT0_SPD_HIGH_SPEED && - (dev_speed == USB_SPEED_LOW || - dev_speed == USB_SPEED_FULL)) { - qh->interval *= 8; - qh->sched_frame |= 0x7; - qh->start_split_frame = qh->sched_frame; - } - dev_dbg(hsotg->dev, "interval=%d\n", qh->interval); - } - - dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n"); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n", - dwc2_hcd_get_dev_addr(&urb->pipe_info)); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n", - dwc2_hcd_get_ep_num(&urb->pipe_info), - dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"); - - qh->dev_speed = dev_speed; - - switch (dev_speed) { - case USB_SPEED_LOW: - speed = "low"; - break; - case USB_SPEED_FULL: - speed = "full"; - break; - case USB_SPEED_HIGH: - speed = "high"; - break; - default: - speed = "?"; - break; - } - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed); - - switch (qh->ep_type) { - case USB_ENDPOINT_XFER_ISOC: - type = "isochronous"; - break; - case USB_ENDPOINT_XFER_INT: - type = "interrupt"; - break; - case USB_ENDPOINT_XFER_CONTROL: - type = "control"; - break; - case USB_ENDPOINT_XFER_BULK: - type = "bulk"; - break; - default: - type = "?"; - break; - } - - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type); - - if (qh->ep_type == USB_ENDPOINT_XFER_INT) { - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n", - qh->usecs); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n", - qh->interval); - } -} - -/** - * dwc2_hcd_qh_create() - Allocates and initializes a QH - * - * @hsotg: The HCD state structure for the DWC OTG controller - * @urb: Holds the information about the device/endpoint needed - * to initialize the QH - * @atomic_alloc: Flag to do atomic allocation if needed - * - * Return: Pointer to the newly allocated QH, or NULL on error - */ -struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, - struct dwc2_hcd_urb *urb, - gfp_t mem_flags) -{ - struct dwc2_qh *qh; - - if (!urb->priv) - return NULL; - - /* Allocate memory */ - qh = kzalloc(sizeof(*qh), mem_flags); - if (!qh) - return NULL; - - dwc2_qh_init(hsotg, qh, urb); - - if (hsotg->core_params->dma_desc_enable > 0 && - dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { - dwc2_hcd_qh_free(hsotg, qh); - return NULL; - } - - return qh; -} - -/** - * dwc2_hcd_qh_free() - Frees the QH - * - * @hsotg: HCD instance - * @qh: The QH to free - * - * QH should already be removed from the list. QTD list should already be empty - * if called from URB Dequeue. 
- * - * Must NOT be called with interrupt disabled or spinlock held - */ -void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) -{ - if (qh->desc_list) { - dwc2_hcd_qh_free_ddma(hsotg, qh); - } else { - /* kfree(NULL) is safe */ - kfree(qh->dw_align_buf); - qh->dw_align_buf_dma = (dma_addr_t)0; - } - kfree(qh); -} +/* Wait this long before releasing periodic reservation */ +#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) /** * dwc2_periodic_channel_available() - Checks that a channel is available for a @@ -301,19 +116,19 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg, * High speed mode * Max periodic usecs is 80% x 125 usec = 100 usec */ - max_claimed_usecs = 100 - qh->usecs; + max_claimed_usecs = 100 - qh->host_us; } else { /* * Full speed mode * Max periodic usecs is 90% x 1000 usec = 900 usec */ - max_claimed_usecs = 900 - qh->usecs; + max_claimed_usecs = 900 - qh->host_us; } if (hsotg->periodic_usecs > max_claimed_usecs) { dev_err(hsotg->dev, "%s: already claimed usecs %d, required usecs %d\n", - __func__, hsotg->periodic_usecs, qh->usecs); + __func__, hsotg->periodic_usecs, qh->host_us); status = -ENOSPC; } @@ -321,113 +136,1177 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg, } /** - * Microframe scheduler - * track the total use in hsotg->frame_usecs - * keep each qh use in qh->frame_usecs - * when surrendering the qh then donate the time back + * pmap_schedule() - Schedule time in a periodic bitmap (pmap). + * + * @map: The bitmap representing the schedule; will be updated + * upon success. + * @bits_per_period: The schedule represents several periods. This is how many + * bits are in each period. It's assumed that the beginning + * of the schedule will repeat after its end. + * @periods_in_map: The number of periods in the schedule. + * @num_bits: The number of bits we need per period we want to reserve + * in this function call. + * @interval: How often we need to be scheduled for the reservation this + * time. 1 means every period. 2 means every other period. + * ...you get the picture? + * @start: The bit number to start at. Normally 0. Must be within + * the interval or we return failure right away. + * @only_one_period: Normally we'll allow picking a start anywhere within the + * first interval, since we can still make all repetition + * requirements by doing that. However, if you pass true + * here then we'll return failure if we can't fit within + * the period that "start" is in. + * + * The idea here is that we want to schedule time for repeating events that all + * want the same resource. The resource is divided into fixed-sized periods + * and the events want to repeat every "interval" periods. The schedule + * granularity is one bit. + * + * To keep things "simple", we'll represent our schedule with a bitmap that + * contains a fixed number of periods. This gets rid of a lot of complexity + * but does mean that we need to handle things specially (and non-ideally) if + * the number of the periods in the schedule doesn't match well with the + * intervals that we're trying to schedule. + * + * Here's an explanation of the scheme we'll implement, assuming 8 periods. + * - If interval is 1, we need to take up space in each of the 8 + * periods we're scheduling. Easy. + * - If interval is 2, we need to take up space in half of the + * periods. Again, easy. + * - If interval is 3, we actually need to fall back to interval 1. + * Why? Because we might need time in any period. 
AKA for the + * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be + * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to + * 0, 3, and 6. Since we could be in any frame we need to reserve + * for all of them. Sucks, but that's what you gotta do. Note that + * if we were instead scheduling 8 * 3 = 24 we'd do much better, but + * then we need more memory and time to do scheduling. + * - If interval is 4, easy. + * - If interval is 5, we again need interval 1. The schedule will be + * 0, 5, 2, 7, 4, 1, 6, 3, 0 + * - If interval is 6, we need interval 2. 0, 6, 4, 2. + * - If interval is 7, we need interval 1. + * - If interval is 8, we need interval 8. + * + * If you do the math, you'll see that we need to pretend that interval is + * equal to the greatest_common_divisor(interval, periods_in_map). + * + * Note that at the moment this function tends to front-pack the schedule. + * In some cases that's really non-ideal (it's hard to schedule things that + * need to repeat every period). In other cases it's perfect (you can easily + * schedule bigger, less often repeating things). + * + * Here's the algorithm in action (8 periods, 5 bits per period): + * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0 + * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2 + * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5 + * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2 + * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2 + * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3 + * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6 + * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4 + * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1 + * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0 + * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5 + * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2 + * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3 + * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6 + * | | | | | | | | | Remv 1 bits, intv 1 at 4 + * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0 + * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2 + * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3 + * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5 + * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6 + * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8 + * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12 + * + * This function is pretty generic and could be easily abstracted if anything + * needed similar scheduling. + * + * Returns either -ENOSPC or a >= 0 start bit which should be passed to the + * unschedule routine. The map bitmap will be updated on a non-error result. 
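The gcd() rule in the comment above is easy to sanity-check. With 8 periods in the map, intervals of 3, 5 and 7 collapse to 1 and an interval of 6 collapses to 2, matching the bullet list:

#include <stdio.h>

static int gcd(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	int periods_in_map = 8;
	int interval;

	for (interval = 1; interval <= 8; interval++)
		printf("interval %d -> schedule as %d\n",
		       interval, gcd(interval, periods_in_map));
	return 0;
}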
*/ -static const unsigned short max_uframe_usecs[] = { - 100, 100, 100, 100, 100, 100, 30, 0 -}; +static int pmap_schedule(unsigned long *map, int bits_per_period, + int periods_in_map, int num_bits, + int interval, int start, bool only_one_period) +{ + int interval_bits; + int to_reserve; + int first_end; + int i; + + if (num_bits > bits_per_period) + return -ENOSPC; + + /* Adjust interval as per description */ + interval = gcd(interval, periods_in_map); + + interval_bits = bits_per_period * interval; + to_reserve = periods_in_map / interval; + + /* If start has gotten us past interval then we can't schedule */ + if (start >= interval_bits) + return -ENOSPC; + + if (only_one_period) + /* Must fit within same period as start; end at begin of next */ + first_end = (start / bits_per_period + 1) * bits_per_period; + else + /* Can fit anywhere in the first interval */ + first_end = interval_bits; + + /* + * We'll try to pick the first repetition, then see if that time + * is free for each of the subsequent repetitions. If it's not + * we'll adjust the start time for the next search of the first + * repetition. + */ + while (start + num_bits <= first_end) { + int end; + + /* Need to stay within this period */ + end = (start / bits_per_period + 1) * bits_per_period; + + /* Look for num_bits us in this microframe starting at start */ + start = bitmap_find_next_zero_area(map, end, start, num_bits, + 0); + + /* + * We should get start >= end if we fail. We might be + * able to check the next microframe depending on the + * interval, so continue on (start already updated). + */ + if (start >= end) { + start = end; + continue; + } + + /* At this point we have a valid point for first one */ + for (i = 1; i < to_reserve; i++) { + int ith_start = start + interval_bits * i; + int ith_end = end + interval_bits * i; + int ret; + + /* Use this as a dumb "check if bits are 0" */ + ret = bitmap_find_next_zero_area( + map, ith_start + num_bits, ith_start, num_bits, + 0); + + /* We got the right place, continue checking */ + if (ret == ith_start) + continue; + + /* Move start up for next time and exit for loop */ + ith_start = bitmap_find_next_zero_area( + map, ith_end, ith_start, num_bits, 0); + if (ith_start >= ith_end) + /* Need a while new period next time */ + start = end; + else + start = ith_start - interval_bits * i; + break; + } + + /* If didn't exit the for loop with a break, we have success */ + if (i == to_reserve) + break; + } -void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg) + if (start + num_bits > first_end) + return -ENOSPC; + + for (i = 0; i < to_reserve; i++) { + int ith_start = start + interval_bits * i; + + bitmap_set(map, ith_start, num_bits); + } + + return start; +} + +/** + * pmap_unschedule() - Undo work done by pmap_schedule() + * + * @map: See pmap_schedule(). + * @bits_per_period: See pmap_schedule(). + * @periods_in_map: See pmap_schedule(). + * @num_bits: The number of bits that was passed to schedule. + * @interval: The interval that was passed to schedule. + * @start: The return value from pmap_schedule(). 
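pmap_schedule() leans on bitmap_find_next_zero_area() returning a value greater than or equal to the size argument when no run of free bits exists ("start >= end if we fail"). The sketch below uses simplified userspace stand-ins for the two bitmap helpers, not the kernel implementations, just to show that contract:

#include <stdio.h>

#define MAP_BITS 40

static int test_bit(const unsigned char *map, int bit)
{
	return (map[bit / 8] >> (bit % 8)) & 1;
}

static void set_bits(unsigned char *map, int start, int nr)
{
	while (nr--) {
		map[start / 8] |= 1u << (start % 8);
		start++;
	}
}

/* First index in [start, size) of nr consecutive zero bits, or >= size */
static int find_zero_area(const unsigned char *map, int size, int start,
			  int nr)
{
	int i, run = 0;

	for (i = start; i < size; i++) {
		run = test_bit(map, i) ? 0 : run + 1;
		if (run == nr)
			return i - nr + 1;
	}
	return size;	/* failure: >= size, like the kernel helper */
}

int main(void)
{
	unsigned char map[MAP_BITS / 8] = { 0 };

	set_bits(map, 0, 3);	/* pretend bits 0-2 are already reserved */
	printf("first free run of 4: %d\n",
	       find_zero_area(map, MAP_BITS, 0, 4));	/* prints 3 */
	return 0;
}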
+ */ +static void pmap_unschedule(unsigned long *map, int bits_per_period, + int periods_in_map, int num_bits, + int interval, int start) { + int interval_bits; + int to_release; int i; - for (i = 0; i < 8; i++) - hsotg->frame_usecs[i] = max_uframe_usecs[i]; + /* Adjust interval as per description in pmap_schedule() */ + interval = gcd(interval, periods_in_map); + + interval_bits = bits_per_period * interval; + to_release = periods_in_map / interval; + + for (i = 0; i < to_release; i++) { + int ith_start = start + interval_bits * i; + + bitmap_clear(map, ith_start, num_bits); + } } -static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +/* + * cat_printf() - A printf() + strcat() helper + * + * This is useful for concatenating a bunch of strings where each string is + * constructed using printf. + * + * @buf: The destination buffer; will be updated to point after the printed + * data. + * @size: The number of bytes in the buffer (includes space for '\0'). + * @fmt: The format for printf. + * @...: The args for printf. + */ +static void cat_printf(char **buf, size_t *size, const char *fmt, ...) { - unsigned short utime = qh->usecs; + va_list args; int i; - for (i = 0; i < 8; i++) { - /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */ - if (utime <= hsotg->frame_usecs[i]) { - hsotg->frame_usecs[i] -= utime; - qh->frame_usecs[i] += utime; - return i; - } + if (*size == 0) + return; + + va_start(args, fmt); + i = vsnprintf(*buf, *size, fmt, args); + va_end(args); + + if (i >= *size) { + (*buf)[*size - 1] = '\0'; + *buf += *size; + *size = 0; + } else { + *buf += i; + *size -= i; } - return -ENOSPC; } /* - * use this for FS apps that can span multiple uframes + * pmap_print() - Print the given periodic map + * + * Will attempt to print out the periodic schedule. + * + * @map: See pmap_schedule(). + * @bits_per_period: See pmap_schedule(). + * @periods_in_map: See pmap_schedule(). + * @period_name: The name of 1 period, like "uFrame" + * @units: The name of the units, like "us". + * @print_fn: The function to call for printing. + * @print_data: Opaque data to pass to the print function. + */ +static void pmap_print(unsigned long *map, int bits_per_period, + int periods_in_map, const char *period_name, + const char *units, + void (*print_fn)(const char *str, void *data), + void *print_data) +{ + int period; + + for (period = 0; period < periods_in_map; period++) { + char tmp[64]; + char *buf = tmp; + size_t buf_size = sizeof(tmp); + int period_start = period * bits_per_period; + int period_end = period_start + bits_per_period; + int start = 0; + int count = 0; + bool printed = false; + int i; + + for (i = period_start; i < period_end + 1; i++) { + /* Handle case when ith bit is set */ + if (i < period_end && + bitmap_find_next_zero_area(map, i + 1, + i, 1, 0) != i) { + if (count == 0) + start = i - period_start; + count++; + continue; + } + + /* ith bit isn't set; don't care if count == 0 */ + if (count == 0) + continue; + + if (!printed) + cat_printf(&buf, &buf_size, "%s %d: ", + period_name, period); + else + cat_printf(&buf, &buf_size, ", "); + printed = true; + + cat_printf(&buf, &buf_size, "%d %s -%3d %s", start, + units, start + count - 1, units); + count = 0; + } + + if (printed) + print_fn(tmp, print_data); + } +} + +/** + * dwc2_get_ls_map() - Get the map used for the given qh + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * + * We'll always get the periodic map out of our TT. 
Note that even if we're + * running the host straight in low speed / full speed mode it appears as if + * a TT is allocated for us, so we'll use it. If that ever changes we can + * add logic here to get a map out of "hsotg" if !qh->do_split. + * + * Returns: the map or NULL if a map couldn't be found. */ -static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) { - unsigned short utime = qh->usecs; - unsigned short xtime; - int t_left; + unsigned long *map; + + /* Don't expect to be missing a TT and be doing low speed scheduling */ + if (WARN_ON(!qh->dwc_tt)) + return NULL; + + /* Get the map and adjust if this is a multi_tt hub */ + map = qh->dwc_tt->periodic_bitmaps; + if (qh->dwc_tt->usb_tt->multi) + map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + + return map; +} + +struct dwc2_qh_print_data { + struct dwc2_hsotg *hsotg; + struct dwc2_qh *qh; +}; + +/** + * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print() + * + * @str: The string to print + * @data: A pointer to a struct dwc2_qh_print_data + */ +static void dwc2_qh_print(const char *str, void *data) +{ + struct dwc2_qh_print_data *print_data = data; + + dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str); +} + +/** + * dwc2_qh_schedule_print() - Print the periodic schedule + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH to print. + */ +static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + struct dwc2_qh_print_data print_data = { hsotg, qh }; int i; - int j; - int k; - for (i = 0; i < 8; i++) { - if (hsotg->frame_usecs[i] <= 0) + /* + * The printing functions are quite slow and inefficient. + * If we don't have tracing turned on, don't run unless the special + * define is turned on. + */ +#ifndef DWC2_PRINT_SCHEDULE + return; +#endif + + if (qh->schedule_low_speed) { + unsigned long *map = dwc2_get_ls_map(hsotg, qh); + + dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us", + qh, qh->device_us, + DWC2_ROUND_US_TO_SLICE(qh->device_us), + DWC2_US_PER_SLICE * qh->ls_start_schedule_slice); + + if (map) { + dwc2_sch_dbg(hsotg, + "QH=%p Whole low/full speed map %p now:\n", + qh, map); + pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, + DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices", + dwc2_qh_print, &print_data); + } + } + + for (i = 0; i < qh->num_hs_transfers; i++) { + struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i; + int uframe = trans_time->start_schedule_us / + DWC2_HS_PERIODIC_US_PER_UFRAME; + int rel_us = trans_time->start_schedule_us % + DWC2_HS_PERIODIC_US_PER_UFRAME; + + dwc2_sch_dbg(hsotg, + "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n", + qh, i, trans_time->duration_us, uframe, rel_us); + } + if (qh->num_hs_transfers) { + dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh); + pmap_print(hsotg->hs_periodic_bitmap, + DWC2_HS_PERIODIC_US_PER_UFRAME, + DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us", + dwc2_qh_print, &print_data); + } + +} + +/** + * dwc2_ls_pmap_schedule() - Schedule a low speed QH + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * @search_slice: We'll start trying to schedule at the passed slice. + * Remember that slices are the units of the low speed + * schedule (think 25us or so). + * + * Wraps pmap_schedule() with the right parameters for low speed scheduling. 
+ * + * Normally we schedule low speed devices on the map associated with the TT. + * + * Returns: 0 for success or an error code. + */ +static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, + int search_slice) +{ + int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE); + unsigned long *map = dwc2_get_ls_map(hsotg, qh); + int slice; + + if (map == NULL) + return -EINVAL; + + /* + * Schedule on the proper low speed map with our low speed scheduling + * parameters. Note that we use the "device_interval" here since + * we want the low speed interval and the only way we'd be in this + * function is if the device is low speed. + * + * If we happen to be doing low speed and high speed scheduling for the + * same transaction (AKA we have a split) we always do low speed first. + * That means we can always pass "false" for only_one_period (that + * parameters is only useful when we're trying to get one schedule to + * match what we already planned in the other schedule). + */ + slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, + DWC2_LS_SCHEDULE_FRAMES, slices, + qh->device_interval, search_slice, false); + + if (slice < 0) + return slice; + + qh->ls_start_schedule_slice = slice; + return 0; +} + +/** + * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule() + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE); + unsigned long *map = dwc2_get_ls_map(hsotg, qh); + + /* Schedule should have failed, so no worries about no error code */ + if (map == NULL) + return; + + pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, + DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval, + qh->ls_start_schedule_slice); +} + +/** + * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule + * + * This will schedule something on the main dwc2 schedule. + * + * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll + * update this with the result upon success. We also use the duration from + * the same structure. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * @only_one_period: If true we will limit ourselves to just looking at + * one period (aka one 100us chunk). This is used if we have + * already scheduled something on the low speed schedule and + * need to find something that matches on the high speed one. + * @index: The index into qh->hs_transfers that we're working with. + * + * Returns: 0 for success or an error code. Upon success the + * dwc2_hs_transfer_time specified by "index" will be updated. + */ +static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, + bool only_one_period, int index) +{ + struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index; + int us; + + us = pmap_schedule(hsotg->hs_periodic_bitmap, + DWC2_HS_PERIODIC_US_PER_UFRAME, + DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us, + qh->host_interval, trans_time->start_schedule_us, + only_one_period); + + if (us < 0) + return us; + + trans_time->start_schedule_us = us; + return 0; +} + +/** + * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule() + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. 
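The slice math in dwc2_ls_pmap_schedule() rounds the device time up to whole slices. A worked example, taking the "think 25us or so" comment at face value for DWC2_US_PER_SLICE (an assumption, not a value shown in this hunk):

#include <stdio.h>

#define US_PER_SLICE 25		/* assumed = DWC2_US_PER_SLICE */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int device_us = 117;	/* example transfer time on the LS/FS bus */
	int slices = DIV_ROUND_UP(device_us, US_PER_SLICE);

	/* 117 us needs 5 slices, i.e. it is rounded up to 125 us reserved */
	printf("%d us -> %d slices (%d us reserved)\n",
	       device_us, slices, slices * US_PER_SLICE);
	return 0;
}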
+ */ +static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, int index) +{ + struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index; + + pmap_unschedule(hsotg->hs_periodic_bitmap, + DWC2_HS_PERIODIC_US_PER_UFRAME, + DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us, + qh->host_interval, trans_time->start_schedule_us); +} + +/** + * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer. + * + * This is the most complicated thing in USB. We have to find matching time + * in both the global high speed schedule for the port and the low speed + * schedule for the TT associated with the given device. + * + * Being here means that the host must be running in high speed mode and the + * device is in low or full speed mode (and behind a hub). + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); + int ls_search_slice; + int err = 0; + int host_interval_in_sched; + + /* + * The interval (how often to repeat) in the actual host schedule. + * See pmap_schedule() for gcd() explanation. + */ + host_interval_in_sched = gcd(qh->host_interval, + DWC2_HS_SCHEDULE_UFRAMES); + + /* + * We always try to find space in the low speed schedule first, then + * try to find high speed time that matches. If we don't, we'll bump + * up the place we start searching in the low speed schedule and try + * again. To start we'll look right at the beginning of the low speed + * schedule. + * + * Note that this will tend to front-load the high speed schedule. + * We may eventually want to try to avoid this by either considering + * both schedules together or doing some sort of round robin. + */ + ls_search_slice = 0; + + while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) { + int start_s_uframe; + int ssplit_s_uframe; + int second_s_uframe; + int rel_uframe; + int first_count; + int middle_count; + int end_count; + int first_data_bytes; + int other_data_bytes; + int i; + + if (qh->schedule_low_speed) { + err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice); + + /* + * If we got an error here there's no other magic we + * can do, so bail. All the looping above is only + * helpful to redo things if we got a low speed slot + * and then couldn't find a matching high speed slot. + */ + if (err) + return err; + } else { + /* Must be missing the tt structure? Why? */ + WARN_ON_ONCE(1); + } + + /* + * This will give us a number 0 - 7 if + * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ... + */ + start_s_uframe = qh->ls_start_schedule_slice / + DWC2_SLICES_PER_UFRAME; + + /* Get a number that's always 0 - 7 */ + rel_uframe = (start_s_uframe % 8); + + /* + * If we were going to start in uframe 7 then we would need to + * issue a start split in uframe 6, which spec says is not OK. + * Move on to the next full frame (assuming there is one). + * + * See 11.18.4 Host Split Transaction Scheduling Requirements + * bullet 1. 
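Worked example of the start_s_uframe / rel_uframe bookkeeping above. DWC2_SLICES_PER_UFRAME is assumed to be 5 (125 us / 25 us); a slot whose relative uFrame is 7 is rejected because its start split would have to be issued in uFrame 6, which the comment above says the spec does not allow:

#include <stdio.h>

#define SLICES_PER_UFRAME 5	/* assumed = DWC2_SLICES_PER_UFRAME */

int main(void)
{
	int ls_start_schedule_slice = 36;	/* example result from the LS map */
	int start_s_uframe = ls_start_schedule_slice / SLICES_PER_UFRAME;
	int rel_uframe = start_s_uframe % 8;

	/* slice 36 -> uFrame 7 -> must retry in the next full frame */
	printf("slice %d -> uFrame %d (relative uFrame %d)%s\n",
	       ls_start_schedule_slice, start_s_uframe, rel_uframe,
	       rel_uframe == 7 ? " -> retry in the next frame" : "");
	return 0;
}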
+ */ + if (rel_uframe == 7) { + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + ls_search_slice = + (qh->ls_start_schedule_slice / + DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) * + DWC2_LS_PERIODIC_SLICES_PER_FRAME; continue; + } /* - * we need n consecutive slots so use j as a start slot - * j plus j+1 must be enough time (for now) + * For ISOC in: + * - start split (frame -1) + * - complete split w/ data (frame +1) + * - complete split w/ data (frame +2) + * - ... + * - complete split w/ data (frame +num_data_packets) + * - complete split w/ data (frame +num_data_packets+1) + * - complete split w/ data (frame +num_data_packets+2, max 8) + * ...though if frame was "0" then max is 7... + * + * For ISOC out we might need to do: + * - start split w/ data (frame -1) + * - start split w/ data (frame +0) + * - ... + * - start split w/ data (frame +num_data_packets-2) + * + * For INTERRUPT in we might need to do: + * - start split (frame -1) + * - complete split w/ data (frame +1) + * - complete split w/ data (frame +2) + * - complete split w/ data (frame +3, max 8) + * + * For INTERRUPT out we might need to do: + * - start split w/ data (frame -1) + * - complete split (frame +1) + * - complete split (frame +2) + * - complete split (frame +3, max 8) + * + * Start adjusting! */ - xtime = hsotg->frame_usecs[i]; - for (j = i + 1; j < 8; j++) { - /* - * if we add this frame remaining time to xtime we may - * be OK, if not we need to test j for a complete frame - */ - if (xtime + hsotg->frame_usecs[j] < utime) { - if (hsotg->frame_usecs[j] < - max_uframe_usecs[j]) - continue; + ssplit_s_uframe = (start_s_uframe + + host_interval_in_sched - 1) % + host_interval_in_sched; + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in) + second_s_uframe = start_s_uframe; + else + second_s_uframe = start_s_uframe + 1; + + /* First data transfer might not be all 188 bytes. */ + first_data_bytes = 188 - + DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice % + DWC2_SLICES_PER_UFRAME), + DWC2_SLICES_PER_UFRAME); + if (first_data_bytes > bytecount) + first_data_bytes = bytecount; + other_data_bytes = bytecount - first_data_bytes; + + /* + * For now, skip OUT xfers where first xfer is partial + * + * Main dwc2 code assumes: + * - INT transfers never get split in two. + * - ISOC transfers can always transfer 188 bytes the first + * time. + * + * Until that code is fixed, try again if the first transfer + * couldn't transfer everything. + * + * This code can be removed if/when the rest of dwc2 handles + * the above cases. Until it's fixed we just won't be able + * to schedule quite as tightly. + */ + if (!qh->ep_is_in && + (first_data_bytes != min_t(int, 188, bytecount))) { + dwc2_sch_dbg(hsotg, + "QH=%p avoiding broken 1st xfer (%d, %d)\n", + qh, first_data_bytes, bytecount); + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + ls_search_slice = (start_s_uframe + 1) * + DWC2_SLICES_PER_UFRAME; + continue; + } + + /* Start by assuming transfers for the bytes */ + qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188); + + /* + * Everything except ISOC OUT has extra transfers. Rules are + * complicated. See 11.18.4 Host Split Transaction Scheduling + * Requirements bullet 3. + */ + if (qh->ep_type == USB_ENDPOINT_XFER_INT) { + if (rel_uframe == 6) + qh->num_hs_transfers += 2; + else + qh->num_hs_transfers += 3; + + if (qh->ep_is_in) { + /* + * First is start split, middle/end is data. + * Allocate full data bytes for all data. 
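The split sizing above with concrete numbers, assuming the same 5 slices per uFrame as the previous sketch; 188 bytes is the per-microframe split payload limit the code works with:

#include <stdio.h>

#define SLICES_PER_UFRAME 5	/* assumed = DWC2_SLICES_PER_UFRAME */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bytecount = 400;		/* example hb_mult * max packet */
	int slice_in_uframe = 2;	/* ls_start_schedule_slice % SLICES_PER_UFRAME */
	int first_data_bytes = 188 -
		DIV_ROUND_UP(188 * slice_in_uframe, SLICES_PER_UFRAME);
	int other_data_bytes, num_hs_transfers;

	if (first_data_bytes > bytecount)
		first_data_bytes = bytecount;
	other_data_bytes = bytecount - first_data_bytes;
	num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);

	/* 400 bytes starting 2 slices into the uFrame: 112 + 188 + 100 */
	printf("first=%d other=%d transfers=%d\n",
	       first_data_bytes, other_data_bytes, num_hs_transfers);
	return 0;
}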
+ */ + first_count = 4; + middle_count = bytecount; + end_count = bytecount; + } else { + /* + * First is data, middle/end is complete. + * First transfer and second can have data. + * Rest should just have complete split. + */ + first_count = first_data_bytes; + middle_count = max_t(int, 4, other_data_bytes); + end_count = 4; } - if (xtime >= utime) { - t_left = utime; - for (k = i; k < 8; k++) { - t_left -= hsotg->frame_usecs[k]; - if (t_left <= 0) { - qh->frame_usecs[k] += - hsotg->frame_usecs[k] - + t_left; - hsotg->frame_usecs[k] = -t_left; - return i; - } else { - qh->frame_usecs[k] += - hsotg->frame_usecs[k]; - hsotg->frame_usecs[k] = 0; - } - } + } else { + if (qh->ep_is_in) { + int last; + + /* Account for the start split */ + qh->num_hs_transfers++; + + /* Calculate "L" value from spec */ + last = rel_uframe + qh->num_hs_transfers + 1; + + /* Start with basic case */ + if (last <= 6) + qh->num_hs_transfers += 2; + else + qh->num_hs_transfers += 1; + + /* Adjust downwards */ + if (last >= 6 && rel_uframe == 0) + qh->num_hs_transfers--; + + /* 1st = start; rest can contain data */ + first_count = 4; + middle_count = min_t(int, 188, bytecount); + end_count = middle_count; + } else { + /* All contain data, last might be smaller */ + first_count = first_data_bytes; + middle_count = min_t(int, 188, + other_data_bytes); + end_count = other_data_bytes % 188; } - /* add the frame time to x time */ - xtime += hsotg->frame_usecs[j]; - /* we must have a fully available next frame or break */ - if (xtime < utime && - hsotg->frame_usecs[j] == max_uframe_usecs[j]) - continue; } + + /* Assign durations per uFrame */ + qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count); + for (i = 1; i < qh->num_hs_transfers - 1; i++) + qh->hs_transfers[i].duration_us = + HS_USECS_ISO(middle_count); + if (qh->num_hs_transfers > 1) + qh->hs_transfers[qh->num_hs_transfers - 1].duration_us = + HS_USECS_ISO(end_count); + + /* + * Assign start us. The call below to dwc2_hs_pmap_schedule() + * will start with these numbers but may adjust within the same + * microframe. + */ + qh->hs_transfers[0].start_schedule_us = + ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME; + for (i = 1; i < qh->num_hs_transfers; i++) + qh->hs_transfers[i].start_schedule_us = + ((second_s_uframe + i - 1) % + DWC2_HS_SCHEDULE_UFRAMES) * + DWC2_HS_PERIODIC_US_PER_UFRAME; + + /* Try to schedule with filled in hs_transfers above */ + for (i = 0; i < qh->num_hs_transfers; i++) { + err = dwc2_hs_pmap_schedule(hsotg, qh, true, i); + if (err) + break; + } + + /* If we scheduled all w/out breaking out then we're all good */ + if (i == qh->num_hs_transfers) + break; + + for (; i >= 0; i--) + dwc2_hs_pmap_unschedule(hsotg, qh, i); + + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + + /* Try again starting in the next microframe */ + ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME; } - return -ENOSPC; + + if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES) + return -ENOSPC; + + return 0; +} + +/** + * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer. + * + * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean + * interface. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. 
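The scheduling loop above is all-or-nothing: every entry in hs_transfers[] must get its microframe time, and a failure part-way through releases everything already claimed before the search resumes later in the low speed schedule. A minimal model of that claim-and-roll-back pattern against a per-microframe budget (the numbers are made up; the driver tracks real microseconds in hs_periodic_bitmap via pmap_schedule()):

#include <stdio.h>
#include <stdbool.h>

#define UFRAMES         8
#define BUDGET_US       100     /* made-up per-microframe periodic budget */

static int used_us[UFRAMES];

static bool claim(int uframe, int us)
{
        if (used_us[uframe] + us > BUDGET_US)
                return false;
        used_us[uframe] += us;
        return true;
}

static void release(int uframe, int us)
{
        used_us[uframe] -= us;
}

/* Claim time in each of n consecutive microframes, or nothing at all */
static bool claim_all_or_nothing(int first_uframe, int n, const int *us)
{
        int i;

        for (i = 0; i < n; i++)
                if (!claim((first_uframe + i) % UFRAMES, us[i]))
                        break;

        if (i == n)
                return true;

        /* Partial success: roll back what was already taken */
        while (--i >= 0)
                release((first_uframe + i) % UFRAMES, us[i]);
        return false;
}

int main(void)
{
        int durations[3] = { 40, 40, 40 };

        printf("first try:  %s\n", claim_all_or_nothing(0, 3, durations) ? "ok" : "full");
        printf("second try: %s\n", claim_all_or_nothing(0, 3, durations) ? "ok" : "full");
        printf("third try:  %s\n", claim_all_or_nothing(0, 3, durations) ? "ok" : "full");
        return 0;
}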
+ */ +static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + /* In non-split host and device time are the same */ + WARN_ON(qh->host_us != qh->device_us); + WARN_ON(qh->host_interval != qh->device_interval); + WARN_ON(qh->num_hs_transfers != 1); + + /* We'll have one transfer; init start to 0 before calling scheduler */ + qh->hs_transfers[0].start_schedule_us = 0; + qh->hs_transfers[0].duration_us = qh->host_us; + + return dwc2_hs_pmap_schedule(hsotg, qh, false, 0); +} + +/** + * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer. + * + * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean + * interface. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + /* In non-split host and device time are the same */ + WARN_ON(qh->host_us != qh->device_us); + WARN_ON(qh->host_interval != qh->device_interval); + WARN_ON(!qh->schedule_low_speed); + + /* Run on the main low speed schedule (no split = no hub = no TT) */ + return dwc2_ls_pmap_schedule(hsotg, qh, 0); } -static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +/** + * dwc2_uframe_schedule - Schedule a QH for a periodic xfer. + * + * Calls one of the 3 sub-function depending on what type of transfer this QH + * is for. Also adds some printing. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { int ret; - if (qh->dev_speed == USB_SPEED_HIGH) { - /* if this is a hs transaction we need a full frame */ - ret = dwc2_find_single_uframe(hsotg, qh); + if (qh->dev_speed == USB_SPEED_HIGH) + ret = dwc2_uframe_schedule_hs(hsotg, qh); + else if (!qh->do_split) + ret = dwc2_uframe_schedule_ls(hsotg, qh); + else + ret = dwc2_uframe_schedule_split(hsotg, qh); + + if (ret) + dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret); + else + dwc2_qh_schedule_print(hsotg, qh); + + return ret; +} + +/** + * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule(). + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + int i; + + for (i = 0; i < qh->num_hs_transfers; i++) + dwc2_hs_pmap_unschedule(hsotg, qh, i); + + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + + dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh); +} + +/** + * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled + * + * Takes a qh that has already been scheduled (which means we know we have the + * bandwdith reserved for us) and set the next_active_frame and the + * start_active_frame. + * + * This is expected to be called on qh's that weren't previously actively + * running. It just picks the next frame that we can fit into without any + * thought about the past. + * + * @hsotg: The HCD state structure for the DWC OTG controller + * @qh: QH for a periodic endpoint + * + */ +static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + u16 frame_number; + u16 earliest_frame; + u16 next_active_frame; + u16 relative_frame; + u16 interval; + + /* + * Use the real frame number rather than the cached value as of the + * last SOF to give us a little extra slop. 
+ */ + frame_number = dwc2_hcd_get_frame_number(hsotg); + + /* + * We wouldn't want to start any earlier than the next frame just in + * case the frame number ticks as we're doing this calculation. + * + * NOTE: if we could quantify how long till we actually get scheduled + * we might be able to avoid the "+ 1" by looking at the upper part of + * HFNUM (the FRREM field). For now we'll just use the + 1 though. + */ + earliest_frame = dwc2_frame_num_inc(frame_number, 1); + next_active_frame = earliest_frame; + + /* Get the "no microframe schduler" out of the way... */ + if (hsotg->core_params->uframe_sched <= 0) { + if (qh->do_split) + /* Splits are active at microframe 0 minus 1 */ + next_active_frame |= 0x7; + goto exit; + } + + if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) { + /* + * We're either at high speed or we're doing a split (which + * means we're talking high speed to a hub). In any case + * the first frame should be based on when the first scheduled + * event is. + */ + WARN_ON(qh->num_hs_transfers < 1); + + relative_frame = qh->hs_transfers[0].start_schedule_us / + DWC2_HS_PERIODIC_US_PER_UFRAME; + + /* Adjust interval as per high speed schedule */ + interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES); + } else { /* - * if this is a fs transaction we may need a sequence - * of frames + * Low or full speed directly on dwc2. Just about the same + * as high speed but on a different schedule and with slightly + * different adjustments. Note that this works because when + * the host and device are both low speed then frames in the + * controller tick at low speed. */ - ret = dwc2_find_multi_uframe(hsotg, qh); + relative_frame = qh->ls_start_schedule_slice / + DWC2_LS_PERIODIC_SLICES_PER_FRAME; + interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES); } - return ret; + + /* Scheduler messed up if frame is past interval */ + WARN_ON(relative_frame >= interval); + + /* + * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've + * done the gcd(), so it's safe to move to the beginning of the current + * interval like this. + * + * After this we might be before earliest_frame, but don't worry, + * we'll fix it... + */ + next_active_frame = (next_active_frame / interval) * interval; + + /* + * Actually choose to start at the frame number we've been + * scheduled for. + */ + next_active_frame = dwc2_frame_num_inc(next_active_frame, + relative_frame); + + /* + * We actually need 1 frame before since the next_active_frame is + * the frame number we'll be put on the ready list and we won't be on + * the bus until 1 frame later. + */ + next_active_frame = dwc2_frame_num_dec(next_active_frame, 1); + + /* + * By now we might actually be before the earliest_frame. Let's move + * up intervals until we're not. + */ + while (dwc2_frame_num_gt(earliest_frame, next_active_frame)) + next_active_frame = dwc2_frame_num_inc(next_active_frame, + interval); + +exit: + qh->next_active_frame = next_active_frame; + qh->start_active_frame = next_active_frame; + + dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n", + qh, frame_number, qh->next_active_frame); +} + +/** + * dwc2_do_reserve() - Make a periodic reservation + * + * Try to allocate space in the periodic schedule. Depending on parameters + * this might use the microframe scheduler or the dumb scheduler. + * + * @hsotg: The HCD state structure for the DWC OTG controller + * @qh: QH for the periodic transfer. + * + * Returns: 0 upon success; error upon failure. 
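All of the dwc2_frame_num_inc()/dec()/gt() calls above have to survive the frame counter wrapping at HFNUM_MAX_FRNUM. A self-contained sketch of wrap-safe frame arithmetic, assuming the usual 14-bit mask; the driver's own helpers are the authoritative versions:

#include <stdio.h>
#include <stdbool.h>

#define FRNUM_MASK      0x3fff          /* assumed 14-bit frame counter */

static unsigned frame_inc(unsigned frame, unsigned inc)
{
        return (frame + inc) & FRNUM_MASK;
}

static unsigned frame_dec(unsigned frame, unsigned dec)
{
        return (frame - dec) & FRNUM_MASK;
}

/* a > b if a is ahead of b by less than half the counter range */
static bool frame_gt(unsigned a, unsigned b)
{
        return a != b && ((a - b) & FRNUM_MASK) < (FRNUM_MASK >> 1);
}

int main(void)
{
        unsigned near_wrap = 0x3ffe;

        printf("0x3ffe + 4  = 0x%04x\n", frame_inc(near_wrap, 4));     /* 0x0002 */
        printf("0x0002 - 4  = 0x%04x\n", frame_dec(0x0002, 4));        /* 0x3ffe */
        printf("0x0002 > 0x3ffe ? %d\n", frame_gt(0x0002, near_wrap)); /* 1: wrapped */
        printf("0x3ffe > 0x0002 ? %d\n", frame_gt(near_wrap, 0x0002)); /* 0 */
        return 0;
}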
+ */ +static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + int status; + + if (hsotg->core_params->uframe_sched > 0) { + status = dwc2_uframe_schedule(hsotg, qh); + } else { + status = dwc2_periodic_channel_available(hsotg); + if (status) { + dev_info(hsotg->dev, + "%s: No host channel available for periodic transfer\n", + __func__); + return status; + } + + status = dwc2_check_periodic_bandwidth(hsotg, qh); + } + + if (status) { + dev_dbg(hsotg->dev, + "%s: Insufficient periodic bandwidth for periodic transfer\n", + __func__); + return status; + } + + if (hsotg->core_params->uframe_sched <= 0) + /* Reserve periodic channel */ + hsotg->periodic_channels++; + + /* Update claimed usecs per (micro)frame */ + hsotg->periodic_usecs += qh->host_us; + + dwc2_pick_first_frame(hsotg, qh); + + return 0; +} + +/** + * dwc2_do_unreserve() - Actually release the periodic reservation + * + * This function actually releases the periodic bandwidth that was reserved + * by the given qh. + * + * @hsotg: The HCD state structure for the DWC OTG controller + * @qh: QH for the periodic transfer. + */ +static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + assert_spin_locked(&hsotg->lock); + + WARN_ON(!qh->unreserve_pending); + + /* No more unreserve pending--we're doing it */ + qh->unreserve_pending = false; + + if (WARN_ON(!list_empty(&qh->qh_list_entry))) + list_del_init(&qh->qh_list_entry); + + /* Update claimed usecs per (micro)frame */ + hsotg->periodic_usecs -= qh->host_us; + + if (hsotg->core_params->uframe_sched > 0) { + dwc2_uframe_unschedule(hsotg, qh); + } else { + /* Release periodic channel reservation */ + hsotg->periodic_channels--; + } +} + +/** + * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation + * + * According to the kernel doc for usb_submit_urb() (specifically the part about + * "Reserved Bandwidth Transfers"), we need to keep a reservation active as + * long as a device driver keeps submitting. Since we're using HCD_BH to give + * back the URB we need to give the driver a little bit of time before we + * release the reservation. This worker is called after the appropriate + * delay. + * + * @work: Pointer to a qh unreserve_work. + */ +static void dwc2_unreserve_timer_fn(unsigned long data) +{ + struct dwc2_qh *qh = (struct dwc2_qh *)data; + struct dwc2_hsotg *hsotg = qh->hsotg; + unsigned long flags; + + /* + * Wait for the lock, or for us to be scheduled again. We + * could be scheduled again if: + * - We started executing but didn't get the lock yet. + * - A new reservation came in, but cancel didn't take effect + * because we already started executing. + * - The timer has been kicked again. + * In that case cancel and wait for the next call. + */ + while (!spin_trylock_irqsave(&hsotg->lock, flags)) { + if (timer_pending(&qh->unreserve_timer)) + return; + } + + /* + * Might be no more unreserve pending if: + * - We started executing but didn't get the lock yet. + * - A new reservation came in, but cancel didn't take effect + * because we already started executing. + * + * We can't put this in the loop above because unreserve_pending needs + * to be accessed under lock, so we can only check it once we got the + * lock. 
+ */ + if (qh->unreserve_pending) + dwc2_do_unreserve(hsotg, qh); + + spin_unlock_irqrestore(&hsotg->lock, flags); } /** @@ -474,42 +1353,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { int status; - if (hsotg->core_params->uframe_sched > 0) { - int frame = -1; - - status = dwc2_find_uframe(hsotg, qh); - if (status == 0) - frame = 7; - else if (status > 0) - frame = status - 1; - - /* Set the new frame up */ - if (frame >= 0) { - qh->sched_frame &= ~0x7; - qh->sched_frame |= (frame & 7); - } - - if (status > 0) - status = 0; - } else { - status = dwc2_periodic_channel_available(hsotg); - if (status) { - dev_info(hsotg->dev, - "%s: No host channel available for periodic transfer\n", - __func__); - return status; - } - - status = dwc2_check_periodic_bandwidth(hsotg, qh); - } - - if (status) { - dev_dbg(hsotg->dev, - "%s: Insufficient periodic bandwidth for periodic transfer\n", - __func__); - return status; - } - status = dwc2_check_max_xfer_size(hsotg, qh); if (status) { dev_dbg(hsotg->dev, @@ -518,6 +1361,35 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) return status; } + /* Cancel pending unreserve; if canceled OK, unreserve was pending */ + if (del_timer(&qh->unreserve_timer)) + WARN_ON(!qh->unreserve_pending); + + /* + * Only need to reserve if there's not an unreserve pending, since if an + * unreserve is pending then by definition our old reservation is still + * valid. Unreserve might still be pending even if we didn't cancel if + * dwc2_unreserve_timer_fn() already started. Code in the timer handles + * that case. + */ + if (!qh->unreserve_pending) { + status = dwc2_do_reserve(hsotg, qh); + if (status) + return status; + } else { + /* + * It might have been a while, so make sure that frame_number + * is still good. Note: we could also try to use the similar + * dwc2_next_periodic_start() but that schedules much more + * tightly and we might need to hurry and queue things up. + */ + if (dwc2_frame_num_le(qh->next_active_frame, + hsotg->frame_number)) + dwc2_pick_first_frame(hsotg, qh); + } + + qh->unreserve_pending = 0; + if (hsotg->core_params->dma_desc_enable > 0) /* Don't rely on SOF and start in ready schedule */ list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); @@ -526,14 +1398,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_inactive); - if (hsotg->core_params->uframe_sched <= 0) - /* Reserve periodic channel */ - hsotg->periodic_channels++; - - /* Update claimed usecs per (micro)frame */ - hsotg->periodic_usecs += qh->usecs; - - return status; + return 0; } /** @@ -546,25 +1411,231 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { - int i; + bool did_modify; + + assert_spin_locked(&hsotg->lock); + + /* + * Schedule the unreserve to happen in a little bit. Cases here: + * - Unreserve worker might be sitting there waiting to grab the lock. + * In this case it will notice it's been schedule again and will + * quit. + * - Unreserve worker might not be scheduled. + * + * We should never already be scheduled since dwc2_schedule_periodic() + * should have canceled the scheduled unreserve timer (hence the + * warning on did_modify). 
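Taken together, the unreserve timer and the unreserve_pending flag implement a grace period: the bandwidth reservation is only dropped if the class driver does not submit another URB shortly after the last one completed. A toy, single-threaded model of that policy (no real timers or locking; the driver uses a kernel timer with DWC2_UNRESERVE_DELAY and hsotg->lock):

#include <stdio.h>
#include <stdbool.h>

struct reservation {
        bool reserved;          /* bandwidth currently claimed */
        bool release_pending;   /* a delayed release is scheduled */
        long release_at;        /* "time" the delayed release fires */
};

#define GRACE   5               /* toy stand-in for DWC2_UNRESERVE_DELAY */

static void submit(struct reservation *r, long now)
{
        if (r->release_pending) {
                /* Cancel the delayed release: the old reservation is still valid */
                r->release_pending = false;
                printf("t=%ld submit: reused existing reservation\n", now);
                return;
        }
        r->reserved = true;
        printf("t=%ld submit: made a new reservation\n", now);
}

static void last_urb_done(struct reservation *r, long now)
{
        /* Don't release right away; give the driver a grace period */
        r->release_pending = true;
        r->release_at = now + GRACE;
}

static void tick(struct reservation *r, long now)
{
        if (r->release_pending && now >= r->release_at) {
                r->release_pending = false;
                r->reserved = false;
                printf("t=%ld grace period expired: reservation released\n", now);
        }
}

int main(void)
{
        struct reservation r = { 0 };
        long t;

        submit(&r, 0);
        last_urb_done(&r, 1);
        submit(&r, 3);                  /* resubmitted within the grace period */
        last_urb_done(&r, 4);
        for (t = 5; t <= 10; t++)
                tick(&r, t);            /* nothing resubmitted: released at t=9 */
        return 0;
}

The point of the grace period is to keep back-to-back submissions from repeatedly tearing down and rebuilding the same periodic reservation.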
+ * + * We add + 1 to the timer to guarantee that at least 1 jiffy has + * passed (otherwise if the jiffy counter might tick right after we + * read it and we'll get no delay). + */ + did_modify = mod_timer(&qh->unreserve_timer, + jiffies + DWC2_UNRESERVE_DELAY + 1); + WARN_ON(did_modify); + qh->unreserve_pending = 1; list_del_init(&qh->qh_list_entry); +} - /* Update claimed usecs per (micro)frame */ - hsotg->periodic_usecs -= qh->usecs; +/** + * dwc2_qh_init() - Initializes a QH structure + * + * @hsotg: The HCD state structure for the DWC OTG controller + * @qh: The QH to init + * @urb: Holds the information about the device/endpoint needed to initialize + * the QH + * @mem_flags: Flags for allocating memory. + */ +static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, + struct dwc2_hcd_urb *urb, gfp_t mem_flags) +{ + int dev_speed = dwc2_host_get_speed(hsotg, urb->priv); + u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); + bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info); + bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC); + bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT); + u32 hprt = dwc2_readl(hsotg->regs + HPRT0); + u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; + bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED && + dev_speed != USB_SPEED_HIGH); + int maxp = dwc2_hcd_get_mps(&urb->pipe_info); + int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp); + char *speed, *type; - if (hsotg->core_params->uframe_sched > 0) { - for (i = 0; i < 8; i++) { - hsotg->frame_usecs[i] += qh->frame_usecs[i]; - qh->frame_usecs[i] = 0; + /* Initialize QH */ + qh->hsotg = hsotg; + setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn, + (unsigned long)qh); + qh->ep_type = ep_type; + qh->ep_is_in = ep_is_in; + + qh->data_toggle = DWC2_HC_PID_DATA0; + qh->maxp = maxp; + INIT_LIST_HEAD(&qh->qtd_list); + INIT_LIST_HEAD(&qh->qh_list_entry); + + qh->do_split = do_split; + qh->dev_speed = dev_speed; + + if (ep_is_int || ep_is_isoc) { + /* Compute scheduling parameters once and save them */ + int host_speed = do_split ? USB_SPEED_HIGH : dev_speed; + struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv, + mem_flags, + &qh->ttport); + int device_ns; + + qh->dwc_tt = dwc_tt; + + qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in, + ep_is_isoc, bytecount)); + device_ns = usb_calc_bus_time(dev_speed, ep_is_in, + ep_is_isoc, bytecount); + + if (do_split && dwc_tt) + device_ns += dwc_tt->usb_tt->think_time; + qh->device_us = NS_TO_US(device_ns); + + + qh->device_interval = urb->interval; + qh->host_interval = urb->interval * (do_split ? 8 : 1); + + /* + * Schedule low speed if we're running the host in low or + * full speed OR if we've got a "TT" to deal with to access this + * device. 
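The interval and duration bookkeeping above keeps host and device views separate: behind a TT the device side still ticks in full speed frames while the host schedule ticks in microframes (hence the multiply by 8), and the TT think time is charged to the device side only. A tiny worked example with made-up durations (usb_calc_bus_time() provides the real numbers):

#include <stdio.h>
#include <stdbool.h>

/* Round ns up to us for illustration; the driver's NS_TO_US() is authoritative */
#define NS_TO_US(ns)    (((ns) + 999) / 1000)

int main(void)
{
        bool do_split = true;           /* FS/LS device behind a high speed hub */
        int urb_interval = 1;           /* interrupt interval: 1 full-speed frame */
        int device_ns = 94000;          /* made-up stand-in for usb_calc_bus_time() */
        int tt_think_ns = 666;          /* made-up TT think time from the hub */

        /* Device side ticks in frames; the host schedule ticks in microframes */
        int host_interval = urb_interval * (do_split ? 8 : 1);

        if (do_split)
                device_ns += tt_think_ns;       /* TT overhead billed to the device side */

        printf("host_interval = %d uframes, device_us = %d us\n",
               host_interval, NS_TO_US(device_ns));
        return 0;
}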
+ */ + qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED || + dwc_tt; + + if (do_split) { + /* We won't know num transfers until we schedule */ + qh->num_hs_transfers = -1; + } else if (dev_speed == USB_SPEED_HIGH) { + qh->num_hs_transfers = 1; + } else { + qh->num_hs_transfers = 0; } - } else { - /* Release periodic channel reservation */ - hsotg->periodic_channels--; + + /* We'll schedule later when we have something to do */ + } + + switch (dev_speed) { + case USB_SPEED_LOW: + speed = "low"; + break; + case USB_SPEED_FULL: + speed = "full"; + break; + case USB_SPEED_HIGH: + speed = "high"; + break; + default: + speed = "?"; + break; + } + + switch (qh->ep_type) { + case USB_ENDPOINT_XFER_ISOC: + type = "isochronous"; + break; + case USB_ENDPOINT_XFER_INT: + type = "interrupt"; + break; + case USB_ENDPOINT_XFER_CONTROL: + type = "control"; + break; + case USB_ENDPOINT_XFER_BULK: + type = "bulk"; + break; + default: + type = "?"; + break; + } + + dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type, + speed, bytecount); + dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh, + dwc2_hcd_get_dev_addr(&urb->pipe_info), + dwc2_hcd_get_ep_num(&urb->pipe_info), + ep_is_in ? "IN" : "OUT"); + if (ep_is_int || ep_is_isoc) { + dwc2_sch_dbg(hsotg, + "QH=%p ...duration: host=%d us, device=%d us\n", + qh, qh->host_us, qh->device_us); + dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n", + qh, qh->host_interval, qh->device_interval); + if (qh->schedule_low_speed) + dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n", + qh, dwc2_get_ls_map(hsotg, qh)); } } /** + * dwc2_hcd_qh_create() - Allocates and initializes a QH + * + * @hsotg: The HCD state structure for the DWC OTG controller + * @urb: Holds the information about the device/endpoint needed + * to initialize the QH + * @atomic_alloc: Flag to do atomic allocation if needed + * + * Return: Pointer to the newly allocated QH, or NULL on error + */ +struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, + struct dwc2_hcd_urb *urb, + gfp_t mem_flags) +{ + struct dwc2_qh *qh; + + if (!urb->priv) + return NULL; + + /* Allocate memory */ + qh = kzalloc(sizeof(*qh), mem_flags); + if (!qh) + return NULL; + + dwc2_qh_init(hsotg, qh, urb, mem_flags); + + if (hsotg->core_params->dma_desc_enable > 0 && + dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { + dwc2_hcd_qh_free(hsotg, qh); + return NULL; + } + + return qh; +} + +/** + * dwc2_hcd_qh_free() - Frees the QH + * + * @hsotg: HCD instance + * @qh: The QH to free + * + * QH should already be removed from the list. QTD list should already be empty + * if called from URB Dequeue. + * + * Must NOT be called with interrupt disabled or spinlock held + */ +void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +{ + /* Make sure any unreserve work is finished. */ + if (del_timer_sync(&qh->unreserve_timer)) { + unsigned long flags; + + spin_lock_irqsave(&hsotg->lock, flags); + dwc2_do_unreserve(hsotg, qh); + spin_unlock_irqrestore(&hsotg->lock, flags); + } + dwc2_host_put_tt_info(hsotg, qh->dwc_tt); + + if (qh->desc_list) + dwc2_hcd_qh_free_ddma(hsotg, qh); + kfree(qh); +} + +/** * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic * schedule if it is not already in the schedule. If the QH is already in * the schedule, no action is taken. 
@@ -586,16 +1657,12 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* QH already in a schedule */ return 0; - if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) && - !hsotg->frame_number) { - dev_dbg(hsotg->dev, - "reset frame number counter\n"); - qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number, - SCHEDULE_SLOP); - } - /* Add the new QH to the appropriate schedule */ if (dwc2_qh_is_non_per(qh)) { + /* Schedule right away */ + qh->start_active_frame = hsotg->frame_number; + qh->next_active_frame = qh->start_active_frame; + /* Always start in inactive schedule */ list_add_tail(&qh->qh_list_entry, &hsotg->non_periodic_sched_inactive); @@ -649,39 +1716,164 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) } } -/* - * Schedule the next continuing periodic split transfer +/** + * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split. + * + * This is called for setting next_active_frame for periodic splits for all but + * the first packet of the split. Confusing? I thought so... + * + * Periodic splits are single low/full speed transfers that we end up splitting + * up into several high speed transfers. They always fit into one full (1 ms) + * frame but might be split over several microframes (125 us each). We to put + * each of the parts on a very specific high speed frame. + * + * This function figures out where the next active uFrame needs to be. + * + * @hsotg: The HCD state structure + * @qh: QH for the periodic transfer. + * @frame_number: The current frame number. + * + * Return: number missed by (or 0 if we didn't miss). */ -static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg, - struct dwc2_qh *qh, u16 frame_number, - int sched_next_periodic_split) +static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, u16 frame_number) { + u16 old_frame = qh->next_active_frame; + u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1); + int missed = 0; u16 incr; - if (sched_next_periodic_split) { - qh->sched_frame = frame_number; - incr = dwc2_frame_num_inc(qh->start_split_frame, 1); - if (dwc2_frame_num_le(frame_number, incr)) { - /* - * Allow one frame to elapse after start split - * microframe before scheduling complete split, but - * DON'T if we are doing the next start split in the - * same frame for an ISOC out - */ - if (qh->ep_type != USB_ENDPOINT_XFER_ISOC || - qh->ep_is_in != 0) { - qh->sched_frame = - dwc2_frame_num_inc(qh->sched_frame, 1); - } - } - } else { - qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame, - qh->interval); - if (dwc2_frame_num_le(qh->sched_frame, frame_number)) - qh->sched_frame = frame_number; - qh->sched_frame |= 0x7; - qh->start_split_frame = qh->sched_frame; + /* + * See dwc2_uframe_schedule_split() for split scheduling. + * + * Basically: increment 1 normally, but 2 right after the start split + * (except for ISOC out). + */ + if (old_frame == qh->start_active_frame && + !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)) + incr = 2; + else + incr = 1; + + qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr); + + /* + * Note that it's OK for frame_number to be 1 frame past + * next_active_frame. Remember that next_active_frame is supposed to + * be 1 frame _before_ when we want to be scheduled. If we're 1 frame + * past it just means schedule ASAP. + * + * It's _not_ OK, however, if we're more than one frame past. 
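The cadence that falls out of the incr rule above is easiest to see with concrete numbers: after the start split the schedule skips one (micro)frame before the complete splits, except for ISOC OUT, where every transfer carries data and the step is always one. A small sketch printing both sequences (plain integers; the driver uses its wrap-safe frame helpers):

#include <stdio.h>
#include <stdbool.h>

static void show_cadence(const char *label, bool isoc_out, int transfers)
{
        int start = 100;                /* arbitrary start_active_frame */
        int frame = start;
        int i;

        printf("%s:", label);
        for (i = 0; i < transfers; i++) {
                printf(" %d", frame);
                if (frame == start && !isoc_out)
                        frame += 2;     /* skip a uframe right after the start split */
                else
                        frame += 1;
        }
        printf("\n");
}

int main(void)
{
        show_cadence("INT IN split  ", false, 4);       /* 100 102 103 104 */
        show_cadence("ISOC OUT split", true, 4);        /* 100 101 102 103 */
        return 0;
}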
+ */ + if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) { + /* + * OOPS, we missed. That's actually pretty bad since + * the hub will be unhappy; try ASAP I guess. + */ + missed = dwc2_frame_num_dec(prev_frame_number, + qh->next_active_frame); + qh->next_active_frame = frame_number; } + + return missed; +} + +/** + * dwc2_next_periodic_start() - Set next_active_frame for next transfer start + * + * This is called for setting next_active_frame for a periodic transfer for + * all cases other than midway through a periodic split. This will also update + * start_active_frame. + * + * Since we _always_ keep start_active_frame as the start of the previous + * transfer this is normally pretty easy: we just add our interval to + * start_active_frame and we've got our answer. + * + * The tricks come into play if we miss. In that case we'll look for the next + * slot we can fit into. + * + * @hsotg: The HCD state structure + * @qh: QH for the periodic transfer. + * @frame_number: The current frame number. + * + * Return: number missed by (or 0 if we didn't miss). + */ +static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, u16 frame_number) +{ + int missed = 0; + u16 interval = qh->host_interval; + u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1); + + qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame, + interval); + + /* + * The dwc2_frame_num_gt() function used below won't work terribly well + * with if we just incremented by a really large intervals since the + * frame counter only goes to 0x3fff. It's terribly unlikely that we + * will have missed in this case anyway. Just go to exit. If we want + * to try to do better we'll need to keep track of a bigger counter + * somewhere in the driver and handle overflows. + */ + if (interval >= 0x1000) + goto exit; + + /* + * Test for misses, which is when it's too late to schedule. + * + * A few things to note: + * - We compare against prev_frame_number since start_active_frame + * and next_active_frame are always 1 frame before we want things + * to be active and we assume we can still get scheduled in the + * current frame number. + * - It's possible for start_active_frame (now incremented) to be + * next_active_frame if we got an EO MISS (even_odd miss) which + * basically means that we detected there wasn't enough time for + * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us + * at the last second. We want to make sure we don't schedule + * another transfer for the same frame. My test webcam doesn't seem + * terribly upset by missing a transfer but really doesn't like when + * we do two transfers in the same frame. + * - Some misses are expected. Specifically, in order to work + * perfectly dwc2 really needs quite spectacular interrupt latency + * requirements. It needs to be able to handle its interrupts + * completely within 125 us of them being asserted. That not only + * means that the dwc2 interrupt handler needs to be fast but it + * means that nothing else in the system has to block dwc2 for a long + * time. We can help with the dwc2 parts of this, but it's hard to + * guarantee that a system will have interrupt latency < 125 us, so + * we have to be robust to some misses. + */ + if (qh->start_active_frame == qh->next_active_frame || + dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) { + u16 ideal_start = qh->start_active_frame; + int periods_in_map; + + /* + * Adjust interval as per gcd with map size. 
+ * See pmap_schedule() for more details here. + */ + if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH) + periods_in_map = DWC2_HS_SCHEDULE_UFRAMES; + else + periods_in_map = DWC2_LS_SCHEDULE_FRAMES; + interval = gcd(interval, periods_in_map); + + do { + qh->start_active_frame = dwc2_frame_num_inc( + qh->start_active_frame, interval); + } while (dwc2_frame_num_gt(prev_frame_number, + qh->start_active_frame)); + + missed = dwc2_frame_num_dec(qh->start_active_frame, + ideal_start); + } + +exit: + qh->next_active_frame = qh->start_active_frame; + + return missed; } /* @@ -700,7 +1892,9 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg, void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, int sched_next_periodic_split) { + u16 old_frame = qh->next_active_frame; u16 frame_number; + int missed; if (dbg_qh(qh)) dev_vdbg(hsotg->dev, "%s()\n", __func__); @@ -713,33 +1907,44 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, return; } + /* + * Use the real frame number rather than the cached value as of the + * last SOF just to get us a little closer to reality. Note that + * means we don't actually know if we've already handled the SOF + * interrupt for this frame. + */ frame_number = dwc2_hcd_get_frame_number(hsotg); - if (qh->do_split) { - dwc2_sched_periodic_split(hsotg, qh, frame_number, - sched_next_periodic_split); - } else { - qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame, - qh->interval); - if (dwc2_frame_num_le(qh->sched_frame, frame_number)) - qh->sched_frame = frame_number; - } + if (sched_next_periodic_split) + missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number); + else + missed = dwc2_next_periodic_start(hsotg, qh, frame_number); + + dwc2_sch_vdbg(hsotg, + "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n", + qh, sched_next_periodic_split, frame_number, old_frame, + qh->next_active_frame, + dwc2_frame_num_dec(qh->next_active_frame, old_frame), + missed, missed ? "MISS" : ""); if (list_empty(&qh->qtd_list)) { dwc2_hcd_qh_unlink(hsotg, qh); return; } + /* * Remove from periodic_sched_queued and move to * appropriate queue + * + * Note: we purposely use the frame_number from the "hsotg" structure + * since we know SOF interrupt will handle future frames. 
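dwc2_next_periodic_start() recovers from a miss by walking start_active_frame forward in interval steps, with the interval first folded by gcd() so it still lines up with the schedule map, and then reporting how far it slipped. A simplified model with plain integers (the driver does the same thing with the dwc2_frame_num_* wrap-safe arithmetic):

#include <stdio.h>

static int gcd_int(int a, int b)
{
        while (b) {
                int t = a % b;
                a = b;
                b = t;
        }
        return a;
}

int main(void)
{
        int periods_in_map = 8;         /* e.g. a high speed schedule of 8 uframes */
        int interval = 12;              /* requested interval, folded below */
        int start_active = 100;         /* where the last transfer started */
        int prev_frame = 131;           /* current frame - 1: we are well behind */
        int ideal_start, missed = 0;

        /* Normal advance first */
        start_active += interval;
        ideal_start = start_active;

        /* Too late to make that frame: catch up in folded-interval steps */
        if (prev_frame >= start_active) {
                interval = gcd_int(interval, periods_in_map);   /* 12 -> 4 */
                do {
                        start_active += interval;
                } while (prev_frame >= start_active);
                missed = start_active - ideal_start;
        }

        printf("next start %d (missed by %d)\n", start_active, missed);
        return 0;
}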
*/ - if ((hsotg->core_params->uframe_sched > 0 && - dwc2_frame_num_le(qh->sched_frame, frame_number)) || - (hsotg->core_params->uframe_sched <= 0 && - qh->sched_frame == frame_number)) - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready); + if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number)) + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_ready); else - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_inactive); } /** diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 690b9fd98b55..88629bed6614 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -126,10 +126,10 @@ static const struct dwc2_core_params params_rk3066 = { .speed = -1, .enable_dynamic_fifo = 1, .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 520, /* 520 DWORDs */ + .host_rx_fifo_size = 525, /* 525 DWORDs */ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ - .max_transfer_size = 65535, + .max_transfer_size = -1, .max_packet_count = -1, .host_channels = -1, .phy_type = -1, @@ -149,6 +149,38 @@ static const struct dwc2_core_params params_rk3066 = { .hibernation = -1, }; +static const struct dwc2_core_params params_ltq = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_enable = -1, + .dma_desc_enable = -1, + .dma_desc_fs_enable = -1, + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 288, /* 288 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + /* * Check the dr_mode against the module configuration and hardware * capabilities. 
@@ -428,6 +460,8 @@ static const struct of_device_id dwc2_of_match_table[] = { { .compatible = "brcm,bcm2835-usb", .data = ¶ms_bcm2835 }, { .compatible = "hisilicon,hi6220-usb", .data = ¶ms_hi6220 }, { .compatible = "rockchip,rk3066-usb", .data = ¶ms_rk3066 }, + { .compatible = "lantiq,arx100-usb", .data = ¶ms_ltq }, + { .compatible = "lantiq,xrx200-usb", .data = ¶ms_ltq }, { .compatible = "snps,dwc2", .data = NULL }, { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, {}, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index de5e01f41bc2..17fd81447c9f 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -962,10 +962,6 @@ static int dwc3_probe(struct platform_device *pdev) fladj = pdata->fladj_value; } - /* default to superspeed if no maximum_speed passed */ - if (dwc->maximum_speed == USB_SPEED_UNKNOWN) - dwc->maximum_speed = USB_SPEED_SUPER; - dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; @@ -1016,6 +1012,33 @@ static int dwc3_probe(struct platform_device *pdev) goto err1; } + /* Check the maximum_speed parameter */ + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + break; + default: + dev_err(dev, "invalid maximum_speed parameter %d\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + dwc->maximum_speed = USB_SPEED_SUPER; + + /* + * default to superspeed plus if we are capable. + */ + if (dwc3_is_usb31(dwc) && + (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == + DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + + break; + } + /* Adjust Frame Length */ dwc3_frame_length_adjustment(dwc, fladj); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index e4f8b90d9627..6254b2ff9080 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -223,7 +223,8 @@ /* Global HWPARAMS3 Register */ #define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3) #define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0 -#define DWC3_GHWPARAMS3_SSPHY_IFC_ENA 1 +#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN1 1 +#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN2 2 /* DWC_usb31 only */ #define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2) #define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0 #define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1 @@ -249,6 +250,7 @@ #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) #define DWC3_DCFG_SPEED_MASK (7 << 0) +#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ #define DWC3_DCFG_SUPERSPEED (4 << 0) #define DWC3_DCFG_HIGHSPEED (0 << 0) #define DWC3_DCFG_FULLSPEED2 (1 << 0) @@ -339,6 +341,7 @@ #define DWC3_DSTS_CONNECTSPD (7 << 0) +#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ #define DWC3_DSTS_SUPERSPEED (4 << 0) #define DWC3_DSTS_HIGHSPEED (0 << 0) #define DWC3_DSTS_FULLSPEED2 (1 << 0) @@ -1024,6 +1027,12 @@ struct dwc3_gadget_ep_cmd_params { void dwc3_set_mode(struct dwc3 *dwc, u32 mode); int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc); +/* check whether we are on the DWC_usb31 core */ +static inline bool dwc3_is_usb31(struct dwc3 *dwc) +{ + return !!(dwc->revision & DWC3_REVISION_IS_DWC31); +} + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 8d6b75c2f53b..eca2e6d8e041 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -356,7 
+356,8 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, */ usb_status |= dwc->gadget.is_selfpowered; - if (dwc->speed == DWC3_DSTS_SUPERSPEED) { + if ((dwc->speed == DWC3_DSTS_SUPERSPEED) || + (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) { reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (reg & DWC3_DCTL_INITU1ENA) usb_status |= 1 << USB_DEV_STAT_U1_ENABLED; @@ -426,7 +427,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, case USB_DEVICE_U1_ENABLE: if (state != USB_STATE_CONFIGURED) return -EINVAL; - if (dwc->speed != DWC3_DSTS_SUPERSPEED) + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) return -EINVAL; reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -440,7 +442,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, case USB_DEVICE_U2_ENABLE: if (state != USB_STATE_CONFIGURED) return -EINVAL; - if (dwc->speed != DWC3_DSTS_SUPERSPEED) + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) return -EINVAL; reg = dwc3_readl(dwc->regs, DWC3_DCTL); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2363bad45af8..3ac170f9d94d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -463,7 +463,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); /* Burst size is only needed in SuperSpeed mode */ - if (dwc->gadget.speed == USB_SPEED_SUPER) { + if (dwc->gadget.speed >= USB_SPEED_SUPER) { u32 burst = dep->endpoint.maxburst - 1; params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst); @@ -1441,7 +1441,8 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g) reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; - if (speed == DWC3_DSTS_SUPERSPEED) { + if ((speed == DWC3_DSTS_SUPERSPEED) || + (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n"); ret = -EINVAL; goto out; @@ -1666,10 +1667,16 @@ static int dwc3_gadget_start(struct usb_gadget *g, case USB_SPEED_HIGH: reg |= DWC3_DSTS_HIGHSPEED; break; - case USB_SPEED_SUPER: /* FALLTHROUGH */ - case USB_SPEED_UNKNOWN: /* FALTHROUGH */ + case USB_SPEED_SUPER_PLUS: + reg |= DWC3_DSTS_SUPERSPEED_PLUS; + break; default: - reg |= DWC3_DSTS_SUPERSPEED; + dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_SUPER: + reg |= DWC3_DCFG_SUPERSPEED; + break; } } dwc3_writel(dwc->regs, DWC3_DCFG, reg); @@ -2340,7 +2347,8 @@ static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) * this. Maybe it becomes part of the power saving plan. 
*/ - if (speed != DWC3_DSTS_SUPERSPEED) + if ((speed != DWC3_DSTS_SUPERSPEED) && + (speed != DWC3_DSTS_SUPERSPEED_PLUS)) return; /* @@ -2369,6 +2377,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) dwc3_update_ram_clk_sel(dwc, speed); switch (speed) { + case DWC3_DCFG_SUPERSPEED_PLUS: + dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + dwc->gadget.ep0->maxpacket = 512; + dwc->gadget.speed = USB_SPEED_SUPER_PLUS; + break; case DWC3_DCFG_SUPERSPEED: /* * WORKAROUND: DWC3 revisions <1.90a have an issue which @@ -2410,8 +2423,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) /* Enable USB2 LPM Capability */ - if ((dwc->revision > DWC3_REVISION_194A) - && (speed != DWC3_DCFG_SUPERSPEED)) { + if ((dwc->revision > DWC3_REVISION_194A) && + (speed != DWC3_DCFG_SUPERSPEED) && + (speed != DWC3_DCFG_SUPERSPEED_PLUS)) { reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg |= DWC3_DCFG_LPM_CAP; dwc3_writel(dwc->regs, DWC3_DCFG, reg); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 8b14c2a13ac5..a5c62093c26c 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -54,6 +54,36 @@ static struct usb_gadget_strings **get_containers_gs( } /** + * function_descriptors() - get function descriptors for speed + * @f: the function + * @speed: the speed + * + * Returns the descriptors or NULL if not set. + */ +static struct usb_descriptor_header ** +function_descriptors(struct usb_function *f, + enum usb_device_speed speed) +{ + struct usb_descriptor_header **descriptors; + + switch (speed) { + case USB_SPEED_SUPER_PLUS: + descriptors = f->ssp_descriptors; + break; + case USB_SPEED_SUPER: + descriptors = f->ss_descriptors; + break; + case USB_SPEED_HIGH: + descriptors = f->hs_descriptors; + break; + default: + descriptors = f->fs_descriptors; + } + + return descriptors; +} + +/** * next_ep_desc() - advance to the next EP descriptor * @t: currect pointer within descriptor array * @@ -118,6 +148,13 @@ int config_ep_by_speed(struct usb_gadget *g, /* select desired speed */ switch (g->speed) { + case USB_SPEED_SUPER_PLUS: + if (gadget_is_superspeed_plus(g)) { + speed_desc = f->ssp_descriptors; + want_comp_desc = 1; + break; + } + /* else: Fall trough */ case USB_SPEED_SUPER: if (gadget_is_superspeed(g)) { speed_desc = f->ss_descriptors; @@ -161,7 +198,7 @@ ep_found: (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP)) return -EIO; _ep->comp_desc = comp_desc; - if (g->speed == USB_SPEED_SUPER) { + if (g->speed >= USB_SPEED_SUPER) { switch (usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ @@ -237,6 +274,8 @@ int usb_add_function(struct usb_configuration *config, config->highspeed = true; if (!config->superspeed && function->ss_descriptors) config->superspeed = true; + if (!config->superspeed_plus && function->ssp_descriptors) + config->superspeed_plus = true; done: if (value) @@ -417,17 +456,7 @@ static int config_buf(struct usb_configuration *config, list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; - switch (speed) { - case USB_SPEED_SUPER: - descriptors = f->ss_descriptors; - break; - case USB_SPEED_HIGH: - descriptors = f->hs_descriptors; - break; - default: - descriptors = f->fs_descriptors; - } - + descriptors = function_descriptors(f, speed); if (!descriptors) continue; status = usb_descriptor_fillbuf(next, len, @@ -451,7 +480,7 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) u8 type = w_value >> 
8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; - if (gadget->speed == USB_SPEED_SUPER) + if (gadget->speed >= USB_SPEED_SUPER) speed = gadget->speed; else if (gadget_is_dualspeed(gadget)) { int hs = 0; @@ -482,6 +511,10 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) check_config: /* ignore configs that won't work at this speed */ switch (speed) { + case USB_SPEED_SUPER_PLUS: + if (!c->superspeed_plus) + continue; + break; case USB_SPEED_SUPER: if (!c->superspeed) continue; @@ -509,18 +542,24 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type) unsigned count = 0; int hs = 0; int ss = 0; + int ssp = 0; if (gadget_is_dualspeed(gadget)) { if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (gadget->speed == USB_SPEED_SUPER) ss = 1; + if (gadget->speed == USB_SPEED_SUPER_PLUS) + ssp = 1; if (type == USB_DT_DEVICE_QUALIFIER) hs = !hs; } list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ - if (ss) { + if (ssp) { + if (!c->superspeed_plus) + continue; + } else if (ss) { if (!c->superspeed) continue; } else if (hs) { @@ -597,6 +636,48 @@ static int bos_desc(struct usb_composite_dev *cdev) ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; + /* The SuperSpeedPlus USB Device Capability descriptor */ + if (gadget_is_superspeed_plus(cdev->gadget)) { + struct usb_ssp_cap_descriptor *ssp_cap; + + ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); + bos->bNumDeviceCaps++; + + /* + * Report typical values. + */ + + le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1)); + ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); + ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; + ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; + + /* SSAC = 1 (2 attributes) */ + ssp_cap->bmAttributes = cpu_to_le32(1); + + /* Min RX/TX Lane Count = 1 */ + ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12); + + /* + * bmSublinkSpeedAttr[0]: + * ST = Symmetric, RX + * LSE = 3 (Gbps) + * LP = 1 (SuperSpeedPlus) + * LSM = 10 (10 Gbps) + */ + ssp_cap->bmSublinkSpeedAttr[0] = + (3 << 4) | (1 << 14) | (0xa << 16); + /* + * bmSublinkSpeedAttr[1] = + * ST = Symmetric, TX + * LSE = 3 (Gbps) + * LP = 1 (SuperSpeedPlus) + * LSM = 10 (10 Gbps) + */ + ssp_cap->bmSublinkSpeedAttr[1] = + (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7); + } + return le16_to_cpu(bos->wTotalLength); } @@ -690,16 +771,7 @@ static int set_config(struct usb_composite_dev *cdev, * function's setup callback instead of the current * configuration's setup callback. */ - switch (gadget->speed) { - case USB_SPEED_SUPER: - descriptors = f->ss_descriptors; - break; - case USB_SPEED_HIGH: - descriptors = f->hs_descriptors; - break; - default: - descriptors = f->fs_descriptors; - } + descriptors = function_descriptors(f, gadget->speed); for (; *descriptors; ++descriptors) { struct usb_endpoint_descriptor *ep; @@ -819,8 +891,9 @@ int usb_add_config(struct usb_composite_dev *cdev, } else { unsigned i; - DBG(cdev, "cfg %d/%p speeds:%s%s%s\n", + DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n", config->bConfigurationValue, config, + config->superspeed_plus ? " superplus" : "", config->superspeed ? " super" : "", config->highspeed ? 
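The two bmSublinkSpeedAttr words built in bos_desc() above pack several fields of the SuperSpeedPlus capability descriptor into one 32-bit value each. A small encoder using named shifts, assuming the usual USB 3.1 field layout (attribute ID, lane speed exponent, sublink type, link protocol, lane speed mantissa), which reproduces the literals used in the diff:

#include <stdio.h>
#include <stdint.h>

/* Assumed bmSublinkSpeedAttr layout per the USB 3.1 SSP device capability */
#define SSP_SSID(v)     ((uint32_t)(v) << 0)    /* sublink speed attribute ID */
#define SSP_LSE(v)      ((uint32_t)(v) << 4)    /* lane speed exponent (3 = Gb/s) */
#define SSP_ST_TX       (1u << 7)               /* sublink type: transmit direction */
#define SSP_LP(v)       ((uint32_t)(v) << 14)   /* link protocol (1 = SuperSpeedPlus) */
#define SSP_LSM(v)      ((uint32_t)(v) << 16)   /* lane speed mantissa */

int main(void)
{
        /* Symmetric 10 Gb/s lane: receive attribute, then its transmit twin */
        uint32_t rx = SSP_SSID(0) | SSP_LSE(3) | SSP_LP(1) | SSP_LSM(10);
        uint32_t tx = rx | SSP_ST_TX;

        /* Should match the literals used in bos_desc() above */
        printf("bmSublinkSpeedAttr[0] = 0x%08x (expect 0x000a4030)\n", (unsigned)rx);
        printf("bmSublinkSpeedAttr[1] = 0x%08x (expect 0x000a40b0)\n", (unsigned)tx);
        return 0;
}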
" high" : "", config->fullspeed @@ -1499,7 +1572,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) cdev->gadget->ep0->maxpacket; if (gadget_is_superspeed(gadget)) { if (gadget->speed >= USB_SPEED_SUPER) { - cdev->desc.bcdUSB = cpu_to_le16(0x0300); + cdev->desc.bcdUSB = cpu_to_le16(0x0310); cdev->desc.bMaxPacketSize0 = 9; } else { cdev->desc.bcdUSB = cpu_to_le16(0x0210); @@ -1634,15 +1707,24 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) *((u8 *)req->buf) = value; value = min(w_length, (u16) 1); break; - - /* - * USB 3.0 additions: - * Function driver should handle get_status request. If such cb - * wasn't supplied we respond with default value = 0 - * Note: function driver should supply such cb only for the first - * interface of the function - */ case USB_REQ_GET_STATUS: + if (gadget_is_otg(gadget) && gadget->hnp_polling_support && + (w_index == OTG_STS_SELECTOR)) { + if (ctrl->bRequestType != (USB_DIR_IN | + USB_RECIP_DEVICE)) + goto unknown; + *((u8 *)req->buf) = gadget->host_request_flag; + value = 1; + break; + } + + /* + * USB 3.0 additions: + * Function driver should handle get_status request. If such cb + * wasn't supplied we respond with default value = 0 + * Note: function driver should supply such cb only for the + * first interface of the function + */ if (!gadget_is_superspeed(gadget)) goto unknown; if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE)) diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c index 0fafa7a1b6f6..e6c0542a063b 100644 --- a/drivers/usb/gadget/config.c +++ b/drivers/usb/gadget/config.c @@ -163,7 +163,8 @@ EXPORT_SYMBOL_GPL(usb_copy_descriptors); int usb_assign_descriptors(struct usb_function *f, struct usb_descriptor_header **fs, struct usb_descriptor_header **hs, - struct usb_descriptor_header **ss) + struct usb_descriptor_header **ss, + struct usb_descriptor_header **ssp) { struct usb_gadget *g = f->config->cdev->gadget; @@ -182,6 +183,11 @@ int usb_assign_descriptors(struct usb_function *f, if (!f->ss_descriptors) goto err; } + if (ssp && gadget_is_superspeed_plus(g)) { + f->ssp_descriptors = usb_copy_descriptors(ssp); + if (!f->ssp_descriptors) + goto err; + } return 0; err: usb_free_all_descriptors(f); @@ -194,6 +200,7 @@ void usb_free_all_descriptors(struct usb_function *f) usb_free_descriptors(f->fs_descriptors); usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->ss_descriptors); + usb_free_descriptors(f->ssp_descriptors); } EXPORT_SYMBOL_GPL(usb_free_all_descriptors); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 590c44989e5e..c6cc15ebeed6 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1229,6 +1229,7 @@ static void purge_configs_funcs(struct gadget_info *gi) } c->next_interface_id = 0; memset(c->interface, 0, sizeof(c->interface)); + c->superspeed_plus = 0; c->superspeed = 0; c->highspeed = 0; c->fullspeed = 0; diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 2fa1e80a3ce7..a30766ca4226 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -685,7 +685,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, - acm_ss_function); + acm_ss_function, NULL); if (status) goto fail; @@ -777,10 +777,10 @@ static ssize_t f_acm_port_num_show(struct 
config_item *item, char *page) return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num); } -CONFIGFS_ATTR_RO(f_acm_port_, num); +CONFIGFS_ATTR_RO(f_acm_, port_num); static struct configfs_attribute *acm_attrs[] = { - &f_acm_port_attr_num, + &f_acm_attr_port_num, NULL, }; diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 7ad60ee41914..4c488d15b6f6 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -786,7 +786,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) fs_ecm_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, - ecm_ss_function); + ecm_ss_function, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c index cad35a502d3f..d58bfc32be9e 100644 --- a/drivers/usb/gadget/function/f_eem.c +++ b/drivers/usb/gadget/function/f_eem.c @@ -309,7 +309,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f) eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, - eem_ss_function); + eem_ss_function, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index cf43e9e18368..8cfce105c7ee 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -684,44 +684,38 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) { struct ffs_epfile *epfile = file->private_data; + struct usb_request *req; struct ffs_ep *ep; char *data = NULL; ssize_t ret, data_len = -EINVAL; int halt; /* Are we still active? */ - if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { - ret = -ENODEV; - goto error; - } + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) + return -ENODEV; /* Wait for endpoint to be enabled */ ep = epfile->ep; if (!ep) { - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - goto error; - } + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep)); - if (ret) { - ret = -EINTR; - goto error; - } + if (ret) + return -EINTR; } /* Do we halt? */ halt = (!io_data->read == !epfile->in); - if (halt && epfile->isoc) { - ret = -EINVAL; - goto error; - } + if (halt && epfile->isoc) + return -EINVAL; /* Allocate & copy */ if (!halt) { /* * if we _do_ wait above, the epfile->ffs->gadget might be NULL - * before the waiting completes, so do not assign to 'gadget' earlier + * before the waiting completes, so do not assign to 'gadget' + * earlier */ struct usb_gadget *gadget = epfile->ffs->gadget; size_t copied; @@ -763,17 +757,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) if (epfile->ep != ep) { /* In the meantime, endpoint got disabled or changed. 
*/ ret = -ESHUTDOWN; - spin_unlock_irq(&epfile->ffs->eps_lock); } else if (halt) { /* Halt */ if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) usb_ep_set_halt(ep->ep); - spin_unlock_irq(&epfile->ffs->eps_lock); ret = -EBADMSG; - } else { - /* Fire the request */ - struct usb_request *req; - + } else if (unlikely(data_len == -EINVAL)) { /* * Sanity Check: even though data_len can't be used * uninitialized at the time I write this comment, some @@ -785,80 +774,80 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * For such reason, we're adding this redundant sanity check * here. */ - if (unlikely(data_len == -EINVAL)) { - WARN(1, "%s: data_len == -EINVAL\n", __func__); - ret = -EINVAL; - goto error_lock; - } - - if (io_data->aio) { - req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); - if (unlikely(!req)) - goto error_lock; - - req->buf = data; - req->length = data_len; + WARN(1, "%s: data_len == -EINVAL\n", __func__); + ret = -EINVAL; + } else if (!io_data->aio) { + DECLARE_COMPLETION_ONSTACK(done); + bool interrupted = false; - io_data->buf = data; - io_data->ep = ep->ep; - io_data->req = req; - io_data->ffs = epfile->ffs; + req = ep->req; + req->buf = data; + req->length = data_len; - req->context = io_data; - req->complete = ffs_epfile_async_io_complete; + req->context = &done; + req->complete = ffs_epfile_io_complete; - ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); - if (unlikely(ret)) { - usb_ep_free_request(ep->ep, req); - goto error_lock; - } - ret = -EIOCBQUEUED; + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + if (unlikely(ret < 0)) + goto error_lock; - spin_unlock_irq(&epfile->ffs->eps_lock); - } else { - DECLARE_COMPLETION_ONSTACK(done); + spin_unlock_irq(&epfile->ffs->eps_lock); - req = ep->req; - req->buf = data; - req->length = data_len; + if (unlikely(wait_for_completion_interruptible(&done))) { + /* + * To avoid race condition with ffs_epfile_io_complete, + * dequeue the request first then check + * status. usb_ep_dequeue API should guarantee no race + * condition with req->complete callback. + */ + usb_ep_dequeue(ep->ep, req); + interrupted = ep->status < 0; + } - req->context = &done; - req->complete = ffs_epfile_io_complete; + /* + * XXX We may end up silently droping data here. Since data_len + * (i.e. req->length) may be bigger than len (after being + * rounded up to maxpacketsize), we may end up with more data + * then user space has space for. + */ + ret = interrupted ? -EINTR : ep->status; + if (io_data->read && ret > 0) { + ret = copy_to_iter(data, ret, &io_data->data); + if (!ret) + ret = -EFAULT; + } + goto error_mutex; + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { + ret = -ENOMEM; + } else { + req->buf = data; + req->length = data_len; - ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + io_data->buf = data; + io_data->ep = ep->ep; + io_data->req = req; + io_data->ffs = epfile->ffs; - spin_unlock_irq(&epfile->ffs->eps_lock); + req->context = io_data; + req->complete = ffs_epfile_async_io_complete; - if (unlikely(ret < 0)) { - /* nop */ - } else if (unlikely( - wait_for_completion_interruptible(&done))) { - ret = -EINTR; - usb_ep_dequeue(ep->ep, req); - } else { - /* - * XXX We may end up silently droping data - * here. Since data_len (i.e. req->length) may - * be bigger than len (after being rounded up - * to maxpacketsize), we may end up with more - * data then user space has space for. 
- */ - ret = ep->status; - if (io_data->read && ret > 0) { - ret = copy_to_iter(data, ret, &io_data->data); - if (!ret) - ret = -EFAULT; - } - } - kfree(data); + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + if (unlikely(ret)) { + usb_ep_free_request(ep->ep, req); + goto error_lock; } - } - mutex_unlock(&epfile->mutex); - return ret; + ret = -EIOCBQUEUED; + /* + * Do not kfree the buffer in this function. It will be freed + * by ffs_user_copy_worker. + */ + data = NULL; + } error_lock: spin_unlock_irq(&epfile->ffs->eps_lock); +error_mutex: mutex_unlock(&epfile->mutex); error: kfree(data); diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 99285b416308..51980c50546d 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -646,7 +646,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_fs_out_ep_desc.bEndpointAddress; status = usb_assign_descriptors(f, hidg_fs_descriptors, - hidg_hs_descriptors, NULL); + hidg_hs_descriptors, NULL, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index ddc3aad886b7..3a9f8f9c77bd 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -211,7 +211,7 @@ autoconf_fail: ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs, - ss_loopback_descs); + ss_loopback_descs, NULL); if (ret) return ret; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 223ccf89d226..ee9390b07c17 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -3093,7 +3093,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function, - fsg_ss_function); + fsg_ss_function, fsg_ss_function); if (ret) goto autoconf_fail; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index fb1fe96d3215..84c0ee5ebd1e 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -56,7 +56,7 @@ static const char f_midi_longname[] = "MIDI Gadget"; * USB <- IN endpoint <- rawmidi */ struct gmidi_in_port { - struct f_midi *midi; + struct snd_rawmidi_substream *substream; int active; uint8_t cable; uint8_t state; @@ -78,9 +78,7 @@ struct f_midi { struct snd_rawmidi *rmidi; u8 ms_id; - struct snd_rawmidi_substream *in_substream[MAX_PORTS]; struct snd_rawmidi_substream *out_substream[MAX_PORTS]; - struct gmidi_in_port *in_port[MAX_PORTS]; unsigned long out_triggered; struct tasklet_struct tasklet; @@ -92,6 +90,8 @@ struct f_midi { /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); unsigned int in_last_port; + + struct gmidi_in_port in_ports_array[/* in_ports */]; }; static inline struct f_midi *func_to_midi(struct usb_function *f) @@ -518,98 +518,95 @@ static void f_midi_drop_out_substreams(struct f_midi *midi) { unsigned int i; - for (i = 0; i < MAX_PORTS; i++) { - struct gmidi_in_port *port = midi->in_port[i]; - struct snd_rawmidi_substream *substream = midi->in_substream[i]; - - if (!port) - break; - - if (!port->active || !substream) - continue; - - snd_rawmidi_drop_output(substream); + for (i = 0; i < 
midi->in_ports; i++) { + struct gmidi_in_port *port = midi->in_ports_array + i; + struct snd_rawmidi_substream *substream = port->substream; + if (port->active && substream) + snd_rawmidi_drop_output(substream); } } -static void f_midi_transmit(struct f_midi *midi) +static int f_midi_do_transmit(struct f_midi *midi, struct usb_ep *ep) { - struct usb_ep *ep = midi->in_ep; - bool active; - - /* We only care about USB requests if IN endpoint is enabled */ - if (!ep || !ep->enabled) - goto drop_out; + struct usb_request *req = NULL; + unsigned int len, i; + bool active = false; + int err; - do { - struct usb_request *req = NULL; - unsigned int len, i; + /* + * We peek the request in order to reuse it if it fails to enqueue on + * its endpoint + */ + len = kfifo_peek(&midi->in_req_fifo, &req); + if (len != 1) { + ERROR(midi, "%s: Couldn't get usb request\n", __func__); + return -1; + } - active = false; + /* + * If buffer overrun, then we ignore this transmission. + * IMPORTANT: This will cause the user-space rawmidi device to block + * until a) usb requests have been completed or b) snd_rawmidi_write() + * times out. + */ + if (req->length > 0) + return 0; - /* We peek the request in order to reuse it if it fails - * to enqueue on its endpoint */ - len = kfifo_peek(&midi->in_req_fifo, &req); - if (len != 1) { - ERROR(midi, "%s: Couldn't get usb request\n", __func__); - goto drop_out; - } + for (i = midi->in_last_port; i < midi->in_ports; ++i) { + struct gmidi_in_port *port = midi->in_ports_array + i; + struct snd_rawmidi_substream *substream = port->substream; - /* If buffer overrun, then we ignore this transmission. - * IMPORTANT: This will cause the user-space rawmidi device to block until a) usb - * requests have been completed or b) snd_rawmidi_write() times out. */ - if (req->length > 0) - return; + if (!port->active || !substream) + continue; - for (i = midi->in_last_port; i < MAX_PORTS; i++) { - struct gmidi_in_port *port = midi->in_port[i]; - struct snd_rawmidi_substream *substream = midi->in_substream[i]; + while (req->length + 3 < midi->buflen) { + uint8_t b; - if (!port) { - /* Reset counter when we reach the last available port */ - midi->in_last_port = 0; + if (snd_rawmidi_transmit(substream, &b, 1) != 1) { + port->active = 0; break; } + f_midi_transmit_byte(req, port, b); + } - if (!port->active || !substream) - continue; + active = !!port->active; + if (active) + break; + } + midi->in_last_port = active ? i : 0; - while (req->length + 3 < midi->buflen) { - uint8_t b; + if (req->length <= 0) + goto done; - if (snd_rawmidi_transmit(substream, &b, 1) != 1) { - port->active = 0; - break; - } - f_midi_transmit_byte(req, port, b); - } + err = usb_ep_queue(ep, req, GFP_ATOMIC); + if (err < 0) { + ERROR(midi, "%s failed to queue req: %d\n", + midi->in_ep->name, err); + req->length = 0; /* Re-use request next time. */ + } else { + /* Upon success, put request at the back of the queue. */ + kfifo_skip(&midi->in_req_fifo); + kfifo_put(&midi->in_req_fifo, req); + } - active = !!port->active; - /* Check if last port is still active, which means that - * there is still data on that substream but this current - * request run out of space. */ - if (active) { - midi->in_last_port = i; - /* There is no need to re-iterate though midi ports. 
*/ - break; - } - } +done: + return active; +} - if (req->length > 0) { - int err; +static void f_midi_transmit(struct f_midi *midi) +{ + struct usb_ep *ep = midi->in_ep; + int ret; - err = usb_ep_queue(ep, req, GFP_ATOMIC); - if (err < 0) { - ERROR(midi, "%s failed to queue req: %d\n", - midi->in_ep->name, err); - req->length = 0; /* Re-use request next time. */ - } else { - /* Upon success, put request at the back of the queue. */ - kfifo_skip(&midi->in_req_fifo); - kfifo_put(&midi->in_req_fifo, req); - } - } - } while (active); + /* We only care about USB requests if IN endpoint is enabled */ + if (!ep || !ep->enabled) + goto drop_out; + + do { + ret = f_midi_do_transmit(midi, ep); + if (ret < 0) + goto drop_out; + } while (ret); return; @@ -626,13 +623,15 @@ static void f_midi_in_tasklet(unsigned long data) static int f_midi_in_open(struct snd_rawmidi_substream *substream) { struct f_midi *midi = substream->rmidi->private_data; + struct gmidi_in_port *port; - if (!midi->in_port[substream->number]) + if (substream->number >= midi->in_ports) return -EINVAL; VDBG(midi, "%s()\n", __func__); - midi->in_substream[substream->number] = substream; - midi->in_port[substream->number]->state = STATE_UNKNOWN; + port = midi->in_ports_array + substream->number; + port->substream = substream; + port->state = STATE_UNKNOWN; return 0; } @@ -648,11 +647,11 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct f_midi *midi = substream->rmidi->private_data; - if (!midi->in_port[substream->number]) + if (substream->number >= midi->in_ports) return; VDBG(midi, "%s() %d\n", __func__, up); - midi->in_port[substream->number]->active = up; + midi->in_ports_array[substream->number].active = up; if (up) tasklet_hi_schedule(&midi->tasklet); } @@ -1128,14 +1127,11 @@ static void f_midi_free(struct usb_function *f) { struct f_midi *midi; struct f_midi_opts *opts; - int i; midi = func_to_midi(f); opts = container_of(f->fi, struct f_midi_opts, func_inst); kfree(midi->id); mutex_lock(&opts->lock); - for (i = opts->in_ports - 1; i >= 0; --i) - kfree(midi->in_port[i]); kfifo_free(&midi->in_req_fifo); kfree(midi); --opts->refcnt; @@ -1163,7 +1159,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f) static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) { - struct f_midi *midi; + struct f_midi *midi = NULL; struct f_midi_opts *opts; int status, i; @@ -1172,37 +1168,26 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) mutex_lock(&opts->lock); /* sanity check */ if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) { - mutex_unlock(&opts->lock); - return ERR_PTR(-EINVAL); + status = -EINVAL; + goto setup_fail; } /* allocate and initialize one new instance */ - midi = kzalloc(sizeof(*midi), GFP_KERNEL); + midi = kzalloc( + sizeof(*midi) + opts->in_ports * sizeof(*midi->in_ports_array), + GFP_KERNEL); if (!midi) { - mutex_unlock(&opts->lock); - return ERR_PTR(-ENOMEM); + status = -ENOMEM; + goto setup_fail; } - for (i = 0; i < opts->in_ports; i++) { - struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL); - - if (!port) { - status = -ENOMEM; - mutex_unlock(&opts->lock); - goto setup_fail; - } - - port->midi = midi; - port->active = 0; - port->cable = i; - midi->in_port[i] = port; - } + for (i = 0; i < opts->in_ports; i++) + midi->in_ports_array[i].cable = i; /* set up ALSA midi devices */ midi->id = kstrdup(opts->id, GFP_KERNEL); if (opts->id && !midi->id) { status = -ENOMEM; - 
mutex_unlock(&opts->lock); goto setup_fail; } midi->in_ports = opts->in_ports; @@ -1229,8 +1214,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) return &midi->func; setup_fail: - for (--i; i >= 0; i--) - kfree(midi->in_port[i]); + mutex_unlock(&opts->lock); kfree(midi); return ERR_PTR(status); } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 7ad798ace1e5..97f0a9bc84df 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1432,7 +1432,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) fs_ncm_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, - NULL); + NULL, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c index d6396e0909ee..d43e86cea74f 100644 --- a/drivers/usb/gadget/function/f_obex.c +++ b/drivers/usb/gadget/function/f_obex.c @@ -364,7 +364,8 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f) obex_hs_ep_out_desc.bEndpointAddress = obex_fs_ep_out_desc.bEndpointAddress; - status = usb_assign_descriptors(f, fs_function, hs_function, NULL); + status = usb_assign_descriptors(f, fs_function, hs_function, NULL, + NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 157441dbfeba..0473d619d5bf 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -541,7 +541,7 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f) /* Do not try to bind Phonet twice... */ status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function, - NULL); + NULL, NULL); if (status) goto err; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 26ccad5d8680..c45104e3a64b 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1051,7 +1051,7 @@ autoconf_fail: ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_printer_function, - hs_printer_function, ss_printer_function); + hs_printer_function, ss_printer_function, NULL); if (ret) return ret; diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index e587767e374c..d99dd9542048 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -783,7 +783,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, - eth_ss_function); + eth_ss_function, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c index 6bb44d613bab..cb00ada21d9c 100644 --- a/drivers/usb/gadget/function/f_serial.c +++ b/drivers/usb/gadget/function/f_serial.c @@ -236,7 +236,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f) gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, - gser_ss_function); + gser_ss_function, NULL); if (status) goto fail; dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 
242ba5caffe5..df0189ddfdd5 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -437,7 +437,7 @@ no_iso: ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_source_sink_descs, - hs_source_sink_descs, ss_source_sink_descs); + hs_source_sink_descs, ss_source_sink_descs, NULL); if (ret) return ret; diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c index 829c78de9eba..434b983f3b4c 100644 --- a/drivers/usb/gadget/function/f_subset.c +++ b/drivers/usb/gadget/function/f_subset.c @@ -362,7 +362,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f) fs_subset_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, - ss_eth_function); + ss_eth_function, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index bad007b5a190..dfb733047a4c 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -2098,7 +2098,7 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f) uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; ret = usb_assign_descriptors(f, uasp_fs_function_desc, - uasp_hs_function_desc, uasp_ss_function_desc); + uasp_hs_function_desc, uasp_ss_function_desc, NULL); if (ret) goto ep_fail; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 6a2346b99f55..f2ac0cbc29a4 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -721,7 +721,8 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f) status = -ENOMEM; /* copy descriptors, and track endpoint copies */ - status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL); + status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL, + NULL); if (status) goto fail; return 0; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 044ca79d3cb5..186d4b162524 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1100,7 +1100,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; - ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL); + ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL, + NULL); if (ret) goto err; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 87fb0fd6aaab..5cdaf0150a4e 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1699,28 +1699,6 @@ static struct usb_gadget_driver gadgetfs_driver = { }; /*----------------------------------------------------------------------*/ - -static void gadgetfs_nop(struct usb_gadget *arg) { } - -static int gadgetfs_probe(struct usb_gadget *gadget, - struct usb_gadget_driver *driver) -{ - CHIP = gadget->name; - return -EISNAM; -} - -static struct usb_gadget_driver probe_driver = { - .max_speed = USB_SPEED_HIGH, - .bind = gadgetfs_probe, - .unbind = gadgetfs_nop, - .setup = (void *)gadgetfs_nop, - .disconnect = gadgetfs_nop, - .driver = { - .name = "nop", - }, -}; - - /* DEVICE INITIALIZATION * * fd = open ("/dev/gadget/$CHIP", O_RDWR) @@ -1971,9 +1949,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int 
silent) if (the_device) return -ESRCH; - /* fake probe to determine $CHIP */ - CHIP = NULL; - usb_gadget_probe_driver(&probe_driver); + CHIP = usb_get_gadget_udc_name(); if (!CHIP) return -ENODEV; @@ -2034,6 +2010,8 @@ gadgetfs_kill_sb (struct super_block *sb) put_dev (the_device); the_device = NULL; } + kfree(CHIP); + CHIP = NULL; } /*----------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index d6ad7e6c978c..7c289416f87d 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -74,7 +74,6 @@ config USB_BCM63XX_UDC config USB_FSL_USB2 tristate "Freescale Highspeed USB DR Peripheral Controller" depends on FSL_SOC || ARCH_MXC - select USB_FSL_MPH_DR_OF if OF help Some of Freescale PowerPC and i.MX processors have a High Speed Dual-Role(DR) USB controller, which supports device mode. @@ -177,7 +176,7 @@ config USB_RENESAS_USBHS_UDC config USB_RENESAS_USB3 tristate 'Renesas USB3.0 Peripheral controller' - depends on ARCH_SHMOBILE || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST help Renesas USB3.0 Peripheral controller is a USB peripheral controller that supports super, high, and full speed USB 3.0 data transfers. diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 7f77db5d1278..aae7458d8986 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -581,8 +581,13 @@ err0: void bdc_udc_exit(struct bdc *bdc) { + unsigned long flags; + dev_dbg(bdc->dev, "%s()\n", __func__); + spin_lock_irqsave(&bdc->lock, flags); bdc_ep_disable(bdc->bdc_ep_array[1]); + spin_unlock_irqrestore(&bdc->lock, flags); + usb_del_gadget_udc(&bdc->gadget); bdc_free_ep(bdc); } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 79fe6b77ee44..8f32b5ee7734 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -49,7 +49,6 @@ #endif #include <mach/hardware.h> -#include <mach/platform.h> /* * USB device configuration structure @@ -147,9 +146,7 @@ struct lpc32xx_udc { u32 io_p_size; void __iomem *udp_baseaddr; int udp_irq[4]; - struct clk *usb_pll_clk; struct clk *usb_slv_clk; - struct clk *usb_otg_clk; /* DMA support */ u32 *udca_v_base; @@ -210,16 +207,6 @@ static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g) #define UDCA_BUFF_SIZE (128) -/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will - * be replaced with an inremap()ed pointer - * */ -#define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64) - -/* USB_CTRL bit defines */ -#define USB_SLAVE_HCLK_EN (1 << 24) -#define USB_HOST_NEED_CLK_EN (1 << 21) -#define USB_DEV_NEED_CLK_EN (1 << 22) - /********************************************************************** * USB device controller register offsets **********************************************************************/ @@ -639,9 +626,6 @@ static void isp1301_udc_configure(struct lpc32xx_udc *udc) i2c_smbus_write_byte_data(udc->isp1301_i2c_client, ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD); - /* Enable usb_need_clk clock after transceiver is initialized */ - writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL); - dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00)); dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", @@ -980,31 +964,13 @@ static void udc_clk_set(struct lpc32xx_udc *udc, int enable) return; udc->clocked = 1; - - /* 48MHz PLL 
up */ - clk_enable(udc->usb_pll_clk); - - /* Enable the USB device clock */ - writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, - USB_CTRL); - - clk_enable(udc->usb_otg_clk); + clk_prepare_enable(udc->usb_slv_clk); } else { if (!udc->clocked) return; udc->clocked = 0; - - /* Never disable the USB_HCLK during normal operation */ - - /* 48MHz PLL dpwn */ - clk_disable(udc->usb_pll_clk); - - /* Disable the USB device clock */ - writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN, - USB_CTRL); - - clk_disable(udc->usb_otg_clk); + clk_disable_unprepare(udc->usb_slv_clk); } } @@ -3125,58 +3091,21 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) goto io_map_fail; } - /* Enable AHB slave USB clock, needed for further USB clock control */ - writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL); - - /* Get required clocks */ - udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5"); - if (IS_ERR(udc->usb_pll_clk)) { - dev_err(udc->dev, "failed to acquire USB PLL\n"); - retval = PTR_ERR(udc->usb_pll_clk); - goto pll_get_fail; - } - udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd"); + /* Get USB device clock */ + udc->usb_slv_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(udc->usb_slv_clk)) { dev_err(udc->dev, "failed to acquire USB device clock\n"); retval = PTR_ERR(udc->usb_slv_clk); goto usb_clk_get_fail; } - udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg"); - if (IS_ERR(udc->usb_otg_clk)) { - dev_err(udc->dev, "failed to acquire USB otg clock\n"); - retval = PTR_ERR(udc->usb_otg_clk); - goto usb_otg_clk_get_fail; - } - - /* Setup PLL clock to 48MHz */ - retval = clk_enable(udc->usb_pll_clk); - if (retval < 0) { - dev_err(udc->dev, "failed to start USB PLL\n"); - goto pll_enable_fail; - } - - retval = clk_set_rate(udc->usb_pll_clk, 48000); - if (retval < 0) { - dev_err(udc->dev, "failed to set USB clock rate\n"); - goto pll_set_fail; - } - - writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL); /* Enable USB device clock */ - retval = clk_enable(udc->usb_slv_clk); + retval = clk_prepare_enable(udc->usb_slv_clk); if (retval < 0) { dev_err(udc->dev, "failed to start USB device clock\n"); goto usb_clk_enable_fail; } - /* Enable USB OTG clock */ - retval = clk_enable(udc->usb_otg_clk); - if (retval < 0) { - dev_err(udc->dev, "failed to start USB otg clock\n"); - goto usb_otg_clk_enable_fail; - } - /* Setup deferred workqueue data */ udc->poweron = udc->pullup = 0; INIT_WORK(&udc->pullup_job, pullup_work); @@ -3287,19 +3216,10 @@ dma_alloc_fail: dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE, udc->udca_v_base, udc->udca_p_base); i2c_fail: - clk_disable(udc->usb_otg_clk); -usb_otg_clk_enable_fail: - clk_disable(udc->usb_slv_clk); + clk_disable_unprepare(udc->usb_slv_clk); usb_clk_enable_fail: -pll_set_fail: - clk_disable(udc->usb_pll_clk); -pll_enable_fail: - clk_put(udc->usb_otg_clk); -usb_otg_clk_get_fail: clk_put(udc->usb_slv_clk); usb_clk_get_fail: - clk_put(udc->usb_pll_clk); -pll_get_fail: iounmap(udc->udp_baseaddr); io_map_fail: release_mem_region(udc->io_p_start, udc->io_p_size); @@ -3336,12 +3256,9 @@ static int lpc32xx_udc_remove(struct platform_device *pdev) free_irq(udc->udp_irq[IRQ_USB_HP], udc); free_irq(udc->udp_irq[IRQ_USB_LP], udc); - clk_disable(udc->usb_otg_clk); - clk_put(udc->usb_otg_clk); - clk_disable(udc->usb_slv_clk); + clk_disable_unprepare(udc->usb_slv_clk); clk_put(udc->usb_slv_clk); - clk_disable(udc->usb_pll_clk); - clk_put(udc->usb_pll_clk); + iounmap(udc->udp_baseaddr); release_mem_region(udc->io_p_start, udc->io_p_size); kfree(udc); @@ -3367,7 +3284,7 @@ static int 
lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg) udc->clocked = 1; /* Kill global USB clock */ - clk_disable(udc->usb_slv_clk); + clk_disable_unprepare(udc->usb_slv_clk); } return 0; @@ -3379,7 +3296,7 @@ static int lpc32xx_udc_resume(struct platform_device *pdev) if (udc->clocked) { /* Enable global USB clock */ - clk_enable(udc->usb_slv_clk); + clk_prepare_enable(udc->usb_slv_clk); /* Enable clocking */ udc_clk_set(udc, 1); diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index b82cb14850b6..a238da906115 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -48,18 +48,157 @@ #include <linux/usb/gadget.h> #include <linux/usb/otg.h> -/* - * This driver is PXA25x only. Grab the right register definitions. - */ -#ifdef CONFIG_ARCH_PXA -#include <mach/pxa25x-udc.h> -#include <mach/hardware.h> -#endif - #ifdef CONFIG_ARCH_LUBBOCK #include <mach/lubbock.h> #endif +#define UDCCR 0x0000 /* UDC Control Register */ +#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */ +#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */ +#define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */ +#define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */ +#define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */ +#define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */ +#define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */ +#define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */ +#define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */ +#define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */ +#define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */ +#define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */ +#define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */ +#define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */ +#define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */ +#define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */ +#define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */ +#define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */ +#define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */ +#define UFNRH 0x0060 /* UDC Frame Number Register High */ +#define UFNRL 0x0064 /* UDC Frame Number Register Low */ +#define UBCR2 0x0068 /* UDC Byte Count Reg 2 */ +#define UBCR4 0x006c /* UDC Byte Count Reg 4 */ +#define UBCR7 0x0070 /* UDC Byte Count Reg 7 */ +#define UBCR9 0x0074 /* UDC Byte Count Reg 9 */ +#define UBCR12 0x0078 /* UDC Byte Count Reg 12 */ +#define UBCR14 0x007c /* UDC Byte Count Reg 14 */ +#define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */ +#define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */ +#define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */ +#define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */ +#define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */ +#define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */ +#define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */ +#define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */ +#define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */ +#define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */ +#define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */ +#define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */ +#define UDDR12 0x0B80 /* UDC Endpoint 12 Data 
Register */ +#define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */ +#define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */ +#define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */ + +#define UICR0 0x0050 /* UDC Interrupt Control Register 0 */ +#define UICR1 0x0054 /* UDC Interrupt Control Register 1 */ + +#define USIR0 0x0058 /* UDC Status Interrupt Register 0 */ +#define USIR1 0x005C /* UDC Status Interrupt Register 1 */ + +#define UDCCR_UDE (1 << 0) /* UDC enable */ +#define UDCCR_UDA (1 << 1) /* UDC active */ +#define UDCCR_RSM (1 << 2) /* Device resume */ +#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */ +#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */ +#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */ +#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */ +#define UDCCR_REM (1 << 7) /* Reset interrupt mask */ + +#define UDCCS0_OPR (1 << 0) /* OUT packet ready */ +#define UDCCS0_IPR (1 << 1) /* IN packet ready */ +#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */ +#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */ +#define UDCCS0_SST (1 << 4) /* Sent stall */ +#define UDCCS0_FST (1 << 5) /* Force stall */ +#define UDCCS0_RNE (1 << 6) /* Receive FIFO no empty */ +#define UDCCS0_SA (1 << 7) /* Setup active */ + +#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */ +#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */ +#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */ +#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */ +#define UDCCS_BI_SST (1 << 4) /* Sent stall */ +#define UDCCS_BI_FST (1 << 5) /* Force stall */ +#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */ + +#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */ +#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */ +#define UDCCS_BO_DME (1 << 3) /* DMA enable */ +#define UDCCS_BO_SST (1 << 4) /* Sent stall */ +#define UDCCS_BO_FST (1 << 5) /* Force stall */ +#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */ +#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */ + +#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */ +#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */ +#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */ +#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */ +#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */ + +#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */ +#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */ +#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right?, datasheed says '2' */ +#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */ +#endif +#ifdef CONFIG_ARCH_PXA +#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */ +#endif +#define UDCCS_IO_DME (1 << 3) /* DMA enable */ +#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */ +#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */ + +#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */ +#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */ +#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */ +#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */ +#define UDCCS_INT_SST (1 << 4) /* Sent stall */ +#define UDCCS_INT_FST (1 << 5) /* Force stall */ +#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */ + +#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */ +#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */ +#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */ +#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */ +#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */ +#define 
UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */ +#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */ +#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */ + +#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */ +#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */ +#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */ +#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */ +#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */ +#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */ +#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */ +#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */ + +#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */ +#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */ +#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */ +#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */ +#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */ +#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */ +#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */ +#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */ + +#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */ +#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */ +#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */ +#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */ +#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */ +#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */ +#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */ +#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */ + /* * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x * series processors. The UDC for the IXP 4xx series is very similar. @@ -150,25 +289,61 @@ static void pullup_on(void) mach->udc_command(PXA2XX_UDC_CMD_CONNECT); } -static void pio_irq_enable(int bEndpointAddress) +#if defined(CONFIG_CPU_BIG_ENDIAN) +/* + * IXP4xx has its buses wired up in a way that relies on never doing any + * byte swaps, independent of whether it runs in big-endian or little-endian + * mode, as explained by Krzysztof Hałasa. + * + * We only support pxa25x in little-endian mode, but it is very likely + * that it works the same way.
+ */ +static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val) +{ + iowrite32be(val, dev->regs + reg); +} + +static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg) { - bEndpointAddress &= 0xf; + return ioread32be(dev->regs + reg); +} +#else +static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val) +{ + writel(val, dev->regs + reg); +} + +static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg) +{ + return readl(dev->regs + reg); +} +#endif + +static void pio_irq_enable(struct pxa25x_ep *ep) +{ + u32 bEndpointAddress = ep->bEndpointAddress & 0xf; + if (bEndpointAddress < 8) - UICR0 &= ~(1 << bEndpointAddress); + udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) & + ~(1 << bEndpointAddress)); else { bEndpointAddress -= 8; - UICR1 &= ~(1 << bEndpointAddress); + udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) & + ~(1 << bEndpointAddress)); } } -static void pio_irq_disable(int bEndpointAddress) +static void pio_irq_disable(struct pxa25x_ep *ep) { - bEndpointAddress &= 0xf; + u32 bEndpointAddress = ep->bEndpointAddress & 0xf; + if (bEndpointAddress < 8) - UICR0 |= 1 << bEndpointAddress; + udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) | + (1 << bEndpointAddress)); else { bEndpointAddress -= 8; - UICR1 |= 1 << bEndpointAddress; + udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) | + (1 << bEndpointAddress)); } } @@ -177,22 +352,61 @@ static void pio_irq_disable(int bEndpointAddress) */ #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE) -static inline void udc_set_mask_UDCCR(int mask) +static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask) { - UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS); + u32 udccr = udc_get_reg(dev, UDCCR); + + udc_set_reg(dev, (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS), UDCCR); } -static inline void udc_clear_mask_UDCCR(int mask) +static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask) { - UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS); + u32 udccr = udc_get_reg(dev, UDCCR); + + udc_set_reg(dev, (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS), UDCCR); } -static inline void udc_ack_int_UDCCR(int mask) +static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask) { /* udccr contains the bits we dont want to change */ - __u32 udccr = UDCCR & UDCCR_MASK_BITS; + u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS; - UDCCR = udccr | (mask & ~UDCCR_MASK_BITS); + udc_set_reg(dev, udccr | (mask & ~UDCCR_MASK_BITS), UDCCR); +} + +static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep) +{ + return udc_get_reg(ep->dev, ep->regoff_udccs); +} + +static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data) +{ + udc_set_reg(ep->dev, data, ep->regoff_udccs); +} + +static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev) +{ + return udc_get_reg(dev, UDCCS0); +} + +static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data) +{ + udc_set_reg(dev, data, UDCCS0); +} + +static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep) +{ + return udc_get_reg(ep->dev, ep->regoff_uddr); +} + +static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data) +{ + udc_set_reg(ep->dev, data, ep->regoff_uddr); +} + +static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep) +{ + return udc_get_reg(ep->dev, ep->regoff_ubcr); } /* @@ -358,7 +572,7 @@ static inline void ep0_idle (struct pxa25x_udc *dev) } static int -write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max) +write_packet(struct pxa25x_ep 
*ep, struct pxa25x_request *req, unsigned max) { u8 *buf; unsigned length, count; @@ -372,7 +586,7 @@ write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max) count = length; while (likely(count--)) - *uddr = *buf++; + udc_ep_set_UDDR(ep, *buf++); return length; } @@ -392,7 +606,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) unsigned count; int is_last, is_short; - count = write_packet(ep->reg_uddr, req, max); + count = write_packet(ep, req, max); /* last packet is usually short (or a zlp) */ if (unlikely (count != max)) @@ -416,15 +630,15 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) * double buffering might work. TSP, TPC, and TFS * bit values are the same for all normal IN endpoints. */ - *ep->reg_udccs = UDCCS_BI_TPC; + udc_ep_set_UDCCS(ep, UDCCS_BI_TPC); if (is_short) - *ep->reg_udccs = UDCCS_BI_TSP; + udc_ep_set_UDCCS(ep, UDCCS_BI_TSP); /* requests complete when all IN data is in the FIFO */ if (is_last) { done (ep, req, 0); if (list_empty(&ep->queue)) - pio_irq_disable (ep->bEndpointAddress); + pio_irq_disable(ep); return 1; } @@ -432,7 +646,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) // double buffering is off in the default fifo mode, which // prevents TFS from being set here. - } while (*ep->reg_udccs & UDCCS_BI_TFS); + } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS); return 0; } @@ -442,20 +656,21 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) static inline void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag) { - UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR; - USIR0 = USIR0_IR0; + udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR); + udc_set_reg(dev, USIR0, USIR0_IR0); dev->req_pending = 0; DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n", - __func__, tag, UDCCS0, flags); + __func__, tag, udc_ep0_get_UDCCS(dev), flags); } static int write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) { + struct pxa25x_udc *dev = ep->dev; unsigned count; int is_short; - count = write_packet(&UDDR0, req, EP0_FIFO_SIZE); + count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE); ep->dev->stats.write.bytes += count; /* last packet "must be" short (or a zlp) */ @@ -468,7 +683,7 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) if (ep->dev->req_pending) ep0start(ep->dev, UDCCS0_IPR, "short IN"); else - UDCCS0 = UDCCS0_IPR; + udc_ep0_set_UDCCS(dev, UDCCS0_IPR); count = req->req.length; done (ep, req, 0); @@ -484,9 +699,9 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) if (count >= EP0_FIFO_SIZE) { count = 100; do { - if ((UDCCS0 & UDCCS0_OPR) != 0) { + if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) { /* clear OPR, generate ack */ - UDCCS0 = UDCCS0_OPR; + udc_ep0_set_UDCCS(dev, UDCCS0_OPR); break; } count--; @@ -521,7 +736,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) * UDCCS_{BO,IO}_RPC are all the same bit value. * UDCCS_{BO,IO}_RNE are all the same bit value. */ - udccs = *ep->reg_udccs; + udccs = udc_ep_get_UDCCS(ep); if (unlikely ((udccs & UDCCS_BO_RPC) == 0)) break; buf = req->req.buf + req->req.actual; @@ -530,7 +745,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) /* read all bytes from this packet */ if (likely (udccs & UDCCS_BO_RNE)) { - count = 1 + (0x0ff & *ep->reg_ubcr); + count = 1 + (0x0ff & udc_ep_get_UBCR(ep)); req->req.actual += min (count, bufferspace); } else /* zlp */ count = 0; @@ -540,7 +755,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) is_short ? 
"/S" : "", req, req->req.actual, req->req.length); while (likely (count-- != 0)) { - u8 byte = (u8) *ep->reg_uddr; + u8 byte = (u8) udc_ep_get_UDDR(ep); if (unlikely (bufferspace == 0)) { /* this happens when the driver's buffer @@ -556,7 +771,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) bufferspace--; } } - *ep->reg_udccs = UDCCS_BO_RPC; + udc_ep_set_UDCCS(ep, UDCCS_BO_RPC); /* RPC/RSP/RNE could now reflect the other packet buffer */ /* iso is one request per packet */ @@ -571,7 +786,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) if (is_short || req->req.actual == req->req.length) { done (ep, req, 0); if (list_empty(&ep->queue)) - pio_irq_disable (ep->bEndpointAddress); + pio_irq_disable(ep); return 1; } @@ -595,7 +810,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) buf = req->req.buf + req->req.actual; bufferspace = req->req.length - req->req.actual; - while (UDCCS0 & UDCCS0_RNE) { + while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) { byte = (u8) UDDR0; if (unlikely (bufferspace == 0)) { @@ -613,7 +828,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) } } - UDCCS0 = UDCCS0_OPR | UDCCS0_IPR; + udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR); /* completion */ if (req->req.actual >= req->req.length) @@ -687,8 +902,8 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) DBG(DBG_VERBOSE, "ep0 config ack%s\n", dev->has_cfr ? "" : " raced"); if (dev->has_cfr) - UDCCFR = UDCCFR_AREN|UDCCFR_ACM - |UDCCFR_MB1; + udc_set_reg(dev, UDCCFR, UDCCFR_AREN | + UDCCFR_ACM | UDCCFR_MB1); done(ep, req, 0); dev->ep0state = EP0_END_XFER; local_irq_restore (flags); @@ -696,7 +911,7 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) } if (dev->req_pending) ep0start(dev, UDCCS0_IPR, "OUT"); - if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0 + if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0 && read_ep0_fifo(ep, req))) { ep0_idle(dev); done(ep, req, 0); @@ -711,16 +926,16 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) } /* can the FIFO can satisfy the request immediately? */ } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) { - if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0 + if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0 && write_fifo(ep, req)) req = NULL; - } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0 + } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0 && read_fifo(ep, req)) { req = NULL; } if (likely(req && ep->ep.desc)) - pio_irq_enable(ep->bEndpointAddress); + pio_irq_enable(ep); } /* pio or dma irq handler advances the queue. 
*/ @@ -747,7 +962,7 @@ static void nuke(struct pxa25x_ep *ep, int status) done(ep, req, status); } if (ep->ep.desc) - pio_irq_disable (ep->bEndpointAddress); + pio_irq_disable(ep); } @@ -807,14 +1022,14 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value) local_irq_save(flags); if ((ep->bEndpointAddress & USB_DIR_IN) != 0 - && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0 + && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0 || !list_empty(&ep->queue))) { local_irq_restore(flags); return -EAGAIN; } /* FST bit is the same for control, bulk in, bulk out, interrupt in */ - *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF; + udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF); /* ep0 needs special care */ if (!ep->ep.desc) { @@ -826,7 +1041,7 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value) } else { unsigned i; for (i = 0; i < 1000; i += 20) { - if (*ep->reg_udccs & UDCCS_BI_SST) + if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST) break; udelay(20); } @@ -850,10 +1065,10 @@ static int pxa25x_ep_fifo_status(struct usb_ep *_ep) if ((ep->bEndpointAddress & USB_DIR_IN) != 0) return -EOPNOTSUPP; if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN - || (*ep->reg_udccs & UDCCS_BO_RFS) == 0) + || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0) return 0; else - return (*ep->reg_ubcr & 0xfff) + 1; + return (udc_ep_get_UBCR(ep) & 0xfff) + 1; } static void pxa25x_ep_fifo_flush(struct usb_ep *_ep) @@ -870,15 +1085,15 @@ static void pxa25x_ep_fifo_flush(struct usb_ep *_ep) /* for OUT, just read and discard the FIFO contents. */ if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { - while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0) - (void) *ep->reg_uddr; + while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0) + (void)udc_ep_get_UDDR(ep); return; } /* most IN status is the same, but ISO can't stall */ - *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR + udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC - ? 0 : UDCCS_BI_SST); + ? 0 : UDCCS_BI_SST)); } @@ -905,15 +1120,23 @@ static struct usb_ep_ops pxa25x_ep_ops = { static int pxa25x_udc_get_frame(struct usb_gadget *_gadget) { - return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff); + struct pxa25x_udc *dev; + + dev = container_of(_gadget, struct pxa25x_udc, gadget); + return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) | + (udc_get_reg(dev, UFNRL) & 0xff); } static int pxa25x_udc_wakeup(struct usb_gadget *_gadget) { + struct pxa25x_udc *udc; + + udc = container_of(_gadget, struct pxa25x_udc, gadget); + /* host may not have enabled remote wakeup */ - if ((UDCCS0 & UDCCS0_DRWF) == 0) + if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0) return -EHOSTUNREACH; - udc_set_mask_UDCCR(UDCCR_RSM); + udc_set_mask_UDCCR(udc, UDCCR_RSM); return 0; } @@ -1034,9 +1257,11 @@ udc_seq_show(struct seq_file *m, void *_d) /* registers for device and ep0 */ seq_printf(m, "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n", - UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL); + udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0), + udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0), + udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL)); - tmp = UDCCR; + tmp = udc_get_reg(dev, UDCCR); seq_printf(m, "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp, (tmp & UDCCR_REM) ? " rem" : "", @@ -1048,7 +1273,7 @@ udc_seq_show(struct seq_file *m, void *_d) (tmp & UDCCR_UDA) ? " uda" : "", (tmp & UDCCR_UDE) ? " ude" : ""); - tmp = UDCCS0; + tmp = udc_ep0_get_UDCCS(dev); seq_printf(m, "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp, (tmp & UDCCS0_SA) ? 
" sa" : "", @@ -1061,7 +1286,7 @@ udc_seq_show(struct seq_file *m, void *_d) (tmp & UDCCS0_OPR) ? " opr" : ""); if (dev->has_cfr) { - tmp = UDCCFR; + tmp = udc_get_reg(dev, UDCCFR); seq_printf(m, "udccfr %02X =%s%s\n", tmp, (tmp & UDCCFR_AREN) ? " aren" : "", @@ -1087,7 +1312,7 @@ udc_seq_show(struct seq_file *m, void *_d) desc = ep->ep.desc; if (!desc) continue; - tmp = *dev->ep [i].reg_udccs; + tmp = udc_ep_get_UDCCS(&dev->ep[i]); seq_printf(m, "%s max %d %s udccs %02x irqs %lu\n", ep->ep.name, usb_endpoint_maxp(desc), @@ -1151,14 +1376,15 @@ static const struct file_operations debug_fops = { static void udc_disable(struct pxa25x_udc *dev) { /* block all irqs */ - udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM); - UICR0 = UICR1 = 0xff; - UFNRH = UFNRH_SIM; + udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM); + udc_set_reg(dev, UICR0, 0xff); + udc_set_reg(dev, UICR1, 0xff); + udc_set_reg(dev, UFNRH, UFNRH_SIM); /* if hardware supports it, disconnect from usb */ pullup_off(); - udc_clear_mask_UDCCR(UDCCR_UDE); + udc_clear_mask_UDCCR(dev, UDCCR_UDE); ep0_idle (dev); dev->gadget.speed = USB_SPEED_UNKNOWN; @@ -1200,10 +1426,10 @@ static void udc_reinit(struct pxa25x_udc *dev) */ static void udc_enable (struct pxa25x_udc *dev) { - udc_clear_mask_UDCCR(UDCCR_UDE); + udc_clear_mask_UDCCR(dev, UDCCR_UDE); /* try to clear these bits before we enable the udc */ - udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); + udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); ep0_idle(dev); dev->gadget.speed = USB_SPEED_UNKNOWN; @@ -1215,15 +1441,15 @@ static void udc_enable (struct pxa25x_udc *dev) * - if RESET is already in progress, ack interrupt * - unmask reset interrupt */ - udc_set_mask_UDCCR(UDCCR_UDE); - if (!(UDCCR & UDCCR_UDA)) - udc_ack_int_UDCCR(UDCCR_RSTIR); + udc_set_mask_UDCCR(dev, UDCCR_UDE); + if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA)) + udc_ack_int_UDCCR(dev, UDCCR_RSTIR); if (dev->has_cfr /* UDC_RES2 is defined */) { /* pxa255 (a0+) can avoid a set_config race that could * prevent gadget drivers from configuring correctly */ - UDCCFR = UDCCFR_ACM | UDCCFR_MB1; + udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1); } else { /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1) * which could result in missing packets and interrupts. @@ -1231,15 +1457,15 @@ static void udc_enable (struct pxa25x_udc *dev) * double buffers or not; ACM/AREN bits fit into the holes. * zero bits (like USIR0_IRx) disable double buffering. 
*/ - UDC_RES1 = 0x00; - UDC_RES2 = 0x00; + udc_set_reg(dev, UDC_RES1, 0x00); + udc_set_reg(dev, UDC_RES2, 0x00); } /* enable suspend/resume and reset irqs */ - udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM); + udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM); /* enable ep0 irqs */ - UICR0 &= ~UICR0_IM0; + udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0); /* if hardware supports it, pullup D+ and wait for reset */ pullup_on(); @@ -1408,9 +1634,9 @@ static void udc_watchdog(unsigned long _dev) local_irq_disable(); if (dev->ep0state == EP0_STALL - && (UDCCS0 & UDCCS0_FST) == 0 - && (UDCCS0 & UDCCS0_SST) == 0) { - UDCCS0 = UDCCS0_FST|UDCCS0_FTF; + && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0 + && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) { + udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF); DBG(DBG_VERBOSE, "ep0 re-stall\n"); start_watchdog(dev); } @@ -1419,7 +1645,7 @@ static void udc_watchdog(unsigned long _dev) static void handle_ep0 (struct pxa25x_udc *dev) { - u32 udccs0 = UDCCS0; + u32 udccs0 = udc_ep0_get_UDCCS(dev); struct pxa25x_ep *ep = &dev->ep [0]; struct pxa25x_request *req; union { @@ -1436,7 +1662,7 @@ static void handle_ep0 (struct pxa25x_udc *dev) /* clear stall status */ if (udccs0 & UDCCS0_SST) { nuke(ep, -EPIPE); - UDCCS0 = UDCCS0_SST; + udc_ep0_set_UDCCS(dev, UDCCS0_SST); del_timer(&dev->timer); ep0_idle(dev); } @@ -1451,7 +1677,7 @@ static void handle_ep0 (struct pxa25x_udc *dev) switch (dev->ep0state) { case EP0_IDLE: /* late-breaking status? */ - udccs0 = UDCCS0; + udccs0 = udc_ep0_get_UDCCS(dev); /* start control request? */ if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE)) @@ -1462,14 +1688,14 @@ static void handle_ep0 (struct pxa25x_udc *dev) /* read SETUP packet */ for (i = 0; i < 8; i++) { - if (unlikely(!(UDCCS0 & UDCCS0_RNE))) { + if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) { bad_setup: DMSG("SETUP %d!\n", i); goto stall; } u.raw [i] = (u8) UDDR0; } - if (unlikely((UDCCS0 & UDCCS0_RNE) != 0)) + if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0)) goto bad_setup; got_setup: @@ -1545,7 +1771,7 @@ config_change: */ } DBG(DBG_VERBOSE, "protocol STALL, " - "%02x err %d\n", UDCCS0, i); + "%02x err %d\n", udc_ep0_get_UDCCS(dev), i); stall: /* the watchdog timer helps deal with cases * where udc seems to clear FST wrongly, and @@ -1592,12 +1818,12 @@ stall: * - IPR cleared * - OPR got set, without SA (likely status stage) */ - UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR); + udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR)); } break; case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ if (udccs0 & UDCCS0_OPR) { - UDCCS0 = UDCCS0_OPR|UDCCS0_FTF; + udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF); DBG(DBG_VERBOSE, "ep0in premature status\n"); if (req) done(ep, req, 0); @@ -1631,14 +1857,14 @@ stall: * also appears after some config change events. 
*/ if (udccs0 & UDCCS0_OPR) - UDCCS0 = UDCCS0_OPR; + udc_ep0_set_UDCCS(dev, UDCCS0_OPR); ep0_idle(dev); break; case EP0_STALL: - UDCCS0 = UDCCS0_FST; + udc_ep0_set_UDCCS(dev, UDCCS0_FST); break; } - USIR0 = USIR0_IR0; + udc_set_reg(dev, USIR0, USIR0_IR0); } static void handle_ep(struct pxa25x_ep *ep) @@ -1658,14 +1884,14 @@ static void handle_ep(struct pxa25x_ep *ep) // TODO check FST handling - udccs = *ep->reg_udccs; + udccs = udc_ep_get_UDCCS(ep); if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */ tmp = UDCCS_BI_TUR; if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) tmp |= UDCCS_BI_SST; tmp &= udccs; if (likely (tmp)) - *ep->reg_udccs = tmp; + udc_ep_set_UDCCS(ep, tmp); if (req && likely ((udccs & UDCCS_BI_TFS) != 0)) completed = write_fifo(ep, req); @@ -1676,13 +1902,13 @@ static void handle_ep(struct pxa25x_ep *ep) tmp = UDCCS_IO_ROF | UDCCS_IO_DME; tmp &= udccs; if (likely(tmp)) - *ep->reg_udccs = tmp; + udc_ep_set_UDCCS(ep, tmp); /* fifos can hold packets, ready for reading... */ if (likely(req)) { completed = read_fifo(ep, req); } else - pio_irq_disable (ep->bEndpointAddress); + pio_irq_disable(ep); } ep->pio_irqs++; } while (completed); @@ -1703,13 +1929,13 @@ pxa25x_udc_irq(int irq, void *_dev) dev->stats.irqs++; do { - u32 udccr = UDCCR; + u32 udccr = udc_get_reg(dev, UDCCR); handled = 0; /* SUSpend Interrupt Request */ if (unlikely(udccr & UDCCR_SUSIR)) { - udc_ack_int_UDCCR(UDCCR_SUSIR); + udc_ack_int_UDCCR(dev, UDCCR_SUSIR); handled = 1; DBG(DBG_VERBOSE, "USB suspend\n"); @@ -1722,7 +1948,7 @@ pxa25x_udc_irq(int irq, void *_dev) /* RESume Interrupt Request */ if (unlikely(udccr & UDCCR_RESIR)) { - udc_ack_int_UDCCR(UDCCR_RESIR); + udc_ack_int_UDCCR(dev, UDCCR_RESIR); handled = 1; DBG(DBG_VERBOSE, "USB resume\n"); @@ -1734,10 +1960,10 @@ pxa25x_udc_irq(int irq, void *_dev) /* ReSeT Interrupt Request - USB reset */ if (unlikely(udccr & UDCCR_RSTIR)) { - udc_ack_int_UDCCR(UDCCR_RSTIR); + udc_ack_int_UDCCR(dev, UDCCR_RSTIR); handled = 1; - if ((UDCCR & UDCCR_UDA) == 0) { + if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) { DBG(DBG_VERBOSE, "USB reset start\n"); /* reset driver and endpoints, @@ -1753,8 +1979,10 @@ pxa25x_udc_irq(int irq, void *_dev) } } else { - u32 usir0 = USIR0 & ~UICR0; - u32 usir1 = USIR1 & ~UICR1; + u32 usir0 = udc_get_reg(dev, USIR0) & + ~udc_get_reg(dev, UICR0); + u32 usir1 = udc_get_reg(dev, USIR1) & + ~udc_get_reg(dev, UICR1); int i; if (unlikely (!usir0 && !usir1)) @@ -1775,13 +2003,15 @@ pxa25x_udc_irq(int irq, void *_dev) if (i && (usir0 & tmp)) { handle_ep(&dev->ep[i]); - USIR0 |= tmp; + udc_set_reg(dev, USIR0, + udc_get_reg(dev, USIR0) | tmp); handled = 1; } #ifndef CONFIG_USB_PXA25X_SMALL if (usir1 & tmp) { handle_ep(&dev->ep[i+8]); - USIR1 |= tmp; + udc_set_reg(dev, USIR1, + udc_get_reg(dev, USIR1) | tmp); handled = 1; } #endif @@ -1826,8 +2056,8 @@ static struct pxa25x_udc memory = { USB_EP_CAPS_DIR_ALL), }, .dev = &memory, - .reg_udccs = &UDCCS0, - .reg_uddr = &UDDR0, + .regoff_udccs = UDCCS0, + .regoff_uddr = UDDR0, }, /* first group of endpoints */ @@ -1843,8 +2073,8 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 1, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS1, - .reg_uddr = &UDDR1, + .regoff_udccs = UDCCS1, + .regoff_uddr = UDDR1, }, .ep[2] = { .ep = { @@ -1858,9 +2088,9 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 2, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS2, - .reg_ubcr = &UBCR2, - .reg_uddr 
= &UDDR2, + .regoff_udccs = UDCCS2, + .regoff_ubcr = UBCR2, + .regoff_uddr = UDDR2, }, #ifndef CONFIG_USB_PXA25X_SMALL .ep[3] = { @@ -1875,8 +2105,8 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 3, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - .reg_udccs = &UDCCS3, - .reg_uddr = &UDDR3, + .regoff_udccs = UDCCS3, + .regoff_uddr = UDDR3, }, .ep[4] = { .ep = { @@ -1890,9 +2120,9 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 4, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - .reg_udccs = &UDCCS4, - .reg_ubcr = &UBCR4, - .reg_uddr = &UDDR4, + .regoff_udccs = UDCCS4, + .regoff_ubcr = UBCR4, + .regoff_uddr = UDDR4, }, .ep[5] = { .ep = { @@ -1905,8 +2135,8 @@ static struct pxa25x_udc memory = { .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 5, .bmAttributes = USB_ENDPOINT_XFER_INT, - .reg_udccs = &UDCCS5, - .reg_uddr = &UDDR5, + .regoff_udccs = UDCCS5, + .regoff_uddr = UDDR5, }, /* second group of endpoints */ @@ -1922,8 +2152,8 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 6, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS6, - .reg_uddr = &UDDR6, + .regoff_udccs = UDCCS6, + .regoff_uddr = UDDR6, }, .ep[7] = { .ep = { @@ -1937,9 +2167,9 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 7, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS7, - .reg_ubcr = &UBCR7, - .reg_uddr = &UDDR7, + .regoff_udccs = UDCCS7, + .regoff_ubcr = UBCR7, + .regoff_uddr = UDDR7, }, .ep[8] = { .ep = { @@ -1953,8 +2183,8 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 8, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - .reg_udccs = &UDCCS8, - .reg_uddr = &UDDR8, + .regoff_udccs = UDCCS8, + .regoff_uddr = UDDR8, }, .ep[9] = { .ep = { @@ -1968,9 +2198,9 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 9, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - .reg_udccs = &UDCCS9, - .reg_ubcr = &UBCR9, - .reg_uddr = &UDDR9, + .regoff_udccs = UDCCS9, + .regoff_ubcr = UBCR9, + .regoff_uddr = UDDR9, }, .ep[10] = { .ep = { @@ -1983,8 +2213,8 @@ static struct pxa25x_udc memory = { .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 10, .bmAttributes = USB_ENDPOINT_XFER_INT, - .reg_udccs = &UDCCS10, - .reg_uddr = &UDDR10, + .regoff_udccs = UDCCS10, + .regoff_uddr = UDDR10, }, /* third group of endpoints */ @@ -2000,8 +2230,8 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 11, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS11, - .reg_uddr = &UDDR11, + .regoff_udccs = UDCCS11, + .regoff_uddr = UDDR11, }, .ep[12] = { .ep = { @@ -2015,9 +2245,9 @@ static struct pxa25x_udc memory = { .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 12, .bmAttributes = USB_ENDPOINT_XFER_BULK, - .reg_udccs = &UDCCS12, - .reg_ubcr = &UBCR12, - .reg_uddr = &UDDR12, + .regoff_udccs = UDCCS12, + .regoff_ubcr = UBCR12, + .regoff_uddr = UDDR12, }, .ep[13] = { .ep = { @@ -2031,8 +2261,8 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 13, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - .reg_udccs = &UDCCS13, - .reg_uddr = &UDDR13, + .regoff_udccs = UDCCS13, + .regoff_uddr = UDDR13, }, .ep[14] = { .ep = { @@ -2046,9 +2276,9 @@ static struct pxa25x_udc memory = { .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 14, .bmAttributes = USB_ENDPOINT_XFER_ISOC, - 
.reg_udccs = &UDCCS14, - .reg_ubcr = &UBCR14, - .reg_uddr = &UDDR14, + .regoff_udccs = UDCCS14, + .regoff_ubcr = UBCR14, + .regoff_uddr = UDDR14, }, .ep[15] = { .ep = { @@ -2061,8 +2291,8 @@ static struct pxa25x_udc memory = { .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 15, .bmAttributes = USB_ENDPOINT_XFER_INT, - .reg_udccs = &UDCCS15, - .reg_uddr = &UDDR15, + .regoff_udccs = UDCCS15, + .regoff_uddr = UDDR15, }, #endif /* !CONFIG_USB_PXA25X_SMALL */ }; @@ -2109,6 +2339,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev) struct pxa25x_udc *dev = &memory; int retval, irq; u32 chiprev; + struct resource *res; pr_info("%s: version %s\n", driver_name, DRIVER_VERSION); @@ -2154,6 +2385,11 @@ static int pxa25x_udc_probe(struct platform_device *pdev) if (irq < 0) return -ENODEV; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dev->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->regs)) + return PTR_ERR(dev->regs); + dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h index 3fe5931dc21a..4b8b72d7ab37 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.h +++ b/drivers/usb/gadget/udc/pxa25x_udc.h @@ -56,9 +56,9 @@ struct pxa25x_ep { * UDDR = UDC Endpoint Data Register (the fifo) * DRCM = DMA Request Channel Map */ - volatile u32 *reg_udccs; - volatile u32 *reg_ubcr; - volatile u32 *reg_uddr; + u32 regoff_udccs; + u32 regoff_ubcr; + u32 regoff_uddr; }; struct pxa25x_request { @@ -125,6 +125,7 @@ struct pxa25x_udc { #ifdef CONFIG_USB_GADGET_DEBUG_FS struct dentry *debugfs_udc; #endif + void __iomem *regs; }; #define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget)) @@ -197,6 +198,8 @@ dump_udccs0(const char *label) (udccs0 & UDCCS0_OPR) ? " opr" : ""); } +static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *); + static void __maybe_unused dump_state(struct pxa25x_udc *dev) { @@ -228,7 +231,7 @@ dump_state(struct pxa25x_udc *dev) for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) { if (dev->ep[i].ep.desc == NULL) continue; - DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs); + DMSG ("udccs%d = %02x\n", i, udc_ep_get_UDCCS(&dev->ep[i])); } } diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index b86a6f03592e..4151597e9d28 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -443,6 +443,36 @@ err1: EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release); /** + * usb_get_gadget_udc_name - get the name of the first UDC controller + * This function returns the name of the first UDC controller in the system. + * Please note that this interface is useful only for legacy drivers which + * assume that there is only one UDC controller in the system and they need to + * get its name before initialization. There is no guarantee that the UDC + * of the returned name will still be available when the gadget driver registers + * itself. + * + * Returns pointer to string with UDC controller name on success, NULL + * otherwise. Caller should kfree() returned string.
+ */ +char *usb_get_gadget_udc_name(void) +{ + struct usb_udc *udc; + char *name = NULL; + + /* For now we take the first available UDC */ + mutex_lock(&udc_lock); + list_for_each_entry(udc, &udc_list, list) { + if (!udc->driver) { + name = kstrdup(udc->gadget->name, GFP_KERNEL); + break; + } + } + mutex_unlock(&udc_lock); + return name; +} +EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name); + +/** * usb_add_gadget_udc - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller * driver's device. diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index d7dab45b8543..3050b18b2447 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -125,9 +125,6 @@ config USB_EHCI_TT_NEWSCHED If unsure, say Y. -config USB_FSL_MPH_DR_OF - tristate - if USB_EHCI_HCD config USB_EHCI_PCI @@ -160,7 +157,6 @@ config USB_EHCI_FSL tristate "Support for Freescale PPC on-chip EHCI USB controller" depends on FSL_SOC select USB_EHCI_ROOT_HUB_TT - select USB_FSL_MPH_DR_OF if OF ---help--- Variation of ARC USB block used in some Freescale chips. diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 65a06b4382bf..a9ddd3c9ec94 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -74,7 +74,8 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o -obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o +obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o +obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c3791a01ab31..39fd95833eb8 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1901,7 +1901,7 @@ static void musb_recover_from_babble(struct musb *musb) */ static struct musb *allocate_instance(struct device *dev, - struct musb_hdrc_config *config, void __iomem *mbase) + const struct musb_hdrc_config *config, void __iomem *mbase) { struct musb *musb; struct musb_hw_ep *ep; diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index fd215fb45fd4..b6afe9e43305 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -438,7 +438,7 @@ struct musb { */ unsigned double_buffer_not_ok:1; - struct musb_hdrc_config *config; + const struct musb_hdrc_config *config; int xceiv_old_state; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 7539c3188ffc..8abfe4ec62fb 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -117,8 +117,8 @@ static void configure_channel(struct dma_channel *channel, u8 bchannel = musb_channel->idx; u16 csr = 0; - dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", - channel, packet_sz, dma_addr, len, mode); + dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n", + channel, packet_sz, &dma_addr, len, mode); if (mode) { csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; @@ -152,10 +152,10 @@ static int dma_channel_program(struct dma_channel *channel, struct musb_dma_controller *controller = musb_channel->controller; struct musb *musb = controller->private_data; - dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", + dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n", 
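On the usb_get_gadget_udc_name() helper added above: it returns a kstrdup()'d copy of the first unbound UDC's name, so ownership passes to the caller. A hypothetical legacy caller, invented purely for illustration, might use it like this:

/* Illustrative only; my_legacy_init() is not an in-tree function. */
static int my_legacy_init(void)
{
	char *udc_name = usb_get_gadget_udc_name();

	if (!udc_name)
		return -ENODEV;		/* no unbound UDC registered (yet) */

	pr_info("binding legacy gadget to UDC %s\n", udc_name);
	/* ... pass the name on to the composite/gadget layer ... */

	kfree(udc_name);		/* caller must free the kstrdup()'d copy */
	return 0;
}

As the kerneldoc notes, the named UDC may already be gone by the time the gadget driver actually registers, so the result is best treated as a hint rather than a guarantee.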
musb_channel->epnum, musb_channel->transmit ? "Tx" : "Rx", - packet_sz, dma_addr, len, mode); + packet_sz, &dma_addr, len, mode); BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index d9b0dc461439..fdab4232cfbf 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -752,6 +752,7 @@ static const struct of_device_id sunxi_musb_match[] = { { .compatible = "allwinner,sun8i-a33-musb", }, {} }; +MODULE_DEVICE_TABLE(of, sunxi_musb_match); static struct platform_driver sunxi_musb_driver = { .probe = sunxi_musb_probe, diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index 4c82077da475..e6959ccb4453 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c @@ -310,9 +310,9 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ - dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", + dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %pad len: %u(%u) packet_sz: %i(%i)\n", chdat->epnum, chdat->tx ? "tx" : "rx", - ch, dma_addr, chdat->transfer_len, len, + ch, &dma_addr, chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz); /* diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index d0b6a1cd7f62..c92a295049ad 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel, BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); - if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len)) - return false; - channel->status = MUSB_DMA_STATUS_BUSY; channel->actual_len = 0; ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len); diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 39b424f7f629..a262a4343f29 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -5,7 +5,6 @@ #include <linux/usb/usb_phy_generic.h> #include <linux/slab.h> #include <linux/clk.h> -#include <linux/regulator/consumer.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/usb/of.h> diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 5320cb8642cb..980c9dee09eb 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -118,7 +118,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data) status = USB_EVENT_VBUS; otg->state = OTG_STATE_B_PERIPHERAL; nop->phy.last_event = status; - usb_gadget_vbus_connect(otg->gadget); + if (otg->gadget) + usb_gadget_vbus_connect(otg->gadget); /* drawing a "unit load" is *always* OK, except for OTG */ nop_set_vbus_draw(nop, 100); @@ -128,7 +129,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data) } else { nop_set_vbus_draw(nop, 0); - usb_gadget_vbus_disconnect(otg->gadget); + if (otg->gadget) + usb_gadget_vbus_disconnect(otg->gadget); status = USB_EVENT_NONE; otg->state = OTG_STATE_B_IDLE; nop->phy.last_event = status; @@ -184,7 +186,10 @@ static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) } otg->gadget = gadget; - otg->state = OTG_STATE_B_IDLE; + if (otg->state == OTG_STATE_B_PERIPHERAL) + usb_gadget_vbus_connect(gadget); + else + otg->state = OTG_STATE_B_IDLE; return 0; } diff --git a/drivers/usb/phy/phy-isp1301-omap.c 
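The musb DMA logging changes above switch dma_addr_t from 0x%x to the dedicated %pad printk specifier, which takes a pointer to the dma_addr_t and prints the full value whether the type is 32 or 64 bits wide. A minimal illustration of the pattern (the helper name and arguments below are made up):

/* Illustrative only; shows the %pad convention for dma_addr_t. */
static void log_dma_mapping(struct device *dev, dma_addr_t buf_dma, size_t len)
{
	/*
	 * %pad dereferences the pointer it is given, so pass &buf_dma;
	 * a plain 0x%x would truncate (and warn) when dma_addr_t is 64-bit.
	 */
	dev_dbg(dev, "mapped %zu bytes at %pad\n", len, &buf_dma);
}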
b/drivers/usb/phy/phy-isp1301-omap.c index 3af263cc0caa..8d111ec653e4 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -258,7 +258,7 @@ static void power_down(struct isp1301 *isp) isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); } -static void power_up(struct isp1301 *isp) +static void __maybe_unused power_up(struct isp1301 *isp) { // isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN); isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND); diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile index 9e47f477b6d2..d787d05f6546 100644 --- a/drivers/usb/renesas_usbhs/Makefile +++ b/drivers/usb/renesas_usbhs/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o -renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o +renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o rcar3.o ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),) renesas_usbhs-y += mod_host.o diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 5af9ca5d54ab..baeb7d23bf24 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -25,6 +25,7 @@ #include <linux/sysfs.h> #include "common.h" #include "rcar2.h" +#include "rcar3.h" /* * image of renesas_usbhs @@ -477,18 +478,16 @@ static const struct of_device_id usbhs_of_match[] = { .data = (void *)USBHS_TYPE_RCAR_GEN2, }, { - /* Gen3 is compatible with Gen2 */ .compatible = "renesas,usbhs-r8a7795", - .data = (void *)USBHS_TYPE_RCAR_GEN2, + .data = (void *)USBHS_TYPE_RCAR_GEN3, }, { .compatible = "renesas,rcar-gen2-usbhs", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, { - /* Gen3 is compatible with Gen2 */ .compatible = "renesas,rcar-gen3-usbhs", - .data = (void *)USBHS_TYPE_RCAR_GEN2, + .data = (void *)USBHS_TYPE_RCAR_GEN3, }, { }, }; @@ -578,6 +577,13 @@ static int usbhs_probe(struct platform_device *pdev) priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe); } break; + case USBHS_TYPE_RCAR_GEN3: + priv->pfunc = usbhs_rcar3_ops; + if (!priv->dparam.pipe_configs) { + priv->dparam.pipe_configs = usbhsc_new_pipe; + priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe); + } + break; default: if (!info->platform_callback.get_id) { dev_err(&pdev->dev, "no platform callbacks"); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 657f9672ceba..664b263e4b20 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -561,7 +561,7 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep) if (!pkt) break; - usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET); + usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ESHUTDOWN); } usbhs_pipe_disable(pipe); diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 0e95d2925dc5..78e9dba701c4 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -241,7 +241,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); int timeout = 1024; - u16 val; + u16 mask = usbhs_mod_is_host(priv) ? (CSSTS | PID_MASK) : PID_MASK; /* * make sure.... 
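For the renesas_usbhs changes above: the new compatible entries carry USBHS_TYPE_RCAR_GEN3 in their .data, and the probe switch then installs usbhs_rcar3_ops. The step in between, copying the OF match data into the driver parameters, is not part of this excerpt; it presumably looks roughly like the sketch below (helper and field names are assumptions).

/* Sketch only: assumed DT-parsing step, not copied from the patch. */
static void usbhs_set_type_from_of(struct platform_device *pdev,
				   struct renesas_usbhs_driver_param *dparam)
{
	const struct of_device_id *of_id =
		of_match_device(usbhs_of_match, &pdev->dev);

	/* .data holds USBHS_TYPE_RCAR_GEN2/GEN3; 0 selects the generic path */
	dparam->type = of_id ? (uintptr_t)of_id->data : 0;
}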
@@ -265,9 +265,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe) usbhs_pipe_disable(pipe); do { - val = usbhsp_pipectrl_get(pipe); - val &= CSSTS | PID_MASK; - if (!val) + if (!(usbhsp_pipectrl_get(pipe) & mask)) return 0; udelay(10); diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c new file mode 100644 index 000000000000..38b01f2aeeb0 --- /dev/null +++ b/drivers/usb/renesas_usbhs/rcar3.c @@ -0,0 +1,54 @@ +/* + * Renesas USB driver R-Car Gen. 3 initialization and power control + * + * Copyright (C) 2016 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/io.h> +#include "common.h" +#include "rcar3.h" + +#define LPSTS 0x102 +#define UGCTRL2 0x184 /* 32-bit register */ + +/* Low Power Status register (LPSTS) */ +#define LPSTS_SUSPM 0x4000 + +/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ +#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ +#define UGCTRL2_USB0SEL_OTG 0x00000030 + +void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) +{ + iowrite32(data, priv->base + reg); +} + +static int usbhs_rcar3_power_ctrl(struct platform_device *pdev, + void __iomem *base, int enable) +{ + struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); + + usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); + + if (enable) + usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); + else + usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0); + + return 0; +} + +static int usbhs_rcar3_get_id(struct platform_device *pdev) +{ + return USBHS_GADGET; +} + +const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = { + .power_ctrl = usbhs_rcar3_power_ctrl, + .get_id = usbhs_rcar3_get_id, +}; diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h new file mode 100644 index 000000000000..5f850b23ff18 --- /dev/null +++ b/drivers/usb/renesas_usbhs/rcar3.h @@ -0,0 +1,3 @@ +#include "common.h" + +extern const struct renesas_usbhs_platform_callback usbhs_rcar3_ops; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 1074b8921a5d..2b81b24eb5aa 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -126,6 +126,10 @@ struct usb_os_desc_table { * string identifiers assigned during @bind(). If this * pointer is null after initiation, the function will not * be available at super speed. + * @ssp_descriptors: Table of super speed plus descriptors, using + * interface and string identifiers assigned during @bind(). If + * this pointer is null after initiation, the function will not + * be available at super speed plus. * @config: assigned when @usb_add_function() is called; this is the * configuration with which this function is associated. * @os_desc_table: Table of (interface id, os descriptors) pairs. 
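In usbhs_rcar3_power_ctrl() above, only the SUSPM bit of LPSTS is toggled, via usbhs_bset(priv, reg, mask, data); UGCTRL2 is a full 32-bit register and therefore gets the new usbhs_write32() instead. usbhs_bset() itself lives in common.c and is not shown here; it is presumably the usual 16-bit read-modify-write helper, roughly:

/* Sketch only: assumed shape of the read-modify-write helper. */
static void usbhs_bset_sketch(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data)
{
	u16 val = usbhs_read(priv, reg);	/* 16-bit register read */

	val &= ~mask;			/* clear the field selected by mask... */
	val |= data & mask;		/* ...then set the requested bits in it */

	usbhs_write(priv, reg, val);
}

That is why the hunk passes LPSTS_SUSPM as both mask and data when enabling the controller, and a data value of 0 when disabling it.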
The function @@ -186,6 +190,7 @@ struct usb_function { struct usb_descriptor_header **fs_descriptors; struct usb_descriptor_header **hs_descriptors; struct usb_descriptor_header **ss_descriptors; + struct usb_descriptor_header **ssp_descriptors; struct usb_configuration *config; @@ -317,6 +322,7 @@ struct usb_configuration { unsigned superspeed:1; unsigned highspeed:1; unsigned fullspeed:1; + unsigned superspeed_plus:1; struct usb_function *interface[MAX_CONFIG_INTERFACES]; }; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index d82d0068872b..5d4e151c49bf 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -595,6 +595,10 @@ struct usb_gadget_ops { * only supports HNP on a different root port. * @b_hnp_enable: OTG device feature flag, indicating that the A-Host * enabled HNP support. + * @hnp_polling_support: OTG device feature flag, indicating if the OTG device + * in peripheral mode can support HNP polling. + * @host_request_flag: OTG device feature flag, indicating if A-Peripheral + * or B-Peripheral wants to take host role. * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to * MaxPacketSize. * @is_selfpowered: if the gadget is self-powered. @@ -642,6 +646,8 @@ struct usb_gadget { unsigned b_hnp_enable:1; unsigned a_hnp_support:1; unsigned a_alt_hnp_support:1; + unsigned hnp_polling_support:1; + unsigned host_request_flag:1; unsigned quirk_ep_out_aligned_size:1; unsigned quirk_altset_not_supp:1; unsigned quirk_stall_not_supp:1; @@ -729,6 +735,16 @@ static inline int gadget_is_superspeed(struct usb_gadget *g) } /** + * gadget_is_superspeed_plus() - return true if the hardware handles + * superspeed plus + * @g: controller that might support superspeed plus + */ +static inline int gadget_is_superspeed_plus(struct usb_gadget *g) +{ + return g->max_speed >= USB_SPEED_SUPER_PLUS; +} + +/** * gadget_is_otg - return true iff the hardware is OTG-ready * @g: controller that might have a Mini-AB connector * @@ -1126,6 +1142,7 @@ extern int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)); extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); extern void usb_del_gadget_udc(struct usb_gadget *gadget); +extern char *usb_get_gadget_udc_name(void); /*-------------------------------------------------------------------------*/ @@ -1194,7 +1211,8 @@ struct usb_function; int usb_assign_descriptors(struct usb_function *f, struct usb_descriptor_header **fs, struct usb_descriptor_header **hs, - struct usb_descriptor_header **ss); + struct usb_descriptor_header **ss, + struct usb_descriptor_header **ssp); void usb_free_all_descriptors(struct usb_function *f); struct usb_descriptor_header *usb_otg_descriptor_alloc( diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 96ddfb7ab018..0b3da40a525e 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -124,7 +124,7 @@ struct musb_hdrc_platform_data { int (*set_power)(int state); /* MUSB configuration-specific details */ - struct musb_hdrc_config *config; + const struct musb_hdrc_config *config; /* Architecture specific board data */ void *board_data; diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index f728f1854829..24198e16f849 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -40,6 +40,18 @@ #define PROTO_HOST (1) #define PROTO_GADGET (2) +#define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to + * OTG and 
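With the extra parameter added to usb_assign_descriptors() above, each function driver now states explicitly which descriptor table, if any, it offers at SuperSpeedPlus; most existing callers can simply pass NULL. A minimal bind-time call might look like the sketch below, where the f_acme_* arrays are invented for illustration.

/* Illustrative only; the f_acme_* descriptor arrays are not real. */
static int f_acme_bind(struct usb_configuration *c, struct usb_function *f)
{
	/* ... allocate endpoints and fill in the descriptor arrays ... */

	/* full-, high-, super- and (new) superspeed-plus tables, in order */
	return usb_assign_descriptors(f, f_acme_fs_descs, f_acme_hs_descs,
				      f_acme_ss_descs,
				      f_acme_ssp_descs /* or NULL */);
}

gadget_is_superspeed_plus(), also added above, gives configuration code a cheap way to check whether the ssp table can ever be used on the current controller.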
EH 2.0 Chapter 6.2.3 + * Table:6-4 + */ + +#define HOST_REQUEST_FLAG 1 /* Host request flag, according to + * OTG and EH 2.0 Chapter 6.2.3 + * Table:6-5 + */ + +#define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */ + enum otg_fsm_timer { /* Standard OTG timers */ A_WAIT_VRISE, @@ -48,6 +60,7 @@ enum otg_fsm_timer { A_AIDL_BDIS, B_ASE0_BRST, A_BIDL_ADIS, + B_AIDL_BDIS, /* Auxiliary timers */ B_SE0_SRP, @@ -119,6 +132,8 @@ struct otg_fsm { /* Current usb protocol used: 0:undefine; 1:host; 2:client */ int protocol; struct mutex lock; + u8 *host_req_flag; + struct delayed_work hnp_polling_work; }; struct otg_fsm_ops { diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 4db191fe8c2c..00a47d058d83 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -184,6 +184,7 @@ struct renesas_usbhs_driver_param { }; #define USBHS_TYPE_RCAR_GEN2 1 +#define USBHS_TYPE_RCAR_GEN3 2 /* * option: diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 252ac16635dc..06d6c6228a7a 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -708,6 +708,7 @@ struct usb_otg20_descriptor { #define USB_OTG_HNP (1 << 1) /* swap host/device roles */ #define USB_OTG_ADP (1 << 2) /* support ADP */ +#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */ /*-------------------------------------------------------------------------*/ /* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ @@ -923,6 +924,12 @@ struct usb_ptm_cap_descriptor { __u8 bDevCapabilityType; } __attribute__((packed)); +/* + * The size of the descriptor for the Sublink Speed Attribute Count + * (SSAC) specified in bmAttributes[4:0]. + */ +#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4) + /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with |
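Two notes on the definitions above. USB_DT_USB_SSP_CAP_SIZE(ssac) follows the SuperSpeedPlus capability layout: the 16 fixed bytes already include one sublink speed attribute, and the zero-based SSAC field from bmAttributes[4:0] adds 4 bytes per extra attribute, so SSAC = 1 (two attributes) gives 16 + 1 * 4 = 20 bytes. The OTG definitions back HNP polling: per the OTG and EH 2.0 supplement, the B-Host is expected to poll the peripheral roughly every T_HOST_REQ_POLL milliseconds with a GET_STATUS request whose wIndex is the OTG status selector, and bit HOST_REQUEST_FLAG of the returned byte reports whether the peripheral wants the host role. A hypothetical host-side poll (this is not the fsm code added elsewhere in the series) could look like:

/* Illustrative sketch of an OTG status poll; not taken from the patch. */
static int otg_poll_host_request(struct usb_device *udev)
{
	u8 *flags;
	int ret;

	/* control transfers need a kmalloc'ed (DMA-able) buffer, not stack */
	flags = kmalloc(1, GFP_KERNEL);
	if (!flags)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      USB_REQ_GET_STATUS,
			      USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			      0, OTG_STS_SELECTOR, flags, 1,
			      USB_CTRL_GET_TIMEOUT);
	if (ret == 1)
		ret = (*flags & HOST_REQUEST_FLAG) ? 1 : 0;
	else if (ret >= 0)
		ret = -EIO;	/* short read */

	kfree(flags);
	return ret;		/* 1: peripheral requests host role, 0: not yet */
}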