34 files changed, 419 insertions, 220 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index af279458b614..b3e92fbe336c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1593,7 +1593,7 @@ S: Supported EMBEDDED LINUX P: Paul Gortmaker M: paul.gortmaker@windriver.com -P David Woodhouse +P: David Woodhouse M: dwmw2@infradead.org L: linux-embedded@vger.kernel.org S: Maintained diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 94a95d7fafd6..71934856fc22 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -61,8 +61,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); #define MT_DEVICE_NONSHARED 1 #define MT_DEVICE_CACHED 2 #define MT_DEVICE_IXP2000 3 +#define MT_DEVICE_WC 4 /* - * types 4 onwards can be found in asm/mach/map.h and are undefined + * types 5 onwards can be found in asm/mach/map.h and are undefined * for ioremap */ @@ -215,11 +216,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) #define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) #define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED) +#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC) #define iounmap(cookie) __iounmap(cookie) #else #define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) #define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) +#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC) #define iounmap(cookie) __arch_iounmap(cookie) #endif diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 06f583b13999..9eb936e49cc3 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -18,13 +18,13 @@ struct map_desc { unsigned int type; }; -/* types 0-3 are defined in asm/io.h */ -#define MT_CACHECLEAN 4 -#define MT_MINICLEAN 5 -#define MT_LOW_VECTORS 6 -#define MT_HIGH_VECTORS 7 -#define MT_MEMORY 8 -#define MT_ROM 9 +/* types 0-4 are defined in asm/io.h */ +#define MT_CACHECLEAN 5 +#define MT_MINICLEAN 6 +#define MT_LOW_VECTORS 7 +#define MT_HIGH_VECTORS 8 +#define MT_MEMORY 9 +#define MT_ROM 10 #define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED #define MT_IXP2000_DEVICE MT_DEVICE_IXP2000 diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c index 826010d5d014..2baeaeb0c900 100644 --- a/arch/arm/mach-omap1/mcbsp.c +++ b/arch/arm/mach-omap1/mcbsp.c @@ -159,6 +159,7 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = { #ifdef CONFIG_ARCH_OMAP730 static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = { { + .phys_base = OMAP730_MCBSP1_BASE, .virt_base = io_p2v(OMAP730_MCBSP1_BASE), .dma_rx_sync = OMAP_DMA_MCBSP1_RX, .dma_tx_sync = OMAP_DMA_MCBSP1_TX, @@ -167,6 +168,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = { .ops = &omap1_mcbsp_ops, }, { + .phys_base = OMAP730_MCBSP2_BASE, .virt_base = io_p2v(OMAP730_MCBSP2_BASE), .dma_rx_sync = OMAP_DMA_MCBSP3_RX, .dma_tx_sync = OMAP_DMA_MCBSP3_TX, @@ -184,6 +186,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = { #ifdef CONFIG_ARCH_OMAP15XX static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { { + .phys_base = OMAP1510_MCBSP1_BASE, .virt_base = OMAP1510_MCBSP1_BASE, .dma_rx_sync = OMAP_DMA_MCBSP1_RX, .dma_tx_sync = OMAP_DMA_MCBSP1_TX, @@ -193,6 +196,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { .clk_name = 
"mcbsp_clk", }, { + .phys_base = OMAP1510_MCBSP2_BASE, .virt_base = io_p2v(OMAP1510_MCBSP2_BASE), .dma_rx_sync = OMAP_DMA_MCBSP2_RX, .dma_tx_sync = OMAP_DMA_MCBSP2_TX, @@ -201,6 +205,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { .ops = &omap1_mcbsp_ops, }, { + .phys_base = OMAP1510_MCBSP3_BASE, .virt_base = OMAP1510_MCBSP3_BASE, .dma_rx_sync = OMAP_DMA_MCBSP3_RX, .dma_tx_sync = OMAP_DMA_MCBSP3_TX, @@ -219,6 +224,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { #ifdef CONFIG_ARCH_OMAP16XX static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { { + .phys_base = OMAP1610_MCBSP1_BASE, .virt_base = OMAP1610_MCBSP1_BASE, .dma_rx_sync = OMAP_DMA_MCBSP1_RX, .dma_tx_sync = OMAP_DMA_MCBSP1_TX, @@ -228,6 +234,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { .clk_name = "mcbsp_clk", }, { + .phys_base = OMAP1610_MCBSP2_BASE, .virt_base = io_p2v(OMAP1610_MCBSP2_BASE), .dma_rx_sync = OMAP_DMA_MCBSP2_RX, .dma_tx_sync = OMAP_DMA_MCBSP2_TX, @@ -236,6 +243,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { .ops = &omap1_mcbsp_ops, }, { + .phys_base = OMAP1610_MCBSP3_BASE, .virt_base = OMAP1610_MCBSP3_BASE, .dma_rx_sync = OMAP_DMA_MCBSP3_RX, .dma_tx_sync = OMAP_DMA_MCBSP3_TX, diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c index 27eb6e3ca926..b261f1f80b5e 100644 --- a/arch/arm/mach-omap2/mcbsp.c +++ b/arch/arm/mach-omap2/mcbsp.c @@ -134,6 +134,7 @@ static struct omap_mcbsp_ops omap2_mcbsp_ops = { #ifdef CONFIG_ARCH_OMAP24XX static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = { { + .phys_base = OMAP24XX_MCBSP1_BASE, .virt_base = IO_ADDRESS(OMAP24XX_MCBSP1_BASE), .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX, .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX, @@ -143,6 +144,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = { .clk_name = "mcbsp_clk", }, { + .phys_base = OMAP24XX_MCBSP2_BASE, .virt_base = IO_ADDRESS(OMAP24XX_MCBSP2_BASE), .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX, .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX, @@ -161,6 +163,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = { #ifdef CONFIG_ARCH_OMAP34XX static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { { + .phys_base = OMAP34XX_MCBSP1_BASE, .virt_base = IO_ADDRESS(OMAP34XX_MCBSP1_BASE), .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX, .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX, @@ -170,6 +173,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { .clk_name = "mcbsp_clk", }, { + .phys_base = OMAP34XX_MCBSP2_BASE, .virt_base = IO_ADDRESS(OMAP34XX_MCBSP2_BASE), .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX, .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX, diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 25d9a11eb617..a713e40e1f1a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -211,6 +211,12 @@ static struct mem_type mem_types[] = { PMD_SECT_TEX(1), .domain = DOMAIN_IO, }, + [MT_DEVICE_WC] = { /* ioremap_wc */ + .prot_pte = PROT_PTE_DEVICE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PROT_SECT_DEVICE, + .domain = DOMAIN_IO, + }, [MT_CACHECLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, @@ -273,6 +279,20 @@ static void __init build_mem_type_table(void) } /* + * On non-Xscale3 ARMv5-and-older systems, use CB=01 + * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3 + * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable + * in xsc3 parlance, Uncached Normal in ARMv6 parlance). 
+ */ + if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) { + mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1); + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); + } else { + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE; + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; + } + + /* * ARMv5 and lower, bit 4 must be set for page tables. * (was: cache "update-able on write" bit on ARM610) * However, Xscale cores require this bit to be cleared. diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c index 2f8627218839..0a38f0b396eb 100644 --- a/arch/arm/plat-mxc/clock.c +++ b/arch/arm/plat-mxc/clock.c @@ -37,7 +37,6 @@ #include <linux/proc_fs.h> #include <linux/semaphore.h> #include <linux/string.h> -#include <linux/version.h> #include <mach/clock.h> diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 3e76ee2bc731..9e1341ebc14e 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -1488,7 +1488,7 @@ static int __init _omap_gpio_init(void) bank->chip.set = gpio_set; if (bank_is_mpuio(bank)) { bank->chip.label = "mpuio"; -#ifdef CONFIG_ARCH_OMAP1 +#ifdef CONFIG_ARCH_OMAP16XX bank->chip.dev = &omap_mpuio_device.dev; #endif bank->chip.base = OMAP_MPUIO(0); diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h index 6eb44a92871d..8fdb95e26fcd 100644 --- a/arch/arm/plat-omap/include/mach/mcbsp.h +++ b/arch/arm/plat-omap/include/mach/mcbsp.h @@ -315,6 +315,7 @@ struct omap_mcbsp_ops { }; struct omap_mcbsp_platform_data { + unsigned long phys_base; u32 virt_base; u8 dma_rx_sync, dma_tx_sync; u16 rx_irq, tx_irq; @@ -324,6 +325,7 @@ struct omap_mcbsp_platform_data { struct omap_mcbsp { struct device *dev; + unsigned long phys_base; u32 io_base; u8 id; u8 free; diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index d0844050f2d2..014d26574bb6 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c @@ -651,7 +651,7 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, omap_set_dma_dest_params(mcbsp[id].dma_tx_lch, src_port, OMAP_DMA_AMODE_CONSTANT, - mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1, + mcbsp[id].phys_base + OMAP_MCBSP_REG_DXR1, 0, 0); omap_set_dma_src_params(mcbsp[id].dma_tx_lch, @@ -712,7 +712,7 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, omap_set_dma_src_params(mcbsp[id].dma_rx_lch, src_port, OMAP_DMA_AMODE_CONSTANT, - mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1, + mcbsp[id].phys_base + OMAP_MCBSP_REG_DRR1, 0, 0); omap_set_dma_dest_params(mcbsp[id].dma_rx_lch, @@ -830,6 +830,7 @@ static int __init omap_mcbsp_probe(struct platform_device *pdev) mcbsp[id].dma_tx_lch = -1; mcbsp[id].dma_rx_lch = -1; + mcbsp[id].phys_base = pdata->phys_base; mcbsp[id].io_base = pdata->virt_base; /* Default I/O is IRQ based */ mcbsp[id].io_type = OMAP_MCBSP_IRQ_IO; diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 743ccad61c60..2be166c544ca 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -80,8 +80,6 @@ void smp_bogo(struct seq_file *m) i, cpu_data(i).clock_tick); } -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); - extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; @@ -120,9 +118,9 @@ void __cpuinit smp_callin(void) while (!cpu_isset(cpuid, smp_commenced_mask)) rmb(); - spin_lock(&call_lock); + ipi_call_lock(); cpu_set(cpuid, cpu_online_map); - spin_unlock(&call_lock); + ipi_call_unlock(); /* idle thread is expected to have preempt 
disabled */ preempt_disable(); @@ -1305,10 +1303,6 @@ int __cpu_disable(void) c->core_id = 0; c->proc_id = -1; - spin_lock(&call_lock); - cpu_clear(cpu, cpu_online_map); - spin_unlock(&call_lock); - smp_wmb(); /* Make sure no interrupts point to this cpu. */ @@ -1318,6 +1312,10 @@ int __cpu_disable(void) mdelay(1); local_irq_disable(); + ipi_call_lock(); + cpu_clear(cpu, cpu_online_map); + ipi_call_unlock(); + return 0; } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index ae8494944c45..11c8c19f0fb7 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -448,8 +448,10 @@ config PATA_MARVELL tristate "Marvell PATA support via legacy mode" depends on PCI help - This option enables limited support for the Marvell 88SE6145 ATA - controller. + This option enables limited support for the Marvell 88SE61xx ATA + controllers. If you wish to use only the SATA ports then select + the AHCI driver alone. If you wish to use the PATA port or + both SATA and PATA include this driver. If unsure, say N. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index c729e6988bbb..2e1a7cb2ed5f 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -420,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = { /* board_ahci_mv */ { AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | - AHCI_HFLAG_MV_PATA), + AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, .pio_mask = 0x1f, /* pio0-4 */ @@ -487,7 +487,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -610,6 +612,15 @@ module_param(ahci_em_messages, int, 0444); MODULE_PARM_DESC(ahci_em_messages, "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); +#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) +static int marvell_enable; +#else +static int marvell_enable = 1; +#endif +module_param(marvell_enable, int, 0644); +MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); + + static inline int ahci_nr_ports(u32 cap) { return (cap & 0x1f) + 1; } @@ -732,6 +743,8 @@ static void ahci_save_initial_config(struct pci_dev *pdev, "MV_AHCI HACK: port_map %x -> %x\n", port_map, port_map & mv); + dev_printk(KERN_ERR, &pdev->dev, + "Disabling your PATA port. 
Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); port_map &= mv; } @@ -2533,6 +2546,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + /* The AHCI driver can only drive the SATA ports, the PATA driver + can drive them all so if both drivers are selected make sure + AHCI stays out of the way */ + if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) + return -ENODEV; + /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 304fdc6f1dc2..2a4c516894f0 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1315,11 +1315,6 @@ fsm_start: break; case HSM_ST_ERR: - /* make sure qc->err_mask is available to - * know what's wrong and recover - */ - WARN_ON(!(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM))); - ap->hsm_task_state = HSM_ST_IDLE; /* complete taskfile transaction */ diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 24a011b25024..0d87eec84966 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -20,29 +20,30 @@ #include <linux/ata.h> #define DRV_NAME "pata_marvell" -#define DRV_VERSION "0.1.4" +#define DRV_VERSION "0.1.6" /** - * marvell_pre_reset - check for 40/80 pin - * @link: link - * @deadline: deadline jiffies for the operation + * marvell_pata_active - check if PATA is active + * @pdev: PCI device * - * Perform the PATA port setup we need. + * Returns 1 if the PATA port may be active. We know how to check this + * for the 6145 but not the other devices */ -static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) +static int marvell_pata_active(struct pci_dev *pdev) { - struct ata_port *ap = link->ap; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int i; u32 devices; void __iomem *barp; - int i; - /* Check if our port is enabled */ + /* We don't yet know how to do this for other devices */ + if (pdev->device != 0x6145) + return 1; barp = pci_iomap(pdev, 5, 0x10); if (barp == NULL) return -ENOMEM; + printk("BAR5:"); for(i = 0; i <= 0x0F; i++) printk("%02X:%02X ", i, ioread8(barp + i)); @@ -51,9 +52,27 @@ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) devices = ioread32(barp + 0x0C); pci_iounmap(pdev, barp); - if ((pdev->device == 0x6145) && (ap->port_no == 0) && - (!(devices & 0x10))) /* PATA enable ? */ - return -ENOENT; + if (devices & 0x10) + return 1; + return 0; +} + +/** + * marvell_pre_reset - check for 40/80 pin + * @link: link + * @deadline: deadline jiffies for the operation + * + * Perform the PATA port setup we need. + */ + +static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + + if (pdev->device == 0x6145 && ap->port_no == 0 && + !marvell_pata_active(pdev)) /* PATA enable ? 
*/ + return -ENOENT; return ata_sff_prereset(link, deadline); } @@ -128,6 +147,12 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i if (pdev->device == 0x6101) ppi[1] = &ata_dummy_port_info; +#if defined(CONFIG_AHCI) || defined(CONFIG_AHCI_MODULE) + if (!marvell_pata_active(pdev)) { + printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); + return -ENODEV; + } +#endif return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL); } diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 720b8645f58a..e970b227fbce 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -322,9 +322,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, /* Try to acquire MMIO resources and fallback to PIO if * that fails */ - rc = pcim_enable_device(pdev); - if (rc) - return rc; rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); if (rc) goto use_ioports; diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 13c1d2af18ac..c815f8ecf6e6 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -667,7 +667,8 @@ static const struct pci_device_id mv_pci_tbl[] = { { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, - /* RocketRAID 1740/174x have different identifiers */ + /* RocketRAID 1720/174x have different identifiers */ + { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, { PCI_VDEVICE(TTI, 0x1740), chip_508x }, { PCI_VDEVICE(TTI, 0x1742), chip_508x }, diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 858f70610eda..1e1f3f3757ae 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -309,8 +309,6 @@ static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); -static int nv_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); static void nv_adma_qc_prep(struct ata_queued_cmd *qc); @@ -407,7 +405,7 @@ static struct scsi_host_template nv_swncq_sht = { static struct ata_port_operations nv_generic_ops = { .inherits = &ata_bmdma_port_ops, - .hardreset = nv_hardreset, + .hardreset = ATA_OP_NULL, .scr_read = nv_scr_read, .scr_write = nv_scr_write, }; @@ -1588,21 +1586,6 @@ static void nv_mcp55_thaw(struct ata_port *ap) ata_sff_thaw(ap); } -static int nv_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) -{ - int rc; - - /* SATA hardreset fails to retrieve proper device signature on - * some controllers. Request follow up SRST. 
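
Switching nv_generic_ops to .hardreset = ATA_OP_NULL above (rather than simply dropping the field) matters because of libata's ops inheritance: a plain NULL slot is filled in from the .inherits chain, while ATA_OP_NULL pins the resolved method to "no method". A sketch of the idiom; my_ops is illustrative, not from the patch:

#include <linux/libata.h>

static struct ata_port_operations my_ops = {
        .inherits  = &ata_bmdma_port_ops,
        /* Plain NULL here would inherit the bmdma hardreset;
         * ATA_OP_NULL resolves to NULL, so EH skips hardreset
         * and falls back to softreset for this port. */
        .hardreset = ATA_OP_NULL,
};
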
For more info, - * see http://bugzilla.kernel.org/show_bug.cgi?id=3352 - */ - rc = sata_sff_hardreset(link, class, deadline); - if (rc) - return rc; - return -EAGAIN; -} - static void nv_adma_error_handler(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 1eb64d08b60a..95b3ec89c126 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -208,7 +208,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) if (cpu_is_omap16xx()) ocpi_enable(); -#ifdef CONFIG_ARCH_OMAP_OTG +#ifdef CONFIG_USB_OTG if (need_transceiver) { ohci->transceiver = otg_get_transceiver(); if (ohci->transceiver) { diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 7b74238ad1c7..e980766bb84b 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -161,7 +161,7 @@ static int usb_console_setup(struct console *co, char *options) if (serial->type->set_termios) { termios->c_cflag = cflag; tty_termios_encode_baud_rate(termios, baud, baud); - serial->type->set_termios(NULL, port, &dummy); + serial->type->set_termios(tty, port, &dummy); port->port.tty = NULL; kfree(termios); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 9abcd2b329f7..e9b20173fef3 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1279,6 +1279,12 @@ static int nfs_parse_mount_options(char *raw, } } + if (errors > 0) { + dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n", + errors, (errors == 1 ? "" : "s")); + if (!sloppy) + return 0; + } return 1; out_nomem: diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index e8f450c499b0..2691926fb506 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void) static inline void rebuild_sched_domains(void) { - partition_sched_domains(0, NULL, NULL); + partition_sched_domains(1, NULL, NULL); } #endif /* !CONFIG_CPUSETS */ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 95c660c9719b..91324908fccd 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -208,6 +208,9 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw, extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, struct inet_timewait_death_row *twdr); +extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, + struct inet_timewait_death_row *twdr, int family); + static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) { diff --git a/kernel/cpuset.c b/kernel/cpuset.c index d5ab79cf516d..f227bc172690 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -14,6 +14,8 @@ * 2003-10-22 Updates by Stephen Hemminger. * 2004 May-July Rework by Paul Jackson. * 2006 Rework by Paul Menage to use generic cgroups + * 2008 Rework of the scheduler domains and CPU hotplug handling + * by Max Krasnyansky * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux @@ -236,9 +238,11 @@ static struct cpuset top_cpuset = { static DEFINE_MUTEX(callback_mutex); -/* This is ugly, but preserves the userspace API for existing cpuset +/* + * This is ugly, but preserves the userspace API for existing cpuset * users. 
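
For reference, the function this comment describes is sketched below in paraphrased form (simplified from the kernel source; the real version also passes a release_agent= mount option): a mount of the "cpuset" filesystem is transparently redirected to a cgroup mount with only the cpuset subsystem enabled and no "cpuset." file-name prefix.

#include <linux/fs.h>
#include <linux/mount.h>

static int cpuset_get_sb_sketch(struct file_system_type *fs_type,
                                int flags, const char *unused_dev_name,
                                void *data, struct vfsmount *mnt)
{
        struct file_system_type *cgroup_fs = get_fs_type("cgroup");
        int ret = -ENODEV;

        if (cgroup_fs) {
                char mountopts[] = "cpuset,noprefix";

                ret = cgroup_fs->get_sb(cgroup_fs, flags,
                                        unused_dev_name, mountopts, mnt);
                put_filesystem(cgroup_fs);
        }
        return ret;
}
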
If someone tries to mount the "cpuset" filesystem, we - * silently switch it to mount "cgroup" instead */ + * silently switch it to mount "cgroup" instead + */ static int cpuset_get_sb(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data, struct vfsmount *mnt) @@ -473,10 +477,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) } /* - * Helper routine for rebuild_sched_domains(). + * Helper routine for generate_sched_domains(). * Do cpusets a, b have overlapping cpus_allowed masks? */ - static int cpusets_overlap(struct cpuset *a, struct cpuset *b) { return cpus_intersects(a->cpus_allowed, b->cpus_allowed); } @@ -518,26 +521,15 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) } /* - * rebuild_sched_domains() - * - * This routine will be called to rebuild the scheduler's dynamic - * sched domains: - * - if the flag 'sched_load_balance' of any cpuset with non-empty - * 'cpus' changes, - * - or if the 'cpus' allowed changes in any cpuset which has that - * flag enabled, - * - or if the 'sched_relax_domain_level' of any cpuset which has - * that flag enabled and with non-empty 'cpus' changes, - * - or if any cpuset with non-empty 'cpus' is removed, - * - or if a cpu gets offlined. - * - * This routine builds a partial partition of the systems CPUs - * (the set of non-overlappping cpumask_t's in the array 'part' - * below), and passes that partial partition to the kernel/sched.c - * partition_sched_domains() routine, which will rebuild the - * schedulers load balancing domains (sched domains) as specified - * by that partial partition. A 'partial partition' is a set of - * non-overlapping subsets whose union is a subset of that set. + * generate_sched_domains() + * + * This function builds a partial partition of the system's CPUs. + * A 'partial partition' is a set of non-overlapping subsets whose + * union is a subset of that set. + * The output of this function needs to be passed to the kernel/sched.c + * partition_sched_domains() routine, which will rebuild the scheduler's + * load balancing domains (sched domains) as specified by that partial + * partition. * * See "What is sched_load_balance" in Documentation/cpusets.txt * for a background explanation of this. @@ -547,13 +539,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) * domains when operating in the severe memory shortage situations * that could cause allocation failures below. * - * Call with cgroup_mutex held. May take callback_mutex during - * call due to the kfifo_alloc() and kmalloc() calls. May nest - * a call to the get_online_cpus()/put_online_cpus() pair. - * Must not be called holding callback_mutex, because we must not - * call get_online_cpus() while holding callback_mutex. Elsewhere - * the kernel nests callback_mutex inside get_online_cpus() calls. - * So the reverse nesting would risk an ABBA deadlock. + * Must be called with cgroup_lock held. * * The three key local variables below are: * q - a linked-list queue of cpuset pointers, used to implement a @@ -588,10 +574,10 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) * element of the partition (one sched domain) to be passed to * partition_sched_domains(). 
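
To make "partial partition" concrete, a sketch with hypothetical masks (not from the patch): hand the scheduler two disjoint domains on an 8-CPU box; CPUs 6-7 appear in neither mask and so end up in no sched domain, i.e. not load balanced. Per the sched.c hunk later in this series, the caller holds the hotplug lock, and partition_sched_domains() takes ownership of (and later frees) the doms array.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/slab.h>

static void example_two_domains(void)
{
        cpumask_t *doms = kmalloc(2 * sizeof(cpumask_t), GFP_KERNEL);
        int cpu;

        if (!doms)
                return;
        cpus_clear(doms[0]);
        cpus_clear(doms[1]);
        for (cpu = 0; cpu < 4; cpu++)
                cpu_set(cpu, doms[0]);  /* domain 0: CPUs 0-3 */
        for (cpu = 4; cpu < 6; cpu++)
                cpu_set(cpu, doms[1]);  /* domain 1: CPUs 4-5 */

        get_online_cpus();
        partition_sched_domains(2, doms, NULL); /* scheduler frees doms */
        put_online_cpus();
}
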
*/ - -void rebuild_sched_domains(void) +static int generate_sched_domains(cpumask_t **domains, + struct sched_domain_attr **attributes) { - LIST_HEAD(q); /* queue of cpusets to be scanned*/ + LIST_HEAD(q); /* queue of cpusets to be scanned */ struct cpuset *cp; /* scans q */ struct cpuset **csa; /* array of all cpuset ptrs */ int csn; /* how many cpuset ptrs in csa so far */ @@ -601,23 +587,26 @@ void rebuild_sched_domains(void) int ndoms; /* number of sched domains in result */ int nslot; /* next empty doms[] cpumask_t slot */ - csa = NULL; + ndoms = 0; doms = NULL; dattr = NULL; + csa = NULL; /* Special case for the 99% of systems with one, full, sched domain */ if (is_sched_load_balance(&top_cpuset)) { - ndoms = 1; doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); if (!doms) - goto rebuild; + goto done; + dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); if (dattr) { *dattr = SD_ATTR_INIT; update_domain_attr_tree(dattr, &top_cpuset); } *doms = top_cpuset.cpus_allowed; - goto rebuild; + + ndoms = 1; + goto done; } csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); @@ -680,61 +669,141 @@ restart: } } - /* Convert <csn, csa> to <ndoms, doms> */ + /* + * Now we know how many domains to create. + * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. + */ doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); - if (!doms) - goto rebuild; + if (!doms) { + ndoms = 0; + goto done; + } + + /* + * The rest of the code, including the scheduler, can deal with + * dattr==NULL case. No need to abort if alloc fails. + */ dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); for (nslot = 0, i = 0; i < csn; i++) { struct cpuset *a = csa[i]; + cpumask_t *dp; int apn = a->pn; - if (apn >= 0) { - cpumask_t *dp = doms + nslot; - - if (nslot == ndoms) { - static int warnings = 10; - if (warnings) { - printk(KERN_WARNING - "rebuild_sched_domains confused:" - " nslot %d, ndoms %d, csn %d, i %d," - " apn %d\n", - nslot, ndoms, csn, i, apn); - warnings--; - } - continue; + if (apn < 0) { + /* Skip completed partitions */ + continue; + } + + dp = doms + nslot; + + if (nslot == ndoms) { + static int warnings = 10; + if (warnings) { + printk(KERN_WARNING + "rebuild_sched_domains confused:" + " nslot %d, ndoms %d, csn %d, i %d," + " apn %d\n", + nslot, ndoms, csn, i, apn); + warnings--; } + continue; + } - cpus_clear(*dp); - if (dattr) - *(dattr + nslot) = SD_ATTR_INIT; - for (j = i; j < csn; j++) { - struct cpuset *b = csa[j]; - - if (apn == b->pn) { - cpus_or(*dp, *dp, b->cpus_allowed); - b->pn = -1; - if (dattr) - update_domain_attr_tree(dattr - + nslot, b); - } + cpus_clear(*dp); + if (dattr) + *(dattr + nslot) = SD_ATTR_INIT; + for (j = i; j < csn; j++) { + struct cpuset *b = csa[j]; + + if (apn == b->pn) { + cpus_or(*dp, *dp, b->cpus_allowed); + if (dattr) + update_domain_attr_tree(dattr + nslot, b); + + /* Done with this partition */ + b->pn = -1; } - nslot++; } + nslot++; } BUG_ON(nslot != ndoms); -rebuild: - /* Have scheduler rebuild sched domains */ +done: + kfree(csa); + + *domains = doms; + *attributes = dattr; + return ndoms; +} + +/* + * Rebuild scheduler domains. + * + * Call with neither cgroup_mutex held nor within get_online_cpus(). + * Takes both cgroup_mutex and get_online_cpus(). + * + * Cannot be directly called from cpuset code handling changes + * to the cpuset pseudo-filesystem, because it cannot be called + * from code that already holds cgroup_mutex. 
+ */ +static void do_rebuild_sched_domains(struct work_struct *unused) +{ + struct sched_domain_attr *attr; + cpumask_t *doms; + int ndoms; + get_online_cpus(); + + /* Generate domain masks and attrs */ + cgroup_lock(); + ndoms = generate_sched_domains(&doms, &attr); + cgroup_unlock(); + + /* Have scheduler rebuild the domains */ + partition_sched_domains(ndoms, doms, attr); + put_online_cpus(); +} -done: - kfree(csa); - /* Don't kfree(doms) -- partition_sched_domains() does that. */ - /* Don't kfree(dattr) -- partition_sched_domains() does that. */ +static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); + +/* + * Rebuild scheduler domains, asynchronously via workqueue. + * + * If the flag 'sched_load_balance' of any cpuset with non-empty + * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset + * which has that flag enabled, or if any cpuset with a non-empty + * 'cpus' is removed, then call this routine to rebuild the + * scheduler's dynamic sched domains. + * + * The rebuild_sched_domains() and partition_sched_domains() + * routines must nest cgroup_lock() inside get_online_cpus(), + * but such cpuset changes as these must nest that locking the + * other way, holding cgroup_lock() for much of the code. + * + * So in order to avoid an ABBA deadlock, the cpuset code handling + * these user changes delegates the actual sched domain rebuilding + * to a separate workqueue thread, which ends up processing the + * above do_rebuild_sched_domains() function. + */ +static void async_rebuild_sched_domains(void) +{ + schedule_work(&rebuild_sched_domains_work); +} + +/* + * Accomplishes the same scheduler domain rebuild as the above + * async_rebuild_sched_domains(), however it directly calls the + * rebuild routine synchronously rather than calling it via an + * asynchronous work thread. + * + * This can only be called from code that is not holding + * cgroup_mutex (not nested in a cgroup_lock() call.) + */ +void rebuild_sched_domains(void) +{ + do_rebuild_sched_domains(NULL); } /** @@ -863,7 +932,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf) return retval; if (is_load_balanced) - rebuild_sched_domains(); + async_rebuild_sched_domains(); return 0; } @@ -1090,7 +1159,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) if (val != cs->relax_domain_level) { cs->relax_domain_level = val; if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) - rebuild_sched_domains(); + async_rebuild_sched_domains(); } return 0; @@ -1131,7 +1200,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, mutex_unlock(&callback_mutex); if (cpus_nonempty && balance_flag_changed) - rebuild_sched_domains(); + async_rebuild_sched_domains(); return 0; } @@ -1492,6 +1561,9 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) default: BUG(); } + + /* Unreachable but makes gcc happy */ + return 0; } static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) @@ -1504,6 +1576,9 @@ static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) default: BUG(); } + + /* Unreachable but makes gcc happy */ + return 0; } @@ -1692,15 +1767,9 @@ static struct cgroup_subsys_state *cpuset_create( } /* - * Locking note on the strange update_flag() call below: - * * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which - * will call rebuild_sched_domains(). 
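
A property the new async path relies on, sketched generically below (names illustrative, not from the patch): schedule_work() on an item that is already pending returns 0 and queues nothing, so a burst of cpuset updates coalesces into a single domain rebuild rather than one rebuild per write.

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void expensive_rebuild(struct work_struct *unused)
{
        /* the heavyweight rebuild runs here, outside the caller's locks */
}
static DECLARE_WORK(example_work, expensive_rebuild);

static void request_rebuild(void)
{
        if (!schedule_work(&example_work))
                pr_debug("rebuild already pending, coalesced\n");
}
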
The get_online_cpus() - * call in rebuild_sched_domains() must not be made while holding - * callback_mutex. Elsewhere the kernel nests callback_mutex inside - * get_online_cpus() calls. So the reverse nesting would risk an - * ABBA deadlock. + * will call async_rebuild_sched_domains(). */ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) @@ -1719,7 +1788,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) struct cgroup_subsys cpuset_subsys = { .name = "cpuset", .create = cpuset_create, - .destroy = cpuset_destroy, + .destroy = cpuset_destroy, .can_attach = cpuset_can_attach, .attach = cpuset_attach, .populate = cpuset_populate, @@ -1811,7 +1880,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) } /* - * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs + * If CPU and/or memory hotplug handlers, below, unplug any CPUs * or memory nodes, we need to walk over the cpuset hierarchy, * removing that CPU or node from all cpusets. If this removes the * last CPU or node from a cpuset, then move the tasks in the empty @@ -1903,35 +1972,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root) } /* - * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track - * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to - * track what's online after any CPU or memory node hotplug or unplug event. - * - * Since there are two callers of this routine, one for CPU hotplug - * events and one for memory node hotplug events, we could have coded - * two separate routines here. We code it as a single common routine - * in order to minimize text size. - */ - -static void common_cpu_mem_hotplug_unplug(int rebuild_sd) -{ - cgroup_lock(); - - top_cpuset.cpus_allowed = cpu_online_map; - top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; - scan_for_empty_cpusets(&top_cpuset); - - /* - * Scheduler destroys domains on hotplug events. - * Rebuild them based on the current settings. - */ - if (rebuild_sd) - rebuild_sched_domains(); - - cgroup_unlock(); -} - -/* * The top_cpuset tracks what CPUs and Memory Nodes are online, * period. This is necessary in order to make cpusets transparent * (of no affect) on systems that are actively using CPU hotplug @@ -1939,40 +1979,52 @@ static void common_cpu_mem_hotplug_unplug(int rebuild_sd) * * This routine ensures that top_cpuset.cpus_allowed tracks * cpu_online_map on each CPU hotplug (cpuhp) event. + * + * Called within get_online_cpus(). Needs to call cgroup_lock() + * before calling generate_sched_domains(). */ - -static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, +static int cpuset_track_online_cpus(struct notifier_block *unused_nb, unsigned long phase, void *unused_cpu) { + struct sched_domain_attr *attr; + cpumask_t *doms; + int ndoms; + switch (phase) { - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: case CPU_ONLINE: case CPU_ONLINE_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - common_cpu_mem_hotplug_unplug(1); break; + default: return NOTIFY_DONE; } + cgroup_lock(); + top_cpuset.cpus_allowed = cpu_online_map; + scan_for_empty_cpusets(&top_cpuset); + ndoms = generate_sched_domains(&doms, &attr); + cgroup_unlock(); + + /* Have scheduler rebuild the domains */ + partition_sched_domains(ndoms, doms, attr); + return NOTIFY_OK; } #ifdef CONFIG_MEMORY_HOTPLUG /* * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. 
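
The notifier shape that cpuset_track_online_cpus() registers through hotcpu_notifier() above, as a stand-alone sketch (my_cpu_callback and my_init are illustrative names):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int my_cpu_callback(struct notifier_block *nb,
                           unsigned long phase, void *hcpu)
{
        switch (phase) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* mirror cpu_online_map into private bookkeeping here */
                break;
        default:
                return NOTIFY_DONE;     /* ignore transitional phases */
        }
        return NOTIFY_OK;
}

static int __init my_init(void)
{
        hotcpu_notifier(my_cpu_callback, 0);
        return 0;
}
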
- * Call this routine anytime after you change - * node_states[N_HIGH_MEMORY]. - * See also the previous routine cpuset_handle_cpuhp(). + * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. + * See also the previous routine cpuset_track_online_cpus(). */ - void cpuset_track_online_nodes(void) { - common_cpu_mem_hotplug_unplug(0); + cgroup_lock(); + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; + scan_for_empty_cpusets(&top_cpuset); + cgroup_unlock(); } #endif @@ -1987,7 +2039,7 @@ void __init cpuset_init_smp(void) top_cpuset.cpus_allowed = cpu_online_map; top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; - hotcpu_notifier(cpuset_handle_cpuhp, 0); + hotcpu_notifier(cpuset_track_online_cpus, 0); } /** diff --git a/kernel/sched.c b/kernel/sched.c index 1a5f73c1fcdc..cc1f81b50b82 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7696,24 +7696,27 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, * and partition_sched_domains() will fallback to the single partition * 'fallback_doms', it also forces the domains to be rebuilt. * + * If doms_new==NULL it will be replaced with cpu_online_map. + * ndoms_new==0 is a special case for destroying existing domains. + * It will not create the default domain. + * * Call with hotplug lock held */ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new) { - int i, j; + int i, j, n; mutex_lock(&sched_domains_mutex); /* always unregister in case we don't destroy any domains */ unregister_sched_domain_sysctl(); - if (doms_new == NULL) - ndoms_new = 0; + n = doms_new ? ndoms_new : 0; /* Destroy deleted domains */ for (i = 0; i < ndoms_cur; i++) { - for (j = 0; j < ndoms_new; j++) { + for (j = 0; j < n; j++) { if (cpus_equal(doms_cur[i], doms_new[j]) && dattrs_equal(dattr_cur, i, dattr_new, j)) goto match1; @@ -7726,7 +7729,6 @@ match1: if (doms_new == NULL) { ndoms_cur = 0; - ndoms_new = 1; doms_new = &fallback_doms; cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); dattr_new = NULL; @@ -7763,8 +7765,13 @@ match2: int arch_reinit_sched_domains(void) { get_online_cpus(); + + /* Destroy domains first to force the rebuild */ + partition_sched_domains(0, NULL, NULL); + rebuild_sched_domains(); put_online_cpus(); + return 0; } @@ -7848,7 +7855,7 @@ static int update_sched_domains(struct notifier_block *nfb, case CPU_ONLINE_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - partition_sched_domains(0, NULL, NULL); + partition_sched_domains(1, NULL, NULL); return NOTIFY_OK; default: diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index eeee218eed80..5bbf07362172 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -188,15 +188,21 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return 0; case BRCTL_SET_BRIDGE_HELLO_TIME: + { + unsigned long t = clock_t_to_jiffies(args[1]); if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (t < HZ) + return -EINVAL; + spin_lock_bh(&br->lock); - br->bridge_hello_time = clock_t_to_jiffies(args[1]); + br->bridge_hello_time = t; if (br_is_root_bridge(br)) br->hello_time = br->bridge_hello_time; spin_unlock_bh(&br->lock); return 0; + } case BRCTL_SET_BRIDGE_MAX_AGE: if (!capable(CAP_NET_ADMIN)) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 27d6a511c8c1..158dee8b4965 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -29,11 +29,12 @@ */ static ssize_t store_bridge_parm(struct device *d, const char *buf, size_t len, - void (*set)(struct 
net_bridge *, unsigned long)) + int (*set)(struct net_bridge *, unsigned long)) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; + int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -43,9 +44,9 @@ static ssize_t store_bridge_parm(struct device *d, return -EINVAL; spin_lock_bh(&br->lock); - (*set)(br, val); + err = (*set)(br, val); spin_unlock_bh(&br->lock); - return len; + return err ? err : len; } @@ -56,12 +57,13 @@ static ssize_t show_forward_delay(struct device *d, return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); } -static void set_forward_delay(struct net_bridge *br, unsigned long val) +static int set_forward_delay(struct net_bridge *br, unsigned long val) { unsigned long delay = clock_t_to_jiffies(val); br->forward_delay = delay; if (br_is_root_bridge(br)) br->bridge_forward_delay = delay; + return 0; } static ssize_t store_forward_delay(struct device *d, @@ -80,12 +82,17 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr, jiffies_to_clock_t(to_bridge(d)->hello_time)); } -static void set_hello_time(struct net_bridge *br, unsigned long val) +static int set_hello_time(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); + + if (t < HZ) + return -EINVAL; + br->hello_time = t; if (br_is_root_bridge(br)) br->bridge_hello_time = t; + return 0; } static ssize_t store_hello_time(struct device *d, @@ -104,12 +111,13 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr, jiffies_to_clock_t(to_bridge(d)->max_age)); } -static void set_max_age(struct net_bridge *br, unsigned long val) +static int set_max_age(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); br->max_age = t; if (br_is_root_bridge(br)) br->bridge_max_age = t; + return 0; } static ssize_t store_max_age(struct device *d, struct device_attribute *attr, @@ -126,9 +134,10 @@ static ssize_t show_ageing_time(struct device *d, return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); } -static void set_ageing_time(struct net_bridge *br, unsigned long val) +static int set_ageing_time(struct net_bridge *br, unsigned long val) { br->ageing_time = clock_t_to_jiffies(val); + return 0; } static ssize_t store_ageing_time(struct device *d, @@ -180,9 +189,10 @@ static ssize_t show_priority(struct device *d, struct device_attribute *attr, (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); } -static void set_priority(struct net_bridge *br, unsigned long val) +static int set_priority(struct net_bridge *br, unsigned long val) { br_stp_set_bridge_priority(br, (u16) val); + return 0; } static ssize_t store_priority(struct device *d, struct device_attribute *attr, diff --git a/net/core/dev.c b/net/core/dev.c index 60c51f765887..e719ed29310f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1991,8 +1991,13 @@ static void net_tx_action(struct softirq_action *h) spin_unlock(root_lock); } else { if (!test_bit(__QDISC_STATE_DEACTIVATED, - &q->state)) + &q->state)) { __netif_reschedule(q); + } else { + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); + } } } } diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index d985bd613d25..743f011b9a84 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -409,3 +409,38 @@ out: } EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); + +void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, + struct inet_timewait_death_row *twdr, int family) +{ + struct 
inet_timewait_sock *tw; + struct sock *sk; + struct hlist_node *node; + int h; + + local_bh_disable(); + for (h = 0; h < (hashinfo->ehash_size); h++) { + struct inet_ehash_bucket *head = + inet_ehash_bucket(hashinfo, h); + rwlock_t *lock = inet_ehash_lockp(hashinfo, h); +restart: + write_lock(lock); + sk_for_each(sk, node, &head->twchain) { + + tw = inet_twsk(sk); + if (!net_eq(twsk_net(tw), net) || + tw->tw_family != family) + continue; + + atomic_inc(&tw->tw_refcnt); + write_unlock(lock); + inet_twsk_deschedule(tw, twdr); + inet_twsk_put(tw); + + goto restart; + } + write_unlock(lock); + } + local_bh_enable(); +} +EXPORT_SYMBOL_GPL(inet_twsk_purge); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 44c1e934824b..1b4fee20fc93 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2376,6 +2376,7 @@ static int __net_init tcp_sk_init(struct net *net) static void __net_exit tcp_sk_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv4.tcp_sock); + inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET); } static struct pernet_operations __net_initdata tcp_sk_ops = { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5b90b369ccb2..b585c850a89a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2148,6 +2148,7 @@ static int tcpv6_net_init(struct net *net) static void tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); + inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 1b1226d6653f..20633fdf7e6b 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -68,11 +68,21 @@ static const char *const dccprotos[] = { static int parse_dcc(char *data, const char *data_end, u_int32_t *ip, u_int16_t *port, char **ad_beg_p, char **ad_end_p) { + char *tmp; + /* at least 12: "AAAAAAAA P\1\n" */ while (*data++ != ' ') if (data > data_end - 12) return -1; + /* Make sure we have a newline character within the packet boundaries + * because simple_strtoul parses until the first invalid character. */ + for (tmp = data; tmp <= data_end; tmp++) + if (*tmp == '\n') + break; + if (tmp > data_end || *tmp != '\n') + return -1; + *ad_beg_p = data; *ip = simple_strtoul(data, &data, 10); diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 654a4f7f12c6..9bd03967fea4 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -45,12 +45,12 @@ static LIST_HEAD(gre_keymap_list); void nf_ct_gre_keymap_flush(void) { - struct list_head *pos, *n; + struct nf_ct_gre_keymap *km, *tmp; write_lock_bh(&nf_ct_gre_lock); - list_for_each_safe(pos, n, &gre_keymap_list) { - list_del(pos); - kfree(pos); + list_for_each_entry_safe(km, tmp, &gre_keymap_list, list) { + list_del(&km->list); + kfree(km); } write_unlock_bh(&nf_ct_gre_lock); } @@ -97,10 +97,14 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, kmp = &help->help.ct_pptp_info.keymap[dir]; if (*kmp) { /* check whether it's a retransmission */ + read_lock_bh(&nf_ct_gre_lock); list_for_each_entry(km, &gre_keymap_list, list) { - if (gre_key_cmpfn(km, t) && km == *kmp) + if (gre_key_cmpfn(km, t) && km == *kmp) { + read_unlock_bh(&nf_ct_gre_lock); return 0; + } } + read_unlock_bh(&nf_ct_gre_lock); pr_debug("trying to override keymap_%s for ct %p\n", dir == IP_CT_DIR_REPLY ? 
"reply" : "orig", ct); return -EEXIST; diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 2f9bbc058b48..1fa306be60fb 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1193,7 +1193,6 @@ static const struct sip_handler sip_handlers[] = { static int process_sip_response(struct sk_buff *skb, const char **dptr, unsigned int *datalen) { - static const struct sip_handler *handler; enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen; @@ -1214,6 +1213,8 @@ static int process_sip_response(struct sk_buff *skb, dataoff = matchoff + matchlen + 1; for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { + const struct sip_handler *handler; + handler = &sip_handlers[i]; if (handler->response == NULL) continue; @@ -1228,13 +1229,14 @@ static int process_sip_response(struct sk_buff *skb, static int process_sip_request(struct sk_buff *skb, const char **dptr, unsigned int *datalen) { - static const struct sip_handler *handler; enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen; unsigned int cseq, i; for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { + const struct sip_handler *handler; + handler = &sip_handlers[i]; if (handler->request == NULL) continue; |