author    | Linus Torvalds | 2012-07-11 16:06:54 -0700
committer | Linus Torvalds | 2012-07-11 16:06:54 -0700
commit    | 00c3e276c54ca7e102f1b771715da76b4e86a098
tree      | 6563a263166bb5acf1945914e3770260dcc38d65
parent    | 605cd83694fa175894a1c63191f1f10c449b93a8
parent    | 29f6738609e40227dabcc63bfb3b84b3726a75bd
Merge branch 'akpm' (Andrew's patch-bomb)
Merge random patches from Andrew Morton.
* Merge emailed patches from Andrew Morton <akpm@linux-foundation.org>: (32 commits)
memblock: free allocated memblock_reserved_regions later
mm: sparse: fix usemap allocation above node descriptor section
mm: sparse: fix section usemap placement calculation
xtensa: fix incorrect memset
shmem: cleanup shmem_add_to_page_cache
shmem: fix negative rss in memcg memory.stat
tmpfs: revert SEEK_DATA and SEEK_HOLE
drivers/rtc/rtc-twl.c: fix threaded IRQ to use IRQF_ONESHOT
fat: fix non-atomic NFS i_pos read
MAINTAINERS: add OMAP CPUfreq driver to OMAP Power Management section
sgi-xp: nested calls to spin_lock_irqsave()
fs: ramfs: file-nommu: add SetPageUptodate()
drivers/rtc/rtc-mxc.c: fix irq enabled interrupts warning
mm/memory_hotplug.c: release memory resources if hotadd_new_pgdat() fails
h8300/uaccess: add mising __clear_user()
h8300/uaccess: remove assignment to __gu_val in unhandled case of get_user()
h8300/time: add missing #include <asm/irq_regs.h>
h8300/signal: fix typo "statis"
h8300/pgtable: add missing #include <asm-generic/pgtable.h>
drivers/rtc/rtc-ab8500.c: ensure correct probing of the AB8500 RTC when Device Tree is enabled
...
38 files changed, 197 insertions, 247 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 03df1d15ebf3..d1d9ae6173b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4857,6 +4857,7 @@ M:	Kevin Hilman <khilman@ti.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/*omap*/*pm*
+F:	drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:	Rajendra Nayak <rnayak@ti.com>
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a09230a08e02..62ef17676b40 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
 #define VMALLOC_END	0xffffffff
 
 #define arch_enter_lazy_cpu_mode()    do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
 #endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
index 356068cd0879..8725d1ad4272 100644
--- a/arch/h8300/include/asm/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
 		break;							\
 	default:							\
 		__gu_err = __get_user_bad();				\
-		__gu_val = 0;						\
 		break;							\
 	}								\
 	(x) = __gu_val;							\
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
 	return 0;
 }
 
+#define __clear_user	clear_user
+
 #endif /* _H8300_UACCESS_H */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index fca10378701b..5adaadaf9218 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
 {
 	siginfo_t info;
 	int signr;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 32263a138aa6..e0f74191d553 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 
 #include <asm/io.h>
+#include <asm/irq_regs.h>
 #include <asm/timer.h>
 
 #define	TICK_SIZE (tick_nsec / 1000)
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 55b79ef10028..44251b974f1d 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -81,9 +81,6 @@ struct pt_regs {
 #define PTRACE_GETFPREGS	14
 #define PTRACE_SETFPREGS	15
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD	0x00000001
-
 #ifdef __KERNEL__
 
 #define user_mode(regs)		(((regs)->epsw & EPSW_nSL) == EPSW_nSL)
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index bd4e90dfe6c2..f8e66425cbf8 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -11,7 +11,6 @@
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
-#include <asm/hardirq.h>
 #include <unit/timex.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
 extern int init_clockevents(void);
 extern int init_clocksource(void);
 
-static inline void setup_jiffies_interrupt(int irq,
-					   struct irqaction *action)
-{
-	u16 tmp;
-	setup_irq(irq, action);
-	set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
-	GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-	tmp = GxICR(irq);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
index 69cae0260786..ccce35e3e179 100644
--- a/arch/mn10300/kernel/cevt-mn10300.c
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
 {
 }
 
+static inline void setup_jiffies_interrupt(int irq,
+					   struct irqaction *action)
+{
+	u16 tmp;
+	setup_irq(irq, action);
+	set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+	GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+	tmp = GxICR(irq);
+}
+
 int __init init_clockevents(void)
 {
 	struct clock_event_device *cd;
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index a5ac755dd69f..2df440105a80 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/irqreturn.h>
+
 struct clocksource;
 struct clock_event_device;
 
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 2381df83bd00..35932a8de8b8 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
 	case SC1TXIRQ:
 #ifdef CONFIG_MN10300_TTYSM1_TIMER12
 	case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
 	case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
 	case TM3IRQ:
 #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
 #endif /* CONFIG_MN10300_TTYSM1 */
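The arch/mn10300/kernel/irq.c hunk above swaps `#elif CONFIG_...` for `#elif defined(CONFIG_...)`. In `#if`/`#elif` an identifier that is not defined simply evaluates to 0 (and trips -Wundef), whereas `defined()` explicitly tests whether the symbol exists; since bool Kconfig options are either defined to 1 or not defined at all, `defined()` is the reliable form. A minimal illustration with made-up option names:

/* Illustration only; CONFIG_FOO_TIMER12/9/3 are hypothetical option names. */
#ifdef CONFIG_FOO_TIMER12
#define FOO_TIMER_IRQ	12
#elif defined(CONFIG_FOO_TIMER9)	/* tests presence of the macro */
#define FOO_TIMER_IRQ	9
#elif defined(CONFIG_FOO_TIMER3)
#define FOO_TIMER_IRQ	3
#else
#define FOO_TIMER_IRQ	0
#endif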
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 94a9c6d53e1b..b900e5afa0ae 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/bug.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 159acb02cfd4..e244ebe637e1 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index cc18fe7d8b90..c37f9832cf17 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -11,10 +11,6 @@
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2303/smc91111.c b/arch/mn10300/unit-asb2303/smc91111.c
index 43c246439413..53677694b165 100644
--- a/arch/mn10300/unit-asb2303/smc91111.c
+++ b/arch/mn10300/unit-asb2303/smc91111.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/timex.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index 758af30d1a16..4cefc224f448 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -11,10 +11,6 @@
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index e1becd6b7571..bc4adfaf815c 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
index ddb7ed010706..42f32db75087 100644
--- a/arch/mn10300/unit-asb2364/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -11,10 +11,6 @@
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9b306e550e3f..2c8d6a3d250a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 
 	/* Don't leak any random bits. */
 
-	memset(elfregs, 0, sizeof (elfregs));
+	memset(elfregs, 0, sizeof(*elfregs));
 
 	/* Note:  PS.EXCM is not set while user task is running; its
 	 * being set in regs->ps is for exception handling convenience.
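The xtensa hunk directly above is the "xtensa: fix incorrect memset" patch from the series: `sizeof(elfregs)` is the size of the *pointer*, so only 4 (or 8) bytes of the register set were being cleared. A small self-contained example of the difference:

#include <stdio.h>
#include <string.h>

struct gregset { long reg[64]; };

static void clear_regs(struct gregset *elfregs)
{
	/* Wrong: sizeof(elfregs) is the size of the pointer itself. */
	/* memset(elfregs, 0, sizeof(elfregs)); */

	/* Right: sizeof(*elfregs) is the size of the pointed-to struct. */
	memset(elfregs, 0, sizeof(*elfregs));
}

int main(void)
{
	struct gregset g;

	clear_regs(&g);
	printf("pointer: %zu bytes, struct: %zu bytes\n", sizeof(&g), sizeof(g));
	return 0;
}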
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 17bbacb1b4b1..87b251ab6ec5 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
 	if (msg->activate_gru_mq_desc_gpa !=
 	    part_uv->activate_gru_mq_desc_gpa) {
-		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+		spin_lock(&part_uv->flags_lock);
 		part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+		spin_unlock(&part_uv->flags_lock);
 		part_uv->activate_gru_mq_desc_gpa =
 		    msg->activate_gru_mq_desc_gpa;
 	}
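The xpc_uv.c hunk is the "sgi-xp: nested calls to spin_lock_irqsave()" fix: this path already runs inside another spin_lock_irqsave() section, so interrupts are off and taking the inner lock with _irqsave is at best redundant; if the inner call reuses the caller's flags variable it also clobbers the saved interrupt state that the outer unlock must restore. A kernel-style sketch of the corrected shape (structure and field names are hypothetical, not the xpc data structures):

/* Sketch only -- placeholder names, not the actual xpc code. */
static void invalidate_cached_desc(struct part_info *p)
{
	/*
	 * Called with another lock held via spin_lock_irqsave(), so IRQs are
	 * already disabled here.  A nested spin_lock_irqsave() into the same
	 * flags variable would overwrite the saved state that the outer
	 * spin_unlock_irqrestore() later restores; plain spin_lock() suffices.
	 */
	spin_lock(&p->flags_lock);
	p->flags &= ~PART_DESC_CACHED;
	spin_unlock(&p->flags_lock);
}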
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca2818a..370889d0489b 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -17,6 +17,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 
 #define AB8500_RTC_SOFF_STAT_REG	0x00
 #define AB8500_RTC_CC_CONF_REG		0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 	}
 
 	err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
-		IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+		IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
 	if (err < 0) {
 		rtc_device_unregister(rtc);
 		return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, rtc);
 
-
 	err = ab8500_sysfs_rtc_register(&pdev->dev);
 	if (err) {
 		dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct of_device_id ab8500_rtc_match[] = {
+	{ .compatible = "stericsson,ab8500-rtc", },
+	{}
+};
+
 static struct platform_driver ab8500_rtc_driver = {
 	.driver = {
 		.name = "ab8500-rtc",
 		.owner = THIS_MODULE,
+		.of_match_table = ab8500_rtc_match,
 	},
 	.probe	= ab8500_rtc_probe,
 	.remove = __devexit_p(ab8500_rtc_remove),
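The rtc-ab8500 hunk (and the rtc-twl one further down) adds IRQF_ONESHOT to a request_threaded_irq() call that passes a NULL primary handler. With no primary handler the interrupt line must stay masked until the threaded handler has run, and the genirq core of this era rejects such requests with -EINVAL unless IRQF_ONESHOT is set. The pattern, sketched with placeholder names:

/* Sketch; foo_rtc_irq_thread and the rtc pointer are hypothetical. */
err = request_threaded_irq(irq, NULL, foo_rtc_irq_thread,
			   IRQF_NO_SUSPEND | IRQF_ONESHOT,
			   "foo-rtc", rtc);
if (err < 0)
	return err;	/* without IRQF_ONESHOT this request would be refused */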
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5e1d64ee5228..e3e50d69baf8 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
 	struct platform_device *pdev = dev_id;
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;
+	unsigned long flags;
 	u32 status;
 	u32 events = 0;
 
-	spin_lock_irq(&pdata->rtc->irq_lock);
+	spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
 	status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
 	/* clear interrupt sources */
 	writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
 		events |= (RTC_PF | RTC_IRQF);
 
 	rtc_update_irq(pdata->rtc, 1, events);
-	spin_unlock_irq(&pdata->rtc->irq_lock);
+	spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 1f76320e545b..e2785479113c 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
 	clk_disable(config->clk);
 	clk_put(config->clk);
 	iounmap(config->ioaddr);
-	kfree(config);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res)
 		release_mem_region(res->start, resource_size(res));
 	platform_set_drvdata(pdev, NULL);
 	rtc_device_unregister(config->rtc);
+	kfree(config);
 
 	return 0;
 }
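The rtc-mxc hunk above converts spin_lock_irq()/spin_unlock_irq() inside the interrupt handler to the _irqsave/_irqrestore pair. A hard-IRQ handler runs with interrupts disabled, and spin_unlock_irq() would unconditionally re-enable them in that context (the "irq enabled interrupts" warning named in the commit title); saving and restoring the previous state keeps whatever was in effect on entry. Sketch with placeholder names:

static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
{
	struct foo_rtc_priv *priv = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);	/* preserve the entry IRQ state */
	/* ... read and acknowledge the interrupt status here ... */
	spin_unlock_irqrestore(&priv->lock, flags);

	return IRQ_HANDLED;
}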
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 258abeabf624..c5d06fe83bba 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
 	}
 
 	ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
-				   IRQF_TRIGGER_RISING,
+				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 				   dev_name(&rtc->dev), rtc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "IRQ is not free.\n");
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a3d81ebf6d86..0038b32cb362 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -738,22 +738,21 @@ static int fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
 			 struct inode *parent)
 {
 	int len = *lenp;
-	u32 ipos_h, ipos_m, ipos_l;
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	loff_t i_pos;
 
 	if (len < 5) {
 		*lenp = 5;
 		return 255; /* no room */
 	}
 
-	ipos_h = MSDOS_I(inode)->i_pos >> 8;
-	ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-	ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+	i_pos = fat_i_pos_read(sbi, inode);
 	*lenp = 5;
 	fh[0] = inode->i_ino;
 	fh[1] = inode->i_generation;
-	fh[2] = ipos_h;
-	fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-	fh[4] = ipos_l;
+	fh[2] = i_pos >> 8;
+	fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+	fh[4] = (i_pos & 0x0f) << 28;
 	if (parent)
 		fh[4] |= MSDOS_I(parent)->i_logstart;
 	return 3;
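The fs/fat/inode.c hunk replaces three separate reads of the 64-bit i_pos with a single fat_i_pos_read() call; on 32-bit SMP the two halves of a 64-bit field can otherwise come from different updates (a torn read), which can produce an NFS file handle that matches no inode. A self-contained userspace illustration of the locking idea (names are hypothetical, not the FAT implementation):

#include <pthread.h>
#include <stdint.h>

/* A 64-bit position that a 32-bit CPU cannot load in one instruction. */
struct fh_source {
	pthread_mutex_t lock;
	uint64_t i_pos;
};

/* Take the lock so both halves are read from the same update. */
static uint64_t i_pos_read(struct fh_source *s)
{
	uint64_t v;

	pthread_mutex_lock(&s->lock);
	v = s->i_pos;
	pthread_mutex_unlock(&s->lock);
	return v;
}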
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 98513c8ed589..7602783d7f41 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 	if (ret < 0)
 		mlog_errno(ret);
 
-	if (file->f_flags & O_SYNC)
+	if (file && (file->f_flags & O_SYNC))
 		handle->h_sync = 1;
 
 	ocfs2_commit_trans(osb, handle);
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index fbb0b478a346..d5378d028589 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
 		/* prevent the page from being discarded on memory pressure */
 		SetPageDirty(page);
+		SetPageUptodate(page);
 
 		unlock_page(page);
 		put_page(page);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 324fe08ea3b1..6d6795d46a75 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
 				  unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+				  unsigned long size,
+				  unsigned long align,
+				  unsigned long goal,
+				  unsigned long limit);
 extern void *__alloc_bootmem_low(unsigned long size,
 				 unsigned long align,
 				 unsigned long goal);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index a6bb10235148..19dc455b4f3d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
 				phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2427706f78b4..68c569fcbb66 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -694,7 +694,7 @@ typedef struct pglist_data {
 					     range, including holes */
 	int node_id;
 	wait_queue_head_t kswapd_wait;
-	struct task_struct *kswapd;
+	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
 } pg_data_t;
diff --git a/kernel/sys.c b/kernel/sys.c
index e0c8ffc50d7f..2d39a84cd857 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1788,7 +1788,6 @@ SYSCALL_DEFINE1(umask, int, mask)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
-	struct vm_area_struct *vma;
 	struct file *exe_file;
 	struct dentry *dentry;
 	int err;
@@ -1816,13 +1815,17 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 	down_write(&mm->mmap_sem);
 
 	/*
-	 * Forbid mm->exe_file change if there are mapped other files.
+	 * Forbid mm->exe_file change if old file still mapped.
 	 */
 	err = -EBUSY;
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
-						&exe_file->f_path))
-			goto exit_unlock;
+	if (mm->exe_file) {
+		struct vm_area_struct *vma;
+
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if (vma->vm_file &&
+			    path_equal(&vma->vm_file->f_path,
+				       &mm->exe_file->f_path))
+				goto exit_unlock;
 	}
 
 	/*
@@ -1835,6 +1838,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
 		goto exit_unlock;
 
+	err = 0;
 	set_mm_exe_file(mm, exe_file);
 exit_unlock:
 	up_write(&mm->mmap_sem);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index ec4fcb7a56c8..73096630cb35 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				unsigned long size, unsigned long align,
 				unsigned long goal, unsigned long limit)
 {
diff --git a/mm/compaction.c b/mm/compaction.c
index 7ea259d82a99..2f42d9528539 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -701,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		if (err) {
 			putback_lru_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
+			if (err == -ENOMEM) {
+				ret = COMPACT_PARTIAL;
+				goto out;
+			}
 		}
-
 	}
 
 out:
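Note the small `err = 0;` addition in the kernel/sys.c hunk above: `err` is preset to -EBUSY for the mapping check earlier in the function, so without clearing it on the success path prctl() would report failure even after set_mm_exe_file() succeeded. A generic sketch of this "preset error, clear it on success" pattern (all names hypothetical):

static int change_exe_file(struct ctx *c)
{
	int err = -EBUSY;

	if (old_file_still_mapped(c))
		goto out;		/* err already holds -EBUSY */

	err = -EPERM;
	if (already_changed_once(c))
		goto out;

	err = 0;			/* the success path must clear the preset code */
	install_new_file(c);
out:
	return err;
}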
diff --git a/mm/memblock.c b/mm/memblock.c
index d4382095f8bd..5cc6731b00cc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					   MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_free(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_reserve(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 	}
 }
 
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+					phys_addr_t *addr)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	*addr = __pa(memblock.reserved.regions);
+
+	return PAGE_ALIGN(sizeof(struct memblock_region) *
+			  memblock.reserved.max);
+}
+
 /**
  * memblock_double_array - double the size of the memblock regions array
  * @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
 	int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
+	/*
+	 * We need to allocated new one align to PAGE_SIZE,
+	 *   so we can free them completely later.
+	 */
+	old_alloc_size = PAGE_ALIGN(old_size);
+	new_alloc_size = PAGE_ALIGN(new_size);
 
 	/* Retrieve the slab flag */
 	if (type == &memblock.memory)
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 
 		addr = memblock_find_in_range(new_area_start + new_area_size,
 						memblock.current_limit,
-						new_size, sizeof(phys_addr_t));
+						new_alloc_size, PAGE_SIZE);
 		if (!addr && new_area_size)
 			addr = memblock_find_in_range(0,
 				min(new_area_start, memblock.current_limit),
-						new_size, sizeof(phys_addr_t));
+						new_alloc_size, PAGE_SIZE);
 
 		new_array = addr ? __va(addr) : 0;
 	}
@@ -279,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 		kfree(old_array);
 	else if (old_array != memblock_memory_init_regions &&
 		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_size);
+		memblock_free(__pa(old_array), old_alloc_size);
 
 	/* Reserve the new array if that comes from the memblock.
 	 * Otherwise, we needn't do it
 	 */
 	if (!use_slab)
-		BUG_ON(memblock_reserve(addr, new_size));
+		BUG_ON(memblock_reserve(addr, new_alloc_size));
 
 	/* Update slab flag */
 	*in_slab = use_slab;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0d7e3ec8e0f3..427bb291dd0f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 		pgdat = hotadd_new_pgdat(nid, start);
 		ret = -ENOMEM;
 		if (!pgdat)
-			goto out;
+			goto error;
 		new_pgdat = 1;
 	}
 
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index d23415c001bc..405573010f99 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+				 phys_addr_t end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = min_t(unsigned long,
+				      PFN_DOWN(end), max_low_pfn);
+
+	if (start_pfn > end_pfn)
+		return 0;
+
+	__free_pages_memory(start_pfn, end_pfn);
+
+	return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
 	unsigned long count = 0;
-	phys_addr_t start, end;
+	phys_addr_t start, end, size;
 	u64 i;
 
-	/* free reserved array temporarily so that it's treated as free area */
-	memblock_free_reserved_regions();
-
-	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-		unsigned long start_pfn = PFN_UP(start);
-		unsigned long end_pfn = min_t(unsigned long,
-					      PFN_DOWN(end), max_low_pfn);
-		if (start_pfn < end_pfn) {
-			__free_pages_memory(start_pfn, end_pfn);
-			count += end_pfn - start_pfn;
-		}
-	}
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+		count += __free_memory_core(start, end);
+
+	/* free range that is used for reserved array if we allocate it */
+	size = get_allocated_memblock_reserved_regions_info(&start);
+	if (size)
+		count += __free_memory_core(start, start + size);
 
-	/* put region array back? */
-	memblock_reserve_reserved_regions();
 	return count;
 }
 
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
 	return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 						   unsigned long size,
 						   unsigned long align,
 						   unsigned long goal,
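The memblock changes above round both the old and the new region-array sizes up to whole pages (and allocate with PAGE_SIZE alignment) so that nobootmem can later hand the array back to the page allocator page by page. The rounding itself is the usual mask trick; the userspace macro below simply mirrors the kernel's PAGE_ALIGN():

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long old_size = 128 * 24;	/* arbitrary example size */
	unsigned long new_size = old_size << 1;

	printf("%lu -> %lu, %lu -> %lu\n",
	       old_size, PAGE_ALIGN(old_size),
	       new_size, PAGE_ALIGN(new_size));
	return 0;
}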
diff --git a/mm/shmem.c b/mm/shmem.c
index 4ce02e0673db..bd106361be4b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -264,46 +264,55 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
 }
 
 /*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+			       pgoff_t index, swp_entry_t swap)
+{
+	void *item;
+
+	rcu_read_lock();
+	item = radix_tree_lookup(&mapping->page_tree, index);
+	rcu_read_unlock();
+	return item == swp_to_radix_entry(swap);
+}
+
+/*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
-	int error = 0;
+	int error;
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapBacked(page));
 
+	page_cache_get(page);
+	page->mapping = mapping;
+	page->index = index;
+
+	spin_lock_irq(&mapping->tree_lock);
 	if (!expected)
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		error = radix_tree_insert(&mapping->page_tree, index, page);
+	else
+		error = shmem_radix_tree_replace(mapping, index, expected,
+								 page);
 	if (!error) {
-		page_cache_get(page);
-		page->mapping = mapping;
-		page->index = index;
-
-		spin_lock_irq(&mapping->tree_lock);
-		if (!expected)
-			error = radix_tree_insert(&mapping->page_tree,
-							index, page);
-		else
-			error = shmem_radix_tree_replace(mapping, index,
-							expected, page);
-		if (!error) {
-			mapping->nrpages++;
-			__inc_zone_page_state(page, NR_FILE_PAGES);
-			__inc_zone_page_state(page, NR_SHMEM);
-			spin_unlock_irq(&mapping->tree_lock);
-		} else {
-			page->mapping = NULL;
-			spin_unlock_irq(&mapping->tree_lock);
-			page_cache_release(page);
-		}
-		if (!expected)
-			radix_tree_preload_end();
+		mapping->nrpages++;
+		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_zone_page_state(page, NR_SHMEM);
+		spin_unlock_irq(&mapping->tree_lock);
+	} else {
+		page->mapping = NULL;
+		spin_unlock_irq(&mapping->tree_lock);
+		page_cache_release(page);
 	}
-	if (error)
-		mem_cgroup_uncharge_cache_page(page);
 	return error;
 }
 
@@ -1124,9 +1133,9 @@ repeat:
 		/* We have to do this with page locked to prevent races */
 		lock_page(page);
 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
-		    page->mapping) {
+		    !shmem_confirm_swap(mapping, index, swap)) {
 			error = -EEXIST;	/* try again */
-			goto failed;
+			goto unlock;
 		}
 		if (!PageUptodate(page)) {
 			error = -EIO;
@@ -1142,9 +1151,12 @@ repeat:
 
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
+		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
+			/* We already confirmed swap, and make no allocation */
+			VM_BUG_ON(error);
+		}
 		if (error)
 			goto failed;
 
@@ -1181,11 +1193,18 @@ repeat:
 		__set_page_locked(page);
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
-			error = shmem_add_to_page_cache(page, mapping, index,
-							gfp, NULL);
 		if (error)
 			goto decused;
+		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		if (!error) {
+			error = shmem_add_to_page_cache(page, mapping, index,
+							gfp, NULL);
+			radix_tree_preload_end();
+		}
+		if (error) {
+			mem_cgroup_uncharge_cache_page(page);
+			goto decused;
+		}
 		lru_cache_add_anon(page);
 
 		spin_lock(&info->lock);
@@ -1245,14 +1264,10 @@ decused:
 unacct:
 	shmem_unacct_blocks(info->flags, 1);
 failed:
-	if (swap.val && error != -EINVAL) {
-		struct page *test = find_get_page(mapping, index);
-		if (test && !radix_tree_exceptional_entry(test))
-			page_cache_release(test);
-		/* Have another try if the entry has changed */
-		if (test != swp_to_radix_entry(swap))
-			error = -EEXIST;
-	}
+	if (swap.val && error != -EINVAL &&
+	    !shmem_confirm_swap(mapping, index, swap))
+		error = -EEXIST;
+unlock:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
@@ -1264,7 +1279,7 @@ failed:
 		spin_unlock(&info->lock);
 		goto repeat;
 	}
-	if (error == -EEXIST)
+	if (error == -EEXIST)	/* from above or from radix_tree_insert */
 		goto repeat;
 	return error;
 }
@@ -1692,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	return error;
 }
 
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
-				    pgoff_t index, pgoff_t end, int origin)
-{
-	struct page *page;
-	struct pagevec pvec;
-	pgoff_t indices[PAGEVEC_SIZE];
-	bool done = false;
-	int i;
-
-	pagevec_init(&pvec, 0);
-	pvec.nr = 1;		/* start small: we may be there already */
-	while (!done) {
-		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-					pvec.nr, pvec.pages, indices);
-		if (!pvec.nr) {
-			if (origin == SEEK_DATA)
-				index = end;
-			break;
-		}
-		for (i = 0; i < pvec.nr; i++, index++) {
-			if (index < indices[i]) {
-				if (origin == SEEK_HOLE) {
-					done = true;
-					break;
-				}
-				index = indices[i];
-			}
-			page = pvec.pages[i];
-			if (page && !radix_tree_exceptional_entry(page)) {
-				if (!PageUptodate(page))
-					page = NULL;
-			}
-			if (index >= end ||
-			    (page && origin == SEEK_DATA) ||
-			    (!page && origin == SEEK_HOLE)) {
-				done = true;
-				break;
-			}
-		}
-		shmem_deswap_pagevec(&pvec);
-		pagevec_release(&pvec);
-		pvec.nr = PAGEVEC_SIZE;
-		cond_resched();
-	}
-	return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
-	struct address_space *mapping;
-	struct inode *inode;
-	pgoff_t start, end;
-	loff_t new_offset;
-
-	if (origin != SEEK_DATA && origin != SEEK_HOLE)
-		return generic_file_llseek_size(file, offset, origin,
-							MAX_LFS_FILESIZE);
-	mapping = file->f_mapping;
-	inode = mapping->host;
-	mutex_lock(&inode->i_mutex);
-	/* We're holding i_mutex so we can access i_size directly */
-
-	if (offset < 0)
-		offset = -EINVAL;
-	else if (offset >= inode->i_size)
-		offset = -ENXIO;
-	else {
-		start = offset >> PAGE_CACHE_SHIFT;
-		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-		new_offset = shmem_seek_hole_data(mapping, start, end, origin);
-		new_offset <<= PAGE_CACHE_SHIFT;
-		if (new_offset > offset) {
-			if (new_offset < inode->i_size)
-				offset = new_offset;
-			else if (origin == SEEK_DATA)
-				offset = -ENXIO;
-			else
-				offset = inode->i_size;
-		}
-	}
-
-	if (offset >= 0 && offset != file->f_pos) {
-		file->f_pos = offset;
-		file->f_version = 0;
-	}
-	mutex_unlock(&inode->i_mutex);
-	return offset;
-}
-
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 							 loff_t len)
 {
@@ -2787,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
 #ifdef CONFIG_TMPFS
-	.llseek		= shmem_file_llseek,
+	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
 	.write		= do_sync_write,
 	.aio_read	= shmem_file_aio_read,
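The shmem cleanup above moves radix_tree_preload() out of shmem_add_to_page_cache() and into the one caller that actually inserts a brand-new entry: preloading may allocate (and sleep), so it has to happen before the mapping's tree_lock spinlock is taken, while the insert itself runs under the lock and radix_tree_preload_end() releases the per-CPU preload afterwards. Roughly, the classic pre-xarray pattern is:

/* Sketch of the old radix-tree insertion discipline (pre-xarray kernels). */
error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);	/* may sleep/allocate */
if (!error) {
	spin_lock_irq(&mapping->tree_lock);
	error = radix_tree_insert(&mapping->page_tree, index, page);
	spin_unlock_irq(&mapping->tree_lock);
	radix_tree_preload_end();	/* preload returns with preemption disabled */
}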
diff --git a/mm/sparse.c b/mm/sparse.c
index 6a4bf9160e85..c7bb952400c8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -275,8 +275,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					 unsigned long size)
 {
-	pg_data_t *host_pgdat;
-	unsigned long goal;
+	unsigned long goal, limit;
+	unsigned long *p;
+	int nid;
 	/*
 	 * A page may contain usemaps for other sections preventing the
 	 * page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	 * from the same section as the pgdat where possible to avoid
 	 * this problem.
 	 */
-	goal = __pa(pgdat) & PAGE_SECTION_MASK;
-	host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-	return __alloc_bootmem_node_nopanic(host_pgdat, size,
-					    SMP_CACHE_BYTES, goal);
+	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+	limit = goal + (1UL << PA_SECTION_SHIFT);
+	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+					  SMP_CACHE_BYTES, goal, limit);
+	if (!p && limit) {
+		limit = 0;
+		goto again;
+	}
+	return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeb3bc9d1d36..661576324c7f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2955,14 +2955,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * hold lock_memory_hotplug().
 */
 void kswapd_stop(int nid)
 {
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-	if (kswapd)
+	if (kswapd) {
 		kthread_stop(kswapd);
+		NODE_DATA(nid)->kswapd = NULL;
+	}
 }
 
 static int __init kswapd_init(void)
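Finally, the vmscan.c hunk documents why kswapd_stop() must also clear NODE_DATA(nid)->kswapd: memory hotplug can call stop more than once for the same node, and a second kthread_stop() on a task that has already exited acts on a stale pointer. The usual pattern is to stop the thread and forget the pointer in one place, under whatever lock serialises the callers (here lock_memory_hotplug()). A generic sketch with placeholder names:

/* Sketch: 'worker' stands in for the per-node kswapd task pointer. */
static struct task_struct *worker;	/* protected by worker_lock */
static DEFINE_MUTEX(worker_lock);

static void worker_stop(void)
{
	mutex_lock(&worker_lock);
	if (worker) {
		kthread_stop(worker);	/* waits for the thread to exit */
		worker = NULL;		/* make a second stop a harmless no-op */
	}
	mutex_unlock(&worker_lock);
}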