author      Linus Torvalds  2014-07-30 17:16:36 -0700
committer   Linus Torvalds  2014-07-30 17:16:36 -0700
commit      4d0bd657d77d599307649aa03991b05e3ddef059 (patch)
tree        a1b028afe393695ea8ccbe4a2ef7742b94d82ef7
parent      26bcd8b72563b4c54892c4c2a409f6656fb8ae8b (diff)
parent      e0198b290dcd8313bdf313a0d083033d5c01d761 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  Josh has moved
  kexec: export free_huge_page to VMCOREINFO
  mm: fix filemap.c pagecache_get_page() kernel-doc warnings
  mm: debugfs: move rounddown_pow_of_two() out from do_fault path
  memcg: oom_notify use-after-free fix
  hwpoison: call action_result() in failure path of hwpoison_user_mappings()
  hwpoison: fix hugetlbfs/thp precheck in hwpoison_user_mappings()
  rapidio/tsi721_dma: fix failure to obtain transaction descriptor
  mm, thp: do not allow thp faults to avoid cpuset restrictions
  mm/page-writeback.c: fix divide by zero in bdi_dirty_limits()
-rw-r--r--  .mailmap                              |  5
-rw-r--r--  CREDITS                               |  7
-rw-r--r--  MAINTAINERS                           |  2
-rw-r--r--  drivers/rapidio/devices/tsi721_dma.c  |  8
-rw-r--r--  include/linux/hugetlb.h               |  1
-rw-r--r--  kernel/kexec.c                        |  2
-rw-r--r--  kernel/rcu/rcutorture.c               |  4
-rw-r--r--  mm/filemap.c                          | 13
-rw-r--r--  mm/hugetlb.c                          |  2
-rw-r--r--  mm/memcontrol.c                       |  4
-rw-r--r--  mm/memory-failure.c                   | 14
-rw-r--r--  mm/memory.c                           | 21
-rw-r--r--  mm/page-writeback.c                   |  6
-rw-r--r--  mm/page_alloc.c                       | 16
14 files changed, 69 insertions(+), 36 deletions(-)
diff --git a/.mailmap b/.mailmap
index df1baba43a64..1ad68731fb47 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
John Stultz <johnstul@us.ibm.com>
+<josh@joshtriplett.org> <josh@freedesktop.org>
+<josh@joshtriplett.org> <josh@kernel.org>
+<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
+<josh@joshtriplett.org> <josht@us.ibm.com>
+<josh@joshtriplett.org> <josht@vnet.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index 28ee1514b9de..a80b66718f66 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
S: Australia
N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
N: Winfried Trümper
E: winni@xpilot.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 86efa7e213c2..95990dd2678c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7424,7 +7424,7 @@ S: Orphan
F: drivers/net/wireless/ray*
RCUTORTURE MODULE
-M: Josh Triplett <josh@freedesktop.org>
+M: Josh Triplett <josh@joshtriplett.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
L: linux-kernel@vger.kernel.org
S: Supported
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c..44341dc5b148 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
"desc %p not ACKed\n", tx_desc);
}
+ if (ret == NULL) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: unable to obtain tx descriptor\n", __func__);
+ goto err_out;
+ }
+
i = bdma_chan->wr_count_next % bdma_chan->bd_num;
if (i == bdma_chan->bd_num - 1) {
i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
tx_desc->txd.phys = bdma_chan->bd_phys +
i * sizeof(struct tsi721_dma_desc);
tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
spin_unlock_bh(&bdma_chan->lock);
return ret;
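
The hunk above makes tsi721_desc_get() fail gracefully when no transaction
descriptor could be obtained, instead of falling through and initializing
hw_desc fields through a NULL pointer. A condensed sketch of the resulting
control flow (field names follow the hunk; the elided descriptor scan is
assumed, not shown in this diff):

	struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
	{
		struct tsi721_tx_desc *ret = NULL;

		spin_lock_bh(&bdma_chan->lock);
		/* ... scan for a completed, ACKed descriptor, setting ret ... */
		if (ret == NULL) {
			/* Nothing free: jump past the hw_desc setup that
			 * would otherwise dereference a NULL ret. */
			goto err_out;
		}
		/* ... bind ret->txd.phys and ret->hw_desc to the next BD slot ... */
	err_out:
		spin_unlock_bh(&bdma_chan->lock);
		return ret;
	}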
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a94124..23a088fec3c0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
+#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1619,6 +1620,7 @@ static int __init crash_save_vmcoreinfo_init(void)
#endif
VMCOREINFO_NUMBER(PG_head_mask);
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+ VMCOREINFO_SYMBOL(free_huge_page);
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
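
VMCOREINFO_SYMBOL() records a symbol's address in the vmcoreinfo note so
that dump tools such as makedumpfile can identify hugetlb pages in the
crash image. Roughly, per include/linux/kexec.h of this era (check the
header before relying on the exact definition):

	#define VMCOREINFO_SYMBOL(name) \
		vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)

so the added line appends an entry of the form
"SYMBOL(free_huge_page)=<address>" to the note.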
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5ba..948a7693748e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
* Copyright (C) IBM Corporation, 2005, 2006
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
- * Josh Triplett <josh@freedesktop.org>
+ * Josh Triplett <josh@joshtriplett.org>
*
* See also: Documentation/RCU/torture.txt
*/
@@ -51,7 +51,7 @@
#include <linux/torture.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
torture_param(int, fqs_duration, 0,
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..900edfaf6df5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
*
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is return locked
* FGP_CREAT: If page is not present then a new page is allocated using
- * @gfp_mask and added to the page cache and the VM's LRU
- * list. The page is returned locked and with an increased
- * refcount. Otherwise, %NULL is returned.
+ * @cache_gfp_mask and added to the page cache and the VM's LRU
+ * list. If radix tree nodes are allocated during page cache
+ * insertion then @radix_gfp_mask is used. The page is returned
+ * locked and with an increased refcount. Otherwise, %NULL is
+ * returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
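
With the split masks documented above, a caller passes one gfp mask for the
data page and one for radix tree node allocation. A hypothetical caller,
assuming the post-patch five-argument signature of pagecache_get_page():

	struct page *page;

	page = pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping),	/* data page */
				  GFP_NOFS);	/* radix tree nodes */
	if (!page)
		return -ENOMEM;	/* FGP_CREAT set, so NULL means allocation failed */
	/* page comes back locked and with an elevated refcount */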
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9221c02ed9e2..7a0a73d2fcff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6eb..1f14a430c656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
struct mem_cgroup_eventfd_list *ev;
+ spin_lock(&memcg_oom_lock);
+
list_for_each_entry(ev, &memcg->oom_notify, list)
eventfd_signal(ev->eventfd, 1);
+
+ spin_unlock(&memcg_oom_lock);
return 0;
}
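
The use-after-free closed here: the unregister path deletes and kfree()s
entries on the same oom_notify list under memcg_oom_lock, so an unlocked
walk could signal an already-freed eventfd. Roughly, the racing side looks
like this (reconstructed from mm/memcontrol.c of this era; details may
differ):

	static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
						    struct eventfd_ctx *eventfd)
	{
		struct mem_cgroup_eventfd_list *ev, *tmp;

		spin_lock(&memcg_oom_lock);
		list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
			if (ev->eventfd == eventfd) {
				list_del(&ev->list);
				kfree(ev);	/* entry the notify walk might still touch */
			}
		}
		spin_unlock(&memcg_oom_lock);
	}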
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7211a73ba14d..a013bc94ebbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct page *hpage = *hpagep;
struct page *ppage;
- if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+ /*
+ * Here we are interested only in user-mapped pages, so skip any
+ * other types of pages.
+ */
+ if (PageReserved(p) || PageSlab(p))
+ return SWAP_SUCCESS;
+ if (!(PageLRU(hpage) || PageHuge(p)))
return SWAP_SUCCESS;
/*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(hpage))
return SWAP_SUCCESS;
- if (PageKsm(p))
+ if (PageKsm(p)) {
+ pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
return SWAP_FAIL;
+ }
if (PageSwapCache(p)) {
printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
- printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ action_result(pfn, "unmapping failed", IGNORED);
res = -EBUSY;
goto out;
}
diff --git a/mm/memory.c b/mm/memory.c
index 7e8d8205b610..8b44f765b645 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
update_mmu_cache(vma, address, pte);
}
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
static inline unsigned long fault_around_pages(void)
{
- return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+ return fault_around_bytes >> PAGE_SHIFT;
}
static inline unsigned long fault_around_mask(void)
{
- return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+ return ~(fault_around_bytes - 1) & PAGE_MASK;
}
-
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
return 0;
}
+/*
+ * fault_around_pages() and fault_around_mask() expects fault_around_bytes
+ * rounded down to nearest page order. It's what do_fault_around() expects to
+ * see.
+ */
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
- fault_around_bytes = val;
+ if (val > PAGE_SIZE)
+ fault_around_bytes = rounddown_pow_of_two(val);
+ else
+ fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
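
The PAGE_SIZE clamp exists because rounddown_pow_of_two(n) is
1UL << ilog2(n), and ilog2(0) is undefined (a shift by fls(0) - 1 == -1).
A small user-space sketch of the debugfs setter's rounding, assuming a
4096-byte page:

	#include <stdio.h>

	/* Stand-in for the kernel's rounddown_pow_of_two(): keeps only the
	 * highest set bit. Like the kernel helper, meaningless for n == 0. */
	static unsigned long rounddown_pow_of_two(unsigned long n)
	{
		while (n & (n - 1))
			n &= n - 1;
		return n;
	}

	int main(void)
	{
		unsigned long vals[] = { 0, 4096, 5000, 65536, 100000 };
		for (int i = 0; i < 5; i++) {
			/* Mirrors fault_around_bytes_set(): clamp to one page first. */
			unsigned long fab = vals[i] > 4096 ?
					    rounddown_pow_of_two(vals[i]) : 4096;
			printf("val=%lu -> fault_around_bytes=%lu\n", vals[i], fab);
		}
		return 0;
	}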
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75..e0c943014eb7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
if (bdi_bg_thresh)
- *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
- background_thresh,
- dirty_thresh);
+ *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+ background_thresh,
+ dirty_thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
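
dirty_thresh comes from the global dirty limits and can legitimately be
zero (e.g. vm.dirty_ratio tuned down on a machine with little reclaimable
memory), so the scaled background threshold must guard the division. The
guarded computation, extracted as a stand-alone helper (names are
illustrative, not the kernel's):

	#include <stdint.h>

	/* Mirrors the fixed bdi_dirty_limits() math: scale bdi_thresh by
	 * background_thresh/dirty_thresh, but yield 0 instead of dividing
	 * by a zero dirty_thresh. */
	static unsigned long bdi_bg_thresh(unsigned long bdi_thresh,
					   unsigned long background_thresh,
					   unsigned long dirty_thresh)
	{
		if (!dirty_thresh)
			return 0;
		return (unsigned long)((uint64_t)bdi_thresh * background_thresh /
				       dirty_thresh);
	}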
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8bcfe3ae20cb..ef44ad736ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
- const gfp_t wait = gfp_mask & __GFP_WAIT;
+ const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
- * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+ * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
- if (!wait) {
+ if (atomic) {
/*
- * Not worth trying to allocate harder for
- * __GFP_NOMEMALLOC even if it can't schedule.
+ * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+ * if it can't schedule.
*/
- if (!(gfp_mask & __GFP_NOMEMALLOC))
+ if (!(gfp_mask & __GFP_NOMEMALLOC))
alloc_flags |= ALLOC_HARDER;
/*
- * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+ * comment for __cpuset_node_allowed_softwall().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
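
GFP_TRANSHUGE carries __GFP_NO_KSWAPD, and a THP fault can drop __GFP_WAIT
when compaction is deferred; under the old !wait test that made the
allocation look like GFP_ATOMIC and cleared ALLOC_CPUSET, letting THP pages
escape the task's cpuset. A user-space sketch of the new predicate (the
flag bit values below are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative bit values only; the real kernel constants differ. */
	#define __GFP_WAIT	0x10u
	#define __GFP_NO_KSWAPD	0x400000u

	/* After the fix: atomic treatment (ALLOC_HARDER, cpuset bypass) only
	 * when the request can neither direct-reclaim nor is THP-style. */
	static bool treat_as_atomic(unsigned int gfp_mask)
	{
		return !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
	}

	int main(void)
	{
		printf("GFP_ATOMIC-like:          %d\n", treat_as_atomic(0));
		printf("THP fault that lost WAIT: %d\n",
		       treat_as_atomic(__GFP_NO_KSWAPD));
		printf("ordinary GFP_KERNEL-like: %d\n",
		       treat_as_atomic(__GFP_WAIT));
		return 0;
	}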