author	Linus Torvalds	2013-09-11 16:08:54 -0700
committer	Linus Torvalds	2013-09-11 16:08:54 -0700
commit	c2d95729e3094ecdd8c54e856bbe971adbbd7f48 (patch)
tree	76cc5b551227d3d55d68a93105c1fe8080dfb812 /lib
parent	bbda1baeeb2f4aff3addac3d086a1e56c3f2503e (diff)
parent	b34081f1cd59585451efaa69e1dff1b9507e6c89 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:

 - Some pidns/fork/exec tweaks
 - OCFS2 updates
 - Most of MM - there remain quite a few memcg parts which depend on
   pending core cgroups changes.  Which might have been already merged -
   I'll check tomorrow...
 - Various misc stuff all over the place
 - A few block bits which I never got around to sending to Jens -
   relatively minor things.
 - MAINTAINERS maintenance
 - A small number of lib/ updates
 - checkpatch updates
 - epoll
 - firmware/dmi-scan
 - Some kprobes work for S390
 - drivers/rtc updates
 - hfsplus feature work
 - vmcore feature work
 - rbtree upgrades
 - AOE updates
 - pktcdvd cleanups
 - PPS
 - memstick
 - w1
 - New "inittmpfs" feature, which does the obvious
 - More IPC work from Davidlohr.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (303 commits)
  lz4: fix compression/decompression signedness mismatch
  ipc: drop ipc_lock_check
  ipc, shm: drop shm_lock_check
  ipc: drop ipc_lock_by_ptr
  ipc, shm: guard against non-existant vma in shmdt(2)
  ipc: document general ipc locking scheme
  ipc,msg: drop msg_unlock
  ipc: rename ids->rw_mutex
  ipc,shm: shorten critical region for shmat
  ipc,shm: cleanup do_shmat pasta
  ipc,shm: shorten critical region for shmctl
  ipc,shm: make shmctl_nolock lockless
  ipc,shm: introduce shmctl_nolock
  ipc: drop ipcctl_pre_down
  ipc,shm: shorten critical region in shmctl_down
  ipc,shm: introduce lockless functions to obtain the ipc object
  initmpfs: use initramfs if rootfstype= or root= specified
  initmpfs: make rootfs use tmpfs when CONFIG_TMPFS enabled
  initmpfs: move rootfs code from fs/ramfs/ to init/
  initmpfs: move bdi setup from init_rootfs to init_ramfs
  ...
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	2
-rw-r--r--	lib/crc32.c	17
-rw-r--r--	lib/decompress_inflate.c	2
-rw-r--r--	lib/genalloc.c	22
-rw-r--r--	lib/lz4/lz4_decompress.c	8
-rw-r--r--	lib/radix-tree.c	41
-rw-r--r--	lib/rbtree.c	40
-rw-r--r--	lib/rbtree_test.c	12
8 files changed, 121 insertions(+), 23 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 652bea9054f0..c9eef36739a9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1461,7 +1461,7 @@ config BACKTRACE_SELF_TEST
config RBTREE_TEST
tristate "Red-Black tree test"
- depends on m && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
A benchmark measuring the performance of the rbtree library.
Also includes rbtree invariant checks.
diff --git a/lib/crc32.c b/lib/crc32.c
index 072fbd8234d5..410093dbe51c 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -131,11 +131,14 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
#endif
/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
+ * CRC32/CRC32C
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
+ * uses, or the previous crc32/crc32c value if computing incrementally.
+ * @p: pointer to buffer over which CRC32/CRC32C is run
* @len: length of buffer @p
+ * @tab: little-endian Ethernet table
+ * @polynomial: CRC32/CRC32c LE polynomial
*/
static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
size_t len, const u32 (*tab)[256],
@@ -201,11 +204,13 @@ EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
* other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * @p: pointer to buffer over which CRC32 is run
* @len: length of buffer @p
+ * @tab: big-endian Ethernet table
+ * @polynomial: CRC32 BE polynomial
*/
static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
size_t len, const u32 (*tab)[256],
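
For reference, the routines documented above reduce to a plain bit-at-a-time CRC loop once the table-driven fast path is set aside. The following is a minimal sketch of the little-endian (reflected) variant, parameterized by the polynomial as the new @polynomial kerneldoc describes; the function name is made up for illustration and this is not the kernel's table-driven implementation:

#include <linux/types.h>	/* u32, size_t */

/* Hedged sketch: bitwise little-endian CRC over an arbitrary reflected
 * polynomial. Ethernet CRC32 uses 0xedb88320 and CRC32C uses 0x82f63b78
 * in this form; the real crc32_le_generic() normally runs the table path. */
static u32 crc32_le_bitwise(u32 crc, unsigned char const *p, size_t len,
			    u32 polynomial)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
	}
	return crc;
}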
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e34eec..d619b28c456f 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = 0x7fffffff; /* no limit */
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9bc3d4..26cf20be72b7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+ return chunk->end_addr - chunk->start_addr + 1;
+}
+
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
unsigned long val, nval;
@@ -182,13 +187,13 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
int nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
- chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+ chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
if (unlikely(chunk == NULL))
return -ENOMEM;
chunk->phys_addr = phys;
chunk->start_addr = virt;
- chunk->end_addr = virt + size;
+ chunk->end_addr = virt + size - 1;
atomic_set(&chunk->avail, size);
spin_lock(&pool->lock);
@@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
paddr = chunk->phys_addr + (addr - chunk->start_addr);
break;
}
@@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
@@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
if (size > atomic_read(&chunk->avail))
continue;
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
retry:
start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
pool->data);
@@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
- BUG_ON(addr + size > chunk->end_addr);
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+ BUG_ON(addr + size - 1 > chunk->end_addr);
start_bit = (addr - chunk->start_addr) >> order;
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
BUG_ON(remain);
@@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
- size += chunk->end_addr - chunk->start_addr;
+ size += chunk_size(chunk);
rcu_read_unlock();
return size;
}
@@ -519,7 +524,6 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
/**
* dev_get_gen_pool - Obtain the gen_pool (if any) for a device
* @dev: device to retrieve the gen_pool from
- * @name: Optional name for the gen_pool, usually NULL
*
* Returns the gen_pool for the device if one is present, or NULL.
*/
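
The common thread in the genalloc changes is that end_addr becomes inclusive (the last valid byte) rather than exclusive (one past the end), with chunk_size() recovering the size as end - start + 1. A hedged sketch of why this matters, assuming a hypothetical chunk placed at the very top of a 32-bit address space; the helper mirrors the addr <= chunk->end_addr checks above but is not kernel code:

#include <linux/types.h>	/* bool, size_t */

/* Hedged sketch: a chunk whose last byte is the top of the address space
 * has start + size == 0 (wraparound), so an exclusive end address cannot
 * represent it; the inclusive start + size - 1 can.
 * Example: start = 0xfffff000UL, size = 0x1000 on 32-bit gives
 * end_inclusive = 0xffffffff, while start + size wraps to 0. */
static bool addr_in_chunk(unsigned long addr, unsigned long start, size_t size)
{
	unsigned long end_inclusive = start + size - 1;

	return addr >= start && addr <= end_inclusive;
}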
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 411be80ddb46..df6839e3ce08 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -283,8 +283,8 @@ _output_error:
return (int) (-(((char *) ip) - source));
}
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
- size_t actual_dest_len)
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len)
{
int ret = -1;
int input_len = 0;
@@ -302,8 +302,8 @@ exit_0:
EXPORT_SYMBOL(lz4_decompress);
#endif
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
- char *dest, size_t *dest_len)
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+ unsigned char *dest, size_t *dest_len)
{
int ret = -1;
int out_len = 0;
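
This hunk switches the byte pointers from char to unsigned char, matching the "signedness mismatch" in the commit subject. The hazard with plain char (signed on most architectures) is sign extension when a byte of 0x80 or above is widened into length or offset arithmetic. A tiny illustration of the difference, not the lz4 code itself:

#include <stddef.h>

/* Hedged sketch: widening a high byte through a signed char changes its
 * value; through an unsigned char it does not. */
static size_t widen_signed(const char *p)
{
	return (size_t)*p;	/* a 0xff byte can become SIZE_MAX via sign extension */
}

static size_t widen_unsigned(const unsigned char *p)
{
	return (size_t)*p;	/* a 0xff byte stays 255 */
}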
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7964296fd50..7811ed3b4e70 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
+#include <linux/hardirq.h> /* in_interrupt() */
#ifdef __KERNEL__
@@ -207,7 +208,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
- if (!(gfp_mask & __GFP_WAIT)) {
+ /*
+ * Preload code isn't irq safe and it doesn't make sense to use
+ * preloading in the interrupt anyway as all the allocations have to
+ * be atomic. So just do normal allocation when in interrupt.
+ */
+ if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {
struct radix_tree_preload *rtp;
/*
@@ -264,7 +270,7 @@ radix_tree_node_free(struct radix_tree_node *node)
* To make use of this facility, the radix tree must be initialised without
* __GFP_WAIT being passed to INIT_RADIX_TREE().
*/
-int radix_tree_preload(gfp_t gfp_mask)
+static int __radix_tree_preload(gfp_t gfp_mask)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
@@ -288,9 +294,40 @@ int radix_tree_preload(gfp_t gfp_mask)
out:
return ret;
}
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+ * success, return zero, with preemption disabled. On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+ /* Warn on non-sensical use... */
+ WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT));
+ return __radix_tree_preload(gfp_mask);
+}
EXPORT_SYMBOL(radix_tree_preload);
/*
+ * The same as above function, except we don't guarantee preloading happens.
+ * We do it, if we decide it helps. On success, return zero with preemption
+ * disabled. On error, return -ENOMEM with preemption not disabled.
+ */
+int radix_tree_maybe_preload(gfp_t gfp_mask)
+{
+ if (gfp_mask & __GFP_WAIT)
+ return __radix_tree_preload(gfp_mask);
+ /* Preloading doesn't help anything with this gfp mask, skip it */
+ preempt_disable();
+ return 0;
+}
+EXPORT_SYMBOL(radix_tree_maybe_preload);
+
+/*
* Return the maximum key which can be store into a
* radix tree with height HEIGHT.
*/
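
For context, a hedged sketch of the usual caller pattern around the (maybe-)preload API: preload outside the lock, insert under it, then end the preload section to re-enable preemption. The spinlock and function names here are hypothetical; only the radix tree calls are real kernel APIs:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_tree_lock);	/* hypothetical lock protecting the tree */

/* Hedged sketch: radix_tree_maybe_preload() only preloads when the gfp
 * mask allows sleeping, but always disables preemption on success, so the
 * matching radix_tree_preload_end() is required either way. */
static int add_item(struct radix_tree_root *root, unsigned long index,
		    void *item, gfp_t gfp)
{
	int err;

	err = radix_tree_maybe_preload(gfp);
	if (err)
		return err;

	spin_lock(&my_tree_lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(&my_tree_lock);

	radix_tree_preload_end();
	return err;
}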
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c0e31fe2fabf..65f4effd117f 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -518,3 +518,43 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+ for (;;) {
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+ else
+ return (struct rb_node *)node;
+ }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+ const struct rb_node *parent;
+ if (!node)
+ return NULL;
+ parent = rb_parent(node);
+
+ /* If we're sitting on node, we've already seen our children */
+ if (parent && node == parent->rb_left && parent->rb_right) {
+ /* If we are the parent's left node, go to the parent's right
+ * node then all the way down to the left */
+ return rb_left_deepest_node(parent->rb_right);
+ } else
+ /* Otherwise we are the parent's right node, and the parent
+ * should be next */
+ return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+ if (!root->rb_node)
+ return NULL;
+
+ return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
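
Postorder iteration visits both children before their parent, which makes it safe to tear a tree down node by node without touching freed memory or rebalancing. A hedged usage sketch of the new iterators; struct my_thing and its embedded rb field are made up for illustration:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_thing {			/* hypothetical rbtree user */
	struct rb_node rb;
	int value;
};

/* Hedged sketch: free every node of a tree via postorder traversal.
 * The next node is fetched before the current one is freed, and each
 * node's children have already been visited when it is reached. */
static void destroy_all(struct rb_root *root)
{
	struct rb_node *node = rb_first_postorder(root);

	while (node) {
		struct rb_node *next = rb_next_postorder(node);

		kfree(rb_entry(node, struct my_thing, rb));
		node = next;
	}
	*root = RB_ROOT;
}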
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 122f02f9941b..31dd4ccd3baa 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb)
return count;
}
+static void check_postorder(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0;
+ for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
+ count++;
+
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
static void check(int nr_nodes)
{
struct rb_node *rb;
@@ -136,6 +146,8 @@ static void check(int nr_nodes)
WARN_ON_ONCE(count != nr_nodes);
WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
+
+ check_postorder(nr_nodes);
}
static void check_augmented(int nr_nodes)