author     Linus Torvalds  2014-01-30 18:44:44 -0800
committer  Linus Torvalds  2014-01-30 18:44:44 -0800
commit     aa2e7100e38880db7907cb2b7ec6267b2b243771
tree       67f9d2479365398c07833d3fc4f794861f7da5b1 /include
parent     2def2ef2ae5f3990aabdbe8a755911902707d268
parent     7c094fd698de2f333fa39b6da213f880d40b9bfe
Merge branch 'akpm' (patches from Andrew Morton)
Merge misc fixes from Andrew Morton:
"A few hotfixes and various leftovers which were awaiting other merges.
Mainly movement of zram into mm/"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
memcg: fix mutex not unlocked on memcg_create_kmem_cache fail path
Documentation/filesystems/vfs.txt: update file_operations documentation
mm, oom: base root bonus on current usage
mm: don't lose the SOFT_DIRTY flag on mprotect
mm/slub.c: fix page->_count corruption (again)
mm/mempolicy.c: fix mempolicy printing in numa_maps
zram: remove zram->lock in read path and change it with mutex
zram: remove workqueue for freeing removed pending slot
zram: introduce zram->tb_lock
zram: use atomic operation for stat
zram: remove unnecessary free
zram: delay pending free request in read path
zram: fix race between reset and flushing pending work
zsmalloc: add maintainers
zram: add zram maintainers
zsmalloc: add copyright
zram: add copyright
zram: remove old private project comment
zram: promote zram from staging
zsmalloc: move it under mm
...
Diffstat (limited to 'include')
-rw-r--r--   include/linux/blkdev.h   |  5
-rw-r--r--   include/linux/bootmem.h  |  4
-rw-r--r--   include/linux/smp.h      |  6
-rw-r--r--   include/linux/zsmalloc.h | 51
4 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..8678c4322b44 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
  * as well!
  */
 struct request {
-        union {
-                struct list_head queuelist;
-                struct llist_node ll_list;
-        };
+        struct list_head queuelist;
         union {
                 struct call_single_data csd;
                 struct work_struct mq_flush_data;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b388223bd4a9..db51fe4fe317 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -264,7 +264,7 @@ static inline void * __init memblock_virt_alloc_low(
 {
         if (!align)
                 align = SMP_CACHE_BYTES;
-        return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+        return __alloc_bootmem_low(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_low_nopanic(
@@ -272,7 +272,7 @@ static inline void * __init memblock_virt_alloc_low_nopanic(
 {
         if (!align)
                 align = SMP_CACHE_BYTES;
-        return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+        return __alloc_bootmem_low_nopanic(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_from_nopanic(
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..3834f43f9993 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 
 extern void cpu_idle(void);
 
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-        struct list_head list;
+        union {
+                struct list_head list;
+                struct llist_node llist;
+        };
         smp_call_func_t func;
         void *info;
         u16 flags;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
new file mode 100644
index 000000000000..e44d634e7fb7
--- /dev/null
+++ b/include/linux/zsmalloc.h
@@ -0,0 +1,51 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_H_
+#define _ZS_MALLOC_H_
+
+#include <linux/types.h>
+
+/*
+ * zsmalloc mapping modes
+ *
+ * NOTE: These only make a difference when a mapped object spans pages.
+ * They also have no effect when PGTABLE_MAPPING is selected.
+ */
+enum zs_mapmode {
+        ZS_MM_RW, /* normal read-write mapping */
+        ZS_MM_RO, /* read-only (no copy-out at unmap time) */
+        ZS_MM_WO /* write-only (no copy-in at map time) */
+        /*
+         * NOTE: ZS_MM_WO should only be used for initializing new
+         * (uninitialized) allocations. Partial writes to already
+         * initialized allocations should use ZS_MM_RW to preserve the
+         * existing data.
+         */
+};
+
+struct zs_pool;
+
+struct zs_pool *zs_create_pool(gfp_t flags);
+void zs_destroy_pool(struct zs_pool *pool);
+
+unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, unsigned long obj);
+
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+                        enum zs_mapmode mm);
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool);
+
+#endif
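
For orientation, the newly exported zsmalloc.h is a handle-based allocator API: callers create a pool, allocate opaque handles, and temporarily map a handle to get a kernel pointer, choosing a mapping mode per the comments above. The following is a minimal illustrative sketch, not part of this commit: it uses only the functions declared in the header, while the wrapper function name, object size, and error handling are assumptions.

/*
 * Illustrative sketch only: exercise the zsmalloc API declared in
 * include/linux/zsmalloc.h. The ZS_MM_WO / ZS_MM_RO choices follow the
 * mapping-mode notes in the header.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

static int zs_example(void)
{
        struct zs_pool *pool;
        unsigned long handle;
        void *mem;

        pool = zs_create_pool(GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        /* Allocate an object; the returned handle is opaque, not a pointer. */
        handle = zs_malloc(pool, 128);
        if (!handle) {
                zs_destroy_pool(pool);
                return -ENOMEM;
        }

        /* Fresh allocation: ZS_MM_WO skips the copy-in at map time. */
        mem = zs_map_object(pool, handle, ZS_MM_WO);
        memset(mem, 0xaa, 128);
        zs_unmap_object(pool, handle);

        /* Read it back: ZS_MM_RO skips the copy-out at unmap time. */
        mem = zs_map_object(pool, handle, ZS_MM_RO);
        /* ... consume the 128 bytes ... */
        zs_unmap_object(pool, handle);

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
}

The map/unmap pair is what lets zsmalloc pack objects across page boundaries: a mapping may be a temporary copy when an object spans pages, which is why writes must go through the mapped pointer before zs_unmap_object() and why the mode hints matter for avoiding needless copies.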