path: root/include/linux
author	David S. Miller	2010-01-22 22:45:46 -0800
committer	David S. Miller	2010-01-22 22:45:46 -0800
commit	6be325719b3e54624397e413efd4b33a997e55a3 (patch)
tree	57f321a56794cab2222e179b16731e0d76a4a68a /include/linux
parent	26d92f9276a56d55511a427fb70bd70886af647a (diff)
parent	92dcffb916d309aa01778bf8963a6932e4014d07 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/acpi.h | 30
-rw-r--r--  include/linux/aio.h | 4
-rw-r--r--  include/linux/atmel-mci.h | 4
-rw-r--r--  include/linux/backlight.h | 12
-rw-r--r--  include/linux/binfmts.h | 10
-rw-r--r--  include/linux/bitmap.h | 11
-rw-r--r--  include/linux/blkdev.h | 26
-rw-r--r--  include/linux/cpu.h | 15
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/cs5535.h | 172
-rw-r--r--  include/linux/ctype.h | 3
-rw-r--r--  include/linux/decompress/mm.h | 4
-rw-r--r--  include/linux/decompress/unlzo.h | 10
-rw-r--r--  include/linux/device-mapper.h | 8
-rw-r--r--  include/linux/device.h | 12
-rw-r--r--  include/linux/dm-dirty-log.h | 6
-rw-r--r--  include/linux/dm-ioctl.h | 13
-rw-r--r--  include/linux/dm-region-hash.h | 3
-rw-r--r--  include/linux/dmaengine.h | 2
-rw-r--r--  include/linux/drbd.h | 2
-rw-r--r--  include/linux/drbd_nl.h | 1
-rw-r--r--  include/linux/dst.h | 587
-rw-r--r--  include/linux/dynamic_debug.h | 13
-rw-r--r--  include/linux/efi.h | 6
-rw-r--r--  include/linux/elf.h | 2
-rw-r--r--  include/linux/enclosure.h | 2
-rw-r--r--  include/linux/err.h | 5
-rw-r--r--  include/linux/exportfs.h | 2
-rw-r--r--  include/linux/ext3_fs_sb.h | 2
-rw-r--r--  include/linux/ext3_jbd.h | 7
-rw-r--r--  include/linux/fiemap.h | 2
-rw-r--r--  include/linux/file.h | 8
-rw-r--r--  include/linux/firewire-cdev.h | 3
-rw-r--r--  include/linux/firewire.h | 4
-rw-r--r--  include/linux/fs.h | 70
-rw-r--r--  include/linux/fs_stack.h | 6
-rw-r--r--  include/linux/fsl_devices.h | 11
-rw-r--r--  include/linux/ftrace_event.h | 5
-rw-r--r--  include/linux/generic_acl.h | 41
-rw-r--r--  include/linux/genhd.h | 6
-rw-r--r--  include/linux/gpio.h | 6
-rw-r--r--  include/linux/highmem.h | 2
-rw-r--r--  include/linux/hrtimer.h | 58
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/hw_breakpoint.h | 40
-rw-r--r--  include/linux/i2c.h | 92
-rw-r--r--  include/linux/i2c/adp5588.h | 12
-rw-r--r--  include/linux/i2c/tps65010.h | 19
-rw-r--r--  include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h) | 209
-rw-r--r--  include/linux/i8042.h | 18
-rw-r--r--  include/linux/ima.h | 12
-rw-r--r--  include/linux/init.h | 2
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/intel-iommu.h | 1
-rw-r--r--  include/linux/iocontext.h | 27
-rw-r--r--  include/linux/iommu-helper.h | 3
-rw-r--r--  include/linux/ioport.h | 4
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/jbd2.h | 1
-rw-r--r--  include/linux/kallsyms.h | 12
-rw-r--r--  include/linux/kernel-page-flags.h | 46
-rw-r--r--  include/linux/kernel.h | 62
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/kfifo.h | 568
-rw-r--r--  include/linux/kgdb.h | 7
-rw-r--r--  include/linux/kmemcheck.h | 110
-rw-r--r--  include/linux/kmemleak.h | 6
-rw-r--r--  include/linux/kmsg_dump.h | 60
-rw-r--r--  include/linux/ksm.h | 96
-rw-r--r--  include/linux/kvm.h | 1
-rw-r--r--  include/linux/leds-lp3944.h | 3
-rw-r--r--  include/linux/leds-pca9532.h | 2
-rw-r--r--  include/linux/leds-regulator.h | 46
-rw-r--r--  include/linux/libata.h | 3
-rw-r--r--  include/linux/lis3lv02d.h | 15
-rw-r--r--  include/linux/list_sort.h | 11
-rw-r--r--  include/linux/lmb.h | 1
-rw-r--r--  include/linux/memcontrol.h | 30
-rw-r--r--  include/linux/memory.h | 27
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mempolicy.h | 3
-rw-r--r--  include/linux/mfd/88pm8607.h | 217
-rw-r--r--  include/linux/mfd/ab4500.h | 262
-rw-r--r--  include/linux/mfd/adp5520.h | 299
-rw-r--r--  include/linux/mfd/ezx-pcap.h | 3
-rw-r--r--  include/linux/mfd/mc13783-private.h | 208
-rw-r--r--  include/linux/mfd/mc13783.h | 120
-rw-r--r--  include/linux/mfd/pcf50633/core.h | 17
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h | 1
-rw-r--r--  include/linux/mfd/tmio.h | 39
-rw-r--r--  include/linux/mfd/wm831x/core.h | 43
-rw-r--r--  include/linux/mfd/wm831x/pdata.h | 18
-rw-r--r--  include/linux/mfd/wm8350/core.h | 14
-rw-r--r--  include/linux/mfd/wm8350/gpio.h | 18
-rw-r--r--  include/linux/mfd/wm8350/pmic.h | 28
-rw-r--r--  include/linux/migrate.h | 8
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mm.h | 37
-rw-r--r--  include/linux/mm_types.h | 6
-rw-r--r--  include/linux/mmdebug.h | 2
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/module.h | 6
-rw-r--r--  include/linux/mtd/bbm.h | 35
-rw-r--r--  include/linux/mtd/cfi.h | 9
-rw-r--r--  include/linux/mtd/flashchip.h | 9
-rw-r--r--  include/linux/mtd/nand.h | 97
-rw-r--r--  include/linux/mtd/nand_ecc.h | 10
-rw-r--r--  include/linux/mtd/onenand.h | 23
-rw-r--r--  include/linux/mtd/onenand_regs.h | 2
-rw-r--r--  include/linux/namei.h | 2
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 14
-rw-r--r--  include/linux/nfsacl.h | 1
-rw-r--r--  include/linux/nfsd/cache.h | 86
-rw-r--r--  include/linux/nfsd/export.h | 19
-rw-r--r--  include/linux/nfsd/nfsd.h | 424
-rw-r--r--  include/linux/nfsd/nfsfh.h | 206
-rw-r--r--  include/linux/nfsd/state.h | 404
-rw-r--r--  include/linux/nfsd/syscall.h | 8
-rw-r--r--  include/linux/nfsd/xdr.h | 177
-rw-r--r--  include/linux/nfsd/xdr3.h | 346
-rw-r--r--  include/linux/nfsd/xdr4.h | 563
-rw-r--r--  include/linux/node.h | 16
-rw-r--r--  include/linux/nodemask.h | 33
-rw-r--r--  include/linux/numa.h | 2
-rw-r--r--  include/linux/oom.h | 4
-rw-r--r--  include/linux/page-flags.h | 14
-rw-r--r--  include/linux/page_cgroup.h | 7
-rw-r--r--  include/linux/pci.h | 6
-rw-r--r--  include/linux/pci_ids.h | 14
-rw-r--r--  include/linux/percpu-defs.h | 1
-rw-r--r--  include/linux/percpu.h | 434
-rw-r--r--  include/linux/perf_counter.h | 444
-rw-r--r--  include/linux/perf_event.h | 54
-rw-r--r--  include/linux/plist.h | 43
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pnp.h | 13
-rw-r--r--  include/linux/poison.h | 16
-rw-r--r--  include/linux/ptrace.h | 23
-rw-r--r--  include/linux/pwm_backlight.h | 2
-rw-r--r--  include/linux/quota.h | 5
-rw-r--r--  include/linux/raid/pq.h | 19
-rw-r--r--  include/linux/rcutiny.h | 5
-rw-r--r--  include/linux/rcutree.h | 11
-rw-r--r--  include/linux/regulator/consumer.h | 2
-rw-r--r--  include/linux/regulator/machine.h | 6
-rw-r--r--  include/linux/regulator/max8660.h | 57
-rw-r--r--  include/linux/reiserfs_fs.h | 61
-rw-r--r--  include/linux/resource.h | 8
-rw-r--r--  include/linux/rmap.h | 43
-rw-r--r--  include/linux/rtmutex.h | 6
-rw-r--r--  include/linux/rwlock.h | 125
-rw-r--r--  include/linux/rwlock_api_smp.h | 282
-rw-r--r--  include/linux/rwlock_types.h | 56
-rw-r--r--  include/linux/rwsem-spinlock.h | 6
-rw-r--r--  include/linux/sched.h | 81
-rw-r--r--  include/linux/security.h | 7
-rw-r--r--  include/linux/sem.h | 5
-rw-r--r--  include/linux/serio.h | 19
-rw-r--r--  include/linux/shmem_fs.h | 16
-rw-r--r--  include/linux/slab_def.h | 4
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/sm501-regs.h | 2
-rw-r--r--  include/linux/sonypi.h | 1
-rw-r--r--  include/linux/spi/dw_spi.h | 212
-rw-r--r--  include/linux/spi/sh_msiof.h | 10
-rw-r--r--  include/linux/spi/xilinx_spi.h | 20
-rw-r--r--  include/linux/spinlock.h | 377
-rw-r--r--  include/linux/spinlock_api_smp.h | 360
-rw-r--r--  include/linux/spinlock_api_up.h | 66
-rw-r--r--  include/linux/spinlock_types.h | 92
-rw-r--r--  include/linux/spinlock_types_up.h | 12
-rw-r--r--  include/linux/spinlock_up.h | 42
-rw-r--r--  include/linux/string.h | 15
-rw-r--r--  include/linux/sunrpc/debug.h | 3
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 2
-rw-r--r--  include/linux/sunrpc/sched.h | 16
-rw-r--r--  include/linux/sunrpc/svc.h | 7
-rw-r--r--  include/linux/swap.h | 67
-rw-r--r--  include/linux/syscalls.h | 8
-rw-r--r--  include/linux/sysfs.h | 9
-rw-r--r--  include/linux/timb_gpio.h | 37
-rw-r--r--  include/linux/topology.h | 2
-rw-r--r--  include/linux/trace_seq.h | 7
-rw-r--r--  include/linux/tracehook.h | 7
-rw-r--r--  include/linux/tty.h | 6
-rw-r--r--  include/linux/uaccess.h | 4
-rw-r--r--  include/linux/usb.h | 1
-rw-r--r--  include/linux/usb/serial.h | 3
-rw-r--r--  include/linux/usbdevice_fs.h | 26
-rw-r--r--  include/linux/vermagic.h | 2
-rw-r--r--  include/linux/videodev2.h | 123
-rw-r--r--  include/linux/vmstat.h | 12
-rw-r--r--  include/linux/vt.h | 19
-rw-r--r--  include/linux/writeback.h | 4
-rw-r--r--  include/linux/xattr.h | 13
199 files changed, 5075 insertions, 5019 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f72914db2a11..756f831cbdd5 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,6 +118,7 @@ header-y += mtio.h
header-y += ncp_no.h
header-y += neighbour.h
header-y += net_dropmon.h
+header-y += net_tstamp.h
header-y += netfilter_arp.h
header-y += netrom.h
header-y += nfs2.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dfcd920c3e54..b926afe8c03e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
-int acpi_boot_table_init (void);
+void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
@@ -240,7 +240,7 @@ extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
#define NID_INVAL (-1)
-int acpi_check_resource_conflict(struct resource *res);
+int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
@@ -251,12 +251,19 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
+void __init acpi_set_sci_en_on_resume(void);
#endif /* CONFIG_PM_SLEEP */
+struct acpi_osc_context {
+ char *uuid_str; /* uuid string */
+ int rev;
+ struct acpi_buffer cap; /* arg2/arg3 */
+ struct acpi_buffer ret; /* free by caller if success */
+};
+
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
-#define OSC_SUPPORT_MASKS 0x1f
/* _OSC DW0 Definition */
#define OSC_QUERY_ENABLE 1
@@ -265,12 +272,23 @@ void __init acpi_s4_no_nvs(void);
#define OSC_INVALID_REVISION_ERROR 8
#define OSC_CAPABILITIES_MASK_ERROR 16
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+
+/* platform-wide _OSC bits */
+#define OSC_SB_PAD_SUPPORT 1
+#define OSC_SB_PPC_OST_SUPPORT 2
+#define OSC_SB_PR3_SUPPORT 4
+#define OSC_SB_CPUHP_OST_SUPPORT 8
+#define OSC_SB_APEI_SUPPORT 16
+
+/* PCI defined _OSC bits */
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
#define OSC_MSI_SUPPORT 16
+#define OSC_PCI_SUPPORT_MASKS 0x1f
/* _OSC DW1 Definition (OS Control Fields) */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
@@ -279,7 +297,7 @@ void __init acpi_s4_no_nvs(void);
#define OSC_PCI_EXPRESS_AER_CONTROL 8
#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
-#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
+#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
OSC_SHPC_NATIVE_HP_CONTROL | \
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
@@ -303,9 +321,9 @@ static inline int acpi_boot_init(void)
return 0;
}
-static inline int acpi_boot_table_init(void)
+static inline void acpi_boot_table_init(void)
{
- return 0;
+ return;
}
static inline int acpi_mps_check(void)
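For illustration, a minimal sketch of how a driver might call the acpi_run_osc() helper added above, assuming the usual two-dword capabilities buffer; the UUID string and the OSC_SB_PAD_SUPPORT capability are example values, not prescribed by this patch:

static acpi_status example_osc_query(acpi_handle handle)
{
	u32 capbuf[2];
	struct acpi_osc_context context = {
		.uuid_str	= "0811B06E-4A27-44F9-8D60-3CBBC22E7B48", /* example UUID */
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};
	acpi_status status;

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PAD_SUPPORT;

	status = acpi_run_osc(handle, &context);
	if (ACPI_SUCCESS(status))
		kfree(context.ret.pointer);	/* "free by caller if success" */
	return status;
}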
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d1..811dbb369379 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
- wait_queue_t ki_wait;
loff_t ki_pos;
void *private;
@@ -140,7 +139,6 @@ struct kiocb {
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
- init_wait((&(x)->ki_wait)); \
} while (0)
#define AIO_RING_MAGIC 0xa10a10a1
@@ -223,8 +221,6 @@ struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
#endif /* CONFIG_AIO */
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c87..3e09b345f4d6 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
#define ATMEL_MCI_MAX_NR_SLOTS 2
-#include <linux/dw_dmac.h>
-
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
- struct dw_dma_slave dma_slave;
+ struct mci_dma_data *dma_slave;
struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
};
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0f5f57858a23..8c4f884db6b4 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -36,18 +36,18 @@ struct backlight_device;
struct fb_info;
struct backlight_ops {
- unsigned int options;
+ const unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
/* Notify the backlight driver some property has changed */
- int (*update_status)(struct backlight_device *);
+ int (* const update_status)(struct backlight_device *);
/* Return the current backlight brightness (accounting for power,
fb_blank etc.) */
- int (*get_brightness)(struct backlight_device *);
+ int (* const get_brightness)(struct backlight_device *);
/* Check if given framebuffer device is the one bound to this backlight;
return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct fb_info *);
+ int (* const check_fb)(struct fb_info *);
};
/* This structure defines all the properties of a backlight */
@@ -86,7 +86,7 @@ struct backlight_device {
registered this device has been unloaded, and if class_get_devdata()
points to something in the body of that driver, it is also invalid. */
struct mutex ops_lock;
- struct backlight_ops *ops;
+ const struct backlight_ops *ops;
/* The framebuffer notifier block */
struct notifier_block fb_notif;
@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
}
extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, struct backlight_ops *ops);
+ struct device *dev, void *devdata, const struct backlight_ops *ops);
extern void backlight_device_unregister(struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
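For illustration, a minimal sketch of a driver ops table under the constified backlight API above; the example_* names are hypothetical:

static int example_bl_update_status(struct backlight_device *bd)
{
	/* push bd->props.brightness to the hardware here */
	return 0;
}

static int example_bl_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= example_bl_update_status,
	.get_brightness	= example_bl_get_brightness,
};

/* in probe(), hypothetically:
 *	bd = backlight_device_register("example-bl", dev, priv, &example_bl_ops);
 */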
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index aece486ac734..cd4349bdc34e 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -68,6 +68,14 @@ struct linux_binprm{
#define BINPRM_MAX_RECURSION 4
+/* Function parameter for binfmt->coredump */
+struct coredump_params {
+ long signr;
+ struct pt_regs *regs;
+ struct file *file;
+ unsigned long limit;
+};
+
/*
* This structure defines the functions that are used to load the binary formats that
* linux accepts.
@@ -77,7 +85,7 @@ struct linux_binfmt {
struct module *module;
int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
int (*load_shlib)(struct file *);
- int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+ int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
int hasvdso;
};
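For illustration, a sketch of what a binfmt core_dump hook looks like with the new single-argument prototype; the handler body and size check are hypothetical:

static int example_core_dump(struct coredump_params *cprm)
{
	/* signal number, registers, output file and RLIMIT_CORE limit now
	 * arrive in one structure instead of four separate parameters */
	if (cprm->limit < PAGE_SIZE)		/* hypothetical minimum */
		return 0;

	/* write the dump to cprm->file, walking cprm->regs as needed ... */
	return 1;				/* non-zero: dump written */
}

/* wired up as  .core_dump = example_core_dump  in a struct linux_binfmt */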
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c5..daf8c480c786 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+extern void bitmap_set(unsigned long *map, int i, int len);
+extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask);
+
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
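For illustration, a sketch of the new bitmap area helpers in use; the 4-bit alignment and error value are arbitrary example choices:

static int example_alloc_region(unsigned long *map, unsigned long size,
				unsigned int nr_bits)
{
	unsigned long pos;

	/* find nr_bits clear bits, aligned to 4 (align_mask = 4 - 1) */
	pos = bitmap_find_next_zero_area(map, size, 0, nr_bits, 3);
	if (pos >= size)
		return -ENOMEM;			/* no suitable gap */

	bitmap_set(map, pos, nr_bits);		/* mark the area busy */
	return pos;
}

/* ...and bitmap_clear(map, pos, nr_bits) releases the area again. */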
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 784a919aa0d0..5c8018977efa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
* blk_rq_err_bytes() : bytes left till the next error boundary
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_err_sectors() : sectors left till the next error boundary
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
- return blk_rq_err_bytes(rq) >> 9;
-}
-
/*
* Request issue related functions.
*/
@@ -944,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1116,11 +1112,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
return q->limits.alignment_offset;
}
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+{
+ unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+
+ offset &= granularity - 1;
+ return (granularity + lim->alignment_offset - offset) & (granularity - 1);
+}
+
static inline int queue_sector_alignment_offset(struct request_queue *q,
sector_t sector)
{
- return ((sector << 9) - q->limits.alignment_offset)
- & (q->limits.io_min - 1);
+ return queue_limit_alignment_offset(&q->limits, sector << 9);
}
static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1147,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_sector_discard_alignment(struct request_queue *q,
sector_t sector)
{
- return ((sector << 9) - q->limits.discard_alignment)
- & (q->limits.discard_granularity - 1);
+ struct queue_limits *lim = &q->limits;
+ unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+ return (lim->discard_granularity + lim->discard_alignment - alignment)
+ & (lim->discard_granularity - 1);
}
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
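For illustration, the arithmetic of the new queue_limit_alignment_offset() with made-up limits (physical_block_size = io_min = 4096, alignment_offset = 512), written as a standalone user-space check:

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 4096;	/* max(physical_block_size, io_min) */
	unsigned int alignment_offset = 512;
	unsigned long long offset = 9 * 512;	/* byte offset of sector 9 */

	offset &= granularity - 1;		/* offset within one granule: 512 */

	/* prints 0: sector 9 starts exactly on the shifted alignment */
	printf("misalignment = %llu\n",
	       (granularity + alignment_offset - offset) & (granularity - 1));
	return 0;
}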
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 47536197ffdd..e287863ac053 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
+extern ssize_t arch_cpu_probe(const char *, size_t);
+extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
@@ -115,6 +117,19 @@ extern void put_online_cpus(void);
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+extern void cpu_hotplug_driver_lock(void);
+extern void cpu_hotplug_driver_unlock(void);
+#else
+static inline void cpu_hotplug_driver_lock(void)
+{
+}
+
+static inline void cpu_hotplug_driver_unlock(void)
+{
+}
+#endif
+
#else /* CONFIG_HOTPLUG_CPU */
#define get_online_cpus() do { } while (0)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 789cf5f920ce..d77b54733c5b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
+#define num_active_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000000..d5a1d4810b80
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
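For illustration, a sketch of how a driver might use the MFGPT interface declared in the new header; the comparator value and error codes are hypothetical:

static struct cs5535_mfgpt_timer *example_timer;

static int example_mfgpt_start(void)
{
	int irq = 0;	/* in/out: filled in/validated by cs5535_mfgpt_set_irq() */

	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	if (cs5535_mfgpt_setup_irq(example_timer, MFGPT_CMP2, &irq)) {
		cs5535_mfgpt_free_timer(example_timer);
		return -EIO;
	}

	/* program comparator 2, zero the counter, then start it */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0xFFFF);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return irq;
}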
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297a..a3d6ee0044f9 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d05..5032b9a31ae7 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
void *p;
if (size < 0)
- error("Malloc error");
+ return NULL;
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
@@ -35,7 +35,7 @@ static void *malloc(int size)
malloc_ptr += size;
if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
- error("Out of memory");
+ return NULL;
malloc_count++;
return p;
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 000000000000..987229752519
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *pos,
+ void(*error)(char *x));
+#endif
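For illustration, a call sketch for the new unlzo() declaration, assuming the simple whole-buffer mode (fill and flush both NULL); the buffer names and error callback are hypothetical:

static void example_unlzo_error(char *msg)
{
	pr_err("unlzo: %s\n", msg);
}

static int example_decompress(unsigned char *in, int in_len,
			      unsigned char *out)
{
	int pos = 0;

	return unlzo(in, in_len, NULL, NULL, out, &pos, example_unlzo_error);
}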
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce8..d4c9c0b88adc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
*/
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
/*
* A wrapper around vmalloc.
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a73d9bcbc9c..a62799f2ab00 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -166,9 +166,9 @@ struct driver_attribute driver_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
extern int __must_check driver_create_file(struct device_driver *driver,
- struct driver_attribute *attr);
+ const struct driver_attribute *attr);
extern void driver_remove_file(struct device_driver *driver,
- struct driver_attribute *attr);
+ const struct driver_attribute *attr);
extern int __must_check driver_add_kobj(struct device_driver *drv,
struct kobject *kobj,
@@ -319,13 +319,13 @@ struct device_attribute {
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
extern int __must_check device_create_file(struct device *device,
- struct device_attribute *entry);
+ const struct device_attribute *entry);
extern void device_remove_file(struct device *dev,
- struct device_attribute *attr);
+ const struct device_attribute *attr);
extern int __must_check device_create_bin_file(struct device *dev,
- struct bin_attribute *attr);
+ const struct bin_attribute *attr);
extern void device_remove_bin_file(struct device *dev,
- struct bin_attribute *attr);
+ const struct bin_attribute *attr);
extern int device_schedule_callback_owner(struct device *dev,
void (*func)(struct device *dev), struct module *owner);
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6f..7084503c3405 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
struct dm_dirty_log {
struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
void *context;
};
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
* type->constructor/destructor() directly.
*/
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned argc, char **argv);
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31a..aa95508d2f95 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 15
+#define DM_VERSION_MINOR 16
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
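For illustration, a user-space sketch of the new flag: request table status for the inactive slot and only trust the result when DM_INACTIVE_PRESENT_FLAG comes back set. The device name and buffer handling are simplified assumptions; libdevmapper normally does this for you.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

static int query_inactive_table(int ctl_fd, struct dm_ioctl *dmi, size_t len)
{
	memset(dmi, 0, len);
	dmi->version[0] = DM_VERSION_MAJOR;
	dmi->version[1] = DM_VERSION_MINOR;
	dmi->version[2] = DM_VERSION_PATCHLEVEL;
	dmi->data_size  = len;
	dmi->data_start = sizeof(*dmi);
	dmi->flags      = DM_QUERY_INACTIVE_TABLE_FLAG;
	strncpy(dmi->name, "example-dev", sizeof(dmi->name) - 1);

	if (ioctl(ctl_fd, DM_TABLE_STATUS, dmi) < 0)
		return -1;

	/* the returned data only describes a real table if this is set */
	return !!(dmi->flags & DM_INACTIVE_PRESENT_FLAG);
}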
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a41373..9e2a7a401df5 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
/*
* Region recovery control.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed60..78784982b33e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
* acknowledges receipt, i.e. has has a chance to establish any dependency
* chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index e84f4733cb55..78962272338a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 91
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index db5721ad50d1..a4d82f895994 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
NL_PACKET(resize, 7,
NL_INT64( 29, T_MAY_IGNORE, resize_size)
+ NL_BIT( 68, T_MAY_IGNORE, resize_force)
)
NL_PACKET(syncer_conf, 8,
diff --git a/include/linux/dst.h b/include/linux/dst.h
deleted file mode 100644
index e26fed84b1aa..000000000000
--- a/include/linux/dst.h
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DST_H
-#define __DST_H
-
-#include <linux/types.h>
-#include <linux/connector.h>
-
-#define DST_NAMELEN 32
-#define DST_NAME "dst"
-
-enum {
- /* Remove node with given id from storage */
- DST_DEL_NODE = 0,
- /* Add remote node with given id to the storage */
- DST_ADD_REMOTE,
- /* Add local node with given id to the storage to be exported and used by remote peers */
- DST_ADD_EXPORT,
- /* Crypto initialization command (hash/cipher used to protect the connection) */
- DST_CRYPTO,
- /* Security attributes for given connection (permissions for example) */
- DST_SECURITY,
- /* Register given node in the block layer subsystem */
- DST_START,
- DST_CMD_MAX
-};
-
-struct dst_ctl
-{
- /* Storage name */
- char name[DST_NAMELEN];
- /* Command flags */
- __u32 flags;
- /* Command itself (see above) */
- __u32 cmd;
- /* Maximum number of pages per single request in this device */
- __u32 max_pages;
- /* Stale/error transaction scanning timeout in milliseconds */
- __u32 trans_scan_timeout;
- /* Maximum number of retry sends before completing transaction as broken */
- __u32 trans_max_retries;
- /* Storage size */
- __u64 size;
-};
-
-/* Reply command carries completion status */
-struct dst_ctl_ack
-{
- struct cn_msg msg;
- int error;
- int unused[3];
-};
-
-/*
- * Unfortunaltely socket address structure is not exported to userspace
- * and is redefined there.
- */
-#define SADDR_MAX_DATA 128
-
-struct saddr {
- /* address family, AF_xxx */
- unsigned short sa_family;
- /* 14 bytes of protocol address */
- char sa_data[SADDR_MAX_DATA];
- /* Number of bytes used in sa_data */
- unsigned short sa_data_len;
-};
-
-/* Address structure */
-struct dst_network_ctl
-{
- /* Socket type: datagram, stream...*/
- unsigned int type;
- /* Let me guess, is it a Jupiter diameter? */
- unsigned int proto;
- /* Peer's address */
- struct saddr addr;
-};
-
-struct dst_crypto_ctl
-{
- /* Cipher and hash names */
- char cipher_algo[DST_NAMELEN];
- char hash_algo[DST_NAMELEN];
-
- /* Key sizes. Can be zero for digest for example */
- unsigned int cipher_keysize, hash_keysize;
- /* Alignment. Calculated by the DST itself. */
- unsigned int crypto_attached_size;
- /* Number of threads to perform crypto operations */
- int thread_num;
-};
-
-/* Export security attributes have this bits checked in when client connects */
-#define DST_PERM_READ (1<<0)
-#define DST_PERM_WRITE (1<<1)
-
-/*
- * Right now it is simple model, where each remote address
- * is assigned to set of permissions it is allowed to perform.
- * In real world block device does not know anything but
- * reading and writing, so it should be more than enough.
- */
-struct dst_secure_user
-{
- unsigned int permissions;
- struct saddr addr;
-};
-
-/*
- * Export control command: device to export and network address to accept
- * clients to work with given device
- */
-struct dst_export_ctl
-{
- char device[DST_NAMELEN];
- struct dst_network_ctl ctl;
-};
-
-enum {
- DST_CFG = 1, /* Request remote configuration */
- DST_IO, /* IO command */
- DST_IO_RESPONSE, /* IO response */
- DST_PING, /* Keepalive message */
- DST_NCMD_MAX,
-};
-
-struct dst_cmd
-{
- /* Network command itself, see above */
- __u32 cmd;
- /*
- * Size of the attached data
- * (in most cases, for READ command it means how many bytes were requested)
- */
- __u32 size;
- /* Crypto size: number of attached bytes with digest/hmac */
- __u32 csize;
- /* Here we can carry secret data */
- __u32 reserved;
- /* Read/write bits, see how they are encoded in bio structure */
- __u64 rw;
- /* BIO flags */
- __u64 flags;
- /* Unique command id (like transaction ID) */
- __u64 id;
- /* Sector to start IO from */
- __u64 sector;
- /* Hash data is placed after this header */
- __u8 hash[0];
-};
-
-/*
- * Convert command to/from network byte order.
- * We do not use hton*() functions, since there is
- * no 64-bit implementation.
- */
-static inline void dst_convert_cmd(struct dst_cmd *c)
-{
- c->cmd = __cpu_to_be32(c->cmd);
- c->csize = __cpu_to_be32(c->csize);
- c->size = __cpu_to_be32(c->size);
- c->sector = __cpu_to_be64(c->sector);
- c->id = __cpu_to_be64(c->id);
- c->flags = __cpu_to_be64(c->flags);
- c->rw = __cpu_to_be64(c->rw);
-}
-
-/* Transaction id */
-typedef __u64 dst_gen_t;
-
-#ifdef __KERNEL__
-
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/device.h>
-#include <linux/mempool.h>
-#include <linux/net.h>
-#include <linux/poll.h>
-#include <linux/rbtree.h>
-
-#ifdef CONFIG_DST_DEBUG
-#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
-#else
-static inline void __attribute__ ((format (printf, 1, 2)))
- dprintk(const char *fmt, ...) {}
-#endif
-
-struct dst_node;
-
-struct dst_trans
-{
- /* DST node we are working with */
- struct dst_node *n;
-
- /* Entry inside transaction tree */
- struct rb_node trans_entry;
-
- /* Merlin kills this transaction when this memory cell equals zero */
- atomic_t refcnt;
-
- /* How this transaction should be processed by crypto engine */
- short enc;
- /* How many times this transaction was resent */
- short retries;
- /* Completion status */
- int error;
-
- /* When did we send it to the remote peer */
- long send_time;
-
- /* My name is...
- * Well, computers does not speak, they have unique id instead */
- dst_gen_t gen;
-
- /* Block IO we are working with */
- struct bio *bio;
-
- /* Network command for above block IO request */
- struct dst_cmd cmd;
-};
-
-struct dst_crypto_engine
-{
- /* What should we do with all block requests */
- struct crypto_hash *hash;
- struct crypto_ablkcipher *cipher;
-
- /* Pool of pages used to encrypt data into before sending */
- int page_num;
- struct page **pages;
-
- /* What to do with current request */
- int enc;
- /* Who we are and where do we go */
- struct scatterlist *src, *dst;
-
- /* Maximum timeout waiting for encryption to be completed */
- long timeout;
- /* IV is a 64-bit sequential counter */
- u64 iv;
-
- /* Secret data */
- void *private;
-
- /* Cached temporary data lives here */
- int size;
- void *data;
-};
-
-struct dst_state
-{
- /* The main state protection */
- struct mutex state_lock;
-
- /* Polling machinery for sockets */
- wait_queue_t wait;
- wait_queue_head_t *whead;
- /* Most of events are being waited here */
- wait_queue_head_t thread_wait;
-
- /* Who owns this? */
- struct dst_node *node;
-
- /* Network address for this state */
- struct dst_network_ctl ctl;
-
- /* Permissions to work with: read-only or rw connection */
- u32 permissions;
-
- /* Called when we need to clean private data */
- void (* cleanup)(struct dst_state *st);
-
- /* Used by the server: BIO completion queues BIOs here */
- struct list_head request_list;
- spinlock_t request_lock;
-
- /* Guess what? No, it is not number of planets */
- atomic_t refcnt;
-
- /* This flags is set when connection should be dropped */
- int need_exit;
-
- /*
- * Socket to work with. Second pointer is used for
- * lockless check if socket was changed before performing
- * next action (like working with cached polling result)
- */
- struct socket *socket, *read_socket;
-
- /* Cached preallocated data */
- void *data;
- unsigned int size;
-
- /* Currently processed command */
- struct dst_cmd cmd;
-};
-
-struct dst_info
-{
- /* Device size */
- u64 size;
-
- /* Local device name for export devices */
- char local[DST_NAMELEN];
-
- /* Network setup */
- struct dst_network_ctl net;
-
- /* Sysfs bits use this */
- struct device device;
-};
-
-struct dst_node
-{
- struct list_head node_entry;
-
- /* Hi, my name is stored here */
- char name[DST_NAMELEN];
- /* My cache name is stored here */
- char cache_name[DST_NAMELEN];
-
- /* Block device attached to given node.
- * Only valid for exporting nodes */
- struct block_device *bdev;
- /* Network state machine for given peer */
- struct dst_state *state;
-
- /* Block IO machinery */
- struct request_queue *queue;
- struct gendisk *disk;
-
- /* Number of threads in processing pool */
- int thread_num;
- /* Maximum number of pages in single IO */
- int max_pages;
-
- /* I'm that big in bytes */
- loff_t size;
-
- /* Exported to userspace node information */
- struct dst_info *info;
-
- /*
- * Security attribute list.
- * Used only by exporting node currently.
- */
- struct list_head security_list;
- struct mutex security_lock;
-
- /*
- * When this unerflows below zero, university collapses.
- * But this will not happen, since node will be freed,
- * when reference counter reaches zero.
- */
- atomic_t refcnt;
-
- /* How precisely should I be started? */
- int (*start)(struct dst_node *);
-
- /* Crypto capabilities */
- struct dst_crypto_ctl crypto;
- u8 *hash_key;
- u8 *cipher_key;
-
- /* Pool of processing thread */
- struct thread_pool *pool;
-
- /* Transaction IDs live here */
- atomic_long_t gen;
-
- /*
- * How frequently and how many times transaction
- * tree should be scanned to drop stale objects.
- */
- long trans_scan_timeout;
- int trans_max_retries;
-
- /* Small gnomes live here */
- struct rb_root trans_root;
- struct mutex trans_lock;
-
- /*
- * Transaction cache/memory pool.
- * It is big enough to contain not only transaction
- * itself, but additional crypto data (digest/hmac).
- */
- struct kmem_cache *trans_cache;
- mempool_t *trans_pool;
-
- /* This entity scans transaction tree */
- struct delayed_work trans_work;
-
- wait_queue_head_t wait;
-};
-
-/* Kernel representation of the security attribute */
-struct dst_secure
-{
- struct list_head sec_entry;
- struct dst_secure_user sec;
-};
-
-int dst_process_bio(struct dst_node *n, struct bio *bio);
-
-int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
-int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
-
-static inline struct dst_state *dst_state_get(struct dst_state *st)
-{
- BUG_ON(atomic_read(&st->refcnt) == 0);
- atomic_inc(&st->refcnt);
- return st;
-}
-
-void dst_state_put(struct dst_state *st);
-
-struct dst_state *dst_state_alloc(struct dst_node *n);
-int dst_state_socket_create(struct dst_state *st);
-void dst_state_socket_release(struct dst_state *st);
-
-void dst_state_exit_connected(struct dst_state *st);
-
-int dst_state_schedule_receiver(struct dst_state *st);
-
-void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
-
-static inline void dst_state_lock(struct dst_state *st)
-{
- mutex_lock(&st->state_lock);
-}
-
-static inline void dst_state_unlock(struct dst_state *st)
-{
- mutex_unlock(&st->state_lock);
-}
-
-void dst_poll_exit(struct dst_state *st);
-int dst_poll_init(struct dst_state *st);
-
-static inline unsigned int dst_state_poll(struct dst_state *st)
-{
- unsigned int revents = POLLHUP | POLLERR;
-
- dst_state_lock(st);
- if (st->socket)
- revents = st->socket->ops->poll(NULL, st->socket, NULL);
- dst_state_unlock(st);
-
- return revents;
-}
-
-static inline int dst_thread_setup(void *private, void *data)
-{
- return 0;
-}
-
-void dst_node_put(struct dst_node *n);
-
-static inline struct dst_node *dst_node_get(struct dst_node *n)
-{
- atomic_inc(&n->refcnt);
- return n;
-}
-
-int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
-int dst_recv_cdata(struct dst_state *st, void *cdata);
-int dst_data_send_header(struct socket *sock,
- void *data, unsigned int size, int more);
-
-int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
-
-int dst_process_io(struct dst_state *st);
-int dst_export_crypto(struct dst_node *n, struct bio *bio);
-int dst_export_send_bio(struct bio *bio);
-int dst_start_export(struct dst_node *n);
-
-int __init dst_export_init(void);
-void dst_export_exit(void);
-
-/* Private structure for export block IO requests */
-struct dst_export_priv
-{
- struct list_head request_entry;
- struct dst_state *state;
- struct bio *bio;
- struct dst_cmd cmd;
-};
-
-static inline void dst_trans_get(struct dst_trans *t)
-{
- atomic_inc(&t->refcnt);
-}
-
-struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
-int dst_trans_remove(struct dst_trans *t);
-int dst_trans_remove_nolock(struct dst_trans *t);
-void dst_trans_put(struct dst_trans *t);
-
-/*
- * Convert bio into network command.
- */
-static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
- u32 command, u64 id)
-{
- cmd->cmd = command;
- cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
- cmd->rw = bio->bi_rw;
- cmd->size = bio->bi_size;
- cmd->csize = 0;
- cmd->id = id;
- cmd->sector = bio->bi_sector;
-};
-
-int dst_trans_send(struct dst_trans *t);
-int dst_trans_crypto(struct dst_trans *t);
-
-int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
-void dst_node_crypto_exit(struct dst_node *n);
-
-static inline int dst_need_crypto(struct dst_node *n)
-{
- struct dst_crypto_ctl *c = &n->crypto;
- /*
- * Logical OR is appropriate here, but boolean one produces
- * more optimal code, so it is used instead.
- */
- return (c->hash_algo[0] | c->cipher_algo[0]);
-}
-
-int dst_node_trans_init(struct dst_node *n, unsigned int size);
-void dst_node_trans_exit(struct dst_node *n);
-
-/*
- * Pool of threads.
- * Ready list contains threads currently free to be used,
- * active one contains threads with some work scheduled for them.
- * Caller can wait in given queue when thread is ready.
- */
-struct thread_pool
-{
- int thread_num;
- struct mutex thread_lock;
- struct list_head ready_list, active_list;
-
- wait_queue_head_t wait;
-};
-
-void thread_pool_del_worker(struct thread_pool *p);
-void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
-int thread_pool_add_worker(struct thread_pool *p,
- char *name,
- unsigned int id,
- void *(* init)(void *data),
- void (* cleanup)(void *data),
- void *data);
-
-void thread_pool_destroy(struct thread_pool *p);
-struct thread_pool *thread_pool_create(int num, char *name,
- void *(* init)(void *data),
- void (* cleanup)(void *data),
- void *data);
-
-int thread_pool_schedule(struct thread_pool *p,
- int (* setup)(void *stored_private, void *setup_data),
- int (* action)(void *stored_private, void *setup_data),
- void *setup_data, long timeout);
-int thread_pool_schedule_private(struct thread_pool *p,
- int (* setup)(void *private, void *data),
- int (* action)(void *private, void *data),
- void *data, long timeout, void *id);
-
-#endif /* __KERNEL__ */
-#endif /* __DST_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a1569..f8c2e1767500 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
return 0;
}
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+	do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
#endif
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08b..fb737bc19a8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
static inline char *
efi_guid_unparse(efi_guid_t *guid, char *out)
{
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+ sprintf(out, "%pUl", guid->b);
return out;
}
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 90a4ed0ea0e5..0cc4d55151b7 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -361,7 +361,7 @@ typedef struct elf64_shdr {
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
-#define NT_PRXSTATUS 0x300 /* s390 upper register halves */
+#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
/* Note header in a PT_NOTE section */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 90d1c2184112..9a33c5f7e126 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -42,6 +42,8 @@ enum enclosure_status {
ENCLOSURE_STATUS_NOT_INSTALLED,
ENCLOSURE_STATUS_UNKNOWN,
ENCLOSURE_STATUS_UNAVAILABLE,
+ /* last element for counting purposes */
+ ENCLOSURE_STATUS_MAX
};
/* SFF-8485 activity light settings */
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf3..1b12642636c7 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
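For illustration, how the new IS_ERR_OR_NULL() collapses the common "NULL or ERR_PTR()" test; example_lookup() is a hypothetical stand-in for any function with that return convention:

struct dentry *example_lookup(struct dentry *parent);	/* hypothetical */

static int example_use(struct dentry *parent)
{
	struct dentry *child = example_lookup(parent);

	if (IS_ERR_OR_NULL(child))
		return child ? PTR_ERR(child) : -ENOENT;

	/* ... use child ... */
	dput(child);
	return 0;
}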
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 27e772cefb6a..dc12f416a49f 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,7 +97,7 @@ struct fid {
* @get_name: find the name for a given inode in a given directory
* @get_parent: find the parent of a given directory
*
- * See Documentation/filesystems/Exporting for details on how to use
+ * See Documentation/filesystems/nfs/Exporting for details on how to use
* this interface correctly.
*
* encode_fh:
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index f07f34de2f0e..258088ab3c6b 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -72,6 +72,8 @@ struct ext3_sb_info {
struct inode * s_journal_inode;
struct journal_s * s_journal;
struct list_head s_orphan;
+ struct mutex s_orphan_lock;
+ struct mutex s_resize_lock;
unsigned long s_commit_interval;
struct block_device *journal_bdev;
#ifdef CONFIG_JBD_DEBUG
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index cf82d519be40..d7b5ddca99c2 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -44,13 +44,13 @@
#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
EXT3_XATTR_TRANS_BLOCKS - 2 + \
- 2*EXT3_QUOTA_TRANS_BLOCKS(sb))
+ EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
/* Delete operations potentially hit one directory's namespace plus an
* entire inode, plus arbitrary amounts of bitmap/indirection data. Be
* generous. We can grow the delete transaction later if necessary. */
-#define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64)
+#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
/* Define an arbitrary limit for the amount of data we will anticipate
* writing to any given transaction. For unbounded transactions such as
@@ -86,6 +86,9 @@
#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
#endif
+#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
int
ext3_mark_iloc_dirty(handle_t *handle,
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 934e22d65801..d830747f5c0b 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -62,5 +62,7 @@ struct fiemap {
#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
* support extents. Result
* merged for efficiency. */
+#define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other
+ * files. */
#endif /* _LINUX_FIEMAP_H */
diff --git a/include/linux/file.h b/include/linux/file.h
index 335a0a5c316e..5555508fd517 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -18,11 +18,9 @@ extern void drop_file_write_access(struct file *file);
struct file_operations;
struct vfsmount;
struct dentry;
-extern int init_file(struct file *, struct vfsmount *mnt,
- struct dentry *dentry, fmode_t mode,
- const struct file_operations *fop);
-extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry,
- fmode_t mode, const struct file_operations *fop);
+struct path;
+extern struct file *alloc_file(struct path *, fmode_t mode,
+ const struct file_operations *fop);
static inline void fput_light(struct file *file, int fput_needed)
{
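For illustration, the new path-based alloc_file() calling convention, sketched roughly along the lines of in-tree callers; the function name, mode, and failure handling are simplified assumptions:

static struct file *example_open_internal(struct vfsmount *mnt,
					  struct dentry *dentry,
					  const struct file_operations *fops)
{
	struct path path;
	struct file *file;

	path.mnt = mntget(mnt);		/* alloc_file() takes over these refs */
	path.dentry = dget(dentry);

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, fops);
	if (!file)			/* refs were not consumed on failure */
		path_put(&path);
	return file;
}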
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index c6b3ca3af6df..1f716d9f714b 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -340,6 +340,9 @@ struct fw_cdev_send_response {
* The @closure field is passed back to userspace in the response event.
* The @handle field is an out parameter, returning a handle to the allocated
* range to be used for later deallocation of the range.
+ *
+ * The address range is allocated on all local nodes. The address allocation
+ * is exclusive except for the FCP command and response registers.
*/
struct fw_cdev_allocate {
__u64 offset;
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9416a461b696..a0e67150a729 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -248,8 +248,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
/*
- * Important note: The callback must guarantee that either fw_send_response()
- * or kfree() is called on the @request.
+ * Important note: Except for the FCP registers, the callback must guarantee
+ * that either fw_send_response() or kfree() is called on the @request.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a057f48eb156..b1bcb275b596 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -152,6 +152,7 @@ struct inodes_stat_t {
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
+#define WRITE_META (WRITE | (1 << BIO_RW_META))
#define SWRITE_SYNC_PLUG \
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
@@ -1094,10 +1095,6 @@ struct file_lock {
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/* fs/sync.c */
-extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
- loff_t endbyte, unsigned int flags);
-
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, struct flock __user *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1590,7 +1587,7 @@ struct super_operations {
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
* various stages of removing an inode.
*
- * Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
* fdatasync(). i_atime is the usual cause.
@@ -1599,8 +1596,14 @@ struct super_operations {
* don't have to write inode on fdatasync() when only
* mtime has changed in it.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both
- * are cleared by unlock_new_inode(), called from iget().
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
* I_WILL_FREE Must be set when calling write_inode_now() if i_count
* is zero. I_FREEING must be set when I_WILL_FREE is
* cleared.
@@ -1614,35 +1617,23 @@ struct super_operations {
* prohibited for many purposes. iget() must wait for
* the inode to be completely released, then create it
* anew. Other functions will just ignore such inodes,
- * if appropriate. I_LOCK is used for waiting.
+ * if appropriate. I_NEW is used for waiting.
*
- * I_LOCK Serves as both a mutex and completion notification.
- * New inodes set I_LOCK. If two processes both create
- * the same inode, one of them will release its inode and
- * wait for I_LOCK to be released before returning.
- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- * also cause waiting on I_LOCK, without I_LOCK actually
- * being set. find_inode() uses this to prevent returning
- * nearly-dead inodes.
- * I_SYNC Similar to I_LOCK, but limited in scope to writeback
- * of inode dirty data. Having a separate lock for this
- * purpose reduces latency and prevents some filesystem-
- * specific deadlocks.
+ * I_SYNC Synchronized write of dirty inode data. The bit is
+ * set during data writeback, and cleared with a wakeup
+ * on the bit address once it is done.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
- * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
- * I_CLEAR? If not, why?
*/
#define I_DIRTY_SYNC 1
#define I_DIRTY_DATASYNC 2
#define I_DIRTY_PAGES 4
-#define I_NEW 8
+#define __I_NEW 3
+#define I_NEW (1 << __I_NEW)
#define I_WILL_FREE 16
#define I_FREEING 32
#define I_CLEAR 64
-#define __I_LOCK 7
-#define I_LOCK (1 << __I_LOCK)
-#define __I_SYNC 8
+#define __I_SYNC 7
#define I_SYNC (1 << __I_SYNC)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -2189,7 +2180,6 @@ static inline void insert_inode_hash(struct inode *inode) {
__insert_inode_hash(inode, inode->i_ino);
}
-extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern void file_kill(struct file *f);
#ifdef CONFIG_BLOCK
@@ -2264,9 +2254,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int lock_type);
enum {
- DIO_LOCKING = 1, /* need locking between buffered and direct access */
- DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */
- DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+ /* need locking between buffered and direct access */
+ DIO_LOCKING = 0x01,
+
+ /* filesystem does not support filling holes */
+ DIO_SKIP_HOLES = 0x02,
};
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
@@ -2275,7 +2267,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_LOCKING);
+ nr_segs, get_block, end_io,
+ DIO_LOCKING | DIO_SKIP_HOLES);
}
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
@@ -2284,16 +2277,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_block_t get_block,
- dio_iodone_t end_io)
-{
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+ nr_segs, get_block, end_io, 0);
}
#endif
@@ -2313,6 +2297,7 @@ extern const struct inode_operations page_symlink_inode_operations;
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
@@ -2478,5 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
+#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
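The two helpers added at the bottom of fs.h are dense, so here is a minimal userspace sketch of how they evaluate. The macros are copied locally because the originals live under __KERNEL__, and the MAY_*/FMODE_* names in the comments are the conventional kernel values, noted only as annotations, not taken from this patch:

	/* Standalone sketch (not kernel code): the two macros are copied here
	 * so the open-flag mapping can be printed and checked in userspace. */
	#include <stdio.h>
	#include <fcntl.h>

	#define ACC_MODE(x)	("\004\002\006\006"[(x)&O_ACCMODE])
	#define OPEN_FMODE(flag) (((flag) + 1) & O_ACCMODE)	/* __force fmode_t cast dropped */

	int main(void)
	{
		int flags[] = { O_RDONLY, O_WRONLY, O_RDWR };
		const char *names[] = { "O_RDONLY", "O_WRONLY", "O_RDWR" };

		for (int i = 0; i < 3; i++)
			printf("%-8s -> ACC_MODE %o, OPEN_FMODE %#x\n",
			       names[i], ACC_MODE(flags[i]), OPEN_FMODE(flags[i]));
		/* Prints:
		 * O_RDONLY -> ACC_MODE 4, OPEN_FMODE 0x1   (MAY_READ  / FMODE_READ)
		 * O_WRONLY -> ACC_MODE 2, OPEN_FMODE 0x2   (MAY_WRITE / FMODE_WRITE)
		 * O_RDWR   -> ACC_MODE 6, OPEN_FMODE 0x3   (read|write both ways)
		 */
		return 0;
	}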
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index bb516ceeefc9..da317c7163ab 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -8,10 +8,8 @@
#include <linux/fs.h>
/* externs for fs/stack.c */
-extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src,
- int (*get_nlinks)(struct inode *));
-
-extern void fsstack_copy_inode_size(struct inode *dst, const struct inode *src);
+extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src);
+extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
/* inlines */
static inline void fsstack_copy_attr_atime(struct inode *dest,
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 43fc95d822d5..28e33fea5107 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -74,7 +74,12 @@ struct spi_device;
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
s16 bus_num;
- bool qe_mode;
+ unsigned int flags;
+#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
+#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
+#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
+#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
+#define SPI_QE (1 << 4) /* SPI unit is in QE block */
/* board specific information */
u16 max_chipselect;
void (*cs_control)(struct spi_device *spi, bool on);
@@ -90,6 +95,10 @@ struct mpc8xx_pcmcia_ops {
* lead to a deep sleep (i.e. power removed from the core,
* instead of just the clock).
*/
+#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
int fsl_deep_sleep(void);
+#else
+static inline int fsl_deep_sleep(void) { return 0; }
+#endif
#endif /* _FSL_DEVICE_H_ */
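Converting a board file from the removed qe_mode boolean to the new flags bitmask might look like the sketch below; the platform data instance and its values are hypothetical, only the struct fields and flag names come from the header above:

	#include <linux/fsl_devices.h>

	/* Hypothetical board file fragment: the former "bool qe_mode = true"
	 * becomes one bit in the new flags word. */
	static struct fsl_spi_platform_data mpc8xxx_spi_pdata = {
		.initial_spmode	= 0,
		.bus_num	= 0,
		.flags		= SPI_QE_CPU_MODE,	/* was: .qe_mode = true */
		.max_chipselect	= 4,
	};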
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47bbdf9c38d0..2233c98d80df 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -57,6 +57,7 @@ struct trace_iterator {
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
+ int leftover;
int cpu;
u64 ts;
@@ -130,7 +131,7 @@ struct ftrace_event_call {
void *mod;
void *data;
- atomic_t profile_count;
+ int profile_count;
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
};
@@ -157,7 +158,7 @@ enum {
FILTER_PTR_STRING,
};
-extern int trace_define_common_fields(struct ftrace_event_call *call);
+extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 886f5faa08cb..ca666d18ed67 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -1,36 +1,15 @@
-/*
- * include/linux/generic_acl.h
- *
- * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
- *
- * This file is released under the GPL.
- */
+#ifndef LINUX_GENERIC_ACL_H
+#define LINUX_GENERIC_ACL_H
-#ifndef GENERIC_ACL_H
-#define GENERIC_ACL_H
+#include <linux/xattr.h>
-#include <linux/posix_acl.h>
-#include <linux/posix_acl_xattr.h>
+struct inode;
-/**
- * struct generic_acl_operations - filesystem operations
- *
- * Filesystems must make these operations available to the generic
- * operations.
- */
-struct generic_acl_operations {
- struct posix_acl *(*getacl)(struct inode *, int);
- void (*setacl)(struct inode *, int, struct posix_acl *);
-};
+extern struct xattr_handler generic_acl_access_handler;
+extern struct xattr_handler generic_acl_default_handler;
-size_t generic_acl_list(struct inode *, struct generic_acl_operations *, int,
- char *, size_t);
-int generic_acl_get(struct inode *, struct generic_acl_operations *, int,
- void *, size_t);
-int generic_acl_set(struct inode *, struct generic_acl_operations *, int,
- const void *, size_t);
-int generic_acl_init(struct inode *, struct inode *,
- struct generic_acl_operations *);
-int generic_acl_chmod(struct inode *, struct generic_acl_operations *);
+int generic_acl_init(struct inode *, struct inode *);
+int generic_acl_chmod(struct inode *);
+int generic_check_acl(struct inode *inode, int mask);
-#endif
+#endif /* LINUX_GENERIC_ACL_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c6c0c41af35f..9717081c75ad 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_read(part, field) \
({ \
typeof((part)->dkstats->field) res = 0; \
- int i; \
- for_each_possible_cpu(i) \
- res += per_cpu_ptr((part)->dkstats, i)->field; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
res; \
})
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 059bd189d35d..4e949a5b5b85 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
static inline void gpio_unexport(unsigned gpio)
{
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 211ff4497269..ab2cc20e21a5 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -46,7 +46,7 @@ void kmap_flush_unused(void);
static inline unsigned int nr_free_highpages(void) { return 0; }
-#define totalhigh_pages 0
+#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 9bace4b9f4fe..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -162,18 +162,23 @@ struct hrtimer_clock_base {
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
- * @check_clocks: Indictator, when set evaluate time source and clock
- * event devices whether high resolution mode can be
- * activated.
- * @nr_events: Total number of timer interrupt events
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
+ int hang_detected;
unsigned long nr_events;
+ unsigned long nr_retries;
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
#endif
};
@@ -435,47 +440,4 @@ extern u64 ktime_divns(const ktime_t kt, s64 div);
/* Show pending timers: */
extern void sysrq_timer_list_show(void);
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
- if (likely(!timer_stats_active))
- return;
- timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
- timer->function, timer->start_comm, 0);
-}
-
-extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
- void *addr);
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
- __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
- timer->start_site = NULL;
-}
-#else
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70fa..78b4bc64c006 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a03daed08c59..41235c93e4e9 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -20,19 +20,18 @@ enum {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-/* As it's for in-kernel or ptrace use, we want it to be pinned */
-#define DEFINE_BREAKPOINT_ATTR(name) \
-struct perf_event_attr name = { \
- .type = PERF_TYPE_BREAKPOINT, \
- .size = sizeof(name), \
- .pinned = 1, \
-};
-
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
{
+ memset(attr, 0, sizeof(*attr));
+
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(*attr);
+ /*
+ * As it's for in-kernel or ptrace use, we want it to be pinned
+ * and to call its callback every hits.
+ */
attr->pinned = 1;
+ attr->sample_period = 1;
}
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
@@ -52,27 +51,24 @@ static inline int hw_breakpoint_len(struct perf_event *bp)
extern struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk);
/* FIXME: only change from the attr, and don't unregister */
-extern struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk);
+extern int
+modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
/*
* Kernel breakpoints are not associated with any particular thread.
*/
extern struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu);
extern struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered);
+ perf_overflow_handler_t triggered);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern int __register_perf_hw_breakpoint(struct perf_event *bp);
@@ -93,20 +89,18 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
static inline struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
+static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk) { return NULL; }
+ struct perf_event_attr *attr) { return -ENOSYS; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu) { return NULL; }
static inline struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered) { return NULL; }
+ perf_overflow_handler_t triggered) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline int
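For orientation, a sketch of how the reworked helpers might be used to place a write watchpoint on a task follows. The attr.bp_addr/bp_len/bp_type fields, the HW_BREAKPOINT_* constants, the four-argument overflow handler signature and the ERR_PTR return convention come from the perf headers of this kernel generation and should be treated as assumptions here, not as part of this patch:

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>
	#include <linux/kernel.h>
	#include <linux/err.h>

	/* Assumed perf_overflow_handler_t signature for this era. */
	static void sample_wp_handler(struct perf_event *bp, int nmi,
				      struct perf_sample_data *data,
				      struct pt_regs *regs)
	{
		pr_info("watched location was written to\n");
	}

	static struct perf_event *sample_wp;

	static int sample_watch(void *addr, struct task_struct *tsk)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);	/* zeroes attr, sets pinned + period 1 */
		attr.bp_addr = (unsigned long)addr;
		attr.bp_len  = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		sample_wp = register_user_hw_breakpoint(&attr, sample_wp_handler, tsk);
		return IS_ERR(sample_wp) ? PTR_ERR(sample_wp) : 0;
	}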
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b266..02fc617782ef 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
- * @address_data: The I2C addresses to probe (for detect)
+ * @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
- const struct i2c_client_address_data *address_data;
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
-/* i2c_client_address_data is the struct for holding default client
- * addresses for a driver and for the parameters supplied on the
- * command line
- */
-struct i2c_client_address_data {
- const unsigned short *normal_i2c;
-};
-
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -576,82 +568,4 @@ union i2c_smbus_data {
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
#define I2C_SMBUS_I2C_BLOCK_DATA 8
-
-#ifdef __KERNEL__
-
-/* These defines are used for probing i2c client addresses */
-/* The length of the option lists */
-#define I2C_CLIENT_MAX_OPTS 48
-
-/* Default fill of many variables */
-#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
-
-/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
- module header */
-
-#define I2C_CLIENT_MODULE_PARM(var,desc) \
- static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
- static unsigned int var##_num; \
- module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var, desc)
-
-#define I2C_CLIENT_INSMOD_COMMON \
-static const struct i2c_client_address_data addr_data = { \
- .normal_i2c = normal_i2c, \
-}
-
-/* These are the ones you want to use in your own drivers. Pick the one
- which matches the number of devices the driver differenciates between. */
-#define I2C_CLIENT_INSMOD \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_1(chip1) \
-enum chips { any_chip, chip1 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
-enum chips { any_chip, chip1, chip2 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
-enum chips { any_chip, chip1, chip2, chip3 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
-enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7, chip8 }; \
-I2C_CLIENT_INSMOD_COMMON
-#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */
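A sketch of what a detection-capable driver might look like after conversion to the new fields; all "foo" names are hypothetical and probe/remove/id_table are omitted:

	#include <linux/i2c.h>
	#include <linux/module.h>
	#include <linux/string.h>

	/* The probe list replaces the old addr_data/I2C_CLIENT_INSMOD machinery;
	 * it is a bare array terminated by I2C_CLIENT_END. */
	static const unsigned short foo_address_list[] = { 0x48, 0x49, I2C_CLIENT_END };

	/* New detect() signature: the "kind" argument is gone. */
	static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
	{
		/* identify the chip here; on success report its name */
		strlcpy(info->type, "foo", I2C_NAME_SIZE);
		return 0;
	}

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.class		= I2C_CLASS_HWMON,
		.detect		= foo_detect,
		.address_list	= foo_address_list,	/* was: .address_data = &addr_data */
		/* .probe / .remove / .id_table omitted from this sketch */
	};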
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index fc5db826b48e..02c9af374741 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
unsigned short unlock_key2; /* Unlock Key 2 */
};
+struct adp5588_gpio_platform_data {
+ unsigned gpio_start; /* GPIO Chip base # */
+ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
#endif
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b8..08aa92278d71 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
#define TPS_VDCDC1 0x0c
# define TPS_ENABLE_LP (1 << 3)
#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7<<4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
#define TPS_VREGS1 0x0e
# define TPS_LDO2_ENABLE (1 << 7)
# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
*/
extern int tps65013_set_low_pwr(unsigned mode);
+/* tps65010_config_vdcdc2
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
struct i2c_client;
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbde..bf1c5be1f5b6 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __TWL4030_H_
-#define __TWL4030_H_
+#ifndef __TWL_H_
+#define __TWL_H_
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
#define TWL4030_MODULE_PWMA 0x0E
#define TWL4030_MODULE_PWMB 0x0F
+#define TWL5031_MODULE_ACCESSORY 0x10
+#define TWL5031_MODULE_INTERRUPTS 0x11
+
/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x10
-#define TWL4030_MODULE_INT 0x11
-#define TWL4030_MODULE_PM_MASTER 0x12
-#define TWL4030_MODULE_PM_RECEIVER 0x13
-#define TWL4030_MODULE_RTC 0x14
-#define TWL4030_MODULE_SECURED_REG 0x15
+#define TWL4030_MODULE_BACKUP 0x12
+#define TWL4030_MODULE_INT 0x13
+#define TWL4030_MODULE_PM_MASTER 0x14
+#define TWL4030_MODULE_PM_RECEIVER 0x15
+#define TWL4030_MODULE_RTC 0x16
+#define TWL4030_MODULE_SECURED_REG 0x17
+
+#define TWL_MODULE_USB TWL4030_MODULE_USB
+#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
+#define TWL_MODULE_PIH TWL4030_MODULE_PIH
+#define TWL_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
+#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define TWL_MODULE_RTC TWL4030_MODULE_RTC
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
* Read and write single 8-bit registers
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
/*
* Read and write several 8-bit registers at once.
*
- * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
* for the value, and populate your data starting at offset 1.
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
/*----------------------------------------------------------------------*/
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
/* Power bus message definitions */
/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define RES_TYPE_ALL 0x7
+/* Resource states */
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
/*----------------------------------------------------------------------*/
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
u8 devgroup; /* Processor group that Power resource belongs to */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
};
struct twl4030_power_data {
struct twl4030_script **scripts;
unsigned num;
struct twl4030_resconfig *resource_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+ struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
- /* LDO regulators */
+ /* Common LDO regulators for TWL4030/TWL6030 */
struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ /* TWL4030 LDO regulators */
struct regulator_init_data *vpll1;
struct regulator_init_data *vpll2;
struct regulator_init_data *vmmc1;
struct regulator_init_data *vmmc2;
struct regulator_init_data *vsim;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
struct regulator_init_data *vaux4;
-
- /* REVISIT more to come ... _nothing_ should be hard-wired */
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
};
/*----------------------------------------------------------------------*/
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
* VIO is generally fixed.
*/
+/* TWL4030 SMPS/LDO's */
/* EXTERNAL dc-to-dc buck converters */
#define TWL4030_REG_VDD1 0
#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
#define TWL4030_REG_VUSB1V8 18
#define TWL4030_REG_VUSB3V1 19
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck converter controllable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck converters */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+
+#endif /* End of __TWL_H_ */
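The renamed accessors and the twl_class_is_*() helpers generated by TWL_CLASS_IS() can be combined as in the following sketch; the 0x00 register offset is a placeholder and not taken from the TWL register map:

	#include <linux/i2c/twl.h>
	#include <linux/kernel.h>

	/* Sketch: read one PM receiver register and branch on the chip family. */
	static int twl_example_probe_family(void)
	{
		u8 val;
		int ret;

		ret = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &val, 0x00);
		if (ret)
			return ret;

		if (twl_class_is_4030())
			pr_info("TWL4030-class PMIC, reg = %#x\n", val);
		else if (twl_class_is_6030())
			pr_info("TWL6030-class PMIC, reg = %#x\n", val);

		return 0;
	}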
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 60c3360ef6ad..9bf6870ee5f4 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -39,6 +39,10 @@ void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
bool i8042_check_port_owner(const struct serio *);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
#else
@@ -52,7 +56,7 @@ void i8042_unlock_chip(void)
int i8042_command(unsigned char *param, int command)
{
- return -ENOSYS;
+ return -ENODEV;
}
bool i8042_check_port_owner(const struct serio *serio)
@@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio)
return false;
}
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
#endif
#endif
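A sketch of how the new filter hooks might be wired up from a platform driver follows. The 0x96 byte is a placeholder, and the convention that returning true means the byte is consumed rather than forwarded to the serio layer is an assumption about the i8042 core, not something stated in this header:

	#include <linux/i8042.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/serio.h>

	static bool example_i8042_filter(unsigned char data, unsigned char str,
					 struct serio *serio)
	{
		if (data == 0x96)	/* placeholder hotkey scancode */
			return true;	/* assumed: true = filtered, not forwarded */
		return false;
	}

	static int __init example_init(void)
	{
		return i8042_install_filter(example_i8042_filter);
	}

	static void __exit example_exit(void)
	{
		i8042_remove_filter(example_i8042_filter);
	}

	module_init(example_init);
	module_exit(example_exit);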
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e3f2a4c25f6..99dc6d5cf7e5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -13,18 +13,14 @@
#include <linux/fs.h>
struct linux_binprm;
-#define IMA_COUNT_UPDATE 1
-#define IMA_COUNT_LEAVE 0
-
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask, int update_counts);
+extern int ima_path_check(struct path *path, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
-extern void ima_counts_put(struct path *path, int mask);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -42,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
return;
}
-static inline int ima_path_check(struct path *path, int mask, int update_counts)
+static inline int ima_path_check(struct path *path, int mask)
{
return 0;
}
@@ -62,9 +58,5 @@ static inline void ima_counts_get(struct file *file)
return;
}
-static inline void ima_counts_put(struct path *path, int mask)
-{
- return;
-}
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index ff8bde520d03..ab1d31f9352b 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -149,6 +149,8 @@ void prepare_namespace(void);
extern void (*late_time_init)(void);
+extern int initcall_debug;
+
#endif
#ifndef MODULE
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c9..abec69b63d7e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -165,7 +165,7 @@ extern struct cred init_cred;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4f0a72a9740c..9310c699a37d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -332,6 +332,7 @@ struct intel_iommu {
#ifdef CONFIG_INTR_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
+ int node;
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index a63235996309..78ef023227d4 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -4,32 +4,6 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
- spinlock_t lock;
-
- void (*dtor)(struct as_io_context *aic); /* destructor */
- void (*exit)(struct as_io_context *aic); /* called on task exit */
-
- unsigned long state;
- atomic_t nr_queued; /* queued reads & sync writes */
- atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
- /* IO History tracking */
- /* Thinktime */
- unsigned long last_end_request;
- unsigned long ttime_total;
- unsigned long ttime_samples;
- unsigned long ttime_mean;
- /* Layout pattern */
- unsigned int seek_samples;
- sector_t last_request_pos;
- u64 seek_total;
- sector_t seek_mean;
-};
-
struct cfq_queue;
struct cfq_io_context {
void *key;
@@ -78,7 +52,6 @@ struct io_context {
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
- struct as_io_context *aic;
struct radix_tree_root radix_root;
struct hlist_head cic_list;
void *ioc_data;
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 3b068e5b5671..64d1b638745d 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long boundary_size);
-extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
unsigned long boundary_size,
unsigned long align_mask);
-extern void iommu_area_free(unsigned long *map, unsigned long start,
- unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83aa81297ea3..7129504e053d 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new,
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(struct resource *res)
+static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
-static inline unsigned long resource_type(struct resource *res)
+static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e408722a84c7..07baa38bce37 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
/* default values */
#define DFLT_QUEUESMAX 256 /* max number of message queues */
#define DFLT_MSGMAX 10 /* max number of messages in each queue */
-#define HARD_MSGMAX (131072/sizeof(void *))
+#define HARD_MSGMAX (32768*sizeof(void *)/4)
#define DFLT_MSGSIZEMAX 8192 /* max message size */
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
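For scale (annotation only, not part of the patch): with 8-byte pointers the old bound was 131072 / 8 = 16384 queued messages, while the new expression gives 32768 * 8 / 4 = 65536; with 4-byte pointers both formulas evaluate to 32768.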
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f1011f7f3d41..638ce4554c76 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -653,6 +653,7 @@ struct transaction_s
* waiting for it to finish.
*/
unsigned int t_synchronous_commit:1;
+ unsigned int t_flushed_data_blocks:1;
/*
* For use by the filesystem to store fs-specific data
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2b..d8e9b3d1c23c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
__builtin_extract_return_addr((void *)addr));
}
-/*
- * Pretty-print a function pointer. This function is deprecated.
- * Please use the "%pF" vsprintf format instead.
- */
-static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- addr = *(void **)addr;
-#endif
- print_symbol(fmt, (unsigned long)addr);
-}
-
static inline void print_ip_sym(unsigned long ip)
{
printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
new file mode 100644
index 000000000000..bd92a89f4b0a
--- /dev/null
+++ b/include/linux/kernel-page-flags.h
@@ -0,0 +1,46 @@
+#ifndef LINUX_KERNEL_PAGE_FLAGS_H
+#define LINUX_KERNEL_PAGE_FLAGS_H
+
+/*
+ * Stable page flag bits exported to user space
+ */
+
+#define KPF_LOCKED 0
+#define KPF_ERROR 1
+#define KPF_REFERENCED 2
+#define KPF_UPTODATE 3
+#define KPF_DIRTY 4
+#define KPF_LRU 5
+#define KPF_ACTIVE 6
+#define KPF_SLAB 7
+#define KPF_WRITEBACK 8
+#define KPF_RECLAIM 9
+#define KPF_BUDDY 10
+
+/* 11-20: new additions in 2.6.31 */
+#define KPF_MMAP 11
+#define KPF_ANON 12
+#define KPF_SWAPCACHE 13
+#define KPF_SWAPBACKED 14
+#define KPF_COMPOUND_HEAD 15
+#define KPF_COMPOUND_TAIL 16
+#define KPF_HUGE 17
+#define KPF_UNEVICTABLE 18
+#define KPF_HWPOISON 19
+#define KPF_NOPAGE 20
+
+#define KPF_KSM 21
+
+/* kernel hacking assistance flags
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED 32
+#define KPF_MLOCKED 33
+#define KPF_MAPPEDTODISK 34
+#define KPF_PRIVATE 35
+#define KPF_PRIVATE_2 36
+#define KPF_OWNER_PRIVATE 37
+#define KPF_ARCH 38
+#define KPF_UNCACHED 39
+
+#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
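These bit numbers are the ones reported per PFN through the pagemap interface; a userspace sketch that tests two of them is below. The /proc/kpageflags path and its 64-bits-per-PFN format are assumptions about that interface, not defined by this header, and the two bit values are copied locally since the header is kernel-side:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define KPF_LRU   5
	#define KPF_BUDDY 10

	int main(int argc, char **argv)
	{
		unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
		uint64_t flags;
		FILE *f = fopen("/proc/kpageflags", "rb");

		if (!f)
			return 1;
		if (fseek(f, pfn * sizeof(flags), SEEK_SET) ||
		    fread(&flags, sizeof(flags), 1, f) != 1) {
			fclose(f);
			return 1;
		}
		printf("pfn %lu: lru=%d buddy=%d\n", pfn,
		       (int)((flags >> KPF_LRU) & 1), (int)((flags >> KPF_BUDDY) & 1));
		fclose(f);
		return 0;
	}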
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf12..328bca609b9b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static bool __print_once = true; \
+ static bool __print_once; \
\
- if (__print_once) { \
- __print_once = false; \
+ if (!__print_once) { \
+ __print_once = true; \
printk(x); \
} \
})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(fmt, ##__VA_ARGS__); \
- } while (0)
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) ({ \
+ static struct ratelimit_state _rs = { \
+ .interval = DEFAULT_RATELIMIT_INTERVAL, \
+ .burst = DEFAULT_RATELIMIT_BURST, \
+ }; \
+ \
+ if (!__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_ratelimited printk
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
+ ##__VA_ARGS__); 0; })
+#endif
+
+/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
@@ -492,6 +535,8 @@ extern int
__trace_printk(unsigned long ip, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
+extern void trace_dump_stack(void);
+
/*
* The double __builtin_constant_p is because gcc will give us an error
* if we try to allocate the static variable to fmt if it is not a
@@ -525,6 +570,7 @@ trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
+static inline void trace_dump_stack(void) { }
static inline int
trace_printk(const char *fmt, ...)
{
@@ -688,6 +734,10 @@ struct sysinfo {
/* Force a compilation error if condition is constant and true */
#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions
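A small sketch of the new ratelimited printing helpers and the power-of-two build check in use; FOO_RING_SIZE and foo_handle_error are made-up names, and <linux/ratelimit.h> is included on the assumption that the ratelimit_state definition is not pulled in by kernel.h itself:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	#define FOO_RING_SIZE	64	/* hypothetical; must stay a power of two */

	static void foo_handle_error(int err)
	{
		/* compile-time guard for the constant above */
		BUILD_BUG_ON_NOT_POWER_OF_2(FOO_RING_SIZE);

		/* at most DEFAULT_RATELIMIT_BURST messages per
		 * DEFAULT_RATELIMIT_INTERVAL from this call site */
		pr_err_ratelimited("foo: transfer failed with error %d\n", err);
	}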
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index adc34f2c6eff..c356b6914ffd 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size;
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
+int crash_shrink_memory(unsigned long new_size);
+size_t crash_get_memory_size(void);
#else /* !CONFIG_KEXEC */
struct pt_regs;
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ad6bdf5a5970..6f6c5f300af6 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,6 +1,7 @@
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,6 +19,25 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+
+/*
+ * How to port drivers to the new generic FIFO API:
+ *
+ * - Modify the declaration of the "struct kfifo *" object into an
+ * in-place "struct kfifo" object
+ * - Init the in-place object with kfifo_alloc() or kfifo_init()
+ * Note: The address of the in-place "struct kfifo" object must be
+ * passed as the first argument to these functions
+ * - Replace the use of __kfifo_put with kfifo_in and __kfifo_get
+ * with kfifo_out
+ * - Replace the use of kfifo_put with kfifo_in_locked and kfifo_get
+ * with kfifo_out_locked
+ * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
+ * must now be passed to kfifo_in_locked and kfifo_out_locked
+ * as the last parameter.
+ * - All functions formerly named __kfifo_* have been renamed to kfifo_*
+ */
+
#ifndef _LINUX_KFIFO_H
#define _LINUX_KFIFO_H
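To make the porting steps above concrete, here is a minimal sketch of a driver fragment converted to the in-place API; the example_* names and the 128-byte size are placeholders, and DEFINE_KFIFO(name, 128) could replace the kfifo_alloc() path for a statically sized FIFO:

	#include <linux/kfifo.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static struct kfifo fifo;		/* was: static struct kfifo *fifo; */
	static DEFINE_SPINLOCK(fifo_lock);

	static int example_setup(void)
	{
		/* was: fifo = kfifo_alloc(128, GFP_KERNEL, &fifo_lock);
		 *      followed by an IS_ERR() check on the returned pointer */
		return kfifo_alloc(&fifo, 128, GFP_KERNEL);
	}

	static void example_use(const void *buf, unsigned int len)
	{
		unsigned char tmp[16];
		unsigned int copied;

		/* was: __kfifo_put(fifo, buf, len); */
		kfifo_in(&fifo, buf, len);

		/* was: kfifo_get(fifo, tmp, sizeof(tmp)); the lock now goes last */
		copied = kfifo_out_locked(&fifo, tmp, sizeof(tmp), &fifo_lock);
		(void)copied;
	}

	static void example_teardown(void)
	{
		kfifo_free(&fifo);
	}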
@@ -29,26 +49,82 @@ struct kfifo {
unsigned int size; /* the size of the allocated buffer */
unsigned int in; /* data is added at offset (in % size) */
unsigned int out; /* data is extracted from off. (out % size) */
- spinlock_t *lock; /* protects concurrent modifications */
};
-extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
- spinlock_t *lock);
+/*
+ * Macros for declaration and initialization of the kfifo datatype
+ */
+
+/* helper macro */
+#define __kfifo_initializer(s, b) \
+ (struct kfifo) { \
+ .size = s, \
+ .in = 0, \
+ .out = 0, \
+ .buffer = b \
+ }
+
+/**
+ * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer. Must be a power of two.
+ *
+ * Note1: the macro can be used inside a struct or union declaration
+ * Note2: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DECLARE_KFIFO(name, size) \
+union { \
+ struct kfifo name; \
+ unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \
+}
+
+/**
+ * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
+ * @name: name of the declared kfifo datatype
+ */
+#define INIT_KFIFO(name) \
+ name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
+ sizeof(struct kfifo), name##kfifo_buffer)
+
+/**
+ * DEFINE_KFIFO - macro to define and initialize a kfifo
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer. Must be a power of two.
+ *
+ * Note1: the macro can be used for global and local kfifo data type variables
+ * Note2: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DEFINE_KFIFO(name, size) \
+ unsigned char name##kfifo_buffer[size]; \
+ struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
+
+#undef __kfifo_initializer
+
+extern void kfifo_init(struct kfifo *fifo, void *buffer,
+ unsigned int size);
+extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
+ gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
-extern unsigned int __kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len);
-extern unsigned int __kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len);
+extern unsigned int kfifo_in(struct kfifo *fifo,
+ const void *from, unsigned int len);
+extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
+ void *to, unsigned int len);
+extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned offset);
/**
- * __kfifo_reset - removes the entire FIFO contents, no locking version
- * @fifo: the fifo to be emptied.
+ * kfifo_initialized - Check if kfifo is initialized.
+ * @fifo: fifo to check
+ * Return %true if FIFO is initialized, otherwise %false.
+ * Assumes the fifo was 0 before.
*/
-static inline void __kfifo_reset(struct kfifo *fifo)
+static inline bool kfifo_initialized(struct kfifo *fifo)
{
- fifo->in = fifo->out = 0;
+ return fifo->buffer != 0;
}
/**
@@ -57,96 +133,484 @@ static inline void __kfifo_reset(struct kfifo *fifo)
*/
static inline void kfifo_reset(struct kfifo *fifo)
{
- unsigned long flags;
+ fifo->in = fifo->out = 0;
+}
- spin_lock_irqsave(fifo->lock, flags);
+/**
+ * kfifo_reset_out - skip FIFO contents
+ * @fifo: the fifo to be emptied.
+ */
+static inline void kfifo_reset_out(struct kfifo *fifo)
+{
+ smp_mb();
+ fifo->out = fifo->in;
+}
- __kfifo_reset(fifo);
+/**
+ * kfifo_size - returns the size of the fifo in bytes
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_size(struct kfifo *fifo)
+{
+ return fifo->size;
+}
- spin_unlock_irqrestore(fifo->lock, flags);
+/**
+ * kfifo_len - returns the number of used bytes in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int kfifo_len(struct kfifo *fifo)
+{
+ register unsigned int out;
+
+ out = fifo->out;
+ smp_rmb();
+ return fifo->in - out;
}
/**
- * kfifo_put - puts some data into the FIFO
+ * kfifo_is_empty - returns true if the fifo is empty
* @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
+ */
+static inline __must_check int kfifo_is_empty(struct kfifo *fifo)
+{
+ return fifo->in == fifo->out;
+}
+
+/**
+ * kfifo_is_full - returns true if the fifo is full
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check int kfifo_is_full(struct kfifo *fifo)
+{
+ return kfifo_len(fifo) == kfifo_size(fifo);
+}
+
+/**
+ * kfifo_avail - returns the number of bytes available in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
+{
+ return kfifo_size(fifo) - kfifo_len(fifo);
+}
+
+/**
+ * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @lock: pointer to the spinlock to use for locking.
*
- * This function copies at most @len bytes from the @buffer into
+ * This function copies at most @n bytes from the @from buffer into
* the FIFO depending on the free space, and returns the number of
* bytes copied.
*/
-static inline unsigned int kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len)
+static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
+ const void *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
- ret = __kfifo_put(fifo, buffer, len);
+ ret = kfifo_in(fifo, from, n);
- spin_unlock_irqrestore(fifo->lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return ret;
}
/**
- * kfifo_get - gets some data from the FIFO
+ * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking
* @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
- * @len: the size of the destination buffer.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @lock: pointer to the spinlock to use for locking.
*
* This function copies at most @len bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
*/
-static inline unsigned int kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
+ void *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
+
+ ret = kfifo_out(fifo, to, n);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return ret;
+}
+
+extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
+
+extern __must_check int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned *lenout);
+
+extern __must_check int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned *lenout);
+
+/*
+ * __kfifo_add_out internal helper function for updating the out offset
+ */
+static inline void __kfifo_add_out(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_mb();
+ fifo->out += off;
+}
+
+/*
+ * __kfifo_add_in internal helper function for updating the in offset
+ */
+static inline void __kfifo_add_in(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_wmb();
+ fifo->in += off;
+}
+
+/*
+ * __kfifo_off internal helper function for calculating the index of a
+ * given offset
+ */
+static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off)
+{
+ return off & (fifo->size - 1);
+}
+
+/*
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static inline unsigned int __kfifo_peek_n(struct kfifo *fifo,
+ unsigned int recsize)
+{
+#define __KFIFO_GET(fifo, off, shift) \
+ ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift))
+
+ unsigned int l;
+
+ l = __KFIFO_GET(fifo, 0, 0);
+
+ if (--recsize)
+ l |= __KFIFO_GET(fifo, 1, 8);
+
+ return l;
+#undef __KFIFO_GET
+}
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the next record into the fifo
+ */
+static inline void __kfifo_poke_n(struct kfifo *fifo,
+ unsigned int recsize, unsigned int n)
+{
+#define __KFIFO_PUT(fifo, off, val, shift) \
+ ( \
+ (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \
+ (unsigned char)((val) >> (shift)) \
+ )
+
+ __KFIFO_PUT(fifo, 0, n, 0);
+
+ if (--recsize)
+ __KFIFO_PUT(fifo, 1, n, 8);
+#undef __KFIFO_PUT
+}
+
+/*
+ * __kfifo_in_... internal functions for putting data into the fifo;
+ * do not call them directly, use kfifo_in_rec() instead
+ */
+extern unsigned int __kfifo_in_n(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
- ret = __kfifo_get(fifo, buffer, len);
+extern unsigned int __kfifo_in_generic(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
- /*
- * optimization: if the FIFO is empty, set the indices to 0
- * so we don't wrap the next time
- */
- if (fifo->in == fifo->out)
- fifo->in = fifo->out = 0;
+static inline unsigned int __kfifo_in_rec(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
- spin_unlock_irqrestore(fifo->lock, flags);
+ ret = __kfifo_in_n(fifo, from, n, recsize);
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
/**
- * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
+ * kfifo_in_rec - puts some record data into the FIFO
* @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from the @from buffer into the FIFO and returns
+ * the number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo,
+ void *from, unsigned int n, unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_in_generic(fifo, from, n, recsize);
+ return __kfifo_in_rec(fifo, from, n, recsize);
+}
+
+/*
+ * __kfifo_out_... internal functions for getting data from the fifo;
+ * do not call them directly, use kfifo_out_rec() instead
*/
-static inline unsigned int __kfifo_len(struct kfifo *fifo)
+extern unsigned int __kfifo_out_n(struct kfifo *fifo,
+ void *to, unsigned int reclen, unsigned int recsize);
+
+extern unsigned int __kfifo_out_generic(struct kfifo *fifo,
+ void *to, unsigned int n,
+ unsigned int recsize, unsigned int *total);
+
+static inline unsigned int __kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
{
- return fifo->in - fifo->out;
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_out_n(fifo, to, l, recsize);
}
/**
- * kfifo_len - returns the number of bytes available in the FIFO
+ * kfifo_out_rec - gets some record data from the FIFO
* @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to @to and returns the
+ * number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
*/
-static inline unsigned int kfifo_len(struct kfifo *fifo)
+static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+
{
- unsigned long flags;
- unsigned int ret;
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_out_generic(fifo, to, n, recsize, total);
+ return __kfifo_out_rec(fifo, to, n, recsize, total);
+}
- spin_lock_irqsave(fifo->lock, flags);
+/*
+ * __kfifo_from_user_... internal functions for copying data from user space
+ * into the fifo; do not call them directly, use kfifo_from_user_rec() instead
+ */
+extern unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
+
+extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
- ret = __kfifo_len(fifo);
+static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
- spin_unlock_irqrestore(fifo->lock, flags);
+ ret = __kfifo_from_user_n(fifo, from, n, recsize);
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
+/**
+ * kfifo_from_user_rec - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from the @from buffer into the
+ * FIFO and returns the number of bytes which cannot be copied.
+ *
+ * If the returned value is equal to or less than @n, the copy_from_user()
+ * function has failed. Otherwise the record doesn't fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_from_user_generic(fifo, from, n, recsize);
+ return __kfifo_from_user_rec(fifo, from, n, recsize);
+}
+
+/*
+ * __kfifo_to_user_... internal functions for copying fifo data to user space;
+ * do not call them directly, use kfifo_to_user_rec() instead
+ */
+extern unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int reclen,
+ unsigned int recsize);
+
+extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total);
+
+static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n,
+ unsigned int recsize, unsigned int *total)
+{
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_to_user_n(fifo, to, n, l, recsize);
+}
+
+/**
+ * kfifo_to_user_rec - gets data from the FIFO and writes it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to the @to buffer.
+ * In case of an error, the function returns the number of bytes which cannot
+ * be copied.
+ * If the returned value is equal to or less than @n, the copy_to_user()
+ * function has failed. Otherwise the record doesn't fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_to_user_generic(fifo, to, n, recsize, total);
+ return __kfifo_to_user_rec(fifo, to, n, recsize, total);
+}
+
+/*
+ * __kfifo_peek_... internal functions for peeking into the next fifo record;
+ * do not call them directly, use kfifo_peek_rec() instead
+ */
+extern unsigned int __kfifo_peek_generic(struct kfifo *fifo,
+ unsigned int recsize);
+
+/**
+ * kfifo_peek_rec - gets the size of the next FIFO record data
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function returns the size of the next FIFO record, in bytes.
+ */
+static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_peek_generic(fifo, recsize);
+ if (!recsize)
+ return kfifo_len(fifo);
+ return __kfifo_peek_n(fifo, recsize);
+}
+
+/*
+ * __kfifo_skip_... internal functions for skipping the next fifo record;
+ * do not call them directly, use kfifo_skip_rec() instead
+ */
+extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize);
+
+static inline void __kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l;
+
+ if (recsize) {
+ l = __kfifo_peek_n(fifo, recsize);
+
+ if (l + recsize <= kfifo_len(fifo)) {
+ __kfifo_add_out(fifo, l + recsize);
+ return;
+ }
+ }
+ kfifo_reset_out(fifo);
+}
+
+/**
+ * kfifo_skip_rec - skip the next fifo out record
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function skips the next FIFO record.
+ */
+static inline void kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ __kfifo_skip_generic(fifo, recsize);
+ else
+ __kfifo_skip_rec(fifo, recsize);
+}
+
+/**
+ * kfifo_avail_rec - returns the number of bytes available in a record FIFO
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ */
+static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l = kfifo_size(fifo) - kfifo_len(fifo);
+
+ return (l > recsize) ? l - recsize : 0;
+}
+
#endif
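A quick illustration of how a driver might sit on top of the reworked byte API above: the fifo no longer carries its own spinlock, so callers that need locking hand one to kfifo_in_locked()/kfifo_out_locked(). This is only a sketch; the buffer size and all demo_* names are made up, and it assumes the reworked kfifo_alloc() declared earlier in this header (returning an int and taking the fifo, a power-of-two size and a gfp mask).

	#include <linux/kfifo.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	#define DEMO_FIFO_SIZE	128		/* must be a power of two */

	static struct kfifo demo_fifo;
	static DEFINE_SPINLOCK(demo_lock);

	static int demo_init(void)
	{
		/* Assumption: reworked kfifo_alloc(fifo, size, gfp) from this header. */
		return kfifo_alloc(&demo_fifo, DEMO_FIFO_SIZE, GFP_KERNEL);
	}

	/* Producer side: queue as much of buf as currently fits. */
	static unsigned int demo_push(const void *buf, unsigned int len)
	{
		return kfifo_in_locked(&demo_fifo, buf, len, &demo_lock);
	}

	/* Consumer side: drain up to len bytes into buf, returns bytes copied. */
	static unsigned int demo_pop(void *buf, unsigned int len)
	{
		return kfifo_out_locked(&demo_fifo, buf, len, &demo_lock);
	}

	static void demo_exit(void)
	{
		kfifo_free(&demo_fifo);
	}

With a single producer and a single consumer the unlocked kfifo_in()/kfifo_out() pair can be used directly, as the kerneldoc above notes; the locked wrappers are only needed when several contexts touch the same end of the fifo.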
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc297e354..19ec41a183f5 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -29,8 +29,7 @@ struct pt_regs;
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
- * This can be implemented in the architecture specific portion of
- * for kgdb.
+ * This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
@@ -65,7 +64,7 @@ struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
- * This will be impelmented a static inline per architecture. This
+ * This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
+ * and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index e880d4cf9e22..08d7dc4ddf40 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ int _n; \
+ \
+ if (!ptr) \
+ break; \
+ \
+ _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ MAYBE_BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+ } while (0) \
+
#else
#define kmemcheck_enabled 0
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
return true;
}
-#endif /* CONFIG_KMEMCHECK */
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- *
- * Note: We provide the same definitions for both kmemcheck and non-
- * kmemcheck kernels. This makes it harder to introduce accidental errors. It
- * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
+#define kmemcheck_bitfield_begin(name)
+#define kmemcheck_bitfield_end(name)
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ } while (0)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- MAYBE_BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+#define kmemcheck_annotate_variable(var) \
+ do { \
} while (0)
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
+#endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */
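After this move the annotation macros expand to nothing when CONFIG_KMEMCHECK is off, so they can be left in place unconditionally. A minimal sketch of their use, following the bitfield example already shown in the comment above; the struct and field names here are invented for illustration:

	#include <linux/kmemcheck.h>
	#include <linux/slab.h>

	struct demo_flags {
		kmemcheck_bitfield_begin(flags);
		unsigned int dirty:1, locked:1;
		kmemcheck_bitfield_end(flags);
		unsigned long cookie;
	};

	static struct demo_flags *demo_flags_alloc(gfp_t gfp)
	{
		struct demo_flags *d = kmalloc(sizeof(*d), gfp);

		if (d) {
			/* Tell kmemcheck the bitfield bytes are now defined... */
			kmemcheck_annotate_bitfield(d, flags);
			/* ...and mark the cookie initialized as a whole. */
			kmemcheck_annotate_variable(d->cookie);
		}
		return d;
	}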
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d46ee9..99d9a6766f7e 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
static inline void kmemleak_ignore(const void *ptr)
{
}
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
}
static inline void kmemleak_erase(void **ptr)
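The kmemleak_scan_area() change drops the (ptr, offset, length) form in favour of a plain (ptr, size) pair, so callers now point directly at the sub-area they want scanned. A hedged sketch of the updated calling convention, assuming the usual kmemleak semantics that registered scan areas restrict pointer scanning within an allocated object; the struct is hypothetical:

	#include <linux/kmemleak.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct list_head list;	/* only this part holds pointers */
		char payload[256];	/* opaque data, not worth scanning */
	};

	static struct demo_obj *demo_obj_alloc(gfp_t gfp)
	{
		struct demo_obj *obj = kmalloc(sizeof(*obj), gfp);

		if (obj) {
			/* New two-argument form: scan just the embedded list head. */
			kmemleak_scan_area(&obj->list, sizeof(obj->list), gfp);
		}
		return obj;
	}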
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 000000000000..e32aa268efac
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
+/*
+ * linux/include/linux/kmsg_dump.h
+ *
+ * Copyright (C) 2009 Net Insight AB
+ *
+ * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#ifndef _LINUX_KMSG_DUMP_H
+#define _LINUX_KMSG_DUMP_H
+
+#include <linux/list.h>
+
+enum kmsg_dump_reason {
+ KMSG_DUMP_OOPS,
+ KMSG_DUMP_PANIC,
+};
+
+/**
+ * struct kmsg_dumper - kernel crash message dumper structure
+ * @dump: The callback which gets called on crashes. The buffer is passed
+ * as two sections, where s1 (length l1) contains the older
+ * messages and s2 (length l2) contains the newer.
+ * @list: Entry in the dumper list (private)
+ * @registered: Flag that specifies if this is already registered
+ */
+struct kmsg_dumper {
+ void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
+ const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2);
+ struct list_head list;
+ int registered;
+};
+
+#ifdef CONFIG_PRINTK
+void kmsg_dump(enum kmsg_dump_reason reason);
+
+int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+#else
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+}
+
+static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+
+static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_KMSG_DUMP_H */
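The new header above defines the whole dumper interface: a callback that receives the kernel log as two segments (older then newer) plus register/unregister helpers. A sketch of a module that hooks into it; all demo_* names are illustrative, and a real dumper would push the two segments to persistent storage instead of just reporting their lengths:

	#include <linux/kernel.h>
	#include <linux/kmsg_dump.h>
	#include <linux/module.h>

	static void demo_dump(struct kmsg_dumper *dumper,
			      enum kmsg_dump_reason reason,
			      const char *s1, unsigned long l1,
			      const char *s2, unsigned long l2)
	{
		/* s1/l1 hold the older part of the log, s2/l2 the newer part. */
		pr_info("kmsg dump (reason %d): %lu + %lu bytes\n",
			reason, l1, l2);
	}

	static struct kmsg_dumper demo_dumper = {
		.dump = demo_dump,
	};

	static int __init demo_init(void)
	{
		return kmsg_dump_register(&demo_dumper);
	}

	static void __exit demo_exit(void)
	{
		kmsg_dump_unregister(&demo_dumper);
	}

	module_init(demo_init);
	module_exit(demo_exit);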
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5d..43bdab769fc3 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
#include <linux/bitops.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
- return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
/*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
*/
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- page->mapping = (void *) PAGE_MAPPING_ANON;
- __inc_zone_page_state(page, NR_ANON_PAGES);
- }
+ struct anon_vma *anon_vma = page_anon_vma(page);
+
+ if (!anon_vma ||
+ (anon_vma == vma->anon_vma &&
+ page->index == linear_page_index(vma, address)))
+ return page;
+
+ return ksm_does_need_to_copy(page, vma, address);
}
+
+int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
#else /* !CONFIG_KSM */
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
{
return 0;
}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
{
return 0;
}
-static inline void ksm_exit(struct mm_struct *mm)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
+ return 0;
}
-static inline int PageKsm(struct page *page)
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
{
return 0;
}
-/* No stub required for page_add_ksm_rmap(page) */
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
-#endif
+#endif /* __LINUX_KSM_H */
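A condensed sketch of how the new swap-in helper above is meant to be used from the fault path (assuming CONFIG_KSM); locking, error handling and the rest of do_swap_page() are omitted, and the wrapper name is invented:

	#include <linux/kernel.h>
	#include <linux/ksm.h>
	#include <linux/mm.h>

	static struct page *demo_swapin_page(struct page *page,
					     struct vm_area_struct *vma,
					     unsigned long address)
	{
		/* KSM pages carry a stable-tree node where anon pages keep an anon_vma. */
		if (PageKsm(page))
			pr_debug("ksm stable node %p\n", page_stable_node(page));

		/* May hand back the original page or a private copy of it. */
		return ksm_might_need_to_copy(page, vma, address);
	}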
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2d241da07236..a24de0b1858e 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -496,6 +496,7 @@ struct kvm_ioeventfd {
#define KVM_CAP_VCPU_EVENTS 41
#endif
#define KVM_CAP_S390_PSW 42
+#define KVM_CAP_PPC_SEGSTATE 43
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
index afc9f9fd70f5..2618aa9063bc 100644
--- a/include/linux/leds-lp3944.h
+++ b/include/linux/leds-lp3944.h
@@ -12,9 +12,6 @@
#ifndef __LINUX_LEDS_LP3944_H
#define __LINUX_LEDS_LP3944_H
-#include <linux/leds.h>
-#include <linux/workqueue.h>
-
#define LP3944_LED0 0
#define LP3944_LED1 1
#define LP3944_LED2 2
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 96eea90f01a8..f158eb1149aa 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -32,7 +32,7 @@ struct pca9532_led {
struct i2c_client *client;
char *name;
struct led_classdev ldev;
- struct work_struct work;
+ struct work_struct work;
enum pca9532_type type;
enum pca9532_state state;
};
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
new file mode 100644
index 000000000000..5a8eb389aab8
--- /dev/null
+++ b/include/linux/leds-regulator.h
@@ -0,0 +1,46 @@
+/*
+ * leds-regulator.h - platform data structure for regulator driven LEDs.
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_REGULATOR_H
+#define __LINUX_LEDS_REGULATOR_H
+
+/*
+ * Use "vled" as supply id when declaring the regulator consumer:
+ *
+ * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = {
+ * { .dev_name = "leds-regulator.0", .supply = "vled" },
+ * };
+ *
+ * If you have several regulator driven LEDs, you can append a numerical id to
+ * .dev_name as done above, and use the same id when declaring the platform
+ * device:
+ *
+ * static struct led_regulator_platform_data a780_vibrator_data = {
+ * .name = "a780::vibrator",
+ * };
+ *
+ * static struct platform_device a780_vibrator = {
+ * .name = "leds-regulator",
+ * .id = 0,
+ * .dev = {
+ * .platform_data = &a780_vibrator_data,
+ * },
+ * };
+ */
+
+#include <linux/leds.h>
+
+struct led_regulator_platform_data {
+ char *name; /* LED name as expected by LED class */
+ enum led_brightness brightness; /* initial brightness value */
+};
+
+#endif /* __LINUX_LEDS_REGULATOR_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6a9c4ddd3d95..73112250862c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -354,6 +354,9 @@ enum {
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
+ /* sometimes resuming a link requires several retries */
+ ATA_LINK_RESUME_TRIES = 5,
+
/* how hard are we gonna try to probe/recover devices */
ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4c..f1ca0dcc1628 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
#define LIS3_WAKEUP_Z_HI (1 << 5)
unsigned char wakeup_flags;
unsigned char wakeup_thresh;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
};
#endif /* __LIS3LV02D_H_ */
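The new platform-data fields above let board code remap the sensor axes and set self-test pass limits. A hypothetical board file where the part is mounted rotated, so X and Y are swapped and Z is inverted; the self-test limits are placeholders, real values come from the chip data sheet:

	#include <linux/lis3lv02d.h>

	static struct lis3lv02d_platform_data demo_lis3_pdata = {
		.axis_x		= LIS3_DEV_Y,
		.axis_y		= LIS3_INV_DEV_X,
		.axis_z		= LIS3_INV_DEV_Z,
		.st_min_limits	= { -46, -46, -46 },	/* placeholder limits */
		.st_max_limits	= {  46,  46,  46 },
	};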
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
new file mode 100644
index 000000000000..1a2df2efb771
--- /dev/null
+++ b/include/linux/list_sort.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+void list_sort(void *priv, struct list_head *head,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b));
+#endif
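list_sort() above works on any intrusive list given a memcmp-style comparator; the priv pointer is simply passed through to the callback. A small self-contained sketch with an invented element type:

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct demo_item {
		struct list_head node;
		int key;
	};

	/* Comparator: return <0, 0 or >0, like memcmp(). priv is unused here. */
	static int demo_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct demo_item *ia = container_of(a, struct demo_item, node);
		struct demo_item *ib = container_of(b, struct demo_item, node);

		return ia->key - ib->key;
	}

	static void demo_sort(struct list_head *head)
	{
		list_sort(NULL, head, demo_cmp);
	}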
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 2442e3f3d033..ef82b8fcbddb 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
extern u64 lmb_end_of_DRAM(void);
extern void __init lmb_enforce_memory_limit(u64 memory_limit);
extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_is_region_reserved(u64 base, u64 size);
extern int lmb_find(struct lmb_property *res);
extern void lmb_dump_all(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf9213b2db8f..1f9b119f4ace 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
+
+/* For coalescing uncharges to reduce memcg overhead */
+extern void mem_cgroup_uncharge_start(void);
+extern void mem_cgroup_uncharge_end(void);
+
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
@@ -68,6 +73,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
+extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
static inline
@@ -80,6 +86,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
return cgroup == mem;
}
+extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
+
extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
@@ -117,7 +125,7 @@ static inline bool mem_cgroup_disabled(void)
}
extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask, int nid,
int zid);
@@ -151,6 +159,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
+static inline void mem_cgroup_uncharge_start(void)
+{
+}
+
+static inline void mem_cgroup_uncharge_end(void)
+{
+}
+
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
@@ -189,6 +205,11 @@ mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
+static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
return 1;
@@ -200,6 +221,11 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
return 1;
}
+static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+ return NULL;
+}
+
static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
@@ -274,7 +300,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
int val)
{
}
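mem_cgroup_uncharge_start()/end() bracket a run of uncharges so the memcg counters can be coalesced instead of being updated per page. A sketch of the intended calling pattern only; the real batching sites are the page release/truncate paths, and the pagevec loop here is just for illustration:

	#include <linux/memcontrol.h>
	#include <linux/pagevec.h>

	static void demo_uncharge_batch(struct pagevec *pvec)
	{
		int i;

		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(pvec); i++)
			mem_cgroup_uncharge_page(pvec->pages[i]);
		mem_cgroup_uncharge_end();
	}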
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 37fa19b34ef5..1adfe779eb99 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -50,6 +50,19 @@ struct memory_notify {
int status_change_nid;
};
+/*
+ * During pageblock isolation, count the number of pages within the
+ * range [start_pfn, start_pfn + nr_pages) which are owned by code
+ * in the notifier chain.
+ */
+#define MEM_ISOLATE_COUNT (1<<0)
+
+struct memory_isolate_notify {
+ unsigned long start_pfn; /* Start of range to check */
+ unsigned int nr_pages; /* # pages in range to check */
+ unsigned int pages_found; /* # pages owned found by callbacks */
+};
+
struct notifier_block;
struct mem_section;
@@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
+static inline int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+}
+static inline int memory_isolate_notify(unsigned long val, void *v)
+{
+ return 0;
+}
#else
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
+extern int register_memory_isolate_notifier(struct notifier_block *nb);
+extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
extern int unregister_memory_section(struct mem_section *);
extern int memory_dev_init(void);
extern int remove_memory_block(unsigned long, struct mem_section *, int);
extern int memory_notify(unsigned long val, void *v);
+extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
enum mem_add_context { BOOT, HOTPLUG };
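The new isolate notifier lets a driver that owns pages in a pageblock (a balloon driver, say) report them during isolation so the range can still be taken offline. A sketch of a callback handling MEM_ISOLATE_COUNT; demo_count_owned_pages() is a placeholder for whatever bookkeeping the driver keeps:

	#include <linux/init.h>
	#include <linux/memory.h>
	#include <linux/notifier.h>

	/* Placeholder: a real driver would consult its own page bookkeeping. */
	static unsigned int demo_count_owned_pages(unsigned long start_pfn,
						   unsigned int nr_pages)
	{
		return 0;
	}

	static int demo_isolate_cb(struct notifier_block *nb,
				   unsigned long action, void *arg)
	{
		struct memory_isolate_notify *mi = arg;

		if (action != MEM_ISOLATE_COUNT)
			return NOTIFY_OK;

		mi->pages_found += demo_count_owned_pages(mi->start_pfn,
							  mi->nr_pages);
		return NOTIFY_OK;
	}

	static struct notifier_block demo_isolate_nb = {
		.notifier_call = demo_isolate_cb,
	};

	static int __init demo_init(void)
	{
		return register_memory_isolate_notifier(&demo_isolate_nb);
	}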
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a41..35b07b773e6c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f1..1cc966cd3e5f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
return node_zonelist(0, gfp_flags);
}
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 000000000000..f41b428d2cec
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
+/*
+ * Marvell 88PM8607 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM8607_H
+#define __LINUX_MFD_88PM8607_H
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define CHIP_ID (0x40)
+#define CHIP_ID_MASK (0xF8)
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_MISC1 (0x40)
+
+/* bit definitions for PM8607 events */
+#define PM8607_EVENT_ONKEY (1 << 0)
+#define PM8607_EVENT_EXTON (1 << 1)
+#define PM8607_EVENT_CHG (1 << 2)
+#define PM8607_EVENT_BAT (1 << 3)
+#define PM8607_EVENT_RTC (1 << 4)
+#define PM8607_EVENT_CC (1 << 5)
+#define PM8607_EVENT_VBAT (1 << 8)
+#define PM8607_EVENT_VCHG (1 << 9)
+#define PM8607_EVENT_VSYS (1 << 10)
+#define PM8607_EVENT_TINT (1 << 11)
+#define PM8607_EVENT_GPADC0 (1 << 12)
+#define PM8607_EVENT_GPADC1 (1 << 13)
+#define PM8607_EVENT_GPADC2 (1 << 14)
+#define PM8607_EVENT_GPADC3 (1 << 15)
+#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
+#define PM8607_EVENT_PEN (1 << 17)
+#define PM8607_EVENT_HEADSET (1 << 18)
+#define PM8607_EVENT_HOOK (1 << 19)
+#define PM8607_EVENT_MICIN (1 << 20)
+#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
+#define PM8607_EVENT_CHG_DONE (1 << 22)
+#define PM8607_EVENT_CHG_FAULT (1 << 23)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_MISC1_PI2C (1 << 0)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+
+struct pm8607_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *client;
+
+ int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
+ int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned char chip_id;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm8607_platform_data {
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern int pm8607_reg_read(struct pm8607_chip *, int);
+extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
+extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
+ unsigned char);
+#endif /* __LINUX_MFD_88PM8607_H */
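A sketch of how a sub-driver might use the accessors exported above to sanity-check the chip ID and flip BUCK3 into its double-range mode. It assumes the helpers return a negative errno on failure and the raw register value on success, which the header does not spell out; all demo_* names are made up:

	#include <linux/errno.h>
	#include <linux/mfd/88pm8607.h>

	static int demo_setup(struct pm8607_chip *chip)
	{
		int ret;

		ret = pm8607_reg_read(chip, PM8607_CHIP_ID);
		if (ret < 0)
			return ret;
		if ((ret & CHIP_ID_MASK) != CHIP_ID)
			return -ENODEV;

		/* set-bits helper: (chip, reg, mask, data) */
		return pm8607_set_bits(chip, PM8607_BUCK3, PM8607_BUCK3_DOUBLE,
				       PM8607_BUCK3_DOUBLE);
	}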
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 000000000000..a42a7033ae53
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * AB4500 device core functions, for client access
+ */
+#ifndef MFD_AB4500_H
+#define MFD_AB4500_H
+
+#include <linux/device.h>
+
+/*
+ * AB4500 bank addresses
+ */
+#define AB4500_SYS_CTRL1_BLOCK 0x1
+#define AB4500_SYS_CTRL2_BLOCK 0x2
+#define AB4500_REGU_CTRL1 0x3
+#define AB4500_REGU_CTRL2 0x4
+#define AB4500_USB 0x5
+#define AB4500_TVOUT 0x6
+#define AB4500_DBI 0x7
+#define AB4500_ECI_AV_ACC 0x8
+#define AB4500_RESERVED 0x9
+#define AB4500_GPADC 0xA
+#define AB4500_CHARGER 0xB
+#define AB4500_GAS_GAUGE 0xC
+#define AB4500_AUDIO 0xD
+#define AB4500_INTERRUPT 0xE
+#define AB4500_RTC 0xF
+#define AB4500_MISC 0x10
+#define AB4500_DEBUG 0x12
+#define AB4500_PROD_TEST 0x13
+#define AB4500_OTP_EMUL 0x15
+
+/*
+ * System control 1 register offsets.
+ * Bank = 0x01
+ */
+#define AB4500_TURNON_STAT_REG 0x0100
+#define AB4500_RESET_STAT_REG 0x0101
+#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
+
+#define AB4500_FSM_STAT1_REG 0x0140
+#define AB4500_FSM_STAT2_REG 0x0141
+#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
+#define AB4500_USB_STAT1_REG 0x0143
+#define AB4500_USB_STAT2_REG 0x0144
+#define AB4500_STATUS_SPARE1_REG 0x0145
+#define AB4500_STATUS_SPARE2_REG 0x0146
+
+#define AB4500_CTRL1_REG 0x0180
+#define AB4500_CTRL2_REG 0x0181
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB4500_CTRL3_REG 0x0200
+#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
+#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
+#define AB4500_LOW_BAT_REG 0x0203
+#define AB4500_BATT_OK_REG 0x0204
+#define AB4500_SYSCLK_TIMER_REG 0x0205
+#define AB4500_SMPSCLK_CTRL_REG 0x0206
+#define AB4500_SMPSCLK_SEL1_REG 0x0207
+#define AB4500_SMPSCLK_SEL2_REG 0x0208
+#define AB4500_SMPSCLK_SEL3_REG 0x0209
+#define AB4500_SYSULPCLK_CONF_REG 0x020A
+#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
+#define AB4500_SYSCLK_CTRL_REG 0x020C
+#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
+#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
+#define AB4500_SYSCTRL_SPARE_REG 0x020F
+#define AB4500_PAD_CONF_REG 0x0210
+
+/*
+ * Regu control1 register offsets
+ * Bank = 0x03
+ */
+#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
+#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
+#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
+#define AB4500_REGU_REQ_CTRL1_REG 0x0303
+#define AB4500_REGU_REQ_CTRL2_REG 0x0304
+#define AB4500_REGU_REQ_CTRL3_REG 0x0305
+#define AB4500_REGU_REQ_CTRL4_REG 0x0306
+#define AB4500_REGU_MISC1_REG 0x0380
+#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
+#define AB4500_REGU_VUSB_CTRL_REG 0x0382
+#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
+#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
+
+/*
+ * Regu control2 Vmod register offsets
+ */
+#define AB4500_REGU_VMOD_REGU_REG 0x0440
+#define AB4500_REGU_VMOD_SEL1_REG 0x0441
+#define AB4500_REGU_VMOD_SEL2_REG 0x0442
+#define AB4500_REGU_CTRL_DISCH_REG 0x0443
+#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB4500_USB_LINE_STAT_REG 0x0580
+#define AB4500_USB_LINE_CTRL1_REG 0x0581
+#define AB4500_USB_LINE_CTRL2_REG 0x0582
+#define AB4500_USB_LINE_CTRL3_REG 0x0583
+#define AB4500_USB_LINE_CTRL4_REG 0x0584
+#define AB4500_USB_LINE_CTRL5_REG 0x0585
+#define AB4500_USB_OTG_CTRL_REG 0x0587
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_CTRL_SPARE_REG 0x0589
+#define AB4500_USB_PHY_CTRL_REG 0x058A
+
+/*
+ * TVOUT / CTRL register offsets
+ * Bank : 0x06
+ */
+#define AB4500_TVOUT_CTRL_REG 0x0680
+
+/*
+ * DBI register offsets
+ * Bank : 0x07
+ */
+#define AB4500_DBI_REG1_REG 0x0700
+#define AB4500_DBI_REG2_REG 0x0701
+
+/*
+ * ECI register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ECI_CTRL_REG 0x0800
+#define AB4500_ECI_HOOKLEVEL_REG 0x0801
+#define AB4500_ECI_DATAOUT_REG 0x0802
+#define AB4500_ECI_DATAIN_REG 0x0803
+
+/*
+ * AV Connector register offsets
+ * Bank : 0x08
+ */
+#define AB4500_AV_CONN_REG 0x0840
+
+/*
+ * Accessory detection register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ACC_DET_DB1_REG 0x0880
+#define AB4500_ACC_DET_DB2_REG 0x0881
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB4500_GPADC_CTRL1_REG 0x0A00
+#define AB4500_GPADC_CTRL2_REG 0x0A01
+#define AB4500_GPADC_CTRL3_REG 0x0A02
+#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
+#define AB4500_GPADC_STAT_REG 0x0A04
+#define AB4500_GPADC_MANDATAL_REG 0x0A05
+#define AB4500_GPADC_MANDATAH_REG 0x0A06
+#define AB4500_GPADC_AUTODATAL_REG 0x0A07
+#define AB4500_GPADC_AUTODATAH_REG 0x0A08
+#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_STATUS1_REG 0x0B00
+#define AB4500_CH_STATUS2_REG 0x0B01
+#define AB4500_CH_USBCH_STAT1_REG 0x0B02
+#define AB4500_CH_USBCH_STAT2_REG 0x0B03
+#define AB4500_CH_FSM_STAT_REG 0x0B04
+#define AB4500_CH_STAT_REG 0x0B05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_VOLT_LVL_REG 0x0B40
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_MCH_CTRL1 0x0B80
+#define AB4500_MCH_CTRL2 0x0B81
+#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
+#define AB4500_CH_WD_REG 0x0B83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_USBCH_CTRL1_REG 0x0BC0
+#define AB4500_USBCH_CTRL2_REG 0x0BC1
+#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
+
+/*
+ * RTC bank register offsets
+ * Bank : 0xF
+ */
+#define AB4500_RTC_SOFF_STAT_REG 0x0F00
+#define AB4500_RTC_CC_CONF_REG 0x0F01
+#define AB4500_RTC_READ_REQ_REG 0x0F02
+#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB4500_RTC_STAT_REG 0x0F0B
+#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB4500_RTC_CALIB_REG 0x0F0E
+#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB4500_PWM_OUT_CTRL1_REG 0x1060
+#define AB4500_PWM_OUT_CTRL2_REG 0x1061
+#define AB4500_PWM_OUT_CTRL3_REG 0x1062
+#define AB4500_PWM_OUT_CTRL4_REG 0x1063
+#define AB4500_PWM_OUT_CTRL5_REG 0x1064
+#define AB4500_PWM_OUT_CTRL6_REG 0x1065
+#define AB4500_PWM_OUT_CTRL7_REG 0x1066
+
+#define AB4500_I2C_PAD_CTRL_REG 0x1067
+#define AB4500_REV_REG 0x1080
+
+/**
+ * struct ab4500
+ * @spi: spi device structure
+ * @tx_buf: transmit buffer
+ * @rx_buf: receive buffer
+ * @lock: sync primitive
+ */
+struct ab4500 {
+ struct spi_device *spi;
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+ struct mutex lock;
+};
+
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data);
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr);
+
+#endif /* MFD_AB4500_H */
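A hedged sketch of a client using the two exported accessors above; the exact addressing convention (bank argument plus the full bank-encoded register offset, as the constants are laid out) is an assumption, as are the return-value semantics and the demo_* name:

	#include <linux/mfd/ab4500.h>

	static int demo_ab4500_poke(struct ab4500 *ab)
	{
		int ret;

		/* Kick the main watchdog in the system-control-2 bank. */
		ret = ab4500_write(ab, AB4500_SYS_CTRL2_BLOCK,
				   AB4500_MAIN_WDOG_CTRL_REG, 0x1);
		if (ret < 0)
			return ret;

		/* Read back the turn-on status from system-control-1. */
		return ab4500_read(ab, AB4500_SYS_CTRL1_BLOCK,
				   AB4500_TURNON_STAT_REG);
	}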
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 000000000000..ac37558a4673
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL ((x) << 5)
+#define ADP5520_BL_LAW ((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en)
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col) (col + row * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+ int rows_en_mask; /* Number of rows */
+ int cols_en_mask; /* Number of columns */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+ /*
+ * Backlight current 0..30 mA
+ */
+#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30)
+
+ /*
+ * L2 comparator current 0..1000uA
+ */
+#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000)
+
+ /*
+ * L3 comparator current 0..127uA
+ */
+#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
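The conversion macros above turn board-level milliamp/microamp figures into the register codes the fields expect (for example, ADP5520_BL_CUR_mA(15) evaluates to 63). A sketch of a filled-in backlight platform data block; every value here is an illustrative placeholder:

static struct adp5520_backlight_platform_data sketch_adp5520_bl = {
	.fade_in		= ADP5520_FADE_T_600ms,
	.fade_out		= ADP5520_FADE_T_600ms,
	.fade_led_law		= ADP5520_BL_LAW_LINEAR,
	.en_ambl_sens		= 1,
	.abml_filt		= ADP5520_BL_AMBL_FILT_640ms,
	.l1_daylight_max	= ADP5520_BL_CUR_mA(15),
	.l1_daylight_dim	= ADP5520_BL_CUR_mA(0),
	.l2_office_max		= ADP5520_BL_CUR_mA(7),
	.l2_office_dim		= ADP5520_BL_CUR_mA(0),
	.l3_dark_max		= ADP5520_BL_CUR_mA(3),
	.l3_dark_dim		= ADP5520_BL_CUR_mA(0),
	.l2_trip		= ADP5520_L2_COMP_CURR_uA(700),
	.l2_hyst		= ADP5520_L2_COMP_CURR_uA(50),
	.l3_trip		= ADP5520_L3_COMP_CURR_uA(80),
	.l3_hyst		= ADP5520_L3_COMP_CURR_uA(20),
};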
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+#endif /* __LINUX_MFD_ADP5520_H */
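The keys, LEDs, backlight and GPIO functions are instantiated as platform children of the MFD core, so they reach these accessors through their parent device. A sketch under that assumption; ADP5520_SKETCH_REG is a hypothetical placeholder, not a register name from this header:

#include <linux/platform_device.h>
#include <linux/mfd/adp5520.h>

#define ADP5520_SKETCH_REG	0x00	/* hypothetical register offset */

static int sketch_adp5520_toggle(struct platform_device *pdev,
				 uint8_t mask, int on)
{
	struct device *master = pdev->dev.parent;	/* the MFD core device */

	return on ? adp5520_set_bits(master, ADP5520_SKETCH_REG, mask)
		  : adp5520_clr_bits(master, ADP5520_SKETCH_REG, mask);
}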
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc31..40c372165f3e 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_LED_4MA 1
#define PCAP_LED_5MA 2
#define PCAP_LED_9MA 3
-#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
-#define PCAP_LED_GPIO_EN 0x01000000
-#define PCAP_LED_GPIO_INVERT 0x02000000
#define PCAP_LED_T_MASK 0xf
#define PCAP_LED_C_MASK 0x3
#define PCAP_BL_MASK 0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f16..95cf9360553f 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-
-struct mc13783_irq {
- void (*handler)(int, void *);
- void *data;
-};
-
-#define MC13783_NUM_IRQ 2
-#define MC13783_IRQ_TS 0
-#define MC13783_IRQ_REGULATOR 1
-
-#define MC13783_ADC_MODE_TS 1
-#define MC13783_ADC_MODE_SINGLE_CHAN 2
-#define MC13783_ADC_MODE_MULT_CHAN 3
+#include <linux/interrupt.h>
struct mc13783 {
- int revision;
- struct device *dev;
- struct spi_device *spi_device;
-
- int (*read_dev)(void *data, char reg, int count, u32 *dst);
- int (*write_dev)(void *data, char reg, int count, const u32 *src);
-
- struct mutex io_lock;
- void *io_data;
+ struct spi_device *spidev;
+ struct mutex lock;
int irq;
- unsigned int flags;
+ int flags;
- struct mc13783_irq irq_handler[MC13783_NUM_IRQ];
- struct work_struct work;
- struct completion adc_done;
- unsigned int ts_active;
- struct mutex adc_conv_lock;
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+ /* XXX these should go as platformdata to the regulator subdevice */
struct mc13783_regulator_init_data *regulators;
int num_regulators;
};
-int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
-int mc13783_reg_write(struct mc13783 *, int, u32);
-int mc13783_set_bits(struct mc13783 *, int, u32, u32);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data);
-
#define MC13783_REG_INTERRUPT_STATUS_0 0
#define MC13783_REG_INTERRUPT_MASK_0 1
#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_REG_TEST_3 63
#define MC13783_REG_NB 64
-
-/*
- * Interrupt Status
- */
-#define MC13783_INT_STAT_ADCDONEI (1 << 0)
-#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
-#define MC13783_INT_STAT_TSI (1 << 2)
-#define MC13783_INT_STAT_WHIGHI (1 << 3)
-#define MC13783_INT_STAT_WLOWI (1 << 4)
-#define MC13783_INT_STAT_CHGDETI (1 << 6)
-#define MC13783_INT_STAT_CHGOVI (1 << 7)
-#define MC13783_INT_STAT_CHGREVI (1 << 8)
-#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
-#define MC13783_INT_STAT_CCCVI (1 << 10)
-#define MC13783_INT_STAT_CHGCURRI (1 << 11)
-#define MC13783_INT_STAT_BPONI (1 << 12)
-#define MC13783_INT_STAT_LOBATLI (1 << 13)
-#define MC13783_INT_STAT_LOBATHI (1 << 14)
-#define MC13783_INT_STAT_UDPI (1 << 15)
-#define MC13783_INT_STAT_USBI (1 << 16)
-#define MC13783_INT_STAT_IDI (1 << 19)
-#define MC13783_INT_STAT_Unused (1 << 20)
-#define MC13783_INT_STAT_SE1I (1 << 21)
-#define MC13783_INT_STAT_CKDETI (1 << 22)
-#define MC13783_INT_STAT_UDMI (1 << 23)
-
-/*
- * Interrupt Mask
- */
-#define MC13783_INT_MASK_ADCDONEM (1 << 0)
-#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
-#define MC13783_INT_MASK_TSM (1 << 2)
-#define MC13783_INT_MASK_WHIGHM (1 << 3)
-#define MC13783_INT_MASK_WLOWM (1 << 4)
-#define MC13783_INT_MASK_CHGDETM (1 << 6)
-#define MC13783_INT_MASK_CHGOVM (1 << 7)
-#define MC13783_INT_MASK_CHGREVM (1 << 8)
-#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
-#define MC13783_INT_MASK_CCCVM (1 << 10)
-#define MC13783_INT_MASK_CHGCURRM (1 << 11)
-#define MC13783_INT_MASK_BPONM (1 << 12)
-#define MC13783_INT_MASK_LOBATLM (1 << 13)
-#define MC13783_INT_MASK_LOBATHM (1 << 14)
-#define MC13783_INT_MASK_UDPM (1 << 15)
-#define MC13783_INT_MASK_USBM (1 << 16)
-#define MC13783_INT_MASK_IDM (1 << 19)
-#define MC13783_INT_MASK_SE1M (1 << 21)
-#define MC13783_INT_MASK_CKDETM (1 << 22)
-
/*
* Reg Regulator Mode 0
*/
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_SWCTRL_SW3_STBY (1 << 21)
#define MC13783_SWCTRL_SW3_MODE (1 << 22)
-/*
- * ADC/Touch
- */
-#define MC13783_ADC0_LICELLCON (1 << 0)
-#define MC13783_ADC0_CHRGICON (1 << 1)
-#define MC13783_ADC0_BATICON (1 << 2)
-#define MC13783_ADC0_RTHEN (1 << 3)
-#define MC13783_ADC0_DTHEN (1 << 4)
-#define MC13783_ADC0_UIDEN (1 << 5)
-#define MC13783_ADC0_ADOUTEN (1 << 6)
-#define MC13783_ADC0_ADOUTPER (1 << 7)
-#define MC13783_ADC0_ADREFEN (1 << 10)
-#define MC13783_ADC0_ADREFMODE (1 << 11)
-#define MC13783_ADC0_TSMOD0 (1 << 12)
-#define MC13783_ADC0_TSMOD1 (1 << 13)
-#define MC13783_ADC0_TSMOD2 (1 << 14)
-#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
-#define MC13783_ADC0_ADINC1 (1 << 16)
-#define MC13783_ADC0_ADINC2 (1 << 17)
-#define MC13783_ADC0_WCOMP (1 << 18)
-#define MC13783_ADC0_ADCBIS0 (1 << 23)
-
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_TRIGMASK (1 << 4)
-#define MC13783_ADC1_ADA10 (1 << 5)
-#define MC13783_ADC1_ADA11 (1 << 6)
-#define MC13783_ADC1_ADA12 (1 << 7)
-#define MC13783_ADC1_ADA20 (1 << 8)
-#define MC13783_ADC1_ADA21 (1 << 9)
-#define MC13783_ADC1_ADA22 (1 << 10)
-#define MC13783_ADC1_ATO0 (1 << 11)
-#define MC13783_ADC1_ATO1 (1 << 12)
-#define MC13783_ADC1_ATO2 (1 << 13)
-#define MC13783_ADC1_ATO3 (1 << 14)
-#define MC13783_ADC1_ATO4 (1 << 15)
-#define MC13783_ADC1_ATO5 (1 << 16)
-#define MC13783_ADC1_ATO6 (1 << 17)
-#define MC13783_ADC1_ATO7 (1 << 18)
-#define MC13783_ADC1_ATOX (1 << 19)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC1_ADONESHOT (1 << 22)
-#define MC13783_ADC1_ADCBIS1 (1 << 23)
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-#define MC13783_ADC2_ADD10 (1 << 2)
-#define MC13783_ADC2_ADD11 (1 << 3)
-#define MC13783_ADC2_ADD12 (1 << 4)
-#define MC13783_ADC2_ADD13 (1 << 5)
-#define MC13783_ADC2_ADD14 (1 << 6)
-#define MC13783_ADC2_ADD15 (1 << 7)
-#define MC13783_ADC2_ADD16 (1 << 8)
-#define MC13783_ADC2_ADD17 (1 << 9)
-#define MC13783_ADC2_ADD18 (1 << 10)
-#define MC13783_ADC2_ADD19 (1 << 11)
-#define MC13783_ADC2_ADD20 (1 << 14)
-#define MC13783_ADC2_ADD21 (1 << 15)
-#define MC13783_ADC2_ADD22 (1 << 16)
-#define MC13783_ADC2_ADD23 (1 << 17)
-#define MC13783_ADC2_ADD24 (1 << 18)
-#define MC13783_ADC2_ADD25 (1 << 19)
-#define MC13783_ADC2_ADD26 (1 << 20)
-#define MC13783_ADC2_ADD27 (1 << 21)
-#define MC13783_ADC2_ADD28 (1 << 22)
-#define MC13783_ADC2_ADD29 (1 << 23)
+static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
+{
+ int ret;
+ mc13783_lock(mc13783);
+ ret = mc13783_reg_rmw(mc13783, offset, mask, val);
+ mc13783_unlock(mc13783);
-#define MC13783_ADC3_WHIGH0 (1 << 0)
-#define MC13783_ADC3_WHIGH1 (1 << 1)
-#define MC13783_ADC3_WHIGH2 (1 << 2)
-#define MC13783_ADC3_WHIGH3 (1 << 3)
-#define MC13783_ADC3_WHIGH4 (1 << 4)
-#define MC13783_ADC3_WHIGH5 (1 << 5)
-#define MC13783_ADC3_ICID0 (1 << 6)
-#define MC13783_ADC3_ICID1 (1 << 7)
-#define MC13783_ADC3_ICID2 (1 << 8)
-#define MC13783_ADC3_WLOW0 (1 << 9)
-#define MC13783_ADC3_WLOW1 (1 << 10)
-#define MC13783_ADC3_WLOW2 (1 << 11)
-#define MC13783_ADC3_WLOW3 (1 << 12)
-#define MC13783_ADC3_WLOW4 (1 << 13)
-#define MC13783_ADC3_WLOW5 (1 << 14)
-#define MC13783_ADC3_ADCBIS2 (1 << 23)
-
-#define MC13783_ADC4_ADDBIS10 (1 << 2)
-#define MC13783_ADC4_ADDBIS11 (1 << 3)
-#define MC13783_ADC4_ADDBIS12 (1 << 4)
-#define MC13783_ADC4_ADDBIS13 (1 << 5)
-#define MC13783_ADC4_ADDBIS14 (1 << 6)
-#define MC13783_ADC4_ADDBIS15 (1 << 7)
-#define MC13783_ADC4_ADDBIS16 (1 << 8)
-#define MC13783_ADC4_ADDBIS17 (1 << 9)
-#define MC13783_ADC4_ADDBIS18 (1 << 10)
-#define MC13783_ADC4_ADDBIS19 (1 << 11)
-#define MC13783_ADC4_ADDBIS20 (1 << 14)
-#define MC13783_ADC4_ADDBIS21 (1 << 15)
-#define MC13783_ADC4_ADDBIS22 (1 << 16)
-#define MC13783_ADC4_ADDBIS23 (1 << 17)
-#define MC13783_ADC4_ADDBIS24 (1 << 18)
-#define MC13783_ADC4_ADDBIS25 (1 << 19)
-#define MC13783_ADC4_ADDBIS26 (1 << 20)
-#define MC13783_ADC4_ADDBIS27 (1 << 21)
-#define MC13783_ADC4_ADDBIS28 (1 << 22)
-#define MC13783_ADC4_ADDBIS29 (1 << 23)
+ return ret;
+}
#endif /* __LINUX_MFD_MC13783_PRIV_H */
-
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a7243573..35680409b8cf 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
-#ifndef __INCLUDE_LINUX_MFD_MC13783_H
-#define __INCLUDE_LINUX_MFD_MC13783_H
+#include <linux/interrupt.h>
struct mc13783;
+
+void mc13783_lock(struct mc13783 *mc13783);
+void mc13783_unlock(struct mc13783 *mc13783);
+
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
+int mc13783_ackirq(struct mc13783 *mc13783, int irq);
+
+int mc13783_mask(struct mc13783 *mc13783, int irq);
+int mc13783_unmask(struct mc13783 *mc13783, int irq);
+
+#define MC13783_ADC0 43
+#define MC13783_ADC0_ADREFEN (1 << 10)
+#define MC13783_ADC0_ADREFMODE (1 << 11)
+#define MC13783_ADC0_TSMOD0 (1 << 12)
+#define MC13783_ADC0_TSMOD1 (1 << 13)
+#define MC13783_ADC0_TSMOD2 (1 << 14)
+#define MC13783_ADC0_ADINC1 (1 << 16)
+#define MC13783_ADC0_ADINC2 (1 << 17)
+
+#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
+ MC13783_ADC0_TSMOD1 | \
+ MC13783_ADC0_TSMOD2)
+
+/* to be cleaned up */
struct regulator_init_data;
struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
struct regulator_init_data *init_data;
};
-struct mc13783_platform_data {
- struct mc13783_regulator_init_data *regulators;
+struct mc13783_regulator_platform_data {
int num_regulators;
- unsigned int flags;
+ struct mc13783_regulator_init_data *regulators;
};
-/* mc13783_platform_data flags */
+struct mc13783_platform_data {
+ int num_regulators;
+ struct mc13783_regulator_init_data *regulators;
+
#define MC13783_USE_TOUCHSCREEN (1 << 0)
#define MC13783_USE_CODEC (1 << 1)
#define MC13783_USE_ADC (1 << 2)
#define MC13783_USE_RTC (1 << 3)
#define MC13783_USE_REGULATOR (1 << 4)
+ unsigned int flags;
+};
+
+#define MC13783_ADC_MODE_TS 1
+#define MC13783_ADC_MODE_SINGLE_CHAN 2
+#define MC13783_ADC_MODE_MULT_CHAN 3
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
-#endif /* __INCLUDE_LINUX_MFD_MC13783_H */
+#define MC13783_IRQ_ADCDONE 0
+#define MC13783_IRQ_ADCBISDONE 1
+#define MC13783_IRQ_TS 2
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET 6
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV 8
+#define MC13783_IRQ_CHGSHORT 9
+#define MC13783_IRQ_CCCV 10
+#define MC13783_IRQ_CHGCURR 11
+#define MC13783_IRQ_BPON 12
+#define MC13783_IRQ_LOBATL 13
+#define MC13783_IRQ_LOBATH 14
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ 24
+#define MC13783_IRQ_TODA 25
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST 30
+#define MC13783_IRQ_RTCRST 31
+#define MC13783_IRQ_PC 32
+#define MC13783_IRQ_WARM 33
+#define MC13783_IRQ_MEMHLD 34
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL 36
+#define MC13783_IRQ_THWARNH 37
+#define MC13783_IRQ_CLK 38
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+#define MC13783_NUM_IRQ 46
+#endif /* __LINUX_MFD_MC13783_H */
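Taken together, the reworked API replaces the old fixed handler table with request/free style IRQ management plus explicit lock/unlock bracketing. A sketch of a subdevice client, assuming (as the exported lock helpers suggest) that mc13783_irq_request() is called with the lock held; the ADC channel number is an arbitrary example:

#include <linux/interrupt.h>
#include <linux/mfd/mc13783.h>

static irqreturn_t sketch_adcdone_irq(int irq, void *data)
{
	struct mc13783 *mc13783 = data;

	mc13783_ackirq(mc13783, irq);
	return IRQ_HANDLED;
}

static int sketch_mc13783_setup(struct mc13783 *mc13783)
{
	unsigned int sample[4];
	int ret;

	mc13783_lock(mc13783);
	ret = mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
				  sketch_adcdone_irq, "sketch-adc", mc13783);
	mc13783_unlock(mc13783);
	if (ret)
		return ret;

	/* One single-channel conversion; channel 5 is illustrative. */
	return mc13783_adc_do_conversion(mc13783, MC13783_ADC_MODE_SINGLE_CHAN,
					 5, sample);
}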
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fbc..3398bd9aab11 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
- int charging_restart_interval;
+ /*
+	 * Should be set according to the reference resistor used; see the
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
u8 resumers[5];
};
-struct pcf50633_subdev_pdata {
- struct pcf50633 *pcf;
-};
-
struct pcf50633_irq {
void (*handler) (int, void *);
void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
#define PCF50633_REG_LEDCTL 0x2a
#define PCF50633_REG_LEDDIM 0x2b
-#endif
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+#endif
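With pcf50633_subdev_pdata removed, cell drivers no longer receive the core pointer through platform data; dev_to_pcf50633() expects the core to be stored as driver data of the device handed in, so a child typically passes its parent. A sketch of the resulting probe pattern (the 500 mA limit is just an example value):

#include <linux/platform_device.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/mbc.h>

static int sketch_pcf50633_cell_probe(struct platform_device *pdev)
{
	struct pcf50633 *pcf = dev_to_pcf50633(pdev->dev.parent);

	/* Cap the USB charge current at 500 mA (illustrative value). */
	return pcf50633_mbc_usb_curlim_set(pcf, 500);
}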
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2c..df4f5fa88de3 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 6b9c5d06690c..9cb1834deffa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -2,6 +2,8 @@
#define MFD_TMIO_H
#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
#define tmio_ioread8(addr) readb(addr)
#define tmio_ioread16(addr) readw(addr)
@@ -18,11 +20,48 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG) */
+
+#define sd_config_write8(base, shift, reg, val) \
+ tmio_iowrite8((val), (base) + ((reg) << (shift)))
+#define sd_config_write16(base, shift, reg, val) \
+ tmio_iowrite16((val), (base) + ((reg) << (shift)))
+#define sd_config_write32(base, shift, reg, val) \
+ do { \
+ tmio_iowrite16((val), (base) + ((reg) << (shift))); \
+ tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
+ } while (0)
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
+
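The exported tmio_core_* helpers declared above are the real entry points; they are built on the sd_config_write* macros, which shift the register offset by the configuration window's bus-width shift before writing. A sketch of the same pattern in a hypothetical glue path, using only registers defined above:

#include <linux/io.h>
#include <linux/mfd/tmio.h>

static void sketch_tmio_cnf_enable(void __iomem *cnf, int shift,
				   unsigned long ctl_base)
{
	/* Unlock access to the MMC CTL register block ... */
	sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
	/* ... and program where that block lives in the chip's address map. */
	sd_config_write32(cnf, shift, CNF_CTL_BASE, ctl_base);
}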
/*
* data for the MMC controller
*/
struct tmio_mmc_data {
const unsigned int hclk;
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
};
/*
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14c..5184b79c700b 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
#define __MFD_WM831X_CORE_H__
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
/*
* Register values.
@@ -117,6 +116,7 @@
#define WM831X_DC3_SLEEP_CONTROL 0x4063
#define WM831X_DC4_CONTROL 0x4064
#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
#define WM831X_EPE1_CONTROL 0x4066
#define WM831X_EPE2_CONTROL 0x4067
#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
struct regulator_dev;
+#define WM831X_NUM_IRQ_REGS 5
+
struct wm831x {
struct mutex io_lock;
@@ -248,10 +250,11 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- struct workqueue_struct *irq_wq;
- struct work_struct irq_work;
unsigned int irq_base;
- int irq_masks[5];
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ int num_gpio;
struct mutex auxadc_lock;
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
-int __must_check wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *);
-void wm831x_disable_irq(struct wm831x *wm831x, int irq);
-void wm831x_enable_irq(struct wm831x *wm831x, int irq);
+static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
+ unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name,
+ void *dev)
+{
+ return request_threaded_irq(irq, NULL, handler, flags, name, dev);
+}
+
+static inline void wm831x_free_irq(struct wm831x *wm831x,
+ unsigned int irq, void *dev)
+{
+ free_irq(irq, dev);
+}
+
+static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
+{
+ disable_irq(irq);
+}
+
+static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+{
+ enable_irq(irq);
+}
#endif
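Since the wrappers now delegate straight to genirq, a wm831x function driver requests interrupts at wm831x->irq_base plus the chip-relative offset. A sketch with a placeholder offset (WM831X_IRQ_SKETCH is not a real define; the actual offsets come from the chip's IRQ header):

#include <linux/interrupt.h>
#include <linux/mfd/wm831x/core.h>

#define WM831X_IRQ_SKETCH	0	/* hypothetical chip-relative offset */

static irqreturn_t sketch_wm831x_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int sketch_wm831x_request(struct wm831x *wm831x)
{
	return wm831x_request_irq(wm831x,
				  wm831x->irq_base + WM831X_IRQ_SKETCH,
				  sketch_wm831x_handler, IRQF_TRIGGER_RISING,
				  "sketch", wm831x);
}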
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aad..fd322aca33ba 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -41,6 +41,23 @@ struct wm831x_battery_pdata {
int timeout; /** Charge cycle timeout, in minutes */
};
+/**
+ * Configuration for the WM831x DC-DC BuckWise converters. This
+ * should be passed as driver_data in the regulator_init_data.
+ *
+ * Currently all the configuration is for the fast DVS switching
+ * support of the devices. This allows MFPs on the device to be
+ * configured as an input to switch between two output voltages,
+ * allowing voltage transitions without the expense of an access over
+ * I2C or SPI buses.
+ */
+struct wm831x_buckv_pdata {
+ int dvs_gpio; /** CPU GPIO to use for DVS switching */
+ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
+ int dvs_init_state; /** DVS state to expect on startup */
+ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
+};
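As the comment notes, this structure is handed to the regulator core through the driver_data pointer of regulator_init_data rather than via a separate platform device. A sketch of that wiring; the GPIO numbers and voltage range are board-specific placeholders:

#include <linux/regulator/machine.h>
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_buckv_pdata sketch_dc1_dvs = {
	.dvs_gpio	 = 101,		/* hypothetical CPU GPIO */
	.dvs_control_src = 1,
	.dvs_init_state	 = 0,
	.dvs_state_gpio	 = 102,		/* hypothetical CPU GPIO */
};

static struct regulator_init_data sketch_dc1_init = {
	.constraints = {
		.min_uV		= 600000,
		.max_uV		= 1800000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.driver_data = &sketch_dc1_dvs,
};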
+
/* Sources for status LED configuration. Values are register values
* plus 1 to allow for a zero default for preserve.
*/
@@ -91,6 +108,7 @@ struct wm831x_pdata {
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
struct wm831x_backup_pdata *backup;
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a055..43868899bf49 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
struct wm8350_irq {
- void (*handler) (struct wm8350 *, int, void *);
+ irq_handler_t handler;
void *data;
};
@@ -646,10 +646,12 @@ struct wm8350 {
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
+ int irq_base;
};
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
* WM8350 internal interrupts
*/
int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data);
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data);
int wm8350_free_irq(struct wm8350 *wm8350, int irq);
int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
-
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
#endif
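Registration now takes a genirq-style handler plus flags and a name, and is typically paired with wm8350_unmask_irq() to enable delivery. A sketch of the new calling convention; WM8350_IRQ_SKETCH stands in for one of the WM8350_IRQ_* numbers defined elsewhere in this header:

#include <linux/interrupt.h>
#include <linux/mfd/wm8350/core.h>

#define WM8350_IRQ_SKETCH	0	/* hypothetical WM8350_IRQ_* number */

static irqreturn_t sketch_wm8350_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int sketch_wm8350_hookup(struct wm8350 *wm8350)
{
	int ret;

	ret = wm8350_register_irq(wm8350, WM8350_IRQ_SKETCH,
				  sketch_wm8350_handler, 0, "sketch", wm8350);
	if (ret)
		return ret;

	return wm8350_unmask_irq(wm8350, WM8350_IRQ_SKETCH);
}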
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d298..71af3d6ebe9d 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
#define WM8350_GPIO_DEBOUNCE_ON 1
/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
* R128 (0x80) - GPIO Debounce
*/
#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index be3264e286e0..e786fe9841ef 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -666,20 +666,20 @@
#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
-#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4)
-#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4)
-#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4)
-#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4)
-#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4)
-#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4)
-#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4)
-#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0)
+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
/*
* Regulator Interrupts.
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1c..7f085c97c799 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest) { return 0; }
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ce7cc6c7bcbb..e92d1bfdb330 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
+ MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
MLX4_DEV_CAP_FLAG_APM = 1 << 17,
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..60c467bfbabd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
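With both bits defined, anonymous and KSM pages can be told apart by comparing the low mapping bits against the combined flags. A sketch of such a predicate (the in-tree helper for this lives in ksm.h, as the comment above says):

#include <linux/mm.h>
#include <linux/mm_types.h>

static inline int sketch_page_is_ksm(struct page *page)
{
	/* KSM pages carry both PAGE_MAPPING_ANON and PAGE_MAPPING_KSM. */
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}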
extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else
-#endif
- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+ return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
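A walker that wants the new callback only needs to fill in @hugetlb_entry and @mm before calling walk_page_range(); the remaining callbacks may stay NULL. A minimal sketch with an illustrative callback body:

#include <linux/mm.h>

static int sketch_hugetlb_entry(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	/* Inspect one hugetlb entry covering [addr, end). */
	return 0;
}

static int sketch_walk_vma(struct vm_area_struct *vma)
{
	struct mm_walk walk = {
		.hugetlb_entry	= sketch_hugetlb_entry,
		.mm		= vma->vm_mm,
	};

	return walk_page_range(vma->vm_start, vma->vm_end, &walk);
}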
@@ -1022,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
@@ -1071,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1316,11 +1335,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+enum mf_flags {
+ MF_COUNT_INCREASED = 1 << 0,
+};
extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84a524afb3dc..36f96271306c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -122,7 +122,9 @@ struct vm_region {
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
- atomic_t vm_usage; /* region usage count */
+ int vm_usage; /* region usage count (access under nommu_region_sem) */
+ bool vm_icache_flushed : 1; /* true if the icache has been flushed for
+ * this region */
};
/*
@@ -203,10 +205,12 @@ struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
+#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long task_size; /* size of task vm space */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 8a5509877192..ee24ef8ab616 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,8 +1,6 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
-#include <linux/autoconf.h>
-
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6f7561730d88..30fe668c2542 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,7 +15,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
-#include <linux/bounds.h>
+#include <generated/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>
diff --git a/include/linux/module.h b/include/linux/module.h
index 482efc865acf..6cb1a3cab5d3 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -25,8 +25,10 @@
/* Not Yet Implemented */
#define MODULE_SUPPORTED_DEVICE(name)
-/* some toolchains uses a `_' prefix for all user symbols */
-#ifndef MODULE_SYMBOL_PREFIX
+/* Some toolchains use a `_' prefix for all user symbols. */
+#ifdef CONFIG_SYMBOL_PREFIX
+#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
#define MODULE_SYMBOL_PREFIX ""
#endif
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e5434..9c3757c5759d 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
/**
* struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with
- * option BBT_ABSPAGE when bbt is searched,
- * then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob area of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This
- * number of blocks is reserved at the end of the device
- * where the tables are written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved
- * (rather than bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked
- * good / bad blocks, can be NULL, if len = 0
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ * when bbt is searched, then we store the found bbts pages here.
+ * It's an array and supports up to 8 chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
* pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
/*
* Constants for oob configuration
*/
-#define ONENAND_BADBLOCK_POS 0
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+#define ONENAND_BADBLOCK_POS 0
/*
* Bad block scanning errors
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f2..df89f4275232 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff
-#define CFI_MFR_AMD 0x0001
-#define CFI_MFR_ATMEL 0x001F
-#define CFI_MFR_SAMSUNG 0x00EC
-#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44e..d0bf422ae374 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
FL_XIP_WHILE_ERASING,
FL_XIP_WHILE_WRITING,
FL_SHUTDOWN,
+ /* These 2 come from nand_state_t, which has been unified here */
+ FL_READING,
+ FL_CACHEDPRG,
+ /* These 4 come from onenand_state_t, which has been unified here */
+ FL_RESETING,
+ FL_OTPING,
+ FL_PREPARING_ERASE,
+ FL_VERIFYING_ERASE,
+
FL_UNKNOWN
} flstate_t;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd62..ccab9dfc5217 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
struct mtd_info;
/* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
-
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS \
(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
/* This option is defined if the board driver allocates its own buffers
(e.g. because it needs them DMA-coherent */
#define NAND_OWN_BUFFERS 0x00040000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV 0x00080000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -202,20 +206,6 @@ typedef enum {
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
-/*
- * nand_state_t - chip states
- * Enumeration for NAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_CACHEDPRG,
- FL_PM_SUSPENDED,
-} nand_state_t;
-
/* Keep gcc happy */
struct nand_chip;
@@ -402,7 +392,7 @@ struct nand_chip {
uint8_t cellinfo;
int badblockpos;
- nand_state_t state;
+ flstate_t state;
uint8_t *oob_poi;
struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-/**
- * struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
- * when bbt is searched, then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob are of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This number of
- * blocks is reserved at the end of the device where the tables are
- * written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
- * bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked good /
- * bad blocks, can be NULL, if len = 0
- *
- * Descriptor for the bad block table marker and the descriptor for the
- * pattern which identifies good and bad blocks. The assumption is made
- * that the pattern and the version count are always located in the oob area
- * of the first block.
- */
-struct nand_bbt_descr {
- int options;
- int pages[NAND_MAX_CHIPS];
- int offs;
- int veroffs;
- uint8_t version[NAND_MAX_CHIPS];
- int len;
- int maxblocks;
- int reserved_block_code;
- uint8_t *pattern;
-};
-
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK 0x0000000F
-#define NAND_BBT_1BIT 0x00000001
-#define NAND_BBT_2BIT 0x00000002
-#define NAND_BBT_4BIT 0x00000004
-#define NAND_BBT_8BIT 0x00000008
-/* The bad block table is in the last good block of the device */
-#define NAND_BBT_LASTBLOCK 0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE 0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH 0x00000040
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP 0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION 0x00000100
-/* Create a bbt if none axists */
-#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00000800
-/* Write bbt if neccecary */
-#define NAND_BBT_WRITE 0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT 0x00002000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00004000
-
-/* The maximum number of blocks to scan for a bbt */
-#define NAND_BBT_SCAN_MAXBLOCKS 4
-
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, uint8_t * buf);
-/*
-* Constants for oob configuration
-*/
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-
/**
* struct platform_nand_chip - chip level device structure
* @nr_chips: max. number of chips to scan for
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca2434..41bc013571d0 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
struct mtd_info;
/*
- * Calculate 3 byte ECC code for 256 byte block
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+ u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
*/
int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
unsigned int eccsize);
/*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
*/
int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f3350678..5509eb06b326 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
/*
* linux/include/linux/mtd/onenand.h
*
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
/* Free resources held by the OneNAND device */
extern void onenand_release(struct mtd_info *mtd);
-/*
- * onenand_state_t - chip states
- * Enumeration for OneNAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_LOCKING,
- FL_RESETING,
- FL_OTPING,
- FL_PM_SUSPENDED,
-} onenand_state_t;
-
/**
* struct onenand_bufferram - OneNAND BufferRAM Data
* @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
spinlock_t chip_lock;
wait_queue_head_t wq;
- onenand_state_t state;
+ flstate_t state;
unsigned char *page_buf;
unsigned char *oob_buf;
@@ -152,6 +137,8 @@ struct onenand_chip {
/*
* Helper macros
*/
+#define ONENAND_PAGES_PER_BLOCK (1<<6)
+
#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69f..cd6f3b431195 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
#define ONENAND_CMD_LOCK_TIGHT (0x2C)
#define ONENAND_CMD_UNLOCK_ALL (0x27)
#define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
+#define ONENAND_CMD_ERASE_VERIFY (0x71)
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 028946750289..05b441d93642 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -72,8 +72,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
-extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
-extern void release_open_intent(struct nameidata *);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c060208109..9b8299af3741 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
#define SEQ4_STATUS_LEASE_MOVED 0x00000080
#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
+#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
#define NFS4_MAX_UINT64 (~(u64)0)
@@ -528,6 +530,7 @@ enum {
NFSPROC4_CLNT_DESTROY_SESSION,
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3b..34fc6be5bfcf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c8..89b28812ec24 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -2,6 +2,7 @@
#define _LINUX_NFS_XDR_H
#include <linux/nfsacl.h>
+#include <linux/nfs3.h>
/*
* To change the maximum rsize and wsize supported by the NFS client, adjust
@@ -170,8 +171,9 @@ struct nfs4_sequence_args {
struct nfs4_sequence_res {
struct nfs4_session *sr_session;
u8 sr_slotid; /* slot used to send request */
- unsigned long sr_renewal_time;
int sr_status; /* sequence operation status */
+ unsigned long sr_renewal_time;
+ u32 sr_status_flags;
};
struct nfs4_get_lease_time_args {
@@ -938,6 +940,16 @@ struct nfs41_create_session_args {
struct nfs41_create_session_res {
struct nfs_client *client;
};
+
+struct nfs41_reclaim_complete_args {
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 43011b69297c..f321b578edeb 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -29,6 +29,7 @@
#ifdef __KERNEL__
#include <linux/posix_acl.h>
+#include <linux/sunrpc/xdr.h>
/* Maximum number of ACL entries over NFS */
#define NFS_ACL_MAX_ENTRIES 1024
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
deleted file mode 100644
index 3a3f58934f5e..000000000000
--- a/include/linux/nfsd/cache.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * include/linux/nfsd/cache.h
- *
- * Request reply cache. This was heavily inspired by the
- * implementation in 4.3BSD/4.4BSD.
- *
- * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef NFSCACHE_H
-#define NFSCACHE_H
-
-#include <linux/in.h>
-#include <linux/uio.h>
-
-/*
- * Representation of a reply cache entry.
- */
-struct svc_cacherep {
- struct hlist_node c_hash;
- struct list_head c_lru;
-
- unsigned char c_state, /* unused, inprog, done */
- c_type, /* status, buffer */
- c_secure : 1; /* req came from port < 1024 */
- struct sockaddr_in c_addr;
- __be32 c_xid;
- u32 c_prot;
- u32 c_proc;
- u32 c_vers;
- unsigned long c_timestamp;
- union {
- struct kvec u_vec;
- __be32 u_status;
- } c_u;
-};
-
-#define c_replvec c_u.u_vec
-#define c_replstat c_u.u_status
-
-/* cache entry states */
-enum {
- RC_UNUSED,
- RC_INPROG,
- RC_DONE
-};
-
-/* return values */
-enum {
- RC_DROPIT,
- RC_REPLY,
- RC_DOIT,
- RC_INTR
-};
-
-/*
- * Cache types.
- * We may want to add more types one day, e.g. for diropres and
- * attrstat replies. Using cache entries with fixed length instead
- * of buffer pointers may be more efficient.
- */
-enum {
- RC_NOCACHE,
- RC_REPLSTAT,
- RC_REPLBUFF,
-};
-
-/*
- * If requests are retransmitted within this interval, they're dropped.
- */
-#define RC_DELAY (HZ/5)
-
-int nfsd_reply_cache_init(void);
-void nfsd_reply_cache_shutdown(void);
-int nfsd_cache_lookup(struct svc_rqst *, int);
-void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
-
-#ifdef CONFIG_NFSD_V4
-void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
-#else /* CONFIG_NFSD_V4 */
-static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
-}
-#endif /* CONFIG_NFSD_V4 */
-
-#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index a6d9ef2bb34a..8ae78a61eea4 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -12,7 +12,7 @@
# include <linux/types.h>
#ifdef __KERNEL__
-# include <linux/in.h>
+# include <linux/nfsd/nfsfh.h>
#endif
/*
@@ -39,11 +39,23 @@
#define NFSEXP_FSID 0x2000
#define NFSEXP_CROSSMOUNT 0x4000
#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
-#define NFSEXP_ALLFLAGS 0xFE3F
+/*
+ * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4
+ * clients, and only to the single directory that is the root of the
+ * export; further lookup and readdir operations are treated as if every
+ * subdirectory was a mountpoint, and ignored if they are not themselves
+ * exported. This is used by nfsd and mountd to construct the NFSv4
+ * pseudofilesystem, which provides access only to paths leading to each
+ * exported filesystem.
+ */
+#define NFSEXP_V4ROOT 0x10000
+/* All flags that we claim to support. (Note we don't support NOACL.) */
+#define NFSEXP_ALLFLAGS 0x17E3F
/* The flags that may vary depending on security flavor: */
#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
- | NFSEXP_ALLSQUASH)
+ | NFSEXP_ALLSQUASH \
+ | NFSEXP_INSECURE_PORT)
#ifdef __KERNEL__
@@ -108,7 +120,6 @@ struct svc_expkey {
struct path ek_path;
};
-#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
deleted file mode 100644
index 510ffdd5020e..000000000000
--- a/include/linux/nfsd/nfsd.h
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * linux/include/linux/nfsd/nfsd.h
- *
- * Hodge-podge collection of knfsd-related stuff.
- * I will sort this out later.
- *
- * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef LINUX_NFSD_NFSD_H
-#define LINUX_NFSD_NFSD_H
-
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/fs.h>
-#include <linux/posix_acl.h>
-#include <linux/mount.h>
-
-#include <linux/nfsd/debug.h>
-#include <linux/nfsd/nfsfh.h>
-#include <linux/nfsd/export.h>
-#include <linux/nfsd/stats.h>
-/*
- * nfsd version
- */
-#define NFSD_SUPPORTED_MINOR_VERSION 1
-
-/*
- * Flags for nfsd_permission
- */
-#define NFSD_MAY_NOP 0
-#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
-#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
-#define NFSD_MAY_READ 4 /* == MAY_READ */
-#define NFSD_MAY_SATTR 8
-#define NFSD_MAY_TRUNC 16
-#define NFSD_MAY_LOCK 32
-#define NFSD_MAY_OWNER_OVERRIDE 64
-#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
-#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
-
-#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
-#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
-
-/*
- * Callback function for readdir
- */
-struct readdir_cd {
- __be32 err; /* 0, nfserr, or nfserr_eof */
-};
-typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
-
-extern struct svc_program nfsd_program;
-extern struct svc_version nfsd_version2, nfsd_version3,
- nfsd_version4;
-extern u32 nfsd_supported_minorversion;
-extern struct mutex nfsd_mutex;
-extern struct svc_serv *nfsd_serv;
-extern spinlock_t nfsd_drc_lock;
-extern unsigned int nfsd_drc_max_mem;
-extern unsigned int nfsd_drc_mem_used;
-
-extern const struct seq_operations nfs_exports_op;
-
-/*
- * Function prototypes.
- */
-int nfsd_svc(unsigned short port, int nrservs);
-int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
-
-int nfsd_nrthreads(void);
-int nfsd_nrpools(void);
-int nfsd_get_nrthreads(int n, int *);
-int nfsd_set_nrthreads(int n, int *);
-
-/* nfsd/vfs.c */
-int fh_lock_parent(struct svc_fh *, struct dentry *);
-int nfsd_racache_init(int);
-void nfsd_racache_shutdown(void);
-int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
- struct svc_export **expp);
-__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
- const char *, unsigned int, struct svc_fh *);
-__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
- const char *, unsigned int,
- struct svc_export **, struct dentry **);
-__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- struct iattr *, int, time_t);
-#ifdef CONFIG_NFSD_V4
-__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
- struct nfs4_acl *);
-int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
-#endif /* CONFIG_NFSD_V4 */
-__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
- int type, dev_t rdev, struct svc_fh *res);
-#ifdef CONFIG_NFSD_V3
-__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
-__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
- struct svc_fh *res, int createmode,
- u32 *verifier, int *truncp, int *created);
-__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long);
-#endif /* CONFIG_NFSD_V3 */
-__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
- int, struct file **);
-void nfsd_close(struct file *);
-__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
- loff_t, struct kvec *, int, unsigned long *);
-__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
- loff_t, struct kvec *,int, unsigned long *, int *);
-__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
- char *, int *);
-__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
- char *name, int len, char *path, int plen,
- struct svc_fh *res, struct iattr *);
-__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
- char *, int, struct svc_fh *);
-__be32 nfsd_rename(struct svc_rqst *,
- struct svc_fh *, char *, int,
- struct svc_fh *, char *, int);
-__be32 nfsd_remove(struct svc_rqst *,
- struct svc_fh *, char *, int);
-__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
- char *name, int len);
-int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
- unsigned long size);
-__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
- loff_t *, struct readdir_cd *, filldir_t);
-__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
- struct kstatfs *, int access);
-
-int nfsd_notify_change(struct inode *, struct iattr *);
-__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
- struct dentry *, int);
-int nfsd_sync_dir(struct dentry *dp);
-
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-#ifdef CONFIG_NFSD_V2_ACL
-extern struct svc_version nfsd_acl_version2;
-#else
-#define nfsd_acl_version2 NULL
-#endif
-#ifdef CONFIG_NFSD_V3_ACL
-extern struct svc_version nfsd_acl_version3;
-#else
-#define nfsd_acl_version3 NULL
-#endif
-struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
-int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
-#endif
-
-enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
-int nfsd_vers(int vers, enum vers_op change);
-int nfsd_minorversion(u32 minorversion, enum vers_op change);
-void nfsd_reset_versions(void);
-int nfsd_create_serv(void);
-
-extern int nfsd_max_blksize;
-
-/*
- * NFSv4 State
- */
-#ifdef CONFIG_NFSD_V4
-extern unsigned int max_delegations;
-int nfs4_state_init(void);
-void nfsd4_free_slabs(void);
-int nfs4_state_start(void);
-void nfs4_state_shutdown(void);
-time_t nfs4_lease_time(void);
-void nfs4_reset_lease(time_t leasetime);
-int nfs4_reset_recoverydir(char *recdir);
-#else
-static inline int nfs4_state_init(void) { return 0; }
-static inline void nfsd4_free_slabs(void) { }
-static inline int nfs4_state_start(void) { return 0; }
-static inline void nfs4_state_shutdown(void) { }
-static inline time_t nfs4_lease_time(void) { return 0; }
-static inline void nfs4_reset_lease(time_t leasetime) { }
-static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
-#endif
-
-/*
- * lockd binding
- */
-void nfsd_lockd_init(void);
-void nfsd_lockd_shutdown(void);
-
-
-/*
- * These macros provide pre-xdr'ed values for faster operation.
- */
-#define nfs_ok cpu_to_be32(NFS_OK)
-#define nfserr_perm cpu_to_be32(NFSERR_PERM)
-#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
-#define nfserr_io cpu_to_be32(NFSERR_IO)
-#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
-#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
-#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
-#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
-#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
-#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
-#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
-#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
-#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
-#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
-#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
-#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
-#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
-#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
-#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
-#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
-#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
-#define nfserr_stale cpu_to_be32(NFSERR_STALE)
-#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
-#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
-#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
-#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
-#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
-#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
-#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
-#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
-#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
-#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
-#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
-#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
-#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
-#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
-#define nfserr_same cpu_to_be32(NFSERR_SAME)
-#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
-#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
-#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
-#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
-#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
-#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
-#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
-#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
-#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
-#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
-#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
-#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
-#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
-#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
-#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
-#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
-#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
-#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
-#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
-#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
-#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
-#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
-#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
-#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
-#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
-#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
-#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
-#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
-#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
-#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
-#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
-#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
-#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
-#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
-#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
-#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
-#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
-#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
-#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
-#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
-#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
-#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
-#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
-#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
-#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
-#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
-#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
-#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
-#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
-#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
-#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
-#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
-#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
-#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
-#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
-#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
-#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
-#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
-#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
-#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
-#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
-#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
-#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
-#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
-
-/* error codes for internal use */
-/* if a request fails due to kmalloc failure, it gets dropped.
- * Client should resend eventually
- */
-#define nfserr_dropit cpu_to_be32(30000)
-/* end-of-file indicator in readdir */
-#define nfserr_eof cpu_to_be32(30001)
-/* replay detected */
-#define nfserr_replay_me cpu_to_be32(11001)
-/* nfs41 replay detected */
-#define nfserr_replay_cache cpu_to_be32(11002)
-
-/* Check for dir entries '.' and '..' */
-#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
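The length test short-circuits the name inspection, so only the two directory self-entries match. A few worked cases:

/*
 * isdotent(".", 1)   -> true   (l == 1, n[0] == '.')
 * isdotent("..", 2)  -> true   (l == 2, n[0] == n[1] == '.')
 * isdotent(".x", 2)  -> false  (n[1] != '.')
 * isdotent("..x", 3) -> false  (l < 3 fails before n is examined)
 */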
-
-/*
- * Time of server startup
- */
-extern struct timeval nfssvc_boot;
-
-#ifdef CONFIG_NFSD_V4
-
-/* Before processing a COMPOUND operation, we have to check that there
- * is enough space in the buffer for the XDR encode to succeed.  Otherwise,
- * we might process an operation with side effects, and then be unable to
- * tell the client that the operation succeeded.
- *
- * COMPOUND_SLACK_SPACE - the minimum number of bytes of buffer space
- * needed to encode an "ordinary" _successful_ operation.  (GETATTR,
- * READ, READDIR, and READLINK have their own buffer checks.)  If we
- * fall below this level, we fail the next operation with NFS4ERR_RESOURCE.
- *
- * COMPOUND_ERR_SLACK_SPACE - the minimum number of bytes of buffer space
- * needed to encode an operation which has failed with NFS4ERR_RESOURCE.
- * Care is taken to ensure that we never fall below this level for any
- * reason.
- */
-#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
-#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
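A minimal sketch of the check the comment above describes, written against the resp->p/resp->end encode cursors of struct nfsd4_compoundres (from the NFSv4 XDR header); the helper name is made up for illustration:

/* Sketch only: fail the next op with NFS4ERR_RESOURCE once fewer than
 * COMPOUND_SLACK_SPACE bytes remain in the encode buffer.
 */
static inline __be32 example_check_slack(struct nfsd4_compoundres *resp)
{
        if ((char *)resp->end - (char *)resp->p < COMPOUND_SLACK_SPACE)
                return nfserr_resource;
        return nfs_ok;
}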
-
-#define NFSD_LEASE_TIME (nfs4_lease_time())
-#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
-
-/*
- * The following attributes are currently not supported by the NFSv4 server:
- * ARCHIVE (deprecated anyway)
- * HIDDEN (unlikely to be supported any time soon)
- * MIMETYPE (unlikely to be supported any time soon)
- * QUOTA_* (will be supported in a forthcoming patch)
- * SYSTEM (unlikely to be supported any time soon)
- * TIME_BACKUP (unlikely to be supported any time soon)
- * TIME_CREATE (unlikely to be supported any time soon)
- */
-#define NFSD4_SUPPORTED_ATTRS_WORD0 \
-(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
- | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
- | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
- | FATTR4_WORD0_UNIQUE_HANDLES | FATTR4_WORD0_LEASE_TIME | FATTR4_WORD0_RDATTR_ERROR \
- | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \
- | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \
- | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \
- | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \
- | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
- | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
-
-#define NFSD4_SUPPORTED_ATTRS_WORD1 \
-(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
- | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
- | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
- | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \
- | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
- | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
-
-#define NFSD4_SUPPORTED_ATTRS_WORD2 0
-
-#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
- NFSD4_SUPPORTED_ATTRS_WORD0
-
-#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
- NFSD4_SUPPORTED_ATTRS_WORD1
-
-#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
- (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
-
-static inline u32 nfsd_suppattrs0(u32 minorversion)
-{
- return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
- : NFSD4_SUPPORTED_ATTRS_WORD0;
-}
-
-static inline u32 nfsd_suppattrs1(u32 minorversion)
-{
- return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
- : NFSD4_SUPPORTED_ATTRS_WORD1;
-}
-
-static inline u32 nfsd_suppattrs2(u32 minorversion)
-{
- return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
- : NFSD4_SUPPORTED_ATTRS_WORD2;
-}
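Concretely, word 2 is where the two minor versions first diverge:

/*
 * nfsd_suppattrs2(0) == 0                                (NFSv4.0)
 * nfsd_suppattrs2(1) == FATTR4_WORD2_SUPPATTR_EXCLCREAT  (NFSv4.1)
 * Words 0 and 1 are identical for both minor versions.
 */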
-
-/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
-#define NFSD_WRITEONLY_ATTRS_WORD1 \
-(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
-
-/* These are the only attrs allowed in CREATE/OPEN/SETATTR. */
-#define NFSD_WRITEABLE_ATTRS_WORD0 \
-(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL )
-#define NFSD_WRITEABLE_ATTRS_WORD1 \
-(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
- | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
-#define NFSD_WRITEABLE_ATTRS_WORD2 0
-
-#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
- NFSD_WRITEABLE_ATTRS_WORD0
-/*
- * we currently store the exclusive create verifier in the v_{a,m}time
- * attributes so the client can't set these at create time using EXCLUSIVE4_1
- */
-#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
- (NFSD_WRITEABLE_ATTRS_WORD1 & \
- ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
-#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
- NFSD_WRITEABLE_ATTRS_WORD2
-
-#endif /* CONFIG_NFSD_V4 */
-
-#endif /* LINUX_NFSD_NFSD_H */
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 8f641c908450..65e333afaee4 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -16,11 +16,9 @@
# include <linux/types.h>
#ifdef __KERNEL__
-# include <linux/string.h>
-# include <linux/fs.h>
+# include <linux/sunrpc/svc.h>
#endif
#include <linux/nfsd/const.h>
-#include <linux/nfsd/debug.h>
/*
* This is the old "dentry style" Linux NFSv2 file handle.
@@ -164,208 +162,6 @@ typedef struct svc_fh {
} svc_fh;
-enum nfsd_fsid {
- FSID_DEV = 0,
- FSID_NUM,
- FSID_MAJOR_MINOR,
- FSID_ENCODE_DEV,
- FSID_UUID4_INUM,
- FSID_UUID8,
- FSID_UUID16,
- FSID_UUID16_INUM,
-};
-
-enum fsid_source {
- FSIDSOURCE_DEV,
- FSIDSOURCE_FSID,
- FSIDSOURCE_UUID,
-};
-extern enum fsid_source fsid_source(struct svc_fh *fhp);
-
-
-/* This might look a little large to "inline" but in all calls except
- * one, 'vers' is constant so most of the function disappears.
- */
-static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
- u32 fsid, unsigned char *uuid)
-{
- u32 *up;
- switch(vers) {
- case FSID_DEV:
- fsidv[0] = htonl((MAJOR(dev)<<16) |
- MINOR(dev));
- fsidv[1] = ino_t_to_u32(ino);
- break;
- case FSID_NUM:
- fsidv[0] = fsid;
- break;
- case FSID_MAJOR_MINOR:
- fsidv[0] = htonl(MAJOR(dev));
- fsidv[1] = htonl(MINOR(dev));
- fsidv[2] = ino_t_to_u32(ino);
- break;
-
- case FSID_ENCODE_DEV:
- fsidv[0] = new_encode_dev(dev);
- fsidv[1] = ino_t_to_u32(ino);
- break;
-
- case FSID_UUID4_INUM:
- /* 4 byte fsid and inode number */
- up = (u32*)uuid;
- fsidv[0] = ino_t_to_u32(ino);
- fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
- break;
-
- case FSID_UUID8:
- /* 8 byte fsid */
- up = (u32*)uuid;
- fsidv[0] = up[0] ^ up[2];
- fsidv[1] = up[1] ^ up[3];
- break;
-
- case FSID_UUID16:
- /* 16 byte fsid - NFSv3+ only */
- memcpy(fsidv, uuid, 16);
- break;
-
- case FSID_UUID16_INUM:
- /* 8 byte inode and 16 byte fsid */
- *(u64*)fsidv = (u64)ino;
- memcpy(fsidv+2, uuid, 16);
- break;
- default: BUG();
- }
-}
-
-static inline int key_len(int type)
-{
- switch(type) {
- case FSID_DEV: return 8;
- case FSID_NUM: return 4;
- case FSID_MAJOR_MINOR: return 12;
- case FSID_ENCODE_DEV: return 8;
- case FSID_UUID4_INUM: return 8;
- case FSID_UUID8: return 8;
- case FSID_UUID16: return 16;
- case FSID_UUID16_INUM: return 24;
- default: return 0;
- }
-}
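A hedged usage sketch combining the two helpers; the function name and the six-word buffer (sized for the largest case, FSID_UUID16_INUM at 24 bytes) are illustrative only:

static inline int example_encode_dev_fsid(dev_t dev, ino_t ino, u32 *out)
{
        u32 fsidv[6];   /* big enough for every enum nfsd_fsid variant */

        mk_fsid(FSID_ENCODE_DEV, fsidv, dev, ino, 0, NULL);
        memcpy(out, fsidv, key_len(FSID_ENCODE_DEV));   /* copies 8 bytes */
        return key_len(FSID_ENCODE_DEV);
}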
-
-/*
- * Shorthand for dprintk()'s
- */
-extern char * SVCFH_fmt(struct svc_fh *fhp);
-
-/*
- * Function prototypes
- */
-__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
-__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
-__be32 fh_update(struct svc_fh *);
-void fh_put(struct svc_fh *);
-
-static __inline__ struct svc_fh *
-fh_copy(struct svc_fh *dst, struct svc_fh *src)
-{
- WARN_ON(src->fh_dentry || src->fh_locked);
-
- *dst = *src;
- return dst;
-}
-
-static inline void
-fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
-{
- dst->fh_size = src->fh_size;
- memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
-}
-
-static __inline__ struct svc_fh *
-fh_init(struct svc_fh *fhp, int maxsize)
-{
- memset(fhp, 0, sizeof(*fhp));
- fhp->fh_maxsize = maxsize;
- return fhp;
-}
-
-#ifdef CONFIG_NFSD_V3
-/*
- * Fill in the pre_op attr for the wcc data
- */
-static inline void
-fill_pre_wcc(struct svc_fh *fhp)
-{
- struct inode *inode;
-
- inode = fhp->fh_dentry->d_inode;
- if (!fhp->fh_pre_saved) {
- fhp->fh_pre_mtime = inode->i_mtime;
- fhp->fh_pre_ctime = inode->i_ctime;
- fhp->fh_pre_size = inode->i_size;
- fhp->fh_pre_change = inode->i_version;
- fhp->fh_pre_saved = 1;
- }
-}
-
-extern void fill_post_wcc(struct svc_fh *);
-#else
-#define fill_pre_wcc(ignored)
-#define fill_post_wcc(notused)
-#endif /* CONFIG_NFSD_V3 */
-
-
-/*
- * Lock a file handle/inode
- * NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once,
- * so any changes here should be reflected there.
- */
-
-static inline void
-fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
-{
- struct dentry *dentry = fhp->fh_dentry;
- struct inode *inode;
-
- dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n",
- SVCFH_fmt(fhp), fhp->fh_locked);
-
- BUG_ON(!dentry);
-
- if (fhp->fh_locked) {
- printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
- return;
- }
-
- inode = dentry->d_inode;
- mutex_lock_nested(&inode->i_mutex, subclass);
- fill_pre_wcc(fhp);
- fhp->fh_locked = 1;
-}
-
-static inline void
-fh_lock(struct svc_fh *fhp)
-{
- fh_lock_nested(fhp, I_MUTEX_NORMAL);
-}
-
-/*
- * Unlock a file handle/inode
- */
-static inline void
-fh_unlock(struct svc_fh *fhp)
-{
- BUG_ON(!fhp->fh_dentry);
-
- if (fhp->fh_locked) {
- fill_post_wcc(fhp);
- mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
- fhp->fh_locked = 0;
- }
-}
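The usual calling pattern, as a sketch (the body comment stands in for whatever directory or attribute change the caller performs; the helper name is hypothetical):

static inline void example_locked_update(struct svc_fh *fhp)
{
        fh_lock(fhp);           /* takes i_mutex, snapshots pre-op wcc data */
        /* ... modify fhp->fh_dentry->d_inode here ... */
        fh_unlock(fhp);         /* fills post-op wcc data, drops i_mutex */
}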
#endif /* __KERNEL__ */
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
deleted file mode 100644
index b38d11324189..000000000000
--- a/include/linux/nfsd/state.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * linux/include/nfsd/state.h
- *
- * Copyright (c) 2001 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Kendrick Smith <kmsmith@umich.edu>
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _NFSD4_STATE_H
-#define _NFSD4_STATE_H
-
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/sunrpc/clnt.h>
-
-typedef struct {
- u32 cl_boot;
- u32 cl_id;
-} clientid_t;
-
-typedef struct {
- u32 so_boot;
- u32 so_stateownerid;
- u32 so_fileid;
-} stateid_opaque_t;
-
-typedef struct {
- u32 si_generation;
- stateid_opaque_t si_opaque;
-} stateid_t;
-#define si_boot si_opaque.so_boot
-#define si_stateownerid si_opaque.so_stateownerid
-#define si_fileid si_opaque.so_fileid
-
-struct nfsd4_cb_sequence {
- /* args/res */
- u32 cbs_minorversion;
- struct nfs4_client *cbs_clp;
-};
-
-struct nfs4_delegation {
- struct list_head dl_perfile;
- struct list_head dl_perclnt;
- struct list_head dl_recall_lru; /* delegation recalled */
- atomic_t dl_count; /* ref count */
- struct nfs4_client *dl_client;
- struct nfs4_file *dl_file;
- struct file_lock *dl_flock;
- struct file *dl_vfs_file;
- u32 dl_type;
- time_t dl_time;
-/* For recall: */
- u32 dl_ident;
- stateid_t dl_stateid;
- struct knfsd_fh dl_fh;
- int dl_retries;
-};
-
-/* client delegation callback info */
-struct nfs4_cb_conn {
- /* SETCLIENTID info */
- struct sockaddr_storage cb_addr;
- size_t cb_addrlen;
- u32 cb_prog;
- u32 cb_minorversion;
- u32 cb_ident; /* minorversion 0 only */
- /* RPC client info */
- atomic_t cb_set; /* successful CB_NULL call */
- struct rpc_clnt * cb_client;
-};
-
-/* Maximum number of slots per session. 160 is useful for long haul TCP */
-#define NFSD_MAX_SLOTS_PER_SESSION 160
-/* Maximum number of operations per session compound */
-#define NFSD_MAX_OPS_PER_COMPOUND 16
-/* Maximum size of each per-slot cache entry */
-#define NFSD_SLOT_CACHE_SIZE 1024
-/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
-#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32
-#define NFSD_MAX_MEM_PER_SESSION \
- (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
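With the values above, the per-session reply-cache budget works out as follows:

/*
 * NFSD_MAX_MEM_PER_SESSION = 32 slots * 1024 bytes/slot
 *                          = 32768 bytes (32 KiB) per session,
 * even though the slot table itself may advertise up to
 * NFSD_MAX_SLOTS_PER_SESSION (160) slots.
 */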
-
-struct nfsd4_slot {
- bool sl_inuse;
- bool sl_cachethis;
- u16 sl_opcnt;
- u32 sl_seqid;
- __be32 sl_status;
- u32 sl_datalen;
- char sl_data[];
-};
-
-struct nfsd4_channel_attrs {
- u32 headerpadsz;
- u32 maxreq_sz;
- u32 maxresp_sz;
- u32 maxresp_cached;
- u32 maxops;
- u32 maxreqs;
- u32 nr_rdma_attrs;
- u32 rdma_attrs;
-};
-
-struct nfsd4_create_session {
- clientid_t clientid;
- struct nfs4_sessionid sessionid;
- u32 seqid;
- u32 flags;
- struct nfsd4_channel_attrs fore_channel;
- struct nfsd4_channel_attrs back_channel;
- u32 callback_prog;
- u32 uid;
- u32 gid;
-};
-
-/* The single slot clientid cache structure */
-struct nfsd4_clid_slot {
- u32 sl_seqid;
- __be32 sl_status;
- struct nfsd4_create_session sl_cr_ses;
-};
-
-struct nfsd4_session {
- struct kref se_ref;
- struct list_head se_hash; /* hash by sessionid */
- struct list_head se_perclnt;
- u32 se_flags;
- struct nfs4_client *se_client; /* for expire_client */
- struct nfs4_sessionid se_sessionid;
- struct nfsd4_channel_attrs se_fchannel;
- struct nfsd4_channel_attrs se_bchannel;
- struct nfsd4_slot *se_slots[]; /* forward channel slots */
-};
-
-static inline void
-nfsd4_put_session(struct nfsd4_session *ses)
-{
- extern void free_session(struct kref *kref);
- kref_put(&ses->se_ref, free_session);
-}
-
-static inline void
-nfsd4_get_session(struct nfsd4_session *ses)
-{
- kref_get(&ses->se_ref);
-}
-
-/* formatted contents of nfs4_sessionid */
-struct nfsd4_sessionid {
- clientid_t clientid;
- u32 sequence;
- u32 reserved;
-};
-
-#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
-
-/*
- * struct nfs4_client - one per client. Clientids live here.
- * o Each nfs4_client is hashed by clientid.
- *
- * o Each nfs4_client is also hashed by name
- * (the opaque quantity initially sent by the client to identify itself).
- *
- * o cl_perclient list is used to ensure no dangling stateowner references
- * when we expire the nfs4_client
- */
-struct nfs4_client {
- struct list_head cl_idhash; /* hash by cl_clientid.id */
- struct list_head cl_strhash; /* hash by cl_name */
- struct list_head cl_openowners;
- struct list_head cl_delegations;
- struct list_head cl_lru; /* tail queue */
- struct xdr_netobj cl_name; /* id generated by client */
- char cl_recdir[HEXDIR_LEN]; /* recovery dir */
- nfs4_verifier cl_verifier; /* generated by client */
- time_t cl_time; /* time of last lease renewal */
- struct sockaddr_storage cl_addr; /* client ipaddress */
- u32 cl_flavor; /* setclientid pseudoflavor */
- char *cl_principal; /* setclientid principal name */
- struct svc_cred cl_cred; /* setclientid principal */
- clientid_t cl_clientid; /* generated by server */
- nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_cb_conn cl_cb_conn; /* callback info */
- atomic_t cl_count; /* ref count */
- u32 cl_firststate; /* recovery dir creation */
-
- /* for nfs41 */
- struct list_head cl_sessions;
- struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
- u32 cl_exchange_flags;
- struct nfs4_sessionid cl_sessionid;
-
- /* for nfs41 callbacks */
- /* We currently support a single back channel with a single slot */
- unsigned long cl_cb_slot_busy;
- u32 cl_cb_seq_nr;
- struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
- struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
- /* wait here for slots */
-};
-
-/* struct nfs4_client_reclaim
- * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl
- * upon lease reset, or from upcall to state_daemon (to read in state
- * from non-volatile storage) upon reboot.
- */
-struct nfs4_client_reclaim {
- struct list_head cr_strhash; /* hash by cr_name */
- char cr_recdir[HEXDIR_LEN]; /* recover dir */
-};
-
-static inline void
-update_stateid(stateid_t *stateid)
-{
- stateid->si_generation++;
-}
-
-/* A reasonable value for REPLAY_ISIZE was estimated as follows:
- * The OPEN response, typically the largest, requires
- * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
- * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
- * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
- */
-
-#define NFSD4_REPLAY_ISIZE 112
-
-/*
- * Replay buffer, where the result of the last seqid-mutating operation
- * is cached.
- */
-struct nfs4_replay {
- __be32 rp_status;
- unsigned int rp_buflen;
- char *rp_buf;
- unsigned int rp_allocated;
- struct knfsd_fh rp_openfh;
- char rp_ibuf[NFSD4_REPLAY_ISIZE];
-};
-
-/*
-* nfs4_stateowner can either be an open_owner, or a lock_owner
-*
-* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[]
-* for lock_owner
-* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[]
-* for lock_owner
-* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client
-* struct is reaped.
-* so_perfilestate: heads the list of nfs4_stateid (either open or lock)
-* and is used to ensure no dangling nfs4_stateid references when we
-* release a stateowner.
-* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when
-* close is called to reap associated byte-range locks
-* so_close_lru: (open) stateowner is placed on this list instead of being
-* reaped (when so_perfilestate is empty) to hold the last close replay.
- * Reaped by the laundromat thread after the lease period.
-*/
-struct nfs4_stateowner {
- struct kref so_ref;
- struct list_head so_idhash; /* hash by so_id */
- struct list_head so_strhash; /* hash by op_name */
- struct list_head so_perclient;
- struct list_head so_stateids;
- struct list_head so_perstateid; /* for lockowners only */
- struct list_head so_close_lru; /* tail queue */
- time_t so_time; /* time of placement on so_close_lru */
- int so_is_open_owner; /* 1=openowner,0=lockowner */
- u32 so_id;
- struct nfs4_client * so_client;
- /* after increment in ENCODE_SEQID_OP_TAIL, represents the next
- * sequence id expected from the client: */
- u32 so_seqid;
- struct xdr_netobj so_owner; /* open owner name */
- int so_confirmed; /* successful OPEN_CONFIRM? */
- struct nfs4_replay so_replay;
-};
-
-/*
-* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
-* o fi_perfile list is used to search for conflicting
- * share_access, share_deny on the file.
-*/
-struct nfs4_file {
- atomic_t fi_ref;
- struct list_head fi_hash; /* hash by "struct inode *" */
- struct list_head fi_stateids;
- struct list_head fi_delegations;
- struct inode *fi_inode;
- u32 fi_id; /* used with stateowner->so_id
- * for stateid_hashtbl hash */
- bool fi_had_conflict;
-};
-
-/*
-* nfs4_stateid can either be an open stateid or (eventually) a lock stateid
-*
-* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file
-*
-* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry
-* st_perfile: file_hashtbl[] entry.
-* st_perfile_state: nfs4_stateowner->so_perfilestate
-* st_perlockowner: (open stateid) list of lock nfs4_stateowners
-* st_access_bmap: used only for open stateid
-* st_deny_bmap: used only for open stateid
-* st_openstp: open stateid lock stateid was derived from
-*
-* XXX: open stateids and lock stateids have diverged sufficiently that
-* we should consider defining separate structs for the two cases.
-*/
-
-struct nfs4_stateid {
- struct list_head st_hash;
- struct list_head st_perfile;
- struct list_head st_perstateowner;
- struct list_head st_lockowners;
- struct nfs4_stateowner * st_stateowner;
- struct nfs4_file * st_file;
- stateid_t st_stateid;
- struct file * st_vfs_file;
- unsigned long st_access_bmap;
- unsigned long st_deny_bmap;
- struct nfs4_stateid * st_openstp;
-};
-
-/* flags for preprocess_seqid_op() */
-#define HAS_SESSION 0x00000001
-#define CONFIRM 0x00000002
-#define OPEN_STATE 0x00000004
-#define LOCK_STATE 0x00000008
-#define RD_STATE 0x00000010
-#define WR_STATE 0x00000020
-#define CLOSE_STATE 0x00000040
-
-#define seqid_mutating_err(err) \
- (((err) != nfserr_stale_clientid) && \
- ((err) != nfserr_bad_seqid) && \
- ((err) != nfserr_stale_stateid) && \
- ((err) != nfserr_bad_stateid))
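In other words, only the four listed errors are treated as non-mutating; one illustrative case (standard NFSv4.0 seqid behaviour, not spelled out in this header):

/*
 * Example: an OPEN that fails with nfserr_bad_seqid is *not* seqid-mutating,
 * so so_seqid is left alone and the client retries with the same sequence id;
 * an OPEN that fails with, say, nfserr_access still consumes the seqid.
 */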
-
-struct nfsd4_compound_state;
-
-extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
- stateid_t *stateid, int flags, struct file **filp);
-extern void nfs4_lock_state(void);
-extern void nfs4_unlock_state(void);
-extern int nfs4_in_grace(void);
-extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
-extern void put_nfs4_client(struct nfs4_client *clp);
-extern void nfs4_free_stateowner(struct kref *kref);
-extern int set_callback_cred(void);
-extern void nfsd4_probe_callback(struct nfs4_client *clp);
-extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
-extern void nfs4_put_delegation(struct nfs4_delegation *dp);
-extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
-extern void nfsd4_init_recdir(char *recdir_name);
-extern int nfsd4_recdir_load(void);
-extern void nfsd4_shutdown_recdir(void);
-extern int nfs4_client_to_reclaim(const char *name);
-extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
-extern void nfsd4_recdir_purge_old(void);
-extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
-extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
-
-static inline void
-nfs4_put_stateowner(struct nfs4_stateowner *so)
-{
- kref_put(&so->so_ref, nfs4_free_stateowner);
-}
-
-static inline void
-nfs4_get_stateowner(struct nfs4_stateowner *so)
-{
- kref_get(&so->so_ref);
-}
-
-#endif /* NFSD4_STATE_H */
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
index 7a3b565b898f..812bc1e160dc 100644
--- a/include/linux/nfsd/syscall.h
+++ b/include/linux/nfsd/syscall.h
@@ -9,14 +9,8 @@
#ifndef NFSD_SYSCALL_H
#define NFSD_SYSCALL_H
-# include <linux/types.h>
-#ifdef __KERNEL__
-# include <linux/in.h>
-#endif
-#include <linux/posix_types.h>
-#include <linux/nfsd/const.h>
+#include <linux/types.h>
#include <linux/nfsd/export.h>
-#include <linux/nfsd/nfsfh.h>
/*
* Version of the syscall interface
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
deleted file mode 100644
index a0132ef58f21..000000000000
--- a/include/linux/nfsd/xdr.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/include/linux/nfsd/xdr.h
- *
- * XDR types for nfsd. This is mainly a typing exercise.
- */
-
-#ifndef LINUX_NFSD_H
-#define LINUX_NFSD_H
-
-#include <linux/fs.h>
-#include <linux/vfs.h>
-#include <linux/nfs.h>
-
-struct nfsd_fhandle {
- struct svc_fh fh;
-};
-
-struct nfsd_sattrargs {
- struct svc_fh fh;
- struct iattr attrs;
-};
-
-struct nfsd_diropargs {
- struct svc_fh fh;
- char * name;
- unsigned int len;
-};
-
-struct nfsd_readargs {
- struct svc_fh fh;
- __u32 offset;
- __u32 count;
- int vlen;
-};
-
-struct nfsd_writeargs {
- svc_fh fh;
- __u32 offset;
- int len;
- int vlen;
-};
-
-struct nfsd_createargs {
- struct svc_fh fh;
- char * name;
- unsigned int len;
- struct iattr attrs;
-};
-
-struct nfsd_renameargs {
- struct svc_fh ffh;
- char * fname;
- unsigned int flen;
- struct svc_fh tfh;
- char * tname;
- unsigned int tlen;
-};
-
-struct nfsd_readlinkargs {
- struct svc_fh fh;
- char * buffer;
-};
-
-struct nfsd_linkargs {
- struct svc_fh ffh;
- struct svc_fh tfh;
- char * tname;
- unsigned int tlen;
-};
-
-struct nfsd_symlinkargs {
- struct svc_fh ffh;
- char * fname;
- unsigned int flen;
- char * tname;
- unsigned int tlen;
- struct iattr attrs;
-};
-
-struct nfsd_readdirargs {
- struct svc_fh fh;
- __u32 cookie;
- __u32 count;
- __be32 * buffer;
-};
-
-struct nfsd_attrstat {
- struct svc_fh fh;
- struct kstat stat;
-};
-
-struct nfsd_diropres {
- struct svc_fh fh;
- struct kstat stat;
-};
-
-struct nfsd_readlinkres {
- int len;
-};
-
-struct nfsd_readres {
- struct svc_fh fh;
- unsigned long count;
- struct kstat stat;
-};
-
-struct nfsd_readdirres {
- int count;
-
- struct readdir_cd common;
- __be32 * buffer;
- int buflen;
- __be32 * offset;
-};
-
-struct nfsd_statfsres {
- struct kstatfs stats;
-};
-
-/*
- * Storage requirements for XDR arguments and results.
- */
-union nfsd_xdrstore {
- struct nfsd_sattrargs sattr;
- struct nfsd_diropargs dirop;
- struct nfsd_readargs read;
- struct nfsd_writeargs write;
- struct nfsd_createargs create;
- struct nfsd_renameargs rename;
- struct nfsd_linkargs link;
- struct nfsd_symlinkargs symlink;
- struct nfsd_readdirargs readdir;
-};
-
-#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore)
-
-
-int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
-int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *,
- struct nfsd_sattrargs *);
-int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *,
- struct nfsd_diropargs *);
-int nfssvc_decode_readargs(struct svc_rqst *, __be32 *,
- struct nfsd_readargs *);
-int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *,
- struct nfsd_writeargs *);
-int nfssvc_decode_createargs(struct svc_rqst *, __be32 *,
- struct nfsd_createargs *);
-int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *,
- struct nfsd_renameargs *);
-int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd_readlinkargs *);
-int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *,
- struct nfsd_linkargs *);
-int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd_symlinkargs *);
-int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *,
- struct nfsd_readdirargs *);
-int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *);
-int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *);
-int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *);
-int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *);
-int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *);
-int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *);
-
-int nfssvc_encode_entry(void *, const char *name,
- int namlen, loff_t offset, u64 ino, unsigned int);
-
-int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
-
-/* Helper functions for NFSv2 ACL code */
-__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp);
-__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
-
-#endif /* LINUX_NFSD_H */
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
deleted file mode 100644
index 421eddd65a25..000000000000
--- a/include/linux/nfsd/xdr3.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * linux/include/linux/nfsd/xdr3.h
- *
- * XDR types for NFSv3 in nfsd.
- *
- * Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _LINUX_NFSD_XDR3_H
-#define _LINUX_NFSD_XDR3_H
-
-#include <linux/nfsd/xdr.h>
-
-struct nfsd3_sattrargs {
- struct svc_fh fh;
- struct iattr attrs;
- int check_guard;
- time_t guardtime;
-};
-
-struct nfsd3_diropargs {
- struct svc_fh fh;
- char * name;
- unsigned int len;
-};
-
-struct nfsd3_accessargs {
- struct svc_fh fh;
- unsigned int access;
-};
-
-struct nfsd3_readargs {
- struct svc_fh fh;
- __u64 offset;
- __u32 count;
- int vlen;
-};
-
-struct nfsd3_writeargs {
- svc_fh fh;
- __u64 offset;
- __u32 count;
- int stable;
- __u32 len;
- int vlen;
-};
-
-struct nfsd3_createargs {
- struct svc_fh fh;
- char * name;
- unsigned int len;
- int createmode;
- struct iattr attrs;
- __be32 * verf;
-};
-
-struct nfsd3_mknodargs {
- struct svc_fh fh;
- char * name;
- unsigned int len;
- __u32 ftype;
- __u32 major, minor;
- struct iattr attrs;
-};
-
-struct nfsd3_renameargs {
- struct svc_fh ffh;
- char * fname;
- unsigned int flen;
- struct svc_fh tfh;
- char * tname;
- unsigned int tlen;
-};
-
-struct nfsd3_readlinkargs {
- struct svc_fh fh;
- char * buffer;
-};
-
-struct nfsd3_linkargs {
- struct svc_fh ffh;
- struct svc_fh tfh;
- char * tname;
- unsigned int tlen;
-};
-
-struct nfsd3_symlinkargs {
- struct svc_fh ffh;
- char * fname;
- unsigned int flen;
- char * tname;
- unsigned int tlen;
- struct iattr attrs;
-};
-
-struct nfsd3_readdirargs {
- struct svc_fh fh;
- __u64 cookie;
- __u32 dircount;
- __u32 count;
- __be32 * verf;
- __be32 * buffer;
-};
-
-struct nfsd3_commitargs {
- struct svc_fh fh;
- __u64 offset;
- __u32 count;
-};
-
-struct nfsd3_getaclargs {
- struct svc_fh fh;
- int mask;
-};
-
-struct posix_acl;
-struct nfsd3_setaclargs {
- struct svc_fh fh;
- int mask;
- struct posix_acl *acl_access;
- struct posix_acl *acl_default;
-};
-
-struct nfsd3_attrstat {
- __be32 status;
- struct svc_fh fh;
- struct kstat stat;
-};
-
-/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */
-struct nfsd3_diropres {
- __be32 status;
- struct svc_fh dirfh;
- struct svc_fh fh;
-};
-
-struct nfsd3_accessres {
- __be32 status;
- struct svc_fh fh;
- __u32 access;
-};
-
-struct nfsd3_readlinkres {
- __be32 status;
- struct svc_fh fh;
- __u32 len;
-};
-
-struct nfsd3_readres {
- __be32 status;
- struct svc_fh fh;
- unsigned long count;
- int eof;
-};
-
-struct nfsd3_writeres {
- __be32 status;
- struct svc_fh fh;
- unsigned long count;
- int committed;
-};
-
-struct nfsd3_renameres {
- __be32 status;
- struct svc_fh ffh;
- struct svc_fh tfh;
-};
-
-struct nfsd3_linkres {
- __be32 status;
- struct svc_fh tfh;
- struct svc_fh fh;
-};
-
-struct nfsd3_readdirres {
- __be32 status;
- struct svc_fh fh;
- int count;
- __be32 verf[2];
-
- struct readdir_cd common;
- __be32 * buffer;
- int buflen;
- __be32 * offset;
- __be32 * offset1;
- struct svc_rqst * rqstp;
-
-};
-
-struct nfsd3_fsstatres {
- __be32 status;
- struct kstatfs stats;
- __u32 invarsec;
-};
-
-struct nfsd3_fsinfores {
- __be32 status;
- __u32 f_rtmax;
- __u32 f_rtpref;
- __u32 f_rtmult;
- __u32 f_wtmax;
- __u32 f_wtpref;
- __u32 f_wtmult;
- __u32 f_dtpref;
- __u64 f_maxfilesize;
- __u32 f_properties;
-};
-
-struct nfsd3_pathconfres {
- __be32 status;
- __u32 p_link_max;
- __u32 p_name_max;
- __u32 p_no_trunc;
- __u32 p_chown_restricted;
- __u32 p_case_insensitive;
- __u32 p_case_preserving;
-};
-
-struct nfsd3_commitres {
- __be32 status;
- struct svc_fh fh;
-};
-
-struct nfsd3_getaclres {
- __be32 status;
- struct svc_fh fh;
- int mask;
- struct posix_acl *acl_access;
- struct posix_acl *acl_default;
-};
-
-/* dummy type for release */
-struct nfsd3_fhandle_pair {
- __u32 dummy;
- struct svc_fh fh1;
- struct svc_fh fh2;
-};
-
-/*
- * Storage requirements for XDR arguments and results.
- */
-union nfsd3_xdrstore {
- struct nfsd3_sattrargs sattrargs;
- struct nfsd3_diropargs diropargs;
- struct nfsd3_readargs readargs;
- struct nfsd3_writeargs writeargs;
- struct nfsd3_createargs createargs;
- struct nfsd3_renameargs renameargs;
- struct nfsd3_linkargs linkargs;
- struct nfsd3_symlinkargs symlinkargs;
- struct nfsd3_readdirargs readdirargs;
- struct nfsd3_diropres diropres;
- struct nfsd3_accessres accessres;
- struct nfsd3_readlinkres readlinkres;
- struct nfsd3_readres readres;
- struct nfsd3_writeres writeres;
- struct nfsd3_renameres renameres;
- struct nfsd3_linkres linkres;
- struct nfsd3_readdirres readdirres;
- struct nfsd3_fsstatres fsstatres;
- struct nfsd3_fsinfores fsinfores;
- struct nfsd3_pathconfres pathconfres;
- struct nfsd3_commitres commitres;
- struct nfsd3_getaclres getaclres;
-};
-
-#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)
-
-int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
-int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *,
- struct nfsd3_sattrargs *);
-int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *,
- struct nfsd3_diropargs *);
-int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *,
- struct nfsd3_accessargs *);
-int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readargs *);
-int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *,
- struct nfsd3_writeargs *);
-int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *,
- struct nfsd3_createargs *);
-int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *,
- struct nfsd3_createargs *);
-int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *,
- struct nfsd3_mknodargs *);
-int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *,
- struct nfsd3_renameargs *);
-int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readlinkargs *);
-int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_linkargs *);
-int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *,
- struct nfsd3_symlinkargs *);
-int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirargs *);
-int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirargs *);
-int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *,
- struct nfsd3_commitargs *);
-int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
-int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *,
- struct nfsd3_diropres *);
-int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *,
- struct nfsd3_accessres *);
-int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *,
- struct nfsd3_readlinkres *);
-int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *);
-int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *);
-int nfs3svc_encode_createres(struct svc_rqst *, __be32 *,
- struct nfsd3_diropres *);
-int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *,
- struct nfsd3_renameres *);
-int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *,
- struct nfsd3_linkres *);
-int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *,
- struct nfsd3_readdirres *);
-int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *,
- struct nfsd3_fsstatres *);
-int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *,
- struct nfsd3_fsinfores *);
-int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *,
- struct nfsd3_pathconfres *);
-int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *,
- struct nfsd3_commitres *);
-
-int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *,
- struct nfsd3_attrstat *);
-int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *,
- struct nfsd3_fhandle_pair *);
-int nfs3svc_encode_entry(void *, const char *name,
- int namlen, loff_t offset, u64 ino,
- unsigned int);
-int nfs3svc_encode_entry_plus(void *, const char *name,
- int namlen, loff_t offset, u64 ino,
- unsigned int);
-/* Helper functions for NFSv3 ACL code */
-__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p,
- struct svc_fh *fhp);
-__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp);
-
-
-#endif /* _LINUX_NFSD_XDR3_H */
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
deleted file mode 100644
index 73164c2b3d29..000000000000
--- a/include/linux/nfsd/xdr4.h
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- * include/linux/nfsd/xdr4.h
- *
- * Server-side types for NFSv4.
- *
- * Copyright (c) 2002 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Kendrick Smith <kmsmith@umich.edu>
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _LINUX_NFSD_XDR4_H
-#define _LINUX_NFSD_XDR4_H
-
-#include <linux/nfs4.h>
-
-#define NFSD4_MAX_TAGLEN 128
-#define XDR_LEN(n) (((n) + 3) & ~3)
-
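XDR_LEN() rounds a byte count up to XDR's four-byte word alignment; a few worked values:

/*
 * XDR_LEN(0) == 0,  XDR_LEN(1) == 4,  XDR_LEN(4) == 4,
 * XDR_LEN(5) == 8,  XDR_LEN(NFSD4_MAX_TAGLEN) == 128
 */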
-struct nfsd4_compound_state {
- struct svc_fh current_fh;
- struct svc_fh save_fh;
- struct nfs4_stateowner *replay_owner;
- /* For sessions DRC */
- struct nfsd4_session *session;
- struct nfsd4_slot *slot;
- __be32 *datap;
- size_t iovlen;
- u32 minorversion;
- u32 status;
-};
-
-static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
-{
- return cs->slot != NULL;
-}
-
-struct nfsd4_change_info {
- u32 atomic;
- bool change_supported;
- u32 before_ctime_sec;
- u32 before_ctime_nsec;
- u64 before_change;
- u32 after_ctime_sec;
- u32 after_ctime_nsec;
- u64 after_change;
-};
-
-struct nfsd4_access {
- u32 ac_req_access; /* request */
- u32 ac_supported; /* response */
- u32 ac_resp_access; /* response */
-};
-
-struct nfsd4_close {
- u32 cl_seqid; /* request */
- stateid_t cl_stateid; /* request+response */
- struct nfs4_stateowner * cl_stateowner; /* response */
-};
-
-struct nfsd4_commit {
- u64 co_offset; /* request */
- u32 co_count; /* request */
- nfs4_verifier co_verf; /* response */
-};
-
-struct nfsd4_create {
- u32 cr_namelen; /* request */
- char * cr_name; /* request */
- u32 cr_type; /* request */
- union { /* request */
- struct {
- u32 namelen;
- char *name;
- } link; /* NF4LNK */
- struct {
- u32 specdata1;
- u32 specdata2;
- } dev; /* NF4BLK, NF4CHR */
- } u;
- u32 cr_bmval[3]; /* request */
- struct iattr cr_iattr; /* request */
- struct nfsd4_change_info cr_cinfo; /* response */
- struct nfs4_acl *cr_acl;
-};
-#define cr_linklen u.link.namelen
-#define cr_linkname u.link.name
-#define cr_specdata1 u.dev.specdata1
-#define cr_specdata2 u.dev.specdata2
-
-struct nfsd4_delegreturn {
- stateid_t dr_stateid;
-};
-
-struct nfsd4_getattr {
- u32 ga_bmval[3]; /* request */
- struct svc_fh *ga_fhp; /* response */
-};
-
-struct nfsd4_link {
- u32 li_namelen; /* request */
- char * li_name; /* request */
- struct nfsd4_change_info li_cinfo; /* response */
-};
-
-struct nfsd4_lock_denied {
- clientid_t ld_clientid;
- struct nfs4_stateowner *ld_sop;
- u64 ld_start;
- u64 ld_length;
- u32 ld_type;
-};
-
-struct nfsd4_lock {
- /* request */
- u32 lk_type;
- u32 lk_reclaim; /* boolean */
- u64 lk_offset;
- u64 lk_length;
- u32 lk_is_new;
- union {
- struct {
- u32 open_seqid;
- stateid_t open_stateid;
- u32 lock_seqid;
- clientid_t clientid;
- struct xdr_netobj owner;
- } new;
- struct {
- stateid_t lock_stateid;
- u32 lock_seqid;
- } old;
- } v;
-
- /* response */
- union {
- struct {
- stateid_t stateid;
- } ok;
- struct nfsd4_lock_denied denied;
- } u;
- /* The lk_replay_owner is the open owner in the open_to_lock_owner
- * case and the lock owner otherwise: */
- struct nfs4_stateowner *lk_replay_owner;
-};
-#define lk_new_open_seqid v.new.open_seqid
-#define lk_new_open_stateid v.new.open_stateid
-#define lk_new_lock_seqid v.new.lock_seqid
-#define lk_new_clientid v.new.clientid
-#define lk_new_owner v.new.owner
-#define lk_old_lock_stateid v.old.lock_stateid
-#define lk_old_lock_seqid v.old.lock_seqid
-
-#define lk_rflags u.ok.rflags
-#define lk_resp_stateid u.ok.stateid
-#define lk_denied u.denied
-
-
-struct nfsd4_lockt {
- u32 lt_type;
- clientid_t lt_clientid;
- struct xdr_netobj lt_owner;
- u64 lt_offset;
- u64 lt_length;
- struct nfs4_stateowner * lt_stateowner;
- struct nfsd4_lock_denied lt_denied;
-};
-
-
-struct nfsd4_locku {
- u32 lu_type;
- u32 lu_seqid;
- stateid_t lu_stateid;
- u64 lu_offset;
- u64 lu_length;
- struct nfs4_stateowner *lu_stateowner;
-};
-
-
-struct nfsd4_lookup {
- u32 lo_len; /* request */
- char * lo_name; /* request */
-};
-
-struct nfsd4_putfh {
- u32 pf_fhlen; /* request */
- char *pf_fhval; /* request */
-};
-
-struct nfsd4_open {
- u32 op_claim_type; /* request */
- struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */
- u32 op_delegate_type; /* request - CLAIM_PREV only */
- stateid_t op_delegate_stateid; /* request - response */
- u32 op_create; /* request */
- u32 op_createmode; /* request */
- u32 op_bmval[3]; /* request */
- struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
- nfs4_verifier verf; /* EXCLUSIVE4 */
- clientid_t op_clientid; /* request */
- struct xdr_netobj op_owner; /* request */
- u32 op_seqid; /* request */
- u32 op_share_access; /* request */
- u32 op_share_deny; /* request */
- stateid_t op_stateid; /* response */
- u32 op_recall; /* recall */
- struct nfsd4_change_info op_cinfo; /* response */
- u32 op_rflags; /* response */
- int op_truncate; /* used during processing */
- struct nfs4_stateowner *op_stateowner; /* used during processing */
- struct nfs4_acl *op_acl;
-};
-#define op_iattr iattr
-#define op_verf verf
-
-struct nfsd4_open_confirm {
- stateid_t oc_req_stateid /* request */;
- u32 oc_seqid /* request */;
- stateid_t oc_resp_stateid /* response */;
- struct nfs4_stateowner * oc_stateowner; /* response */
-};
-
-struct nfsd4_open_downgrade {
- stateid_t od_stateid;
- u32 od_seqid;
- u32 od_share_access;
- u32 od_share_deny;
- struct nfs4_stateowner *od_stateowner;
-};
-
-
-struct nfsd4_read {
- stateid_t rd_stateid; /* request */
- u64 rd_offset; /* request */
- u32 rd_length; /* request */
- int rd_vlen;
- struct file *rd_filp;
-
- struct svc_rqst *rd_rqstp; /* response */
- struct svc_fh * rd_fhp; /* response */
-};
-
-struct nfsd4_readdir {
- u64 rd_cookie; /* request */
- nfs4_verifier rd_verf; /* request */
- u32 rd_dircount; /* request */
- u32 rd_maxcount; /* request */
- u32 rd_bmval[3]; /* request */
- struct svc_rqst *rd_rqstp; /* response */
- struct svc_fh * rd_fhp; /* response */
-
- struct readdir_cd common;
- __be32 * buffer;
- int buflen;
- __be32 * offset;
-};
-
-struct nfsd4_release_lockowner {
- clientid_t rl_clientid;
- struct xdr_netobj rl_owner;
-};
-struct nfsd4_readlink {
- struct svc_rqst *rl_rqstp; /* request */
- struct svc_fh * rl_fhp; /* request */
-};
-
-struct nfsd4_remove {
- u32 rm_namelen; /* request */
- char * rm_name; /* request */
- struct nfsd4_change_info rm_cinfo; /* response */
-};
-
-struct nfsd4_rename {
- u32 rn_snamelen; /* request */
- char * rn_sname; /* request */
- u32 rn_tnamelen; /* request */
- char * rn_tname; /* request */
- struct nfsd4_change_info rn_sinfo; /* response */
- struct nfsd4_change_info rn_tinfo; /* response */
-};
-
-struct nfsd4_secinfo {
- u32 si_namelen; /* request */
- char *si_name; /* request */
- struct svc_export *si_exp; /* response */
-};
-
-struct nfsd4_setattr {
- stateid_t sa_stateid; /* request */
- u32 sa_bmval[3]; /* request */
- struct iattr sa_iattr; /* request */
- struct nfs4_acl *sa_acl;
-};
-
-struct nfsd4_setclientid {
- nfs4_verifier se_verf; /* request */
- u32 se_namelen; /* request */
- char * se_name; /* request */
- u32 se_callback_prog; /* request */
- u32 se_callback_netid_len; /* request */
- char * se_callback_netid_val; /* request */
- u32 se_callback_addr_len; /* request */
- char * se_callback_addr_val; /* request */
- u32 se_callback_ident; /* request */
- clientid_t se_clientid; /* response */
- nfs4_verifier se_confirm; /* response */
-};
-
-struct nfsd4_setclientid_confirm {
- clientid_t sc_clientid;
- nfs4_verifier sc_confirm;
-};
-
-/* also used for NVERIFY */
-struct nfsd4_verify {
- u32 ve_bmval[3]; /* request */
- u32 ve_attrlen; /* request */
- char * ve_attrval; /* request */
-};
-
-struct nfsd4_write {
- stateid_t wr_stateid; /* request */
- u64 wr_offset; /* request */
- u32 wr_stable_how; /* request */
- u32 wr_buflen; /* request */
- int wr_vlen;
-
- u32 wr_bytes_written; /* response */
- u32 wr_how_written; /* response */
- nfs4_verifier wr_verifier; /* response */
-};
-
-struct nfsd4_exchange_id {
- nfs4_verifier verifier;
- struct xdr_netobj clname;
- u32 flags;
- clientid_t clientid;
- u32 seqid;
- int spa_how;
-};
-
-struct nfsd4_sequence {
- struct nfs4_sessionid sessionid; /* request/response */
- u32 seqid; /* request/response */
- u32 slotid; /* request/response */
- u32 maxslots; /* request/response */
- u32 cachethis; /* request */
-#if 0
- u32 target_maxslots; /* response */
- u32 status_flags; /* response */
-#endif /* not yet */
-};
-
-struct nfsd4_destroy_session {
- struct nfs4_sessionid sessionid;
-};
-
-struct nfsd4_op {
- int opnum;
- __be32 status;
- union {
- struct nfsd4_access access;
- struct nfsd4_close close;
- struct nfsd4_commit commit;
- struct nfsd4_create create;
- struct nfsd4_delegreturn delegreturn;
- struct nfsd4_getattr getattr;
- struct svc_fh * getfh;
- struct nfsd4_link link;
- struct nfsd4_lock lock;
- struct nfsd4_lockt lockt;
- struct nfsd4_locku locku;
- struct nfsd4_lookup lookup;
- struct nfsd4_verify nverify;
- struct nfsd4_open open;
- struct nfsd4_open_confirm open_confirm;
- struct nfsd4_open_downgrade open_downgrade;
- struct nfsd4_putfh putfh;
- struct nfsd4_read read;
- struct nfsd4_readdir readdir;
- struct nfsd4_readlink readlink;
- struct nfsd4_remove remove;
- struct nfsd4_rename rename;
- clientid_t renew;
- struct nfsd4_secinfo secinfo;
- struct nfsd4_setattr setattr;
- struct nfsd4_setclientid setclientid;
- struct nfsd4_setclientid_confirm setclientid_confirm;
- struct nfsd4_verify verify;
- struct nfsd4_write write;
- struct nfsd4_release_lockowner release_lockowner;
-
- /* NFSv4.1 */
- struct nfsd4_exchange_id exchange_id;
- struct nfsd4_create_session create_session;
- struct nfsd4_destroy_session destroy_session;
- struct nfsd4_sequence sequence;
- } u;
- struct nfs4_replay * replay;
-};
-
-struct nfsd4_compoundargs {
- /* scratch variables for XDR decode */
- __be32 * p;
- __be32 * end;
- struct page ** pagelist;
- int pagelen;
- __be32 tmp[8];
- __be32 * tmpp;
- struct tmpbuf {
- struct tmpbuf *next;
- void (*release)(const void *);
- void *buf;
- } *to_free;
-
- struct svc_rqst *rqstp;
-
- u32 taglen;
- char * tag;
- u32 minorversion;
- u32 opcnt;
- struct nfsd4_op *ops;
- struct nfsd4_op iops[8];
-};
-
-struct nfsd4_compoundres {
- /* scratch variables for XDR encode */
- __be32 * p;
- __be32 * end;
- struct xdr_buf * xbuf;
- struct svc_rqst * rqstp;
-
- u32 taglen;
- char * tag;
- u32 opcnt;
- __be32 * tagp; /* tag, opcount encode location */
- struct nfsd4_compound_state cstate;
-};
-
-static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
-{
- struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
- return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
-}
-
-static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
-{
- return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp);
-}
-
-#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
-
-static inline void
-set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
-{
- BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
- cinfo->atomic = 1;
- cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
- if (cinfo->change_supported) {
- cinfo->before_change = fhp->fh_pre_change;
- cinfo->after_change = fhp->fh_post_change;
- } else {
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
- }
-}
-
-int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
-int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
- struct nfsd4_compoundargs *);
-int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
- struct nfsd4_compoundres *);
-void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
-void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
-__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
- struct dentry *dentry, __be32 *buffer, int *countp,
- u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
-extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_setclientid *setclid);
-extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_setclientid_confirm *setclientid_confirm);
-extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
-extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
- struct nfsd4_sequence *seq);
-extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
-struct nfsd4_exchange_id *);
- extern __be32 nfsd4_create_session(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_create_session *);
-extern __be32 nfsd4_sequence(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_sequence *);
-extern __be32 nfsd4_destroy_session(struct svc_rqst *,
- struct nfsd4_compound_state *,
- struct nfsd4_destroy_session *);
-extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
- struct nfsd4_open *open);
-extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
- struct svc_fh *current_fh, struct nfsd4_open *open);
-extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
-extern __be32 nfsd4_close(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_close *close);
-extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_open_downgrade *od);
-extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
- struct nfsd4_lock *lock);
-extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_lockt *lockt);
-extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_locku *locku);
-extern __be32
-nfsd4_release_lockowner(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
- struct nfsd4_release_lockowner *rlockowner);
-extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
-extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
-extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *, clientid_t *clid);
-#endif
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a86..06292dac3eab 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
};
struct memory_block;
extern struct node node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
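
The callback type above is how the hugetlb code hands its per-node sysfs register/unregister hooks to the node driver. A minimal caller sketch, assuming hypothetical hook names (this is illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/node.h>

/* Hypothetical hooks: create/remove per-node attributes on node hotplug. */
static void example_register_node_attrs(struct node *n)
{
	/* e.g. sysdev_create_file(&n->sysdev, ...); */
}

static void example_unregister_node_attrs(struct node *n)
{
	/* tear the attributes down again on node hot-remove */
}

static int __init example_node_hooks_init(void)
{
	register_hugetlbfs_with_node(example_register_node_attrs,
				     example_unregister_node_attrs);
	return 0;
}
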
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9e..454997cccbd8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(node); \
+ m.bits[0] = 1UL << (node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ * For nodemask scratch area.
+ * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with a specified
+ * type, name and gfp flags.
*/
-
-#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
-#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define NODEMASK_FREE(m) kfree(m)
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
#else
-#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
-#define NODEMASK_FREE(m)
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
+#define NODEMASK_FREE(m) do {} while (0)
#endif
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
nodemask_t mask2;
};
-#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
-#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
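
For reference, a rough usage sketch of the reworked NODEMASK_ALLOC()/NODEMASK_FREE() pair together with init_nodemask_of_node(); the function and variable names are invented, and the NULL check can only ever fire in the kmalloc variant used on large NODES_SHIFT configurations:

#include <linux/nodemask.h>
#include <linux/slab.h>

static int example_count_nodes_near(int node)
{
	NODEMASK_ALLOC(nodemask_t, allowed, GFP_KERNEL);
	int n, count = 0;

	if (!allowed)			/* only possible with the kmalloc variant */
		return -ENOMEM;

	init_nodemask_of_node(allowed, node);
	for_each_node_mask(n, *allowed)
		count++;

	NODEMASK_FREE(allowed);
	return count;
}
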
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b159..3aaa31603a86 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6aac5fe4f6f1..537662315627 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/nodemask.h>
struct zonelist;
struct notifier_block;
@@ -26,7 +27,8 @@ enum oom_constraint {
extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
+extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *mask);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b173955..5b59f35dcb8f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
-#include <linux/bounds.h>
+#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -277,13 +275,15 @@ PAGEFLAG_FALSE(Uncached)
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
-TESTSETFLAG(HWPoison, hwpoison)
+TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+u64 stable_page_flags(struct page *page);
+
static inline int PageUptodate(struct page *page)
{
int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -393,7 +393,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac2..b0e4eb126236 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
+TESTPCGFLAG(Locked, LOCK)
+
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
bit_spin_lock(PCG_LOCK, &pc->flags);
}
-static inline int trylock_page_cgroup(struct page_cgroup *pc)
-{
- return bit_spin_trylock(PCG_LOCK, &pc->flags);
-}
-
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 04771b9c3316..174e5392e51e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int wakeup_prepared:1;
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -566,6 +567,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
resource_size_t);
void pcibios_update_irq(struct pci_dev *, int irq);
+/* Weak but can be overridden by arch */
+void pci_fixup_cardbus(struct pci_bus *);
+
/* Generic PCI functions used internally */
extern struct pci_bus *pci_find_bus(int domain, int busnr);
@@ -1255,7 +1259,7 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern u8 pci_dfl_cache_line_size;
+extern u8 __devinitdata pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
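
The new d3_delay field lets a driver or quirk lengthen the default 10 ms settle time after a D3hot to D0 transition on a per-device basis. A hypothetical quirk (vendor and device IDs made up) might look like:

#include <linux/pci.h>

/* Hypothetical device that needs much longer than 10 ms to wake up. */
static void quirk_slow_resume(struct pci_dev *dev)
{
	dev->d3_delay = 120;	/* milliseconds */
}
DECLARE_PCI_FIXUP_ENABLE(0xabcd, 0x1234, quirk_slow_resume);
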
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eae1f864c934..cca8a044e2b6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2295,6 +2295,20 @@
#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_P2020E 0x0070
#define PCI_DEVICE_ID_P2020 0x0071
+#define PCI_DEVICE_ID_P2010E 0x0078
+#define PCI_DEVICE_ID_P2010 0x0079
+#define PCI_DEVICE_ID_P1020E 0x0100
+#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1011E 0x0108
+#define PCI_DEVICE_ID_P1011 0x0109
+#define PCI_DEVICE_ID_P1022E 0x0110
+#define PCI_DEVICE_ID_P1022 0x0111
+#define PCI_DEVICE_ID_P1013E 0x0118
+#define PCI_DEVICE_ID_P1013 0x0119
+#define PCI_DEVICE_ID_P4080E 0x0400
+#define PCI_DEVICE_ID_P4080 0x0401
+#define PCI_DEVICE_ID_P4040E 0x0408
+#define PCI_DEVICE_ID_P4040 0x0409
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd4..5a5d6ce4bd55 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..cf5efbcf716c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
- void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
- __alignof__(type))
+#define alloc_percpu(type) \
+ (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
#endif
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({ typeof(variable) pscr_ret__; \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable);break; \
+ case 2: pscr_ret__ = stem##2(variable);break; \
+ case 4: pscr_ret__ = stem##4(variable);break; \
+ case 8: pscr_ret__ = stem##8(variable);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ * are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ * sized RMW actions. If arch code does not provide operations for
+ * a scalar size then the fallback in the generic code will be
+ * used.
+ */
+
+#define _this_cpu_generic_read(pcp) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = *this_cpu_ptr(&(pcp)); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ preempt_disable(); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ preempt_enable(); \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ local_irq_restore(flags); \
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
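
To make the three operation families above concrete, here is a small sketch with an invented per-cpu counter; on this kernel a DEFINE_PER_CPU variable is passed through per_cpu_var(), as the comment block notes:

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, example_hits);	/* illustrative counter */

static void example_count(void)
{
	/* full preempt-safe RMW: usable from any context */
	this_cpu_inc(per_cpu_var(example_hits));
}

static void example_count_locked(void)
{
	/* caller already disabled preemption, so the raw form is enough */
	__this_cpu_add(per_cpu_var(example_hits), 2);
}

static void example_count_vs_irq(void)
{
	/* atomic vs. local interrupts, but not vs. other CPUs */
	irqsafe_cpu_inc(per_cpu_var(example_hits));
}
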
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
deleted file mode 100644
index e3fb25606706..000000000000
--- a/include/linux/perf_counter.h
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * NOTE: this file will be removed in a future kernel release, it is
- * provided as a courtesy copy of user-space code that relies on the
- * old (pre-rename) symbols and constants.
- *
- * Performance events:
- *
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
- *
- * Data type definitions, declarations, prototypes.
- *
- * Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
- */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
-
-/*
- * User-space ABI bits:
- */
-
-/*
- * attr.type
- */
-enum perf_type_id {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
- PERF_TYPE_HW_CACHE = 3,
- PERF_TYPE_RAW = 4,
-
- PERF_TYPE_MAX, /* non-ABI */
-};
-
-/*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
- * syscall:
- */
-enum perf_hw_id {
- /*
- * Common hardware events, generalized by the kernel:
- */
- PERF_COUNT_HW_CPU_CYCLES = 0,
- PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
- PERF_COUNT_HW_CACHE_MISSES = 3,
- PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
- PERF_COUNT_HW_BUS_CYCLES = 6,
-
- PERF_COUNT_HW_MAX, /* non-ABI */
-};
-
-/*
- * Generalized hardware cache counters:
- *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
- * { read, write, prefetch } x
- * { accesses, misses }
- */
-enum perf_hw_cache_id {
- PERF_COUNT_HW_CACHE_L1D = 0,
- PERF_COUNT_HW_CACHE_L1I = 1,
- PERF_COUNT_HW_CACHE_LL = 2,
- PERF_COUNT_HW_CACHE_DTLB = 3,
- PERF_COUNT_HW_CACHE_ITLB = 4,
- PERF_COUNT_HW_CACHE_BPU = 5,
-
- PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_id {
- PERF_COUNT_HW_CACHE_OP_READ = 0,
- PERF_COUNT_HW_CACHE_OP_WRITE = 1,
- PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-
- PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_result_id {
- PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
- PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-
- PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
-};
-
-/*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
- */
-enum perf_sw_ids {
- PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
- PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
- PERF_COUNT_SW_CPU_MIGRATIONS = 4,
- PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
- PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
- PERF_COUNT_SW_EMULATION_FAULTS = 8,
-
- PERF_COUNT_SW_MAX, /* non-ABI */
-};
-
-/*
- * Bits that can be set in attr.sample_type to request information
- * in the overflow packets.
- */
-enum perf_counter_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_READ = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_ID = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
- PERF_SAMPLE_PERIOD = 1U << 8,
- PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_RAW = 1U << 10,
-
- PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
-};
-
-/*
- * The format of the data returned by read() on a perf counter fd,
- * as specified by attr.read_format:
- *
- * struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
- *
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
- * };
- */
-enum perf_counter_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
- PERF_FORMAT_GROUP = 1U << 3,
-
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
-};
-
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-
-/*
- * Hardware event to monitor via a performance monitoring counter:
- */
-struct perf_counter_attr {
-
- /*
- * Major type: hardware/software/tracepoint/etc.
- */
- __u32 type;
-
- /*
- * Size of the attr structure, for fwd/bwd compat.
- */
- __u32 size;
-
- /*
- * Type specific configuration information.
- */
- __u64 config;
-
- union {
- __u64 sample_period;
- __u64 sample_freq;
- };
-
- __u64 sample_type;
- __u64 read_format;
-
- __u64 disabled : 1, /* off by default */
- inherit : 1, /* children inherit it */
- pinned : 1, /* must always be on PMU */
- exclusive : 1, /* only group on PMU */
- exclude_user : 1, /* don't count user */
- exclude_kernel : 1, /* ditto kernel */
- exclude_hv : 1, /* ditto hypervisor */
- exclude_idle : 1, /* don't count when idle */
- mmap : 1, /* include mmap data */
- comm : 1, /* include comm data */
- freq : 1, /* use freq, not period */
- inherit_stat : 1, /* per task counts */
- enable_on_exec : 1, /* next exec enables */
- task : 1, /* trace fork/exit */
- watermark : 1, /* wakeup_watermark */
-
- __reserved_1 : 49;
-
- union {
- __u32 wakeup_events; /* wakeup every n events */
- __u32 wakeup_watermark; /* bytes before wakeup */
- };
- __u32 __reserved_2;
-
- __u64 __reserved_3;
-};
-
-/*
- * Ioctls that can be done on a perf counter fd:
- */
-#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
-
-enum perf_counter_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
-};
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_counter_mmap_page {
- __u32 version; /* version number of this structure */
- __u32 compat_version; /* lowest version this is compat with */
-
- /*
- * Bits needed to read the hw counters in user-space.
- *
- * u32 seq;
- * s64 count;
- *
- * do {
- * seq = pc->lock;
- *
- * barrier()
- * if (pc->index) {
- * count = pmc_read(pc->index - 1);
- * count += pc->offset;
- * } else
- * goto regular_read;
- *
- * barrier();
- * } while (pc->lock != seq);
- *
- * NOTE: for obvious reason this only works on self-monitoring
- * processes.
- */
- __u32 lock; /* seqlock for synchronization */
- __u32 index; /* hardware counter identifier */
- __s64 offset; /* add to hardware counter value */
- __u64 time_enabled; /* time counter active */
- __u64 time_running; /* time counter on cpu */
-
- /*
- * Hole for extension of the self monitor capabilities
- */
-
- __u64 __reserved[123]; /* align to 1k */
-
- /*
- * Control data for the mmap() data buffer.
- *
- * User-space reading the @data_head value should issue an rmb(), on
- * SMP capable platforms, after reading this value -- see
- * perf_counter_wakeup().
- *
- * When the mapping is PROT_WRITE the @data_tail value should be
- * written by userspace to reflect the last read data. In this case
- * the kernel will not over-write unread data.
- */
- __u64 data_head; /* head in the data section */
- __u64 data_tail; /* user-space written tail */
-};
-
-#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-
-struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
-};
-
-enum perf_event_type {
-
- /*
- * The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * char filename[];
- * };
- */
- PERF_EVENT_MMAP = 1,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
- * };
- */
- PERF_EVENT_LOST = 2,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * char comm[];
- * };
- */
- PERF_EVENT_COMM = 3,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * };
- */
- PERF_EVENT_EXIT = 4,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 time;
- * u64 id;
- * u64 stream_id;
- * };
- */
- PERF_EVENT_THROTTLE = 5,
- PERF_EVENT_UNTHROTTLE = 6,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * };
- */
- PERF_EVENT_FORK = 7,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, tid;
- *
- * struct read_format values;
- * };
- */
- PERF_EVENT_READ = 8,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * { u64 ip; } && PERF_SAMPLE_IP
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 period; } && PERF_SAMPLE_PERIOD
- *
- * { struct read_format values; } && PERF_SAMPLE_READ
- *
- * { u64 nr,
- * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
- *
- * #
- * # The RAW record below is opaque data wrt the ABI
- * #
- * # That is, the ABI doesn't make any promises wrt to
- * # the stability of its content, it may vary depending
- * # on event, hardware, kernel version and phase of
- * # the moon.
- * #
- * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
- * #
- *
- * { u32 size;
- * char data[size];}&& PERF_SAMPLE_RAW
- * };
- */
- PERF_EVENT_SAMPLE = 9,
-
- PERF_EVENT_MAX, /* non-ABI */
-};
-
-enum perf_callchain_context {
- PERF_CONTEXT_HV = (__u64)-32,
- PERF_CONTEXT_KERNEL = (__u64)-128,
- PERF_CONTEXT_USER = (__u64)-512,
-
- PERF_CONTEXT_GUEST = (__u64)-2048,
- PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
- PERF_CONTEXT_GUEST_USER = (__u64)-2560,
-
- PERF_CONTEXT_MAX = (__u64)-4095,
-};
-
-#define PERF_FLAG_FD_NO_GROUP (1U << 0)
-#define PERF_FLAG_FD_OUTPUT (1U << 1)
-
-/*
- * In case some app still references the old symbols:
- */
-
-#define __NR_perf_counter_open __NR_perf_event_open
-
-#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
-#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE
-
-#endif /* _LINUX_PERF_COUNTER_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f0010..8fa71874113f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
#include <linux/ioctl.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
-#endif
-
/*
* User-space ABI bits:
*/
@@ -215,17 +211,11 @@ struct perf_event_attr {
__u32 wakeup_watermark; /* bytes before wakeup */
};
- union {
- struct { /* Hardware breakpoint info */
- __u64 bp_addr;
- __u32 bp_type;
- __u32 bp_len;
- };
- };
-
__u32 __reserved_2;
- __u64 __reserved_3;
+ __u64 bp_addr;
+ __u32 bp_type;
+ __u32 bp_len;
};
/*
@@ -451,6 +441,10 @@ enum perf_callchain_context {
# include <asm/perf_event.h>
#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
@@ -565,10 +559,12 @@ struct perf_pending_entry {
void (*func)(struct perf_pending_entry *);
};
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
struct perf_sample_data;
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+ struct perf_sample_data *,
+ struct pt_regs *regs);
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -660,18 +656,12 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
- void (*overflow_handler)(struct perf_event *event,
- int nmi, struct perf_sample_data *data,
- struct pt_regs *regs);
+ perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_PROFILE
struct event_filter *filter;
#endif
- perf_callback_t callback;
-
- perf_callback_t event_callback;
-
#endif /* CONFIG_PERF_EVENTS */
};
@@ -685,7 +675,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
@@ -781,7 +771,7 @@ extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
pid_t pid,
- perf_callback_t callback);
+ perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -824,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
*/
static inline int is_software_event(struct perf_event *event)
{
- return (event->attr.type != PERF_TYPE_RAW) &&
- (event->attr.type != PERF_TYPE_HARDWARE) &&
- (event->attr.type != PERF_TYPE_HW_CACHE);
+ switch (event->attr.type) {
+ case PERF_TYPE_SOFTWARE:
+ case PERF_TYPE_TRACEPOINT:
+ /* for now the breakpoint stuff also works as software event */
+ case PERF_TYPE_BREAKPOINT:
+ return 1;
+ }
+ return 0;
}
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -876,6 +871,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -906,7 +903,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
-
+static inline void perf_event_enable(struct perf_event *event) { }
+static inline void perf_event_disable(struct perf_event *event) { }
#endif
#define perf_output_put(handle, x) \
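
With the typed overflow handler above, an in-kernel user of perf_event_create_kernel_counter() now passes a callback with the full sample signature. A rough sketch (attribute setup elided, names invented; pid of -1 is taken here to mean a CPU-wide counter):

#include <linux/perf_event.h>

static void example_overflow(struct perf_event *event, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* react to the overflow: log it, wake a waiter, ... */
}

static struct perf_event *example_counter_on(int cpu,
					      struct perf_event_attr *attr)
{
	return perf_event_create_kernel_counter(attr, cpu, -1,
						example_overflow);
}
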
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
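
A pi-list whose lock has been converted to the new raw_spinlock_t type would use the raw initializers added above; a schematic sketch with invented names, assuming DEFINE_RAW_SPINLOCK is available in this tree:

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static struct plist_head example_head =
	PLIST_HEAD_INIT_RAW(example_head, example_lock);

/* dynamic initialization works the same way */
static void example_setup(struct plist_head *head, raw_spinlock_t *lock)
{
	plist_head_init_raw(head, lock);
}
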
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246af..198b8f9fe05e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-struct dev_pm_ops name = { \
+const struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
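
Since the macro now emits a const object, a driver defines and hooks it up along these lines (illustrative sketch; the platform bus is chosen arbitrarily and all names are invented):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= &example_pm_ops,
	},
};
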
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index fddfafaed024..7c4193eb0072 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -334,6 +334,19 @@ extern struct pnp_protocol pnpbios_protocol;
#define pnp_device_is_pnpbios(dev) 0
#endif
+#ifdef CONFIG_PNPACPI
+extern struct pnp_protocol pnpacpi_protocol;
+
+static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev)
+{
+ if (dev->protocol == &pnpacpi_protocol)
+ return dev->data;
+ return NULL;
+}
+#else
+#define pnp_acpi_device(dev) 0
+#endif
+
/* status */
#define PNP_READY 0x0000
#define PNP_ATTACHED 0x0001
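
A PNP driver that wants the backing ACPI device can use the new accessor instead of dereferencing dev->data itself; a minimal sketch with invented names:

#include <linux/acpi.h>
#include <linux/pnp.h>

static int example_pnp_probe(struct pnp_dev *pdev,
			     const struct pnp_device_id *dev_id)
{
	struct acpi_device *adev = pnp_acpi_device(pdev);

	if (!adev)
		return -ENODEV;	/* device is not PNPACPI-backed */

	/* adev->handle is now available for ACPI method evaluation */
	return 0;
}
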
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 7fc194aef8c2..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -2,13 +2,25 @@
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 7456d7d87a19..56f2d63a5cbb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child)
{
return child->real_parent != child->parent;
}
-static inline void ptrace_link(struct task_struct *child,
- struct task_struct *new_parent)
-{
- if (unlikely(child->ptrace))
- __ptrace_link(child, new_parent);
-}
+
static inline void ptrace_unlink(struct task_struct *child)
{
if (unlikely(child->ptrace))
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
INIT_LIST_HEAD(&child->ptraced);
child->parent = child->real_parent;
child->ptrace = 0;
- if (unlikely(ptrace)) {
+ if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
child->ptrace = current->ptrace;
- ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent);
}
}
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task)
}
#endif /* arch_has_block_step */
+#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
+extern void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info);
+#else
+static inline void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+}
+#endif
+
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 7a9754c96775..01b3d759f1fc 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -10,7 +10,7 @@ struct platform_pwm_backlight_data {
unsigned int dft_brightness;
unsigned int pwm_period_ns;
int (*init)(struct device *dev);
- int (*notify)(int brightness);
+ int (*notify)(struct device *dev, int brightness);
void (*exit)(struct device *dev);
};
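
Board files that supply platform_pwm_backlight_data must accept the extra device argument in their notify hook after this change; an illustrative fragment (values invented, unrelated fields omitted):

#include <linux/pwm_backlight.h>

static int example_bl_notify(struct device *dev, int brightness)
{
	/* clamp or veto the requested level for this particular board */
	return brightness;
}

static struct platform_pwm_backlight_data example_bl_data = {
	/* other fields (pwm id, max brightness, ...) omitted in this sketch */
	.dft_brightness	= 200,
	.pwm_period_ns	= 1000000,	/* 1 kHz */
	.notify		= example_bl_notify,
};
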
diff --git a/include/linux/quota.h b/include/linux/quota.h
index e70e62194243..a6861f117480 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -315,8 +315,9 @@ struct dquot_operations {
int (*claim_space) (struct inode *, qsize_t);
/* release rsved quota for delayed alloc */
void (*release_rsv) (struct inode *, qsize_t);
- /* get reserved quota for delayed alloc */
- qsize_t (*get_reserved_space) (struct inode *);
+ /* get reserved quota for delayed alloc, value returned is managed by
+ * quota code only */
+ qsize_t *(*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285c..1cbbd2c11aa9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
/* Selected algorithm */
extern struct raid6_calls raid6_call;
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
int raid6_select_algo(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c4ba9a78721e..96cc307ed9f4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
{
}
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c93eee5911b0..8044b1b94333 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
extern void synchronize_rcu(void);
extern void exit_rcu(void);
+/*
+ * Defined as a macro as it is a very low level header
+ * included from areas that don't even know about current.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
{
}
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock_bh(void)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 490c5b37b6d7..030d92255c7a 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
#ifndef __LINUX_REGULATOR_CONSUMER_H_
#define __LINUX_REGULATOR_CONSUMER_H_
+#include <linux/device.h>
+
/*
* Regulator operating modes.
*
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 87f5f176d4ef..234a8476cba8 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -43,16 +43,20 @@ struct regulator;
/**
* struct regulator_state - regulator state during low power system states
*
- * This describes a regulators state during a system wide low power state.
+ * This describes a regulator's state during a system wide low power
+ * state. One of enabled or disabled must be set for the
+ * configuration to be applied.
*
* @uV: Operating voltage during suspend.
* @mode: Operating mode during suspend.
* @enabled: Enabled during suspend.
+ * @disabled: Disabled during suspend.
*/
struct regulator_state {
int uV; /* suspend voltage */
unsigned int mode; /* suspend regulator operating mode */
int enabled; /* is regulator enabled in this suspend state */
+ int disabled; /* is the regulator disabled in this suspend state */
};
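
With the new disabled flag, board constraints have to state the suspend behaviour explicitly before it is applied; an illustrative machine-constraints fragment (names and values invented):

#include <linux/regulator/machine.h>

static struct regulator_init_data example_ldo_data = {
	.constraints = {
		.name		= "example-ldo",
		.state_mem	= {
			.disabled = 1,	/* explicitly off in suspend-to-RAM */
		},
		.state_standby	= {
			.uV	 = 1800000,
			.mode	 = REGULATOR_MODE_STANDBY,
			.enabled = 1,	/* kept alive in standby */
		},
	},
};
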
/**
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
new file mode 100644
index 000000000000..9936763621c7
--- /dev/null
+++ b/include/linux/regulator/max8660.h
@@ -0,0 +1,57 @@
+/*
+ * max8660.h -- Voltage regulation for the Maxim 8660/8661
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8660_H
+#define __LINUX_REGULATOR_MAX8660_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8660_V3,
+ MAX8660_V4,
+ MAX8660_V5,
+ MAX8660_V6,
+ MAX8660_V7,
+ MAX8660_V_END,
+};
+
+/**
+ * max8660_subdev_data - regulator subdev data
+ * @id: regulator id
+ * @name: regulator name
+ * @platform_data: regulator init data
+ */
+struct max8660_subdev_data {
+ int id;
+ char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * max8660_platform_data - platform data for max8660
+ * @num_subdevs: number of regulators used
+ * @subdevs: pointer to regulators used
+ * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled.
+ */
+struct max8660_platform_data {
+ int num_subdevs;
+ struct max8660_subdev_data *subdevs;
+ unsigned en34_is_high:1;
+};
+#endif
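
Typical board wiring for this new header, sketched for a single MAX8660_V3 consumer; every name and voltage below is a placeholder rather than data taken from a real board:

#include <linux/regulator/machine.h>
#include <linux/regulator/max8660.h>

static struct regulator_init_data example_v3_data = {
	.constraints = {
		.min_uV		= 725000,
		.max_uV		= 1800000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct max8660_subdev_data example_max8660_subdevs[] = {
	{
		.id		= MAX8660_V3,
		.name		= "vcc_core",
		.platform_data	= &example_v3_data,
	},
};

static struct max8660_platform_data example_max8660_pdata = {
	.num_subdevs	= ARRAY_SIZE(example_max8660_subdevs),
	.subdevs	= example_max8660_subdevs,
	.en34_is_high	= 0,
};
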
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index a05b4a20768d..1ba3cf6edfbb 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -62,6 +62,12 @@ void reiserfs_write_unlock(struct super_block *s);
int reiserfs_write_lock_once(struct super_block *s);
void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+#ifdef CONFIG_REISERFS_CHECK
+void reiserfs_lock_check_recursive(struct super_block *s);
+#else
+static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
+#endif
+
/*
* Several mutexes depend on the write lock.
* However sometimes we want to relax the write lock while we hold
@@ -92,11 +98,31 @@ void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
struct super_block *s)
{
+ reiserfs_lock_check_recursive(s);
reiserfs_write_unlock(s);
mutex_lock(m);
reiserfs_write_lock(s);
}
+static inline void
+reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
+ struct super_block *s)
+{
+ reiserfs_lock_check_recursive(s);
+ reiserfs_write_unlock(s);
+ mutex_lock_nested(m, subclass);
+ reiserfs_write_lock(s);
+}
+
+static inline void
+reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
+{
+ reiserfs_lock_check_recursive(s);
+ reiserfs_write_unlock(s);
+ down_read(sem);
+ reiserfs_write_lock(s);
+}
+
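The point of these helpers is to avoid inverting the reiserfs write lock against another mutex or rwsem: the write lock is dropped, the other lock is taken, and the write lock is re-acquired. A minimal sketch of a caller (the function and mutex here are hypothetical):

static void example_reiserfs_op(struct super_block *s, struct mutex *m)
{
	/*
	 * Taking m directly while holding the write lock could deadlock
	 * against a task that already holds m and is waiting for the
	 * write lock, so relax the write lock around the acquisition.
	 */
	reiserfs_mutex_lock_safe(m, s);
	/* ... work that needs both m and the write lock ... */
	mutex_unlock(m);
}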
/*
* When we schedule, we usually want to also release the write lock,
* according to the previous bkl based locking scheme of reiserfs.
@@ -2051,25 +2077,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *path, struct reiserfs_dir_entry *de);
struct dentry *reiserfs_get_parent(struct dentry *);
-/* procfs.c */
-
-#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
-#define REISERFS_PROC_INFO
-#else
-#undef REISERFS_PROC_INFO
-#endif
+#ifdef CONFIG_REISERFS_PROC_INFO
int reiserfs_proc_info_init(struct super_block *sb);
int reiserfs_proc_info_done(struct super_block *sb);
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func);
-void reiserfs_proc_unregister_global(const char *name);
int reiserfs_proc_info_global_init(void);
int reiserfs_proc_info_global_done(void);
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data);
-
-#if defined( REISERFS_PROC_INFO )
#define PROC_EXP( e ) e
@@ -2084,6 +2097,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
#else
+static inline int reiserfs_proc_info_init(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_done(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_init(void)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_done(void)
+{
+ return 0;
+}
+
#define PROC_EXP( e )
#define VOID_V ( ( void ) 0 )
#define PROC_INFO_MAX( sb, field, value ) VOID_V
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 40fc7e626082..f1e914eefeab 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -3,8 +3,6 @@
#include <linux/time.h>
-struct task_struct;
-
/*
* Resource control/accounting header file for linux
*/
@@ -70,6 +68,12 @@ struct rlimit {
*/
#include <asm/resource.h>
+#ifdef __KERNEL__
+
+struct task_struct;
+
int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+#endif /* __KERNEL__ */
+
#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba7032609..b019ae64e2ab 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+#ifdef CONFIG_KSM
+ atomic_t ksm_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
/*
* rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
enum ttu_flags {
TTU_UNMAP = 0, /* unmap mode */
TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might see wider use later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
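A sketch of the callback shape rmap_walk() expects (the callback body is a placeholder; SWAP_AGAIN is the existing rmap return code that lets the walk continue):

static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long addr, void *arg)
{
	/* Inspect or rewrite the pte that maps @page at @addr in @vma. */
	return SWAP_AGAIN;	/* keep walking the remaining mappings */
}

/* ... and later: rmap_walk(page, example_rmap_one, NULL); */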
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
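A small usage sketch of the reader/writer API declared above (the lock and data are hypothetical):

static DEFINE_RWLOCK(example_lock);
static int example_counter;

static int example_read(void)
{
	int v;

	read_lock(&example_lock);	/* many readers may hold this at once */
	v = example_counter;
	read_unlock(&example_lock);
	return v;
}

static void example_write(int v)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);	/* exclusive and IRQ-safe */
	example_counter = v;
	write_unlock_irqrestore(&example_lock, flags);
}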
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
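Since the comment above deprecates RW_LOCK_UNLOCKED, a quick sketch of the two preferred initialization styles (the names are hypothetical):

static DEFINE_RWLOCK(example_static_lock);	/* compile-time definition */

struct example_obj {
	rwlock_t lock;
};

static void example_obj_init(struct example_obj *obj)
{
	rwlock_init(&obj->lock);		/* runtime init, lockdep-aware */
}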
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261f..bdfcc2527970 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->activity != 0);
-}
+extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89115ec7d43f..6f7bba93929b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
+#define TASK_STATE_MAX 512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+ sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
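The ___assert_task_state declaration above is a compile-time check that TASK_STATE_TO_CHAR_STR has exactly one character per task-state bit. The same negative-array-size trick in isolation, purely as an illustration (the macro name is made up):

#define EXAMPLE_BUILD_BUG_ON(cond) \
	extern char example_assert[1 - 2 * !!(cond)]	/* array size -1 => build error */

EXAMPLE_BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR) - 1 != ilog2(TASK_STATE_MAX) + 1);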
@@ -371,6 +377,8 @@ extern int sysctl_max_map_count;
#include <linux/aio.h>
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -380,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
#if USE_SPLIT_PTLOCKS
/*
@@ -1091,7 +1102,8 @@ struct sched_class {
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
- void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+ void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+ void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
@@ -1102,7 +1114,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1111,10 +1123,11 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
- unsigned int (*get_rr_interval) (struct task_struct *task);
+ unsigned int (*get_rr_interval) (struct rq *rq,
+ struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p);
+ void (*moved_group) (struct task_struct *p, int on_rq);
#endif
};
@@ -1151,8 +1164,6 @@ struct sched_entity {
u64 start_runtime;
u64 avg_wakeup;
- u64 avg_running;
-
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1175,7 +1186,6 @@ struct sched_entity {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
- u64 nr_forced2_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
@@ -1411,7 +1421,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -1544,10 +1554,18 @@ struct task_struct {
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+ int do_batch; /* incremented when batch uncharge started */
+ struct mem_cgroup *memcg; /* target memcg of uncharge */
+ unsigned long bytes; /* uncharged usage */
+ unsigned long memsw_bytes; /* uncharged mem+swap usage */
+ } memcg_batch;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1840,7 +1858,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern int sched_clock_stable;
#endif
-extern unsigned long long sched_clock(void);
+/* ftrace calls sched_clock() directly */
+extern unsigned long long notrace sched_clock(void);
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
@@ -1903,14 +1922,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
+ SCHED_TUNABLESCALING_LINEAR,
+ SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
@@ -2066,7 +2093,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
@@ -2085,11 +2111,6 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
-static inline int is_si_special(const struct siginfo *info)
-{
- return info <= SEND_SIG_FORCED;
-}
-
/*
* True if we are on the alternate signal stack.
*/
@@ -2475,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
@@ -2585,7 +2604,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+ unsigned int limit)
+{
+ return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+ unsigned int limit)
+{
+ return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+ return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+ return task_rlimit_max(current, limit);
+}
#endif /* __KERNEL__ */
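A sketch of how the new rlimit() helpers are meant to replace open-coded accesses to current->signal->rlim (the caller here is hypothetical):

static int example_too_many_files(unsigned int nr_open)
{
	/*
	 * Open-coded form this replaces:
	 *	nr_open > current->signal->rlim[RLIMIT_NOFILE].rlim_cur
	 */
	return nr_open > rlimit(RLIMIT_NOFILE);
}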
diff --git a/include/linux/security.h b/include/linux/security.h
index 466cbadbd1ef..2c627d361c02 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -95,8 +95,13 @@ struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
+#else
+#define dac_mmap_min_addr 0UL
+#endif
+
/*
* Values used in the task_security_ops calls
*/
@@ -121,6 +126,7 @@ struct request_sock;
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
+#ifdef CONFIG_MMU
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
}
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
#ifdef CONFIG_SECURITY
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 1b191c176bcd..8a4adbef8a0f 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ struct list_head sem_pending; /* pending single-sop operations */
};
/* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head sem_pending; /* pending operations to be processed */
struct list_head list_id; /* undo requests on this array */
- unsigned long sem_nsems; /* no. of semaphores in array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
};
/* One queue for each sleeping process in the system. */
struct sem_queue {
+ struct list_head simple_list; /* queue of pending single-sop operations */
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e2f3044d4a4a..813d26c247ec 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -136,25 +136,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
-/*
- * Use the following functions to pin serio's driver in process context
- */
-static inline int serio_pin_driver(struct serio *serio)
-{
- return mutex_lock_interruptible(&serio->drv_mutex);
-}
-
-static inline void serio_pin_driver_uninterruptible(struct serio *serio)
-{
- mutex_lock(&serio->drv_mutex);
-}
-
-static inline void serio_unpin_driver(struct serio *serio)
-{
- mutex_unlock(&serio->drv_mutex);
-}
-
-
#endif
/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index deee7afd8d66..e164291fb3e7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -41,20 +41,4 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
extern int init_tmpfs(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-int shmem_check_acl(struct inode *, int);
-int shmem_acl_init(struct inode *, struct inode *);
-
-extern struct xattr_handler shmem_xattr_acl_access_handler;
-extern struct xattr_handler shmem_xattr_acl_default_handler;
-
-extern struct generic_acl_operations shmem_acl_ops;
-
-#else
-static inline int shmem_acl_init(struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-#endif /* CONFIG_TMPFS_POSIX_ACL */
-
#endif
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500de..ca6b2b317991 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
@@ -166,7 +166,7 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd74..1e14beb23f9b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index d53642d2d899..67ed2c542831 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -31,6 +31,8 @@
#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
+
/* miscellaneous control */
#define SM501_MISC_CONTROL (0x000004)
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 34c4475ac4a2..4f95c1aac2fd 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -111,6 +111,7 @@
#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
+#define SONYPI_EVENT_MEDIA_PRESSED 72
/* get/set brightness */
#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
new file mode 100644
index 000000000000..51b3e771a9a3
--- /dev/null
+++ b/include/linux/spi/dw_spi.h
@@ -0,0 +1,212 @@
+#ifndef DW_SPI_HEADER_H
+#define DW_SPI_HEADER_H
+#include <linux/io.h>
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET 0
+
+#define SPI_FRF_OFFSET 4
+#define SPI_FRF_SPI 0x0
+#define SPI_FRF_SSP 0x1
+#define SPI_FRF_MICROWIRE 0x2
+#define SPI_FRF_RESV 0x3
+
+#define SPI_MODE_OFFSET 6
+#define SPI_SCPH_OFFSET 6
+#define SPI_SCOL_OFFSET 7
+#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_TR 0x0 /* xmit & recv */
+#define SPI_TMOD_TO 0x1 /* xmit only */
+#define SPI_TMOD_RO 0x2 /* recv only */
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET 10
+#define SPI_SRL_OFFSET 11
+#define SPI_CFS_OFFSET 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK 0x7f /* cover 7 bits */
+#define SR_BUSY (1 << 0)
+#define SR_TF_NOT_FULL (1 << 1)
+#define SR_TF_EMPT (1 << 2)
+#define SR_RF_NOT_EMPT (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_TX_ERR (1 << 5)
+#define SR_DCOL (1 << 6)
+
+/* Bit fields in ISR, IMR, RISR, 7 bits */
+#define SPI_INT_TXEI (1 << 0)
+#define SPI_INT_TXOI (1 << 1)
+#define SPI_INT_RXUI (1 << 2)
+#define SPI_INT_RXOI (1 << 3)
+#define SPI_INT_RXFI (1 << 4)
+#define SPI_INT_MSTI (1 << 5)
+
+/* TX RX interrupt level threshold, max can be 256 */
+#define SPI_INT_THRESHOLD 32
+
+enum dw_ssi_type {
+ SSI_MOTO_SPI = 0,
+ SSI_TI_SSP,
+ SSI_NS_MICROWIRE,
+};
+
+struct dw_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+ u32 dr; /* Currently operated as 32 bits,
+ though only the low 16 bits matter */
+} __packed;
+
+struct dw_spi {
+ struct spi_master *master;
+ struct spi_device *cur_dev;
+ struct device *parent_dev;
+ enum dw_ssi_type type;
+
+ void __iomem *regs;
+ unsigned long paddr;
+ u32 iolen;
+ int irq;
+ u32 max_freq; /* max bus freq supported */
+
+ u16 bus_num;
+ u16 num_cs; /* supported slave numbers */
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ struct chip_data *prev_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes; /* current transfer is a 1- or 2-byte op */
+ u8 max_bits_per_word; /* maximum is 16 bits */
+ u32 dma_width;
+ int cs_change;
+ int (*write)(struct dw_spi *dws);
+ int (*read)(struct dw_spi *dws);
+ irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+ void (*cs_control)(u32 command);
+
+ /* Dma info */
+ int dma_inited;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+ int txdma_done;
+ int rxdma_done;
+ u64 tx_param;
+ u64 rx_param;
+ struct device *dma_dev;
+ dma_addr_t dma_addr;
+
+ /* Bus interface info */
+ void *priv;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#define dw_readl(dw, name) \
+ __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writel(dw, name, val) \
+ __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_readw(dw, name) \
+ __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writew(dw, name, val) \
+ __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+
+static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+{
+ dw_writel(dws, ssienr, (enable ? 1 : 0));
+}
+
+static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+{
+ dw_writel(dws, baudr, div);
+}
+
+static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
+{
+ if (cs > dws->num_cs)
+ return;
+ dw_writel(dws, ser, 1 << cs);
+}
+
+/* Disable IRQ bits */
+static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) & ~mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/* Enable IRQ bits */
+static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) | mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/*
+ * Each SPI slave device that works with the dw_api controller should
+ * have such a structure claiming its working mode (PIO/DMA etc.),
+ * which can be saved in the "controller_data" member of
+ * struct spi_device
+ */
+struct dw_spi_chip {
+ u8 poll_mode; /* 0 for controller polling mode */
+ u8 type; /* SPI/SSP/Microwire */
+ u8 enable_dma;
+ void (*cs_control)(u32 command);
+};
+
+extern int dw_spi_add_host(struct dw_spi *dws);
+extern void dw_spi_remove_host(struct dw_spi *dws);
+extern int dw_spi_suspend_host(struct dw_spi *dws);
+extern int dw_spi_resume_host(struct dw_spi *dws);
+#endif /* DW_SPI_HEADER_H */
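As the closing comment says, per-slave settings travel in spi_device's controller_data. A hedged board-code sketch (the device name, speed, and settings are hypothetical; see the field comments in dw_spi_chip for their meaning):

static struct dw_spi_chip example_chip = {
	.poll_mode	= 0,
	.type		= SSI_MOTO_SPI,
	.enable_dma	= 0,
};

static struct spi_board_info example_board_info __initdata = {
	.modalias	 = "example-spi-dev",
	.max_speed_hz	 = 1000000,
	.bus_num	 = 0,
	.chip_select	 = 0,
	.controller_data = &example_chip,	/* picked up by the dw_spi driver */
};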
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 000000000000..2e8db3d2d2e5
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 000000000000..6f17278810b0
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect: Number of chip selects provided by the IP.
+ * @little_endian: Whether registers should be accessed little-endian.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ bool little_endian;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
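A hedged sketch of how platform code might fill this in (the attached device and all values are hypothetical):

static struct spi_board_info example_xspi_devices[] = {
	{
		.modalias	= "example-flash",
		.max_speed_hz	= 10000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= false,
	.bits_per_word	= 8,
	.devices	= example_xspi_devices,
	.num_devices	= ARRAY_SIZE(example_xspi_devices),
};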
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -75,12 +75,12 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#endif
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
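After this split, spinlock_t is a thin wrapper that forwards to the raw_spinlock_t primitives above. A usage sketch of the two layers (the names are hypothetical; DEFINE_RAW_SPINLOCK is assumed to be the matching initializer from spinlock_types.h, which is not shown in this hunk):

static DEFINE_SPINLOCK(example_lock);		/* ordinary code keeps using spinlock_t */
static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* low-level, never-preemptible paths */

static void example(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data guarded by example_lock ... */
	spin_unlock_irqrestore(&example_lock, flags);

	raw_spin_lock(&example_raw_lock);
	/* ... short, non-sleeping critical section ... */
	raw_spin_unlock(&example_raw_lock);
}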
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
-
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
 * On lockdep we don't want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
#include <linux/lockdep.h>
-typedef struct {
- raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
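With spinlock_t reduced to a wrapper around the new raw_spinlock_t, code that must stay a spinning, non-preemptible lock even on a PREEMPT_RT kernel can now declare a raw lock explicitly, while ordinary code keeps the wrapped type. A hedged sketch of the two declaration styles introduced above (identifiers are illustrative):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_raw_lock);      /* raw_spinlock_t */
static DEFINE_SPINLOCK(demo_lock);              /* spinlock_t wrapper */

static void demo_both(void)
{
        raw_spin_lock(&demo_raw_lock);
        /* critical section that must never become a sleeping lock */
        raw_spin_unlock(&demo_raw_lock);

        spin_lock(&demo_lock);
        /* ordinary critical section */
        spin_unlock(&demo_lock);
}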
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5ad..a716ee2a8adb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,9 +62,20 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
-extern char * __must_check strstrip(char *);
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
#ifndef __HAVE_ARCH_STRSTR
-extern char * strstr(const char *,const char *);
+extern char * strstr(const char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNSTR
+extern char * strnstr(const char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
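strstrip() becomes an inline alias for strim(), which trims trailing whitespace in place and returns a pointer past any leading whitespace; skip_spaces() only advances past leading whitespace without modifying the buffer. A small illustrative sketch (buffer contents invented):

#include <linux/kernel.h>
#include <linux/string.h>

static void demo_trim(void)
{
        char buf[] = "   key = value   ";

        pr_info("skipped: '%s'\n", skip_spaces(buf));   /* leading spaces only */
        pr_info("trimmed: '%s'\n", strim(buf));         /* both ends, in place */
}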
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 10709cbe96fd..c2786f20016f 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -28,9 +28,6 @@
#ifdef __KERNEL__
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-
/*
* Enable RPC debugging/profiling.
*/
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 87b895d5c786..b78f16b1dea3 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -40,6 +40,8 @@
#ifndef _LINUX_SUNRPC_RPC_RDMA_H
#define _LINUX_SUNRPC_RPC_RDMA_H
+#include <linux/types.h>
+
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc0..7bc7fd5291ce 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
@@ -171,7 +173,8 @@ struct rpc_task_setup {
#define RPC_PRIORITY_LOW (-1)
#define RPC_PRIORITY_NORMAL (0)
#define RPC_PRIORITY_HIGH (1)
-#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
+#define RPC_PRIORITY_PRIVILEGED (2)
+#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
struct rpc_timer {
struct timer_list timer;
@@ -227,6 +230,7 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
+int rpc_queue_empty(struct rpc_wait_queue *);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
void rpc_free(void *);
@@ -252,6 +256,16 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
return __rpc_wait_for_completion_task(task, NULL);
}
+static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio)
+{
+ task->tk_priority = prio - RPC_PRIORITY_LOW;
+}
+
+static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio)
+{
+ return (task->tk_priority + RPC_PRIORITY_LOW == prio);
+}
+
#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
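The new priority helpers translate the external RPC_PRIORITY_* constants into the zero-based tk_priority field, with RPC_PRIORITY_PRIVILEGED extending the range by one level. An illustrative sketch only; the task is assumed to come from the usual rpc_run_task() path:

#include <linux/kernel.h>
#include <linux/sunrpc/sched.h>

static void demo_prioritize(struct rpc_task *task)
{
        rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);

        if (rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED))
                pr_info("rpc task queued at privileged priority\n");
}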
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 52e8cb0a7569..5a3085b9b394 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -29,7 +29,6 @@ struct svc_pool_stats {
unsigned long packets;
unsigned long sockets_queued;
unsigned long threads_woken;
- unsigned long overloads_avoided;
unsigned long threads_timedout;
};
@@ -50,7 +49,6 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- int sp_nwaking; /* number of threads woken but not yet active */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
} ____cacheline_aligned_in_smp;
@@ -275,16 +273,11 @@ struct svc_rqst {
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
struct svc_cacherep * rq_cacherep; /* cache info */
- struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to
- * determine what device number
- * to report (real or virtual)
- */
int rq_splice_ok; /* turned off in gss privacy
* to prevent encrypting page
* cache pages */
wait_queue_head_t rq_wait; /* synchronization */
struct task_struct *rq_task; /* service thread */
- int rq_waking; /* 1 if thread is being woken */
};
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a4..a2602a8207a6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7ffe
-#define SWAP_MAP_BAD 0x7fff
-#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
- unsigned long flags;
- int prio; /* swap priority */
- int next; /* next entry on swap list */
- struct file *swap_file;
- struct block_device *bdev;
- struct list_head extent_list;
- struct swap_extent *curr_swap_extent;
- unsigned short *swap_map;
- unsigned int lowest_bit;
- unsigned int highest_bit;
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ signed char type; /* strange name for an index */
+ signed char next; /* next type on the swap list */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
unsigned int lowest_alloc; /* while preparing discard cluster */
unsigned int highest_alloc; /* while preparing discard cluster */
- unsigned int cluster_next;
- unsigned int cluster_nr;
- unsigned int pages;
- unsigned int max;
- unsigned int inuse_pages;
- unsigned int old_block_size;
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
};
struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern void swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
#define free_swap_and_cache(swp) is_migration_entry(swp)
#define swapcache_prepare(swp) is_migration_entry(swp)
-static inline void swap_duplicate(swp_entry_t swp)
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
}
static inline void swap_free(swp_entry_t swp)
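Each swap_map entry now fits in one byte: the low six bits hold the duplication count (capped at SWAP_MAP_MAX), SWAP_HAS_CACHE flags a slot that also sits in the swap cache, and COUNT_CONTINUED says the remainder of the count lives in a continuation page. A hedged decoding sketch using only the constants above; the helper is hypothetical, not the kernel's internal accessor:

#include <linux/kernel.h>
#include <linux/swap.h>
#include <linux/types.h>

static void demo_decode_swap_map(unsigned char ent)
{
        bool cached, continued;
        unsigned char count;

        if (ent == SWAP_MAP_BAD)
                return;                         /* unusable slot, no count */
        if (ent == SWAP_MAP_SHMEM)
                return;                         /* owned by shmem/tmpfs */

        cached    = ent & SWAP_HAS_CACHE;       /* also present in swap cache */
        continued = ent & COUNT_CONTINUED;      /* rest of the count lives in
                                                 * a continuation page */
        count     = ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
        pr_info("count=%u cached=%d continued=%d\n", count, cached, continued);
}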
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 939a61507ac5..207466a49f3d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -102,12 +102,10 @@ struct perf_event_attr;
#ifdef CONFIG_EVENT_PROFILE
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
- .profile_count = ATOMIC_INIT(-1), \
.profile_enable = prof_sysenter_enable, \
.profile_disable = prof_sysenter_disable,
#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
- .profile_count = ATOMIC_INIT(-1), \
.profile_enable = prof_sysexit_enable, \
.profile_disable = prof_sysexit_disable,
#else
@@ -145,7 +143,7 @@ struct perf_event_attr;
.name = "sys_enter"#sname, \
.system = "syscalls", \
.event = &enter_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .raw_init = trace_event_raw_init, \
.show_format = syscall_enter_format, \
.define_fields = syscall_enter_define_fields, \
.regfunc = reg_event_syscall_enter, \
@@ -167,7 +165,7 @@ struct perf_event_attr;
.name = "sys_exit"#sname, \
.system = "syscalls", \
.event = &exit_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .raw_init = trace_event_raw_init, \
.show_format = syscall_exit_format, \
.define_fields = syscall_exit_define_fields, \
.regfunc = reg_event_syscall_exit, \
@@ -197,7 +195,7 @@ struct perf_event_attr;
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
- __syscall_meta_##sname = { \
+ __syscall_meta__##sname = { \
.name = "sys_"#sname, \
.nb_args = 0, \
.enter_event = &event_enter__##sname, \
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9d68fed50f11..cfa83083a2d4 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -99,8 +99,9 @@ int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr,
void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
- struct bin_attribute *attr);
-void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr);
+ const struct bin_attribute *attr);
+void sysfs_remove_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr);
int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target,
const char *name);
@@ -175,13 +176,13 @@ static inline void sysfs_remove_file(struct kobject *kobj,
}
static inline int sysfs_create_bin_file(struct kobject *kobj,
- struct bin_attribute *attr)
+ const struct bin_attribute *attr)
{
return 0;
}
static inline void sysfs_remove_bin_file(struct kobject *kobj,
- struct bin_attribute *attr)
+ const struct bin_attribute *attr)
{
}
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 000000000000..ce456eaae861
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_TIMB_GPIO_H
+#define _LINUX_TIMB_GPIO_H
+
+/**
+ * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
+ * @gpio_base The number of the first GPIO pin, set to -1 for
+ * dynamic number allocation.
+ * @nr_pins Number of pins supported by the hardware (1-32)
+ * @irq_base If IRQs are supported by the hardware, this is the
+ * base IRQ number. One IRQ per pin will be used. Set to
+ * -1 if IRQs are not supported.
+ */
+struct timbgpio_platform_data {
+ int gpio_base;
+ int nr_pins;
+ int irq_base;
+};
+
+#endif
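A board file hands this platform data to the Timberdale GPIO driver through an ordinary platform device. A minimal sketch with invented values; the device name is assumed to match the timbgpio platform driver:

#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data demo_timbgpio_pdata = {
        .gpio_base = -1,        /* let gpiolib allocate the base number */
        .nr_pins   = 32,
        .irq_base  = -1,        /* GPIO interrupts not wired up */
};

static struct platform_device demo_timbgpio_dev = {
        .name = "timb-gpio",    /* assumed driver name */
        .id   = -1,
        .dev  = {
                .platform_data = &demo_timbgpio_pdata,
        },
};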
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 57e63579bfdd..5b81156780b1 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 1*SD_SHARE_CPUPOWER \
| 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
| 0*SD_PREFER_SIBLING \
, \
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 09077f6ed128..5cf397ceb726 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@ struct trace_seq {
unsigned char buffer[PAGE_SIZE];
unsigned int len;
unsigned int readpos;
+ int full;
};
static inline void
@@ -21,6 +22,7 @@ trace_seq_init(struct trace_seq *s)
{
s->len = 0;
s->readpos = 0;
+ s->full = 0;
}
/*
@@ -33,7 +35,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
__attribute__ ((format (printf, 2, 0)));
extern int
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
-extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
extern int trace_seq_puts(struct trace_seq *s, const char *str);
@@ -55,8 +57,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
return 0;
}
-static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
+ return 0;
}
static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt)
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1eb44a924e56..10db0102a890 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry(
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
+ if (step) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+ force_sig_info(SIGTRAP, &info, current);
+ return;
+ }
+
ptrace_report_syscall(regs);
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe40..6abfcf5b5887 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
extern struct ktermios tty_std_termios;
-extern int kmsg_redirect;
-
extern void console_init(void);
extern int vcs_init(void);
@@ -466,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
extern void tty_port_put(struct tty_port *port);
-extern inline struct tty_port *tty_port_get(struct tty_port *port)
+static inline struct tty_port *tty_port_get(struct tty_port *port)
{
if (port)
kref_get(&port->kref);
@@ -488,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
extern int tty_port_open(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
-extern inline int tty_port_users(struct tty_port *port)
+static inline int tty_port_users(struct tty_port *port)
{
return port->count + port->blocked_open;
}
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6b58367d145e..d512d98dfb7d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, void *src, size_t size);
+extern long __probe_kernel_read(void *dst, void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
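probe_kernel_read() and probe_kernel_write() keep their return-0-or--EFAULT contract; the change adds double-underscore variants and marks the write side notrace. A short sketch of the basic read pattern (the pointer argument is arbitrary):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int demo_peek(void *maybe_bad_ptr)
{
        unsigned long val;

        /* returns 0 on success, -EFAULT if the address faults */
        if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
                return -EFAULT;

        return 0;
}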
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e101a2d04d75..d7ace1b80f09 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -192,6 +192,7 @@ struct usb_interface {
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
unsigned reset_running:1;
+ unsigned resetting_device:1; /* true: bandwidth alloc after reset */
struct device dev; /* interface specific device info */
struct device *usb_dev;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index acf6e457c04b..1819396ed501 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -16,6 +16,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/sysrq.h>
+#include <linux/kfifo.h>
#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
@@ -94,7 +95,7 @@ struct usb_serial_port {
unsigned char *bulk_out_buffer;
int bulk_out_size;
struct urb *write_urb;
- struct kfifo *write_fifo;
+ struct kfifo write_fifo;
int write_urb_busy;
__u8 bulk_out_endpointAddress;
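write_fifo changes from a kfifo pointer to an embedded struct kfifo, matching the reworked kfifo API in which the caller owns the structure and kfifo_alloc() only allocates the backing buffer. A hedged sketch of the allocate/free pattern; the buffer size is an assumption:

#include <linux/gfp.h>
#include <linux/kfifo.h>
#include <linux/usb/serial.h>

#define DEMO_WRITE_FIFO_SIZE 256        /* illustrative, power of two */

static int demo_port_setup(struct usb_serial_port *port)
{
        /* allocates the buffer behind the embedded kfifo */
        return kfifo_alloc(&port->write_fifo, DEMO_WRITE_FIFO_SIZE, GFP_KERNEL);
}

static void demo_port_teardown(struct usb_serial_port *port)
{
        kfifo_free(&port->write_fifo);
}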
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index b2a7d8ba6ee3..15591d2ea400 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -128,6 +128,29 @@ struct usbdevfs_hub_portinfo {
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
+
+struct usbdevfs_ctrltransfer32 {
+ u8 bRequestType;
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u32 timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_bulktransfer32 {
+ compat_uint_t ep;
+ compat_uint_t len;
+ compat_uint_t timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_disconnectsignal32 {
+ compat_int_t signr;
+ compat_caddr_t context;
+};
+
struct usbdevfs_urb32 {
unsigned char type;
unsigned char endpoint;
@@ -153,7 +176,9 @@ struct usbdevfs_ioctl32 {
#endif /* __KERNEL__ */
#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
+#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer)
+#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int)
#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface)
#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int)
@@ -166,6 +191,7 @@ struct usbdevfs_ioctl32 {
#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *)
#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32)
#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal)
+#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int)
#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int)
#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo)
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 79b9837d9ca0..cf97b5b9d1fe 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/module.h>
/* Simply sanity version stamp for modules. */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 32b92298fd79..d4962a782b8a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -294,6 +294,7 @@ struct v4l2_pix_format {
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
/* Palette formats */
@@ -329,7 +330,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
/*
@@ -732,6 +737,99 @@ struct v4l2_standard {
};
/*
+ * V I D E O T I M I N G S D V P R E S E T
+ */
+struct v4l2_dv_preset {
+ __u32 preset;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T S E N U M E R A T I O N
+ */
+struct v4l2_dv_enum_preset {
+ __u32 index;
+ __u32 preset;
+ __u8 name[32]; /* Name of the preset timing */
+ __u32 width;
+ __u32 height;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T V A L U E S
+ */
+#define V4L2_DV_INVALID 0
+#define V4L2_DV_480P59_94 1 /* BT.1362 */
+#define V4L2_DV_576P50 2 /* BT.1362 */
+#define V4L2_DV_720P24 3 /* SMPTE 296M */
+#define V4L2_DV_720P25 4 /* SMPTE 296M */
+#define V4L2_DV_720P30 5 /* SMPTE 296M */
+#define V4L2_DV_720P50 6 /* SMPTE 296M */
+#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
+#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
+#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I25 11 /* BT.1120 */
+#define V4L2_DV_1080I50 12 /* SMPTE 296M */
+#define V4L2_DV_1080I60 13 /* SMPTE 296M */
+#define V4L2_DV_1080P24 14 /* SMPTE 296M */
+#define V4L2_DV_1080P25 15 /* SMPTE 296M */
+#define V4L2_DV_1080P30 16 /* SMPTE 296M */
+#define V4L2_DV_1080P50 17 /* BT.1120 */
+#define V4L2_DV_1080P60 18 /* BT.1120 */
+
+/*
+ * D V B T T I M I N G S
+ */
+
+/* BT.656/BT.1120 timing data */
+struct v4l2_bt_timings {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in lines */
+ __u32 interlaced; /* Interlaced or progressive */
+ __u32 polarities; /* Positive or negative polarity */
+ __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
+ __u32 hfrontporch; /* Horizontal front porch in pixels */
+ __u32 hsync; /* Horizontal Sync length in pixels */
+ __u32 hbackporch; /* Horizontal back porch in pixels */
+ __u32 vfrontporch; /* Vertical front porch in lines */
+ __u32 vsync; /* Vertical Sync length in lines */
+ __u32 vbackporch; /* Vertical back porch in lines */
+ __u32 il_vfrontporch; /* Vertical front porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vsync; /* Vertical sync length for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vbackporch; /* Vertical back porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Interlaced or progressive format */
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
+
+/* Polarities. If bit is not set, it is assumed to be negative polarity */
+#define V4L2_DV_VSYNC_POS_POL 0x00000001
+#define V4L2_DV_HSYNC_POS_POL 0x00000002
+
+
+/* DV timings */
+struct v4l2_dv_timings {
+ __u32 type;
+ union {
+ struct v4l2_bt_timings bt;
+ __u32 reserved[32];
+ };
+} __attribute__ ((packed));
+
+/* Values for the type field */
+#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+/*
* V I D E O I N P U T S
*/
struct v4l2_input {
@@ -742,7 +840,8 @@ struct v4l2_input {
__u32 tuner; /* Associated tuner */
v4l2_std_id std;
__u32 status;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
@@ -773,6 +872,11 @@ struct v4l2_input {
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
+/* capabilities flags */
+#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* V I D E O O U T P U T S
*/
@@ -783,13 +887,19 @@ struct v4l2_output {
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
+/* capabilities flags */
+#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* C O N T R O L S
*/
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
+#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
+#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
+#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
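The DV preset ioctls let an application select a digital video timing by preset number instead of supplying a full timing description. A minimal user-space sketch; the device path and the chosen preset are examples only:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int demo_set_720p60(const char *devpath)        /* e.g. "/dev/video0" */
{
        struct v4l2_dv_preset preset = { .preset = V4L2_DV_720P60 };
        int fd = open(devpath, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0) {
                close(fd);                      /* driver rejected the preset */
                return -1;
        }
        return fd;
}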
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a8..ee03bba9c5df 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states).event[item]++;
+ __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states).event[item]++;
- put_cpu();
+ this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states).event[item] += delta;
+ __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states).event[item] += delta;
- put_cpu();
+ this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
extern void all_vm_events(unsigned long *);
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f06232..d5dd0bc408fd 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,23 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
+#ifdef __KERNEL__
+
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
#endif /* _LINUX_VT_H */
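The kmsg_redirect variable removed from tty.h above is replaced by the vt_kmsg_redirect() accessor; passing -1, wrapped by vt_get_kmsg_redirect(), reads the current console without changing it. A short sketch (the console number is an example):

#include <linux/vt.h>

static void demo_redirect_kmsg(void)
{
        int old = vt_kmsg_redirect(3);  /* send kernel messages to tty3,
                                         * returns the previous console */

        /* ... do something while redirected ... */

        vt_kmsg_redirect(old);          /* restore the previous setting */
}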
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 705f01fe413a..76e8903cd204 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -70,6 +70,7 @@ struct writeback_control {
struct bdi_writeback;
int inode_wait(void *);
void writeback_inodes_sb(struct super_block *);
+int writeback_inodes_sb_if_idle(struct super_block *);
void sync_inodes_sb(struct super_block *);
void writeback_inodes_wbc(struct writeback_control *wbc);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
@@ -79,8 +80,7 @@ void wakeup_flusher_threads(long nr_pages);
static inline void wait_on_inode(struct inode *inode)
{
might_sleep();
- wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
- TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
static inline void inode_sync_wait(struct inode *inode)
{
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 5c84af8c5f6f..fb9b7e6e1e2d 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -38,12 +38,13 @@ struct dentry;
struct xattr_handler {
char *prefix;
- size_t (*list)(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len);
- int (*get)(struct inode *inode, const char *name, void *buffer,
- size_t size);
- int (*set)(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags);
+ int flags; /* fs private flags passed back to the handlers */
+ size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int handler_flags);
+ int (*get)(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int handler_flags);
+ int (*set)(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags);
};
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
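Handlers now receive the dentry plus a per-handler flags word instead of a bare inode, so one set of callbacks can serve several on-disk variants. A skeleton under the new prototypes; the names and the unconditional -EOPNOTSUPP are purely illustrative:

#include <linux/errno.h>
#include <linux/xattr.h>

static int demo_user_get(struct dentry *dentry, const char *name,
                         void *buffer, size_t size, int handler_flags)
{
        /* handler_flags carries the handler's own .flags value */
        return -EOPNOTSUPP;
}

static struct xattr_handler demo_user_xattr_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags  = 0,
        .get    = demo_user_get,
};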