Diffstat (limited to 'include/linux')
321 files changed, 7740 insertions, 5953 deletions
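The headline change in this batch is the block-layer rework visible in blk_types.h and blkdev.h below: the request operation now occupies the low REQ_OP_BITS of bi_opf/cmd_flags, modifier flags are packed in the bits above it, and the least significant bit of the op encodes the transfer direction. What follows is an editor's illustration, not part of the diff: a minimal, self-contained userspace sketch of that encoding. The constants mirror the diff (flag positions follow the enum order there), while the main() harness is an assumption added purely for demonstration.

/*
 * Sketch of the op/flags encoding introduced below (illustrative only).
 */
#include <assert.h>
#include <stdbool.h>

#define REQ_OP_BITS 8
#define REQ_OP_MASK ((1u << REQ_OP_BITS) - 1)

enum req_opf {
	REQ_OP_READ  = 0,	/* LSB clear: data moves FROM the device */
	REQ_OP_WRITE = 1,	/* LSB set: data moves TO the device */
	REQ_OP_FLUSH = 2,
	REQ_OP_DISCARD = 3,
};

/* flag bits start directly above the 8 op bits, in enum order */
#define REQ_SYNC	(1u << (REQ_OP_BITS + 3))
#define REQ_FUA		(1u << (REQ_OP_BITS + 9))
#define REQ_PREFLUSH	(1u << (REQ_OP_BITS + 10))

static inline bool op_is_write(unsigned int op)
{
	return op & 1;	/* the direction convention described in the diff */
}

static inline bool op_is_sync(unsigned int op)
{
	/* reads are always sync; writes only with SYNC/FUA/PREFLUSH */
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
	       (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

int main(void)
{
	unsigned int opf = REQ_OP_WRITE | REQ_SYNC;	/* one combined field */

	assert((opf & REQ_OP_MASK) == REQ_OP_WRITE);	/* bio_op()/req_op() */
	assert(op_is_write(opf));
	assert(op_is_sync(opf));
	assert(!op_is_write(REQ_OP_READ) && op_is_sync(REQ_OP_READ));
	return 0;
}

The bio_op() and req_op() macros in the diff reduce to the same (opf & REQ_OP_MASK) projection shown here.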
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 689a8b9b9c8f..5b36974ed60a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) +{ + struct fwnode_handle *fwnode; + + fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); + if (!fwnode) + return NULL; + + fwnode->type = FWNODE_ACPI_STATIC; + + return fwnode; +} + +static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) +{ + if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC)) + return; + + kfree(fwnode); +} + /** * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with * the PCI-defined class-code information @@ -220,10 +241,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries); -int __init acpi_table_parse_entries(char *id, unsigned long table_size, - int entry_id, - acpi_tbl_entry_handler handler, - unsigned int max_entries); int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries); @@ -420,6 +437,8 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, return acpi_dev_filter_resource_type(ares, (unsigned long)arg); } +struct acpi_device *acpi_resource_consumer(struct resource *res); + int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, @@ -469,6 +488,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 +#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; @@ -555,7 +575,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); void acpi_walk_dep_device_list(acpi_handle handle); -struct platform_device *acpi_create_platform_device(struct acpi_device *); +struct platform_device *acpi_create_platform_device(struct acpi_device *, + struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -743,6 +764,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) return DEV_DMA_NOT_SUPPORTED; } +static inline void acpi_dma_configure(struct device *dev, + enum dev_dma_attr attr) { } + +static inline void acpi_dma_deconfigure(struct device *dev) { } + #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -763,6 +789,11 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) return -EINVAL; } +static inline struct acpi_device *acpi_resource_consumer(struct resource *res) +{ + return NULL; +} + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 0e32dac8fd03..77e08099e554 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -23,20 +23,36 @@ #include <linux/fwnode.h> #include <linux/irqdomain.h> +#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) +#define IORT_IRQ_TRIGGER_MASK(irq) 
((irq >> 32) & 0xffffffffULL) + int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT void acpi_iort_init(void); +bool iort_node_match(u8 type); u32 iort_msi_map_rid(struct device *dev, u32 req_id); struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +/* IOMMU interface */ +void iort_set_dma_mask(struct device *dev); +const struct iommu_ops *iort_iommu_configure(struct device *dev); #else static inline void acpi_iort_init(void) { } +static inline bool iort_node_match(u8 type) { return false; } static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) { return req_id; } static inline struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) { return NULL; } +/* IOMMU interface */ +static inline void iort_set_dma_mask(struct device *dev) { } +static inline +const struct iommu_ops *iort_iommu_configure(struct device *dev) +{ return NULL; } #endif +#define IORT_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn) + #endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h new file mode 100644 index 000000000000..62be3a40239d --- /dev/null +++ b/include/linux/ahci-remap.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_AHCI_REMAP_H +#define _LINUX_AHCI_REMAP_H + +#include <linux/sizes.h> + +#define AHCI_VSCAP 0xa4 +#define AHCI_REMAP_CAP 0x800 + +/* device class code */ +#define AHCI_REMAP_N_DCC 0x880 + +/* remap-device base relative to ahci-bar */ +#define AHCI_REMAP_N_OFFSET SZ_16K +#define AHCI_REMAP_N_SIZE SZ_16K + +#define AHCI_MAX_REMAP 3 + +static inline unsigned int ahci_remap_dcc(int i) +{ + return AHCI_REMAP_N_DCC + i * 0x80; +} + +static inline unsigned int ahci_remap_base(int i) +{ + return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE; +} + +#endif /* _LINUX_AHCI_REMAP_H */ diff --git a/include/linux/aio.h b/include/linux/aio.h index 9eb42dbc5582..fdd0a343f455 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *); /* prototypes */ #ifdef CONFIG_AIO extern void exit_aio(struct mm_struct *mm); -extern long do_io_submit(aio_context_t ctx_id, long nr, - struct iocb __user *__user *iocbpp, bool compat); void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); #else static inline void exit_aio(struct mm_struct *mm) { } -static inline long do_io_submit(aio_context_t ctx_id, long nr, - struct iocb __user * __user *iocbpp, - bool compat) { return 0; } static inline void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) { } #endif /* CONFIG_AIO */ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9d8031257a90..c70aac13244a 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -10,7 +10,12 @@ enum alarmtimer_type { ALARM_REALTIME, ALARM_BOOTTIME, + /* Supported types end here */ ALARM_NUMTYPE, + + /* Used for tracing information. No usable types. 
*/ + ALARM_REALTIME_FREEZER, + ALARM_BOOTTIME_FREEZER, }; enum alarmtimer_restart { diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h deleted file mode 100644 index fb83c0453489..000000000000 --- a/include/linux/amba/pl061.h +++ /dev/null @@ -1,16 +0,0 @@ -#include <linux/types.h> - -/* platform data for the PL061 GPIO driver */ - -struct pl061_platform_data { - /* number of the first GPIO */ - unsigned gpio_base; - - /* number of the first IRQ. - * If the IRQ functionality in not desired this must be set to 0. - */ - unsigned irq_base; - - u8 directions; /* startup directions, 1: out, 0: in */ - u8 values; /* startup values */ -}; diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 27e9ec8778eb..5308eae9ce35 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -84,6 +84,8 @@ struct pl08x_channel_data { * running any DMA transfer and multiplexing can be recycled * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 + * @slave_map: DMA slave matching table + * @slave_map_len: number of elements in @slave_map */ struct pl08x_platform_data { struct pl08x_channel_data *slave_channels; @@ -93,6 +95,8 @@ struct pl08x_platform_data { void (*put_xfer_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; + const struct dma_slave_map *slave_map; + int slave_map_len; }; #ifdef CONFIG_AMBA_PL08X diff --git a/include/linux/ata.h b/include/linux/ata.h index fdb180367ba1..af6859b3a93d 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -348,6 +348,7 @@ enum { ATA_LOG_DEVSLP_DETO = 0x01, ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, + ATA_LOG_NCQ_PRIO_OFFSET = 0x09, /* NCQ send and receive log */ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, @@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id) return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); } +static inline bool ata_id_has_ncq_prio(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY] & BIT(12); +} + static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/audit.h b/include/linux/audit.h index 9d4443f93db6..f51fca8d0b6f 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -147,7 +147,7 @@ extern void audit_log_d_path(struct audit_buffer *ab, extern void audit_log_key(struct audit_buffer *ab, char *key); extern void audit_log_link_denied(const char *operation, - struct path *link); + const struct path *link); extern void audit_log_lost(const char *message); #ifdef CONFIG_SECURITY extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c357f27d5483..e850e76acaaf 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + unsigned long dirty_sleep; /* last wait */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ #ifdef CONFIG_CGROUP_WRITEBACK @@ -136,12 +138,13 @@ struct bdi_writeback { struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ - unsigned int capabilities; /* Device capabilities */ + unsigned long io_pages; /* max allowed IO size */ congested_fn *congested_fn; /* Function pointer if device is md/dm */ void 
*congested_data; /* Pointer to aux data for congested func */ char *name; + unsigned int capabilities; /* Device capabilities */ unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; diff --git a/include/linux/bio.h b/include/linux/bio.h index 97cb48f03dc7..7cf8a6c70a3f 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -63,6 +63,12 @@ #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) /* + * Return the data direction, READ or WRITE. + */ +#define bio_data_dir(bio) \ + (op_is_write(bio_op(bio)) ? WRITE : READ) + +/* * Check whether this bio carries any data or not. A NULL bio is allowed. */ static inline bool bio_has_data(struct bio *bio) @@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio) if (bio && bio->bi_iter.bi_size && bio_op(bio) != REQ_OP_DISCARD && - bio_op(bio) != REQ_OP_SECURE_ERASE) + bio_op(bio) != REQ_OP_SECURE_ERASE && + bio_op(bio) != REQ_OP_WRITE_ZEROES) return true; return false; @@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || - bio_op(bio) == REQ_OP_WRITE_SAME; -} - -static inline bool bio_is_rw(struct bio *bio) -{ - if (!bio_has_data(bio)) - return false; - - if (bio_no_advance_iter(bio)) - return false; - - return true; + bio_op(bio) == REQ_OP_WRITE_SAME || + bio_op(bio) == REQ_OP_WRITE_ZEROES; } static inline bool bio_mergeable(struct bio *bio) @@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio) struct bvec_iter iter; /* - * We special case discard/write same, because they interpret bi_size - * differently: + * We special case discard/write same/write zeroes, because they + * interpret bi_size differently: */ - if (bio_op(bio) == REQ_OP_DISCARD) - return 1; - - if (bio_op(bio) == REQ_OP_SECURE_ERASE) - return 1; - - if (bio_op(bio) == REQ_OP_WRITE_SAME) + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + return 0; + case REQ_OP_WRITE_SAME: return 1; + default: + break; + } bio_for_each_segment(bv, bio, iter) segs++; @@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) } +extern blk_qc_t submit_bio(struct bio *); + extern void bio_endio(struct bio *); static inline void bio_io_error(struct bio *bio) @@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *); extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); -extern void bio_init(struct bio *); +extern void bio_init(struct bio *bio, struct bio_vec *table, + unsigned short max_vecs); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); +int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, const struct iov_iter *, gfp_t); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 3bf5d33800ab..01b62e7bac74 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) /** * blkg_rwstat_add - add a value to a blkg_rwstat * @rwstat: target blkg_rwstat - * @op: REQ_OP - * @op_flags: rq_flag_bits + * @op: REQ_OP and flags * @val: value to add * * Add @val 
to @rwstat. The counters are chosen according to @rw. The * caller is responsible for synchronizing calls to this function. */ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, - int op, int op_flags, uint64_t val) + unsigned int op, uint64_t val) { struct percpu_counter *cnt; @@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); - if (op_flags & REQ_SYNC) + if (op_is_sync(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; @@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 535ab2e13d2e..4a2ab5d99ff7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -3,6 +3,7 @@ #include <linux/blkdev.h> #include <linux/sbitmap.h> +#include <linux/srcu.h> struct blk_mq_tags; struct blk_flush_queue; @@ -35,6 +36,8 @@ struct blk_mq_hw_ctx { struct blk_mq_tags *tags; + struct srcu_struct queue_rq_srcu; + unsigned long queued; unsigned long run; #define BLK_MQ_MAX_DISPATCH_ORDER 7 @@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); void __blk_mq_end_request(struct request *rq, int error); -void blk_mq_requeue_request(struct request *rq); -void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); -void blk_mq_cancel_requeue_work(struct request_queue *q); +void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, + bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq, int error); +bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); @@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); +int blk_mq_map_queues(struct blk_mq_tag_set *set); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cd395ecec99d..519ea2c9df61 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -17,7 +17,6 @@ struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); -#ifdef CONFIG_BLOCK /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -88,24 +87,6 @@ struct bio { struct bio_vec 
bi_inline_vecs[0]; }; -#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) -#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) -#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) - -#define bio_set_op_attrs(bio, op, op_flags) do { \ - if (__builtin_constant_p(op)) \ - BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ - else \ - WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ - if (__builtin_constant_p(op_flags)) \ - BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ - else \ - WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ - (bio)->bi_opf = bio_flags(bio); \ - (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ - (bio)->bi_opf |= (op_flags); \ -} while (0) - #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* @@ -119,6 +100,8 @@ struct bio { #define BIO_QUIET 6 /* Make BIO Quiet */ #define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ #define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ +#define BIO_THROTTLED 9 /* This bio has already been subjected to + * throttling rules. Don't do it again. */ /* * Flags starting here get preserved by bio_reset() - this includes @@ -142,53 +125,61 @@ struct bio { #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) -#endif /* CONFIG_BLOCK */ - /* - * Request flags. For use in the cmd_flags field of struct request, and in - * bi_opf of struct bio. Note that some flags are only valid in either one. + * Operations and flags common to the bio and request structures. + * We use 8 bits for encoding the operation, and the remaining 24 for flags. + * + * The least significant bit of the operation number indicates the data + * transfer direction: + * + * - if the least significant bit is set transfers are TO the device + * - if the least significant bit is not set transfers are FROM the device + * + * If a operation does not transfer data the least significant bit has no + * meaning. 
*/ -enum rq_flag_bits { - /* common flags */ - __REQ_FAILFAST_DEV, /* no driver retries of device errors */ +#define REQ_OP_BITS 8 +#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) +#define REQ_FLAG_BITS 24 + +enum req_opf { + /* read sectors from the device */ + REQ_OP_READ = 0, + /* write sectors to the device */ + REQ_OP_WRITE = 1, + /* flush the volatile write cache */ + REQ_OP_FLUSH = 2, + /* discard sectors */ + REQ_OP_DISCARD = 3, + /* get zone information */ + REQ_OP_ZONE_REPORT = 4, + /* securely erase sectors */ + REQ_OP_SECURE_ERASE = 5, + /* seset a zone write pointer */ + REQ_OP_ZONE_RESET = 6, + /* write the same sector many times */ + REQ_OP_WRITE_SAME = 7, + /* write the zero filled sector many times */ + REQ_OP_WRITE_ZEROES = 8, + + REQ_OP_LAST, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = /* no driver retries of device errors */ + REQ_OP_BITS, __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ - __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - - __REQ_NOIDLE, /* don't anticipate more IO after this one */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_IDLE, /* anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ __REQ_PREFLUSH, /* request for cache flush */ - - /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ - __REQ_THROTTLED, /* This bio has already been subjected to - * throttling rules. Don't do it again. */ - - /* request only flags */ - __REQ_SORTED, /* elevator knows about this request */ - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ - __REQ_NOMERGE, /* don't touch this for merging */ - __REQ_STARTED, /* drive already may have started this one */ - __REQ_DONTPREP, /* don't call prep for this one */ - __REQ_QUEUED, /* uses queueing */ - __REQ_ELVPRIV, /* elevator private data attached */ - __REQ_FAILED, /* set if the request failed */ - __REQ_QUIET, /* don't worry about errors */ - __REQ_PREEMPT, /* set for "ide_preempt" requests and also - for requests for which the SCSI "quiesce" - state must be ignored. 
*/ - __REQ_ALLOCED, /* request came from our alloc pool */ - __REQ_COPY_USER, /* contains copies of user pages */ - __REQ_FLUSH_SEQ, /* request for flush sequence */ - __REQ_IO_STAT, /* account I/O stat */ - __REQ_MIXED_MERGE, /* merge of different types, fail separately */ - __REQ_PM, /* runtime pm request */ - __REQ_HASHED, /* on IO scheduler merge hash */ - __REQ_MQ_INFLIGHT, /* track inflight for MQ */ + __REQ_BACKGROUND, /* background IO */ __REQ_NR_BITS, /* stops here */ }; @@ -198,54 +189,47 @@ enum rq_flag_bits { #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_NOIDLE (1ULL << __REQ_NOIDLE) +#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) +#define REQ_IDLE (1ULL << __REQ_IDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) +#define REQ_FUA (1ULL << __REQ_FUA) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) +#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) +#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) -#define REQ_COMMON_MASK \ - (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ - REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) -#define REQ_CLONE_MASK REQ_COMMON_MASK -/* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) + (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) -#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) -#define REQ_THROTTLED (1ULL << __REQ_THROTTLED) +#define bio_op(bio) \ + ((bio)->bi_opf & REQ_OP_MASK) +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) -#define REQ_SORTED (1ULL << __REQ_SORTED) -#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER) -#define REQ_FUA (1ULL << __REQ_FUA) -#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) -#define REQ_STARTED (1ULL << __REQ_STARTED) -#define REQ_DONTPREP (1ULL << __REQ_DONTPREP) -#define REQ_QUEUED (1ULL << __REQ_QUEUED) -#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV) -#define REQ_FAILED (1ULL << __REQ_FAILED) -#define REQ_QUIET (1ULL << __REQ_QUIET) -#define REQ_PREEMPT (1ULL << __REQ_PREEMPT) -#define REQ_ALLOCED (1ULL << __REQ_ALLOCED) -#define REQ_COPY_USER (1ULL << __REQ_COPY_USER) -#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) -#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) -#define REQ_IO_STAT (1ULL << __REQ_IO_STAT) -#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) -#define REQ_PM (1ULL << __REQ_PM) -#define REQ_HASHED (1ULL << __REQ_HASHED) -#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) - -enum req_op { - REQ_OP_READ, - REQ_OP_WRITE, - REQ_OP_DISCARD, /* request to discard sectors */ - REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ - REQ_OP_WRITE_SAME, /* write same block many times */ - REQ_OP_FLUSH, /* request for cache flush */ -}; +/* obsolete, don't use in new code */ +static inline void bio_set_op_attrs(struct bio *bio, unsigned op, + unsigned op_flags) +{ + bio->bi_opf = op | op_flags; +} -#define REQ_OP_BITS 3 +static inline bool op_is_write(unsigned int op) +{ + return (op & 1); +} + +/* + * Reads are always treated as synchronous, as are requests with the FUA or + * PREFLUSH flag. Other operations may be marked as synchronous using the + * REQ_SYNC flag. 
+ */ +static inline bool op_is_sync(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_READ || + (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); +} typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U @@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) return cookie & ((1u << BLK_QC_T_SHIFT) - 1); } +struct blk_issue_stat { + u64 time; +}; + +#define BLK_RQ_STAT_BATCH 64 + +struct blk_rq_stat { + s64 mean; + u64 min; + u64 max; + s32 nr_samples; + s32 nr_batch; + u64 batch; + s64 time; +}; + #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c47c358ba052..1ca8e8fd1078 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -24,6 +24,7 @@ #include <linux/rcupdate.h> #include <linux/percpu-refcount.h> #include <linux/scatterlist.h> +#include <linux/blkzoned.h> struct module; struct scsi_ioctl_command; @@ -37,6 +38,7 @@ struct bsg_job; struct blkcg_gq; struct blk_flush_queue; struct pr_ops; +struct rq_wb; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -77,6 +79,55 @@ enum rq_cmd_type_bits { REQ_TYPE_DRV_PRIV, /* driver defined types from here */ }; +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* elevator knows about this request */ +#define RQF_SORTED ((__force req_flags_t)(1 << 0)) +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* uses tagged queueing */ +#define RQF_QUEUED ((__force req_flags_t)(1 << 2)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* set for "ide_preempt" requests and also for requests for which the SCSI + "quiesce" state must be ignored. */ +#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) +/* contains copies of user pages */ +#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) +/* vaguely specified driver internal error. Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account I/O stat */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* request came from our alloc pool */ +#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* IO stats tracking on */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. 
*/ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + #define BLK_MAX_CDB 16 /* @@ -97,7 +148,8 @@ struct request { int cpu; unsigned cmd_type; - u64 cmd_flags; + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; unsigned long atomic_flags; /* the following two fields are internal, NEVER access directly */ @@ -126,6 +178,7 @@ struct request { */ union { struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; void *completion_data; }; @@ -151,6 +204,7 @@ struct request { struct gendisk *rq_disk; struct hd_struct *part; unsigned long start_time; + struct blk_issue_stat issue_stat; #ifdef CONFIG_BLK_CGROUP struct request_list *rl; /* rl this rq is alloced from */ unsigned long long start_time_ns; @@ -198,20 +252,6 @@ struct request { struct request *next_rq; }; -#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) -#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) - -#define req_set_op(req, op) do { \ - WARN_ON(op >= (1 << REQ_OP_BITS)); \ - (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ - (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ -} while (0) - -#define req_set_op_attrs(req, op, flags) do { \ - req_set_op(req, op); \ - (req)->cmd_flags |= flags; \ -} while (0) - static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -248,7 +288,6 @@ enum blk_queue_state { struct blk_queue_tag { struct request **tag_index; /* map of busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */ - int busy; /* current depth */ int max_depth; /* what we will send to device */ int real_max_depth; /* what the array can hold */ atomic_t refcnt; /* map can be shared */ @@ -261,6 +300,15 @@ struct blk_queue_tag { #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) +/* + * Zoned block device models (zoned limit). 
+ */ +enum blk_zoned_model { + BLK_ZONED_NONE, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ +}; + struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; @@ -278,6 +326,7 @@ struct queue_limits { unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -290,8 +339,45 @@ struct queue_limits { unsigned char cluster; unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +#ifdef CONFIG_BLK_DEV_ZONED + +struct blk_zone_report_hdr { + unsigned int nr_zones; + u8 padding[60]; }; +extern int blkdev_report_zones(struct block_device *bdev, + sector_t sector, struct blk_zone *zones, + unsigned int *nr_zones, gfp_t gfp_mask); +extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, + sector_t nr_sectors, gfp_t gfp_mask); + +extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline int blkdev_report_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +#endif /* CONFIG_BLK_DEV_ZONED */ + struct request_queue { /* * Together with queue_head for cacheline sharing @@ -302,6 +388,8 @@ struct request_queue { int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + struct rq_wb *rq_wb; + /* * If blkcg is not used, @q->root_rl serves all requests. If blkcg * is used, root blkg allocates from @q->root_rl and all other @@ -327,6 +415,8 @@ struct request_queue { struct blk_mq_ctx __percpu *queue_ctx; unsigned int nr_queues; + unsigned int queue_depth; + /* hw dispatch queues */ struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; @@ -412,6 +502,9 @@ struct request_queue { unsigned int nr_sorted; unsigned int in_flight[2]; + + struct blk_rq_stat rq_stats[2]; + /* * Number of active block driver functions for which blk_drain_queue() * must wait. 
Must be incremented around functions that unlock the @@ -420,6 +513,7 @@ struct request_queue { unsigned int request_fn_active; unsigned int rq_timeout; + int poll_nsec; struct timer_list timeout; struct work_struct timeout_work; struct list_head timeout_list; @@ -505,6 +599,7 @@ struct request_queue { #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ +#define QUEUE_FLAG_STATS 27 /* track rq completion times */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -601,7 +696,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) REQ_FAILFAST_DRIVER)) #define blk_account_rq(rq) \ - (((rq)->cmd_flags & REQ_STARTED) && \ + (((rq)->rq_flags & RQF_STARTED) && \ ((rq)->cmd_type == REQ_TYPE_FS)) #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) @@ -627,17 +722,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) return q->limits.cluster; } -/* - * We regard a request as sync, if either a read or a sync write - */ -static inline bool rw_is_sync(int op, unsigned int rw_flags) +static inline enum blk_zoned_model +blk_queue_zoned_model(struct request_queue *q) { - return op == REQ_OP_READ || (rw_flags & REQ_SYNC); + return q->limits.zoned; +} + +static inline bool blk_queue_is_zoned(struct request_queue *q) +{ + switch (blk_queue_zoned_model(q)) { + case BLK_ZONED_HA: + case BLK_ZONED_HM: + return true; + default: + return false; + } +} + +static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; } static inline bool rq_is_sync(struct request *rq) { - return rw_is_sync(req_op(rq), rq->cmd_flags); + return op_is_sync(rq->cmd_flags); } static inline bool blk_rl_full(struct request_list *rl, bool sync) @@ -669,8 +778,13 @@ static inline bool rq_mergeable(struct request *rq) if (req_op(rq) == REQ_OP_FLUSH) return false; + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; return true; } @@ -683,6 +797,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) return false; } +static inline unsigned int blk_queue_depth(struct request_queue *q) +{ + if (q->queue_depth) + return q->queue_depth; + + return q->nr_requests; +} + /* * q->prep_rq_fn return values */ @@ -790,8 +912,6 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); -extern void blk_add_request_payload(struct request *rq, struct page *page, - int offset, unsigned int len); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -824,6 +944,7 @@ extern void __blk_run_queue(struct request_queue *q); extern void __blk_run_queue_uncond(struct request_queue *q); extern void blk_run_queue(struct request_queue *); extern void blk_run_queue_async(struct request_queue *q); +extern void blk_mq_quiesce_queue(struct request_queue *q); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); @@ -837,7 +958,7 @@ extern int 
blk_execute_rq(struct request_queue *, struct gendisk *, extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); -bool blk_poll(struct request_queue *q, blk_qc_t cookie); +bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -879,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) return blk_rq_cur_bytes(rq) >> 9; } +/* + * Some commands like WRITE SAME have a payload or data transfer size which + * is different from the size of the request. Any driver that supports such + * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to + * calculate the data transfer size. + */ +static inline unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { @@ -888,6 +1022,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; + if (unlikely(op == REQ_OP_WRITE_ZEROES)) + return q->limits.max_write_zeroes_sectors; + return q->limits.max_sectors; } @@ -934,6 +1071,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq) } /* + * blk_rq_set_prio - associate a request with prio from ioc + * @rq: request of interest + * @ioc: target iocontext + * + * Assocate request prio with ioc prio so request based drivers + * can leverage priority information. + */ +static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc) +{ + if (ioc) + rq->ioprio = ioc->ioprio; +} + +/* * Request issue related functions. 
*/ extern struct request *blk_peek_request(struct request_queue *q); @@ -991,6 +1142,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); +extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, @@ -999,6 +1152,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, @@ -1027,6 +1181,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); @@ -1057,7 +1218,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q) static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} static inline void blk_pre_runtime_resume(struct request_queue *q) {} static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} -extern inline void blk_set_runtime_active(struct request_queue *q) {} +static inline void blk_set_runtime_active(struct request_queue *q) {} #endif /* @@ -1078,6 +1239,7 @@ struct blk_plug { struct list_head cb_list; /* md requires an unplug callback */ }; #define BLK_MAX_REQUEST_COUNT 16 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); @@ -1151,6 +1313,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); +extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, + bool discard); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, bool discard); static inline int sb_issue_discard(struct super_block *sb, sector_t block, @@ -1354,6 +1519,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev) return 0; } +static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_zeroes_sectors; + + return 0; +} + +static inline enum 
blk_zoned_model bdev_zoned_model(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zoned_model(q); + + return BLK_ZONED_NONE; +} + +static inline bool bdev_is_zoned(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_is_zoned(q); + + return false; +} + +static inline unsigned int bdev_zone_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zone_sectors(q); + + return 0; +} + static inline int queue_dma_alignment(struct request_queue *q) { return q ? q->dma_alignment : 511; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index cceb72f9e29f..e417f080219a 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) } extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); +extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h new file mode 100644 index 000000000000..92bc89ae7e20 --- /dev/null +++ b/include/linux/bpf-cgroup.h @@ -0,0 +1,92 @@ +#ifndef _BPF_CGROUP_H +#define _BPF_CGROUP_H + +#include <linux/jump_label.h> +#include <uapi/linux/bpf.h> + +struct sock; +struct cgroup; +struct sk_buff; + +#ifdef CONFIG_CGROUP_BPF + +extern struct static_key_false cgroup_bpf_enabled_key; +#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) + +struct cgroup_bpf { + /* + * Store two sets of bpf_prog pointers, one for programs that are + * pinned directly to this cgroup, and one for those that are effective + * when this cgroup is accessed. + */ + struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; + struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; +}; + +void cgroup_bpf_put(struct cgroup *cgrp); +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); + +void __cgroup_bpf_update(struct cgroup *cgrp, + struct cgroup *parent, + struct bpf_prog *prog, + enum bpf_attach_type type); + +/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ +void cgroup_bpf_update(struct cgroup *cgrp, + struct bpf_prog *prog, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_skb(struct sock *sk, + struct sk_buff *skb, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sk(struct sock *sk, + enum bpf_attach_type type); + +/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
*/ +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ + BPF_CGROUP_INET_INGRESS); \ + \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ + typeof(sk) __sk = sk_to_full_sk(sk); \ + if (sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ + BPF_CGROUP_INET_EGRESS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, \ + BPF_CGROUP_INET_SOCK_CREATE); \ + } \ + __ret; \ +}) + +#else + +struct cgroup_bpf {}; +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} +static inline void cgroup_bpf_inherit(struct cgroup *cgrp, + struct cgroup *parent) {} + +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) + +#endif /* CONFIG_CGROUP_BPF */ + +#endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c201017b5730..05cf951df3fe 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -233,13 +234,16 @@ void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); -struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); -struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); +struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_sub(struct bpf_prog *prog, int i); +struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); +int __bpf_prog_charge(struct user_struct *user, u32 pages); +void __bpf_prog_uncharge(struct user_struct *user, u32 pages); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); @@ -298,18 +302,33 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, { return ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) +static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, + int i) { return ERR_PTR(-EOPNOTSUPP); } +static inline void bpf_prog_sub(struct bpf_prog *prog, int i) +{ +} + static inline void bpf_prog_put(struct bpf_prog *prog) { } -static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) + +static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) { return ERR_PTR(-EOPNOTSUPP); } + +static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) +{ + return 0; +} + +static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) +{ +} #endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ 
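The BPF_CGROUP_RUN_PROG_* wrappers added in bpf-cgroup.h above share one pattern: a GNU C statement expression that yields 0 immediately unless the static-key backed cgroup_bpf_enabled flag is set, so a compiled-in hook costs only a predicted branch when no program is attached. An editor's sketch of that guard pattern, not part of the diff: cgroup_bpf_enabled here is a plain bool standing in for static_branch_unlikely(), and __run_filter() is a made-up stand-in for __cgroup_bpf_run_filter_skb().

/*
 * Guarded-hook pattern behind the BPF_CGROUP_RUN_PROG_* macros
 * (illustrative userspace sketch; names are stand-ins).
 */
#include <stdbool.h>
#include <stdio.h>

static bool cgroup_bpf_enabled;		/* stand-in for the static key */

static int __run_filter(int pkt_len)	/* stand-in for the real filter */
{
	return pkt_len > 1500 ? -1 : 0;	/* e.g. drop oversized packets */
}

#define RUN_PROG_INGRESS(pkt_len)			\
({							\
	int __ret = 0;					\
	if (cgroup_bpf_enabled)				\
		__ret = __run_filter(pkt_len);		\
	__ret;						\
})

int main(void)
{
	printf("disabled: %d\n", RUN_PROG_INGRESS(9000)); /* 0: hook is off */
	cgroup_bpf_enabled = true;
	printf("enabled:  %d\n", RUN_PROG_INGRESS(9000)); /* -1: filtered */
	return 0;
}

The CONFIG_CGROUP_BPF=n stubs in the diff collapse to the same shape, ({ 0; }), which is why callers can use the macros unconditionally.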
@@ -319,6 +338,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7035b997aaa5..a13b031dc6b8 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -14,22 +14,16 @@ * are obviously wrong for any sort of memory access. */ #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) -#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) +#define BPF_REGISTER_MIN_RANGE -1 struct bpf_reg_state { enum bpf_reg_type type; - /* - * Used to determine if any memory access using this register will - * result in a bad access. - */ - u64 min_value, max_value; union { /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ s64 imm; /* valid when type == PTR_TO_PACKET* */ struct { - u32 id; u16 off; u16 range; }; @@ -39,6 +33,13 @@ struct bpf_reg_state { */ struct bpf_map *map_ptr; }; + u32 id; + /* Used to determine if any memory access using this register will + * result in a bad access. These two fields must be last. + * See states_equal() + */ + s64 min_value; + u64 max_value; }; enum bpf_stack_slot_type { diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index e3354b74286c..4f7d8be9ddbf 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -13,11 +13,13 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 @@ -55,6 +57,7 @@ #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 + /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) @@ -105,11 +108,15 @@ #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4) -#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 /* * Broadcom LED source encodings. These are used in BCM5461, BCM5481, @@ -124,6 +131,7 @@ #define BCM_LED_SRC_INTR 0x6 #define BCM_LED_SRC_QUALITY 0x7 #define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_WIRESPEED 0x9 #define BCM_LED_SRC_MULTICOLOR1 0xa #define BCM_LED_SRC_OPENSHORT 0xb #define BCM_LED_SRC_OFF 0xe /* Tied high */ @@ -135,6 +143,14 @@ * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. 
*/ + +/* 00100: Reserved control register 2 */ +#define BCM54XX_SHD_SCR2 0x04 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 + /* 00101: Spare Control Register 3 */ #define BCM54XX_SHD_SCR3 0x05 #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 @@ -189,6 +205,12 @@ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ +/* BCM54810 Registers */ +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) +#define BCM54810_SHD_CLK_CTL 0x3 +#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) + /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ @@ -222,6 +244,9 @@ #define LPI_FEATURE_EN_DIG1000X 0x4000 /* Core register definitions*/ +#define MII_BRCM_CORE_BASE12 0x12 +#define MII_BRCM_CORE_BASE13 0x13 +#define MII_BRCM_CORE_BASE14 0x14 #define MII_BRCM_CORE_BASE1E 0x1E #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index a226652a5a6c..657a718c27d2 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -40,6 +40,8 @@ struct bsg_job { struct device *dev; struct request *req; + struct kref kref; + /* Transport/driver specific request/reply structs */ void *request; void *reply; @@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result, int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size); void bsg_request_fn(struct request_queue *q); +void bsg_job_put(struct bsg_job *job); +int __must_check bsg_job_get(struct bsg_job *job); #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index ebbacd14d450..d67ab83823ad 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *); void invalidate_inode_buffers(struct inode *); int remove_inode_buffers(struct inode *inode); int sync_mapping_buffers(struct address_space *mapping); -void unmap_underlying_metadata(struct block_device *bdev, sector_t block); +void clean_bdev_aliases(struct block_device *bdev, sector_t block, + sector_t len); +static inline void clean_bdev_bh_alias(struct buffer_head *bh) +{ + clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); +} void mark_buffer_async_write(struct buffer_head *bh); void __wait_on_buffer(struct buffer_head *); diff --git a/include/linux/bug.h b/include/linux/bug.h index 292d6a10b0c2..baff2e8fc8a8 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, } #endif /* CONFIG_GENERIC_BUG */ + +/* + * Since detected data corruption should stop operation on the affected + * structures, this returns false if the corruption condition is found. + */ +#define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ + do { \ + if (unlikely(condition)) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + pr_err(fmt, ##__VA_ARGS__); \ + BUG(); \ + } else \ + WARN(1, fmt, ##__VA_ARGS__); \ + return false; \ + } \ + } while (0) + #endif /* _LINUX_BUG_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2189935075b4..6a524bf6a06d 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -18,6 +18,7 @@ enum cache_type { /** * struct cacheinfo - represent a cache leaf node + * @id: This cache's id. It is unique among caches with the same (type, level). * @type: type of the cache - data, inst or unified * @level: represents the hierarchy in the multi-level cache * @coherency_line_size: size of each cache line usually representing @@ -44,6 +45,7 @@ enum cache_type { * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { + unsigned int id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -61,6 +63,7 @@ struct cacheinfo { #define CACHE_WRITE_ALLOCATE BIT(3) #define CACHE_ALLOCATE_POLICY_MASK \ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) +#define CACHE_ID BIT(4) struct device_node *of_node; bool disable_sysfs; @@ -71,6 +74,7 @@ struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int num_levels; unsigned int num_leaves; + bool cpu_map_populated; }; /* diff --git a/include/linux/capability.h b/include/linux/capability.h index dbc21c719ce6..6ffb67e10c06 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) return true; } #endif /* CONFIG_MULTIUSER */ +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index a7653339fedb..c71dd8fa5764 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -11,8 +11,8 @@ * published by the Free Software Foundation. */ -#ifndef __CPP_H__ -#define __CPP_H__ +#ifndef __CCP_H__ +#define __CCP_H__ #include <linux/scatterlist.h> #include <linux/workqueue.h> @@ -553,7 +553,7 @@ enum ccp_engine { #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** - * struct ccp_cmd - CPP operation request + * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) * @ccp: CCP device to be run on (ccp driver use only) diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h deleted file mode 100644 index 138bbf721e70..000000000000 --- a/include/linux/cec-funcs.h +++ /dev/null @@ -1,1971 +0,0 @@ -/* - * cec - HDMI Consumer Electronics Control message functions - * - * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
- * - * Alternatively you can redistribute this file under the terms of the - * BSD license as stated below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * 3. The names of its contributors may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * Note: this framework is still in staging and it is likely the API - * will change before it goes out of staging. - * - * Once it is moved out of staging this header will move to uapi. - */ -#ifndef _CEC_UAPI_FUNCS_H -#define _CEC_UAPI_FUNCS_H - -#include <linux/cec.h> - -/* One Touch Play Feature */ -static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr) -{ - msg->len = 4; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_ACTIVE_SOURCE; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; -} - -static inline void cec_ops_active_source(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_image_view_on(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON; -} - -static inline void cec_msg_text_view_on(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_TEXT_VIEW_ON; -} - - -/* Routing Control Feature */ -static inline void cec_msg_inactive_source(struct cec_msg *msg, - __u16 phys_addr) -{ - msg->len = 4; - msg->msg[1] = CEC_MSG_INACTIVE_SOURCE; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; -} - -static inline void cec_ops_inactive_source(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_request_active_source(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE; - msg->reply = reply ? 
CEC_MSG_ACTIVE_SOURCE : 0; -} - -static inline void cec_msg_routing_information(struct cec_msg *msg, - __u16 phys_addr) -{ - msg->len = 4; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_ROUTING_INFORMATION; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; -} - -static inline void cec_ops_routing_information(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_routing_change(struct cec_msg *msg, - bool reply, - __u16 orig_phys_addr, - __u16 new_phys_addr) -{ - msg->len = 6; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_ROUTING_CHANGE; - msg->msg[2] = orig_phys_addr >> 8; - msg->msg[3] = orig_phys_addr & 0xff; - msg->msg[4] = new_phys_addr >> 8; - msg->msg[5] = new_phys_addr & 0xff; - msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0; -} - -static inline void cec_ops_routing_change(const struct cec_msg *msg, - __u16 *orig_phys_addr, - __u16 *new_phys_addr) -{ - *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5]; -} - -static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr) -{ - msg->len = 4; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_SET_STREAM_PATH; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; -} - -static inline void cec_ops_set_stream_path(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - - -/* Standby Feature */ -static inline void cec_msg_standby(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_STANDBY; -} - - -/* One Touch Record Feature */ -static inline void cec_msg_record_off(struct cec_msg *msg, bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_RECORD_OFF; - msg->reply = reply ? 
CEC_MSG_RECORD_STATUS : 0; -} - -struct cec_op_arib_data { - __u16 transport_id; - __u16 service_id; - __u16 orig_network_id; -}; - -struct cec_op_atsc_data { - __u16 transport_id; - __u16 program_number; -}; - -struct cec_op_dvb_data { - __u16 transport_id; - __u16 service_id; - __u16 orig_network_id; -}; - -struct cec_op_channel_data { - __u8 channel_number_fmt; - __u16 major; - __u16 minor; -}; - -struct cec_op_digital_service_id { - __u8 service_id_method; - __u8 dig_bcast_system; - union { - struct cec_op_arib_data arib; - struct cec_op_atsc_data atsc; - struct cec_op_dvb_data dvb; - struct cec_op_channel_data channel; - }; -}; - -struct cec_op_record_src { - __u8 type; - union { - struct cec_op_digital_service_id digital; - struct { - __u8 ana_bcast_type; - __u16 ana_freq; - __u8 bcast_system; - } analog; - struct { - __u8 plug; - } ext_plug; - struct { - __u16 phys_addr; - } ext_phys_addr; - }; -}; - -static inline void cec_set_digital_service_id(__u8 *msg, - const struct cec_op_digital_service_id *digital) -{ - *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system; - if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { - *msg++ = (digital->channel.channel_number_fmt << 2) | - (digital->channel.major >> 8); - *msg++ = digital->channel.major & 0xff; - *msg++ = digital->channel.minor >> 8; - *msg++ = digital->channel.minor & 0xff; - *msg++ = 0; - *msg++ = 0; - return; - } - switch (digital->dig_bcast_system) { - case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN: - case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE: - case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT: - case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T: - *msg++ = digital->atsc.transport_id >> 8; - *msg++ = digital->atsc.transport_id & 0xff; - *msg++ = digital->atsc.program_number >> 8; - *msg++ = digital->atsc.program_number & 0xff; - *msg++ = 0; - *msg++ = 0; - break; - default: - *msg++ = digital->dvb.transport_id >> 8; - *msg++ = digital->dvb.transport_id & 0xff; - *msg++ = digital->dvb.service_id >> 8; - *msg++ = digital->dvb.service_id & 0xff; - *msg++ = digital->dvb.orig_network_id >> 8; - *msg++ = digital->dvb.orig_network_id & 0xff; - break; - } -} - -static inline void cec_get_digital_service_id(const __u8 *msg, - struct cec_op_digital_service_id *digital) -{ - digital->service_id_method = msg[0] >> 7; - digital->dig_bcast_system = msg[0] & 0x7f; - if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { - digital->channel.channel_number_fmt = msg[1] >> 2; - digital->channel.major = ((msg[1] & 3) << 8) | msg[2]; - digital->channel.minor = (msg[3] << 8) | msg[4]; - return; - } - digital->dvb.transport_id = (msg[1] << 8) | msg[2]; - digital->dvb.service_id = (msg[3] << 8) | msg[4]; - digital->dvb.orig_network_id = (msg[5] << 8) | msg[6]; -} - -static inline void cec_msg_record_on_own(struct cec_msg *msg) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_RECORD_ON; - msg->msg[2] = CEC_OP_RECORD_SRC_OWN; -} - -static inline void cec_msg_record_on_digital(struct cec_msg *msg, - const struct cec_op_digital_service_id *digital) -{ - msg->len = 10; - msg->msg[1] = CEC_MSG_RECORD_ON; - msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL; - cec_set_digital_service_id(msg->msg + 3, digital); -} - -static inline void cec_msg_record_on_analog(struct cec_msg *msg, - __u8 ana_bcast_type, - __u16 ana_freq, - __u8 bcast_system) -{ - msg->len = 7; - msg->msg[1] = CEC_MSG_RECORD_ON; - msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG; - msg->msg[3] = ana_bcast_type; - msg->msg[4] = ana_freq >> 8; - msg->msg[5] = ana_freq
& 0xff; - msg->msg[6] = bcast_system; -} - -static inline void cec_msg_record_on_plug(struct cec_msg *msg, - __u8 plug) -{ - msg->len = 4; - msg->msg[1] = CEC_MSG_RECORD_ON; - msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG; - msg->msg[3] = plug; -} - -static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, - __u16 phys_addr) -{ - msg->len = 5; - msg->msg[1] = CEC_MSG_RECORD_ON; - msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR; - msg->msg[3] = phys_addr >> 8; - msg->msg[4] = phys_addr & 0xff; -} - -static inline void cec_msg_record_on(struct cec_msg *msg, - bool reply, - const struct cec_op_record_src *rec_src) -{ - switch (rec_src->type) { - case CEC_OP_RECORD_SRC_OWN: - cec_msg_record_on_own(msg); - break; - case CEC_OP_RECORD_SRC_DIGITAL: - cec_msg_record_on_digital(msg, &rec_src->digital); - break; - case CEC_OP_RECORD_SRC_ANALOG: - cec_msg_record_on_analog(msg, - rec_src->analog.ana_bcast_type, - rec_src->analog.ana_freq, - rec_src->analog.bcast_system); - break; - case CEC_OP_RECORD_SRC_EXT_PLUG: - cec_msg_record_on_plug(msg, rec_src->ext_plug.plug); - break; - case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: - cec_msg_record_on_phys_addr(msg, - rec_src->ext_phys_addr.phys_addr); - break; - } - msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; -} - -static inline void cec_ops_record_on(const struct cec_msg *msg, - struct cec_op_record_src *rec_src) -{ - rec_src->type = msg->msg[2]; - switch (rec_src->type) { - case CEC_OP_RECORD_SRC_OWN: - break; - case CEC_OP_RECORD_SRC_DIGITAL: - cec_get_digital_service_id(msg->msg + 3, &rec_src->digital); - break; - case CEC_OP_RECORD_SRC_ANALOG: - rec_src->analog.ana_bcast_type = msg->msg[3]; - rec_src->analog.ana_freq = - (msg->msg[4] << 8) | msg->msg[5]; - rec_src->analog.bcast_system = msg->msg[6]; - break; - case CEC_OP_RECORD_SRC_EXT_PLUG: - rec_src->ext_plug.plug = msg->msg[3]; - break; - case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: - rec_src->ext_phys_addr.phys_addr = - (msg->msg[3] << 8) | msg->msg[4]; - break; - } -} - -static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_RECORD_STATUS; - msg->msg[2] = rec_status; -} - -static inline void cec_ops_record_status(const struct cec_msg *msg, - __u8 *rec_status) -{ - *rec_status = msg->msg[2]; -} - -static inline void cec_msg_record_tv_screen(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN; - msg->reply = reply ? CEC_MSG_RECORD_ON : 0; -} - - -/* Timer Programming Feature */ -static inline void cec_msg_timer_status(struct cec_msg *msg, - __u8 timer_overlap_warning, - __u8 media_info, - __u8 prog_info, - __u8 prog_error, - __u8 duration_hr, - __u8 duration_min) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_TIMER_STATUS; - msg->msg[2] = (timer_overlap_warning << 7) | - (media_info << 5) | - (prog_info ? 0x10 : 0) | - (prog_info ? 
prog_info : prog_error); - if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || - prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || - prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { - msg->len += 2; - msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10); - } -} - -static inline void cec_ops_timer_status(const struct cec_msg *msg, - __u8 *timer_overlap_warning, - __u8 *media_info, - __u8 *prog_info, - __u8 *prog_error, - __u8 *duration_hr, - __u8 *duration_min) -{ - *timer_overlap_warning = msg->msg[2] >> 7; - *media_info = (msg->msg[2] >> 5) & 3; - if (msg->msg[2] & 0x10) { - *prog_info = msg->msg[2] & 0xf; - *prog_error = 0; - } else { - *prog_info = 0; - *prog_error = msg->msg[2] & 0xf; - } - if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || - *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || - *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { - *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf); - *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - } else { - *duration_hr = *duration_min = 0; - } -} - -static inline void cec_msg_timer_cleared_status(struct cec_msg *msg, - __u8 timer_cleared_status) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS; - msg->msg[2] = timer_cleared_status; -} - -static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg, - __u8 *timer_cleared_status) -{ - *timer_cleared_status = msg->msg[2]; -} - -static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - __u8 ana_bcast_type, - __u16 ana_freq, - __u8 bcast_system) -{ - msg->len = 13; - msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - msg->msg[9] = ana_bcast_type; - msg->msg[10] = ana_freq >> 8; - msg->msg[11] = ana_freq & 0xff; - msg->msg[12] = bcast_system; - msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; -} - -static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - __u8 *ana_bcast_type, - __u16 *ana_freq, - __u8 *bcast_system) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - *ana_bcast_type = msg->msg[9]; - *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; - *bcast_system = msg->msg[12]; -} - -static inline void cec_msg_clear_digital_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - const struct cec_op_digital_service_id *digital) -{ - msg->len = 16; - msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; - msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - cec_set_digital_service_id(msg->msg + 9, digital); -} - -static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - struct cec_op_digital_service_id *digital) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - cec_get_digital_service_id(msg->msg + 9, digital); -} - -static inline void cec_msg_clear_ext_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - __u8 ext_src_spec, - __u8 plug, - __u16 phys_addr) -{ - msg->len = 13; - msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - msg->msg[9] = ext_src_spec; - msg->msg[10] = plug; - msg->msg[11] = phys_addr >> 8; - msg->msg[12] = phys_addr & 0xff; - msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; -} - -static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - __u8 *ext_src_spec, - __u8 *plug, - __u16 *phys_addr) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - *ext_src_spec = msg->msg[9]; - *plug = msg->msg[10]; - *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; -} - -static inline void cec_msg_set_analogue_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - __u8 ana_bcast_type, - __u16 ana_freq, - __u8 bcast_system) -{ - msg->len = 13; - msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - msg->msg[9] = ana_bcast_type; - msg->msg[10] = ana_freq >> 8; - msg->msg[11] = ana_freq & 0xff; - msg->msg[12] = bcast_system; - msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; -} - -static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - __u8 *ana_bcast_type, - __u16 *ana_freq, - __u8 *bcast_system) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - *ana_bcast_type = msg->msg[9]; - *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; - *bcast_system = msg->msg[12]; -} - -static inline void cec_msg_set_digital_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - const struct cec_op_digital_service_id *digital) -{ - msg->len = 16; - msg->reply = reply ? 
CEC_MSG_TIMER_STATUS : 0; - msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - cec_set_digital_service_id(msg->msg + 9, digital); -} - -static inline void cec_ops_set_digital_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - struct cec_op_digital_service_id *digital) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - cec_get_digital_service_id(msg->msg + 9, digital); -} - -static inline void cec_msg_set_ext_timer(struct cec_msg *msg, - bool reply, - __u8 day, - __u8 month, - __u8 start_hr, - __u8 start_min, - __u8 duration_hr, - __u8 duration_min, - __u8 recording_seq, - __u8 ext_src_spec, - __u8 plug, - __u16 phys_addr) -{ - msg->len = 13; - msg->msg[1] = CEC_MSG_SET_EXT_TIMER; - msg->msg[2] = day; - msg->msg[3] = month; - /* Hours and minutes are in BCD format */ - msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); - msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); - msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); - msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); - msg->msg[8] = recording_seq; - msg->msg[9] = ext_src_spec; - msg->msg[10] = plug; - msg->msg[11] = phys_addr >> 8; - msg->msg[12] = phys_addr & 0xff; - msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; -} - -static inline void cec_ops_set_ext_timer(const struct cec_msg *msg, - __u8 *day, - __u8 *month, - __u8 *start_hr, - __u8 *start_min, - __u8 *duration_hr, - __u8 *duration_min, - __u8 *recording_seq, - __u8 *ext_src_spec, - __u8 *plug, - __u16 *phys_addr) -{ - *day = msg->msg[2]; - *month = msg->msg[3]; - /* Hours and minutes are in BCD format */ - *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); - *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); - *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); - *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); - *recording_seq = msg->msg[8]; - *ext_src_spec = msg->msg[9]; - *plug = msg->msg[10]; - *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; -} - -static inline void cec_msg_set_timer_program_title(struct cec_msg *msg, - const char *prog_title) -{ - unsigned int len = strlen(prog_title); - - if (len > 14) - len = 14; - msg->len = 2 + len; - msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE; - memcpy(msg->msg + 2, prog_title, len); -} - -static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg, - char *prog_title) -{ - unsigned int len = msg->len > 2 ? 
msg->len - 2 : 0; - - if (len > 14) - len = 14; - memcpy(prog_title, msg->msg + 2, len); - prog_title[len] = '\0'; -} - -/* System Information Feature */ -static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_CEC_VERSION; - msg->msg[2] = cec_version; -} - -static inline void cec_ops_cec_version(const struct cec_msg *msg, - __u8 *cec_version) -{ - *cec_version = msg->msg[2]; -} - -static inline void cec_msg_get_cec_version(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GET_CEC_VERSION; - msg->reply = reply ? CEC_MSG_CEC_VERSION : 0; -} - -static inline void cec_msg_report_physical_addr(struct cec_msg *msg, - __u16 phys_addr, __u8 prim_devtype) -{ - msg->len = 5; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; - msg->msg[4] = prim_devtype; -} - -static inline void cec_ops_report_physical_addr(const struct cec_msg *msg, - __u16 *phys_addr, __u8 *prim_devtype) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *prim_devtype = msg->msg[4]; -} - -static inline void cec_msg_give_physical_addr(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR; - msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0; -} - -static inline void cec_msg_set_menu_language(struct cec_msg *msg, - const char *language) -{ - msg->len = 5; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE; - memcpy(msg->msg + 2, language, 3); -} - -static inline void cec_ops_set_menu_language(const struct cec_msg *msg, - char *language) -{ - memcpy(language, msg->msg + 2, 3); - language[3] = '\0'; -} - -static inline void cec_msg_get_menu_language(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE; - msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0; -} - -/* - * Assumes a single RC Profile byte and a single Device Features byte, - * i.e. no extended features are supported by this helper function. - * - * As of CEC 2.0 no extended features are defined, should those be added - * in the future, then this function needs to be adapted or a new function - * should be added. - */ -static inline void cec_msg_report_features(struct cec_msg *msg, - __u8 cec_version, __u8 all_device_types, - __u8 rc_profile, __u8 dev_features) -{ - msg->len = 6; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_REPORT_FEATURES; - msg->msg[2] = cec_version; - msg->msg[3] = all_device_types; - msg->msg[4] = rc_profile; - msg->msg[5] = dev_features; -} - -static inline void cec_ops_report_features(const struct cec_msg *msg, - __u8 *cec_version, __u8 *all_device_types, - const __u8 **rc_profile, const __u8 **dev_features) -{ - const __u8 *p = &msg->msg[4]; - - *cec_version = msg->msg[2]; - *all_device_types = msg->msg[3]; - *rc_profile = p; - while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT)) - p++; - if (!(*p & CEC_OP_FEAT_EXT)) { - *dev_features = p + 1; - while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT)) - p++; - } - if (*p & CEC_OP_FEAT_EXT) - *rc_profile = *dev_features = NULL; -} - -static inline void cec_msg_give_features(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_FEATURES; - msg->reply = reply ? 
CEC_MSG_REPORT_FEATURES : 0; -} - -/* Deck Control Feature */ -static inline void cec_msg_deck_control(struct cec_msg *msg, - __u8 deck_control_mode) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_DECK_CONTROL; - msg->msg[2] = deck_control_mode; -} - -static inline void cec_ops_deck_control(const struct cec_msg *msg, - __u8 *deck_control_mode) -{ - *deck_control_mode = msg->msg[2]; -} - -static inline void cec_msg_deck_status(struct cec_msg *msg, - __u8 deck_info) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_DECK_STATUS; - msg->msg[2] = deck_info; -} - -static inline void cec_ops_deck_status(const struct cec_msg *msg, - __u8 *deck_info) -{ - *deck_info = msg->msg[2]; -} - -static inline void cec_msg_give_deck_status(struct cec_msg *msg, - bool reply, - __u8 status_req) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS; - msg->msg[2] = status_req; - msg->reply = reply ? CEC_MSG_DECK_STATUS : 0; -} - -static inline void cec_ops_give_deck_status(const struct cec_msg *msg, - __u8 *status_req) -{ - *status_req = msg->msg[2]; -} - -static inline void cec_msg_play(struct cec_msg *msg, - __u8 play_mode) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_PLAY; - msg->msg[2] = play_mode; -} - -static inline void cec_ops_play(const struct cec_msg *msg, - __u8 *play_mode) -{ - *play_mode = msg->msg[2]; -} - - -/* Tuner Control Feature */ -struct cec_op_tuner_device_info { - __u8 rec_flag; - __u8 tuner_display_info; - bool is_analog; - union { - struct cec_op_digital_service_id digital; - struct { - __u8 ana_bcast_type; - __u16 ana_freq; - __u8 bcast_system; - } analog; - }; -}; - -static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg, - __u8 rec_flag, - __u8 tuner_display_info, - __u8 ana_bcast_type, - __u16 ana_freq, - __u8 bcast_system) -{ - msg->len = 7; - msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; - msg->msg[2] = (rec_flag << 7) | tuner_display_info; - msg->msg[3] = ana_bcast_type; - msg->msg[4] = ana_freq >> 8; - msg->msg[5] = ana_freq & 0xff; - msg->msg[6] = bcast_system; -} - -static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg, - __u8 rec_flag, __u8 tuner_display_info, - const struct cec_op_digital_service_id *digital) -{ - msg->len = 10; - msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; - msg->msg[2] = (rec_flag << 7) | tuner_display_info; - cec_set_digital_service_id(msg->msg + 3, digital); -} - -static inline void cec_msg_tuner_device_status(struct cec_msg *msg, - const struct cec_op_tuner_device_info *tuner_dev_info) -{ - if (tuner_dev_info->is_analog) - cec_msg_tuner_device_status_analog(msg, - tuner_dev_info->rec_flag, - tuner_dev_info->tuner_display_info, - tuner_dev_info->analog.ana_bcast_type, - tuner_dev_info->analog.ana_freq, - tuner_dev_info->analog.bcast_system); - else - cec_msg_tuner_device_status_digital(msg, - tuner_dev_info->rec_flag, - tuner_dev_info->tuner_display_info, - &tuner_dev_info->digital); -} - -static inline void cec_ops_tuner_device_status(const struct cec_msg *msg, - struct cec_op_tuner_device_info *tuner_dev_info) -{ - tuner_dev_info->is_analog = msg->len < 10; - tuner_dev_info->rec_flag = msg->msg[2] >> 7; - tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f; - if (tuner_dev_info->is_analog) { - tuner_dev_info->analog.ana_bcast_type = msg->msg[3]; - tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5]; - tuner_dev_info->analog.bcast_system = msg->msg[6]; - return; - } - cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital); -} - -static inline void cec_msg_give_tuner_device_status(struct cec_msg 
*msg, - bool reply, - __u8 status_req) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS; - msg->msg[2] = status_req; - msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0; -} - -static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg, - __u8 *status_req) -{ - *status_req = msg->msg[2]; -} - -static inline void cec_msg_select_analogue_service(struct cec_msg *msg, - __u8 ana_bcast_type, - __u16 ana_freq, - __u8 bcast_system) -{ - msg->len = 6; - msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE; - msg->msg[2] = ana_bcast_type; - msg->msg[3] = ana_freq >> 8; - msg->msg[4] = ana_freq & 0xff; - msg->msg[5] = bcast_system; -} - -static inline void cec_ops_select_analogue_service(const struct cec_msg *msg, - __u8 *ana_bcast_type, - __u16 *ana_freq, - __u8 *bcast_system) -{ - *ana_bcast_type = msg->msg[2]; - *ana_freq = (msg->msg[3] << 8) | msg->msg[4]; - *bcast_system = msg->msg[5]; -} - -static inline void cec_msg_select_digital_service(struct cec_msg *msg, - const struct cec_op_digital_service_id *digital) -{ - msg->len = 9; - msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE; - cec_set_digital_service_id(msg->msg + 2, digital); -} - -static inline void cec_ops_select_digital_service(const struct cec_msg *msg, - struct cec_op_digital_service_id *digital) -{ - cec_get_digital_service_id(msg->msg + 2, digital); -} - -static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT; -} - -static inline void cec_msg_tuner_step_increment(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT; -} - - -/* Vendor Specific Commands Feature */ -static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id) -{ - msg->len = 5; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID; - msg->msg[2] = vendor_id >> 16; - msg->msg[3] = (vendor_id >> 8) & 0xff; - msg->msg[4] = vendor_id & 0xff; -} - -static inline void cec_ops_device_vendor_id(const struct cec_msg *msg, - __u32 *vendor_id) -{ - *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; -} - -static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID; - msg->reply = reply ? 
CEC_MSG_DEVICE_VENDOR_ID : 0; -} - -static inline void cec_msg_vendor_command(struct cec_msg *msg, - __u8 size, const __u8 *vendor_cmd) -{ - if (size > 14) - size = 14; - msg->len = 2 + size; - msg->msg[1] = CEC_MSG_VENDOR_COMMAND; - memcpy(msg->msg + 2, vendor_cmd, size); -} - -static inline void cec_ops_vendor_command(const struct cec_msg *msg, - __u8 *size, - const __u8 **vendor_cmd) -{ - *size = msg->len - 2; - - if (*size > 14) - *size = 14; - *vendor_cmd = msg->msg + 2; -} - -static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg, - __u32 vendor_id, __u8 size, - const __u8 *vendor_cmd) -{ - if (size > 11) - size = 11; - msg->len = 5 + size; - msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID; - msg->msg[2] = vendor_id >> 16; - msg->msg[3] = (vendor_id >> 8) & 0xff; - msg->msg[4] = vendor_id & 0xff; - memcpy(msg->msg + 5, vendor_cmd, size); -} - -static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg, - __u32 *vendor_id, __u8 *size, - const __u8 **vendor_cmd) -{ - *size = msg->len - 5; - - if (*size > 11) - *size = 11; - *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; - *vendor_cmd = msg->msg + 5; -} - -static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg, - __u8 size, - const __u8 *rc_code) -{ - if (size > 14) - size = 14; - msg->len = 2 + size; - msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN; - memcpy(msg->msg + 2, rc_code, size); -} - -static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg, - __u8 *size, - const __u8 **rc_code) -{ - *size = msg->len - 2; - - if (*size > 14) - *size = 14; - *rc_code = msg->msg + 2; -} - -static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP; -} - - -/* OSD Display Feature */ -static inline void cec_msg_set_osd_string(struct cec_msg *msg, - __u8 disp_ctl, - const char *osd) -{ - unsigned int len = strlen(osd); - - if (len > 13) - len = 13; - msg->len = 3 + len; - msg->msg[1] = CEC_MSG_SET_OSD_STRING; - msg->msg[2] = disp_ctl; - memcpy(msg->msg + 3, osd, len); -} - -static inline void cec_ops_set_osd_string(const struct cec_msg *msg, - __u8 *disp_ctl, - char *osd) -{ - unsigned int len = msg->len > 3 ? msg->len - 3 : 0; - - *disp_ctl = msg->msg[2]; - if (len > 13) - len = 13; - memcpy(osd, msg->msg + 3, len); - osd[len] = '\0'; -} - - -/* Device OSD Transfer Feature */ -static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name) -{ - unsigned int len = strlen(name); - - if (len > 14) - len = 14; - msg->len = 2 + len; - msg->msg[1] = CEC_MSG_SET_OSD_NAME; - memcpy(msg->msg + 2, name, len); -} - -static inline void cec_ops_set_osd_name(const struct cec_msg *msg, - char *name) -{ - unsigned int len = msg->len > 2 ? msg->len - 2 : 0; - - if (len > 14) - len = 14; - memcpy(name, msg->msg + 2, len); - name[len] = '\0'; -} - -static inline void cec_msg_give_osd_name(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_OSD_NAME; - msg->reply = reply ? 
CEC_MSG_SET_OSD_NAME : 0; -} - - -/* Device Menu Control Feature */ -static inline void cec_msg_menu_status(struct cec_msg *msg, - __u8 menu_state) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_MENU_STATUS; - msg->msg[2] = menu_state; -} - -static inline void cec_ops_menu_status(const struct cec_msg *msg, - __u8 *menu_state) -{ - *menu_state = msg->msg[2]; -} - -static inline void cec_msg_menu_request(struct cec_msg *msg, - bool reply, - __u8 menu_req) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_MENU_REQUEST; - msg->msg[2] = menu_req; - msg->reply = reply ? CEC_MSG_MENU_STATUS : 0; -} - -static inline void cec_ops_menu_request(const struct cec_msg *msg, - __u8 *menu_req) -{ - *menu_req = msg->msg[2]; -} - -struct cec_op_ui_command { - __u8 ui_cmd; - bool has_opt_arg; - union { - struct cec_op_channel_data channel_identifier; - __u8 ui_broadcast_type; - __u8 ui_sound_presentation_control; - __u8 play_mode; - __u8 ui_function_media; - __u8 ui_function_select_av_input; - __u8 ui_function_select_audio_input; - }; -}; - -static inline void cec_msg_user_control_pressed(struct cec_msg *msg, - const struct cec_op_ui_command *ui_cmd) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED; - msg->msg[2] = ui_cmd->ui_cmd; - if (!ui_cmd->has_opt_arg) - return; - switch (ui_cmd->ui_cmd) { - case 0x56: - case 0x57: - case 0x60: - case 0x68: - case 0x69: - case 0x6a: - /* The optional operand is one byte for all these ui commands */ - msg->len++; - msg->msg[3] = ui_cmd->play_mode; - break; - case 0x67: - msg->len += 4; - msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) | - (ui_cmd->channel_identifier.major >> 8); - msg->msg[4] = ui_cmd->channel_identifier.major & 0xff; - msg->msg[5] = ui_cmd->channel_identifier.minor >> 8; - msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff; - break; - } -} - -static inline void cec_ops_user_control_pressed(const struct cec_msg *msg, - struct cec_op_ui_command *ui_cmd) -{ - ui_cmd->ui_cmd = msg->msg[2]; - ui_cmd->has_opt_arg = false; - if (msg->len == 3) - return; - switch (ui_cmd->ui_cmd) { - case 0x56: - case 0x57: - case 0x60: - case 0x68: - case 0x69: - case 0x6a: - /* The optional operand is one byte for all these ui commands */ - ui_cmd->play_mode = msg->msg[3]; - ui_cmd->has_opt_arg = true; - break; - case 0x67: - if (msg->len < 7) - break; - ui_cmd->has_opt_arg = true; - ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2; - ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4]; - ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6]; - break; - } -} - -static inline void cec_msg_user_control_released(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED; -} - -/* Remote Control Passthrough Feature */ - -/* Power Status Feature */ -static inline void cec_msg_report_power_status(struct cec_msg *msg, - __u8 pwr_state) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS; - msg->msg[2] = pwr_state; -} - -static inline void cec_ops_report_power_status(const struct cec_msg *msg, - __u8 *pwr_state) -{ - *pwr_state = msg->msg[2]; -} - -static inline void cec_msg_give_device_power_status(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS; - msg->reply = reply ?
CEC_MSG_REPORT_POWER_STATUS : 0; -} - -/* General Protocol Messages */ -static inline void cec_msg_feature_abort(struct cec_msg *msg, - __u8 abort_msg, __u8 reason) -{ - msg->len = 4; - msg->msg[1] = CEC_MSG_FEATURE_ABORT; - msg->msg[2] = abort_msg; - msg->msg[3] = reason; -} - -static inline void cec_ops_feature_abort(const struct cec_msg *msg, - __u8 *abort_msg, __u8 *reason) -{ - *abort_msg = msg->msg[2]; - *reason = msg->msg[3]; -} - -/* This changes the current message into a feature abort message */ -static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason) -{ - cec_msg_set_reply_to(msg, msg); - msg->len = 4; - msg->msg[2] = msg->msg[1]; - msg->msg[3] = reason; - msg->msg[1] = CEC_MSG_FEATURE_ABORT; -} - -static inline void cec_msg_abort(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_ABORT; -} - - -/* System Audio Control Feature */ -static inline void cec_msg_report_audio_status(struct cec_msg *msg, - __u8 aud_mute_status, - __u8 aud_vol_status) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS; - msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f); -} - -static inline void cec_ops_report_audio_status(const struct cec_msg *msg, - __u8 *aud_mute_status, - __u8 *aud_vol_status) -{ - *aud_mute_status = msg->msg[2] >> 7; - *aud_vol_status = msg->msg[2] & 0x7f; -} - -static inline void cec_msg_give_audio_status(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS; - msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0; -} - -static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg, - __u8 sys_aud_status) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE; - msg->msg[2] = sys_aud_status; -} - -static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg, - __u8 *sys_aud_status) -{ - *sys_aud_status = msg->msg[2]; -} - -static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg, - bool reply, - __u16 phys_addr) -{ - msg->len = phys_addr == 0xffff ? 2 : 4; - msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; - msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0; - -} - -static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg, - __u16 *phys_addr) -{ - if (msg->len < 4) - *phys_addr = 0xffff; - else - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg, - __u8 sys_aud_status) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS; - msg->msg[2] = sys_aud_status; -} - -static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg, - __u8 *sys_aud_status) -{ - *sys_aud_status = msg->msg[2]; -} - -static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS; - msg->reply = reply ? 
CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0; -} - -static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg, - __u8 num_descriptors, - const __u32 *descriptors) -{ - unsigned int i; - - if (num_descriptors > 4) - num_descriptors = 4; - msg->len = 2 + num_descriptors * 3; - msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR; - for (i = 0; i < num_descriptors; i++) { - msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff; - msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff; - msg->msg[4 + i * 3] = descriptors[i] & 0xff; - } -} - -static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg, - __u8 *num_descriptors, - __u32 *descriptors) -{ - unsigned int i; - - *num_descriptors = (msg->len - 2) / 3; - if (*num_descriptors > 4) - *num_descriptors = 4; - for (i = 0; i < *num_descriptors; i++) - descriptors[i] = (msg->msg[2 + i * 3] << 16) | - (msg->msg[3 + i * 3] << 8) | - msg->msg[4 + i * 3]; -} - -static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg, - bool reply, - __u8 num_descriptors, - const __u8 *audio_format_id, - const __u8 *audio_format_code) -{ - unsigned int i; - - if (num_descriptors > 4) - num_descriptors = 4; - msg->len = 2 + num_descriptors; - msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR; - msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0; - for (i = 0; i < num_descriptors; i++) - msg->msg[2 + i] = (audio_format_id[i] << 6) | - (audio_format_code[i] & 0x3f); -} - -static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg, - __u8 *num_descriptors, - __u8 *audio_format_id, - __u8 *audio_format_code) -{ - unsigned int i; - - *num_descriptors = msg->len - 2; - if (*num_descriptors > 4) - *num_descriptors = 4; - for (i = 0; i < *num_descriptors; i++) { - audio_format_id[i] = msg->msg[2 + i] >> 6; - audio_format_code[i] = msg->msg[2 + i] & 0x3f; - } -} - - -/* Audio Rate Control Feature */ -static inline void cec_msg_set_audio_rate(struct cec_msg *msg, - __u8 audio_rate) -{ - msg->len = 3; - msg->msg[1] = CEC_MSG_SET_AUDIO_RATE; - msg->msg[2] = audio_rate; -} - -static inline void cec_ops_set_audio_rate(const struct cec_msg *msg, - __u8 *audio_rate) -{ - *audio_rate = msg->msg[2]; -} - - -/* Audio Return Channel Control Feature */ -static inline void cec_msg_report_arc_initiated(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED; -} - -static inline void cec_msg_initiate_arc(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_INITIATE_ARC; - msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0; -} - -static inline void cec_msg_request_arc_initiation(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION; - msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0; -} - -static inline void cec_msg_report_arc_terminated(struct cec_msg *msg) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED; -} - -static inline void cec_msg_terminate_arc(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_TERMINATE_ARC; - msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0; -} - -static inline void cec_msg_request_arc_termination(struct cec_msg *msg, - bool reply) -{ - msg->len = 2; - msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION; - msg->reply = reply ? 
CEC_MSG_TERMINATE_ARC : 0; -} - - -/* Dynamic Audio Lipsync Feature */ -/* Only for CEC 2.0 and up */ -static inline void cec_msg_report_current_latency(struct cec_msg *msg, - __u16 phys_addr, - __u8 video_latency, - __u8 low_latency_mode, - __u8 audio_out_compensated, - __u8 audio_out_delay) -{ - msg->len = 7; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; - msg->msg[4] = video_latency; - msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; - msg->msg[6] = audio_out_delay; -} - -static inline void cec_ops_report_current_latency(const struct cec_msg *msg, - __u16 *phys_addr, - __u8 *video_latency, - __u8 *low_latency_mode, - __u8 *audio_out_compensated, - __u8 *audio_out_delay) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *video_latency = msg->msg[4]; - *low_latency_mode = (msg->msg[5] >> 2) & 1; - *audio_out_compensated = msg->msg[5] & 3; - *audio_out_delay = msg->msg[6]; -} - -static inline void cec_msg_request_current_latency(struct cec_msg *msg, - bool reply, - __u16 phys_addr) -{ - msg->len = 4; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY; - msg->msg[2] = phys_addr >> 8; - msg->msg[3] = phys_addr & 0xff; - msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0; -} - -static inline void cec_ops_request_current_latency(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - - -/* Capability Discovery and Control Feature */ -static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg, - __u16 phys_addr1, - __u16 phys_addr2) -{ - msg->len = 9; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; - msg->msg[5] = phys_addr1 >> 8; - msg->msg[6] = phys_addr1 & 0xff; - msg->msg[7] = phys_addr2 >> 8; - msg->msg[8] = phys_addr2 & 0xff; -} - -static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg, - __u16 *phys_addr, - __u16 *phys_addr1, - __u16 *phys_addr2) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; - *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; -} - -static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg, - __u16 target_phys_addr, - __u8 hec_func_state, - __u8 host_func_state, - __u8 enc_func_state, - __u8 cdc_errcode, - __u8 has_field, - __u16 hec_field) -{ - msg->len = has_field ? 
10 : 8; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE; - msg->msg[5] = target_phys_addr >> 8; - msg->msg[6] = target_phys_addr & 0xff; - msg->msg[7] = (hec_func_state << 6) | - (host_func_state << 4) | - (enc_func_state << 2) | - cdc_errcode; - if (has_field) { - msg->msg[8] = hec_field >> 8; - msg->msg[9] = hec_field & 0xff; - } -} - -static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg, - __u16 *phys_addr, - __u16 *target_phys_addr, - __u8 *hec_func_state, - __u8 *host_func_state, - __u8 *enc_func_state, - __u8 *cdc_errcode, - __u8 *has_field, - __u16 *hec_field) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6]; - *hec_func_state = msg->msg[7] >> 6; - *host_func_state = (msg->msg[7] >> 4) & 3; - *enc_func_state = (msg->msg[7] >> 2) & 3; - *cdc_errcode = msg->msg[7] & 3; - *has_field = msg->len >= 10; - *hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0; -} - -static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg, - __u16 phys_addr1, - __u16 phys_addr2, - __u8 hec_set_state, - __u16 phys_addr3, - __u16 phys_addr4, - __u16 phys_addr5) -{ - msg->len = 10; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE; - msg->msg[5] = phys_addr1 >> 8; - msg->msg[6] = phys_addr1 & 0xff; - msg->msg[7] = phys_addr2 >> 8; - msg->msg[8] = phys_addr2 & 0xff; - msg->msg[9] = hec_set_state; - if (phys_addr3 != CEC_PHYS_ADDR_INVALID) { - msg->msg[msg->len++] = phys_addr3 >> 8; - msg->msg[msg->len++] = phys_addr3 & 0xff; - if (phys_addr4 != CEC_PHYS_ADDR_INVALID) { - msg->msg[msg->len++] = phys_addr4 >> 8; - msg->msg[msg->len++] = phys_addr4 & 0xff; - if (phys_addr5 != CEC_PHYS_ADDR_INVALID) { - msg->msg[msg->len++] = phys_addr5 >> 8; - msg->msg[msg->len++] = phys_addr5 & 0xff; - } - } - } -} - -static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg, - __u16 *phys_addr, - __u16 *phys_addr1, - __u16 *phys_addr2, - __u8 *hec_set_state, - __u16 *phys_addr3, - __u16 *phys_addr4, - __u16 *phys_addr5) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; - *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; - *hec_set_state = msg->msg[9]; - *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID; - if (msg->len >= 12) - *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11]; - if (msg->len >= 14) - *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13]; - if (msg->len >= 16) - *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15]; -} - -static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg, - __u16 phys_addr1, - __u8 hec_set_state) -{ - msg->len = 8; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT; - msg->msg[5] = phys_addr1 >> 8; - msg->msg[6] = phys_addr1 & 0xff; - msg->msg[7] = hec_set_state; -} - -static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg, - __u16 *phys_addr, - __u16 *phys_addr1, - __u8 *hec_set_state) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; - *hec_set_state = msg->msg[7]; -} - -static inline void
cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg, - __u16 phys_addr1, - __u16 phys_addr2, - __u16 phys_addr3) -{ - msg->len = 11; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION; - msg->msg[5] = phys_addr1 >> 8; - msg->msg[6] = phys_addr1 & 0xff; - msg->msg[7] = phys_addr2 >> 8; - msg->msg[8] = phys_addr2 & 0xff; - msg->msg[9] = phys_addr3 >> 8; - msg->msg[10] = phys_addr3 & 0xff; -} - -static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg, - __u16 *phys_addr, - __u16 *phys_addr1, - __u16 *phys_addr2, - __u16 *phys_addr3) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; - *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; - *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10]; -} - -static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg) -{ - msg->len = 5; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE; -} - -static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg) -{ - msg->len = 5; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER; -} - -static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg, - __u16 *phys_addr) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; -} - -static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg, - __u8 input_port, - __u8 hpd_state) -{ - msg->len = 6; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE; - msg->msg[5] = (input_port << 4) | hpd_state; -} - -static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg, - __u16 *phys_addr, - __u8 *input_port, - __u8 *hpd_state) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *input_port = msg->msg[5] >> 4; - *hpd_state = msg->msg[5] & 0xf; -} - -static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg, - __u8 hpd_state, - __u8 hpd_error) -{ - msg->len = 6; - msg->msg[0] |= 0xf; /* broadcast */ - msg->msg[1] = CEC_MSG_CDC_MESSAGE; - /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ - msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE; - msg->msg[5] = (hpd_state << 4) | hpd_error; -} - -static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg, - __u16 *phys_addr, - __u8 *hpd_state, - __u8 *hpd_error) -{ - *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; - *hpd_state = msg->msg[5] >> 4; - *hpd_error = msg->msg[5] & 0xf; -} - -#endif diff --git a/include/linux/cec.h b/include/linux/cec.h deleted file mode 100644 index 851968e803fa..000000000000 --- a/include/linux/cec.h +++ /dev/null @@ -1,1014 +0,0 @@ -/* - * cec - HDMI Consumer Electronics Control public header - * - * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
- * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * Alternatively you can redistribute this file under the terms of the - * BSD license as stated below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * 3. The names of its contributors may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * Note: this framework is still in staging and it is likely the API - * will change before it goes out of staging. - * - * Once it is moved out of staging this header will move to uapi. - */ -#ifndef _CEC_UAPI_H -#define _CEC_UAPI_H - -#include <linux/types.h> - -#define CEC_MAX_MSG_SIZE 16 - -/** - * struct cec_msg - CEC message structure. - * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the - * driver when the message transmission has finished. - * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the - * driver when the message was received. - * @len: Length in bytes of the message. - * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE. - * Set to 0 if you want to wait forever. This timeout can also be - * used with CEC_TRANSMIT as the timeout for waiting for a reply. - * If 0, then it will use a 1 second timeout instead of waiting - * forever as is done with CEC_RECEIVE. - * @sequence: The framework assigns a sequence number to messages that are - * sent. This can be used to track replies to previously sent - * messages. - * @flags: Set to 0. - * @msg: The message payload. - * @reply: This field is ignored with CEC_RECEIVE and is only used by - * CEC_TRANSMIT. If non-zero, then wait for a reply with this - * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for - * a possible ABORT reply. If there was an error when sending the - * msg or FeatureAbort was returned, then reply is set to 0. - * If reply is non-zero upon return, then len/msg are set to - * the received message. - * If reply is zero upon return and status has the - * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to - * the received feature abort message. - * If reply is zero upon return and status has the - * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at - * all. If reply is non-zero for CEC_TRANSMIT and the message is a - * broadcast, then -EINVAL is returned. - * if reply is non-zero, then timeout is set to 1000 (the required - * maximum response time). 
- * @rx_status: The message receive status bits. Set by the driver. - * @tx_status: The message transmit status bits. Set by the driver. - * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver. - * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver. - * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the - * driver. - * @tx_error_cnt: The number of 'Error' events. Set by the driver. - */ -struct cec_msg { - __u64 tx_ts; - __u64 rx_ts; - __u32 len; - __u32 timeout; - __u32 sequence; - __u32 flags; - __u8 msg[CEC_MAX_MSG_SIZE]; - __u8 reply; - __u8 rx_status; - __u8 tx_status; - __u8 tx_arb_lost_cnt; - __u8 tx_nack_cnt; - __u8 tx_low_drive_cnt; - __u8 tx_error_cnt; -}; - -/** - * cec_msg_initiator - return the initiator's logical address. - * @msg: the message structure - */ -static inline __u8 cec_msg_initiator(const struct cec_msg *msg) -{ - return msg->msg[0] >> 4; -} - -/** - * cec_msg_destination - return the destination's logical address. - * @msg: the message structure - */ -static inline __u8 cec_msg_destination(const struct cec_msg *msg) -{ - return msg->msg[0] & 0xf; -} - -/** - * cec_msg_opcode - return the opcode of the message, -1 for poll - * @msg: the message structure - */ -static inline int cec_msg_opcode(const struct cec_msg *msg) -{ - return msg->len > 1 ? msg->msg[1] : -1; -} - -/** - * cec_msg_is_broadcast - return true if this is a broadcast message. - * @msg: the message structure - */ -static inline bool cec_msg_is_broadcast(const struct cec_msg *msg) -{ - return (msg->msg[0] & 0xf) == 0xf; -} - -/** - * cec_msg_init - initialize the message structure. - * @msg: the message structure - * @initiator: the logical address of the initiator - * @destination: the logical address of the destination (0xf for broadcast) - * - * The whole structure is zeroed, the len field is set to 1 (i.e. a poll - * message) and the initiator and destination are filled in. - */ -static inline void cec_msg_init(struct cec_msg *msg, - __u8 initiator, __u8 destination) -{ - memset(msg, 0, sizeof(*msg)); - msg->msg[0] = (initiator << 4) | destination; - msg->len = 1; -} - -/** - * cec_msg_set_reply_to - fill in destination/initiator in a reply message. - * @msg: the message structure for the reply - * @orig: the original message structure - * - * Set the msg destination to the orig initiator and the msg initiator to the - * orig destination. Note that msg and orig may be the same pointer, in which - * case the change is done in place.
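As a quick illustration of the accessors above (a sketch, not part of the header; it assumes a message that was filled in by the framework and uses <stdio.h> for output):

#include <stdio.h>
#include <linux/cec.h>

/* Classify a message received via CEC_RECEIVE using the accessors above. */
static void dump_msg(const struct cec_msg *msg)
{
	if (cec_msg_opcode(msg) < 0) {
		/* len == 1: a poll message, no opcode */
		printf("poll from %u to %u\n",
		       cec_msg_initiator(msg), cec_msg_destination(msg));
		return;
	}
	printf("opcode 0x%02x from %u, %s\n",
	       cec_msg_opcode(msg), cec_msg_initiator(msg),
	       cec_msg_is_broadcast(msg) ? "broadcast" : "directed");
}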
- */ -static inline void cec_msg_set_reply_to(struct cec_msg *msg, - struct cec_msg *orig) -{ - /* The destination becomes the initiator and vice versa */ - msg->msg[0] = (cec_msg_destination(orig) << 4) | - cec_msg_initiator(orig); - msg->reply = msg->timeout = 0; -} - -/* cec status field */ -#define CEC_TX_STATUS_OK (1 << 0) -#define CEC_TX_STATUS_ARB_LOST (1 << 1) -#define CEC_TX_STATUS_NACK (1 << 2) -#define CEC_TX_STATUS_LOW_DRIVE (1 << 3) -#define CEC_TX_STATUS_ERROR (1 << 4) -#define CEC_TX_STATUS_MAX_RETRIES (1 << 5) - -#define CEC_RX_STATUS_OK (1 << 0) -#define CEC_RX_STATUS_TIMEOUT (1 << 1) -#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2) - -static inline bool cec_msg_status_is_ok(const struct cec_msg *msg) -{ - if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK)) - return false; - if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK)) - return false; - if (!msg->tx_status && !msg->rx_status) - return false; - return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT); -} - -#define CEC_LOG_ADDR_INVALID 0xff -#define CEC_PHYS_ADDR_INVALID 0xffff - -/* - * The maximum number of logical addresses one device can be assigned to. - * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The - * Analog Devices CEC hardware supports 3. So let's go wild and go for 4. - */ -#define CEC_MAX_LOG_ADDRS 4 - -/* The logical addresses defined by CEC 2.0 */ -#define CEC_LOG_ADDR_TV 0 -#define CEC_LOG_ADDR_RECORD_1 1 -#define CEC_LOG_ADDR_RECORD_2 2 -#define CEC_LOG_ADDR_TUNER_1 3 -#define CEC_LOG_ADDR_PLAYBACK_1 4 -#define CEC_LOG_ADDR_AUDIOSYSTEM 5 -#define CEC_LOG_ADDR_TUNER_2 6 -#define CEC_LOG_ADDR_TUNER_3 7 -#define CEC_LOG_ADDR_PLAYBACK_2 8 -#define CEC_LOG_ADDR_RECORD_3 9 -#define CEC_LOG_ADDR_TUNER_4 10 -#define CEC_LOG_ADDR_PLAYBACK_3 11 -#define CEC_LOG_ADDR_BACKUP_1 12 -#define CEC_LOG_ADDR_BACKUP_2 13 -#define CEC_LOG_ADDR_SPECIFIC 14 -#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */ -#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */ - -/* The logical address types that the CEC device wants to claim */ -#define CEC_LOG_ADDR_TYPE_TV 0 -#define CEC_LOG_ADDR_TYPE_RECORD 1 -#define CEC_LOG_ADDR_TYPE_TUNER 2 -#define CEC_LOG_ADDR_TYPE_PLAYBACK 3 -#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4 -#define CEC_LOG_ADDR_TYPE_SPECIFIC 5 -#define CEC_LOG_ADDR_TYPE_UNREGISTERED 6 -/* - * Switches should use UNREGISTERED. - * Processors should use SPECIFIC.
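For example, a transmit result could be checked with cec_msg_status_is_ok() along these lines (a hedged sketch; the error reporting is made up, and the message is assumed to come back from the CEC_TRANSMIT ioctl defined later in this header):

#include <stdio.h>
#include <linux/cec.h>

/* Interpret the status of a message returned by CEC_TRANSMIT. */
static int transmit_ok(const struct cec_msg *msg)
{
	if (cec_msg_status_is_ok(msg))
		return 1;
	if (msg->tx_status & CEC_TX_STATUS_MAX_RETRIES)
		fprintf(stderr, "no ack after maximum retries\n");
	else if (msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT)
		fprintf(stderr, "destination sent FEATURE ABORT\n");
	return 0;
}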
- */ - -#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV) -#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \ - (1 << CEC_LOG_ADDR_RECORD_2) | \ - (1 << CEC_LOG_ADDR_RECORD_3)) -#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \ - (1 << CEC_LOG_ADDR_TUNER_2) | \ - (1 << CEC_LOG_ADDR_TUNER_3) | \ - (1 << CEC_LOG_ADDR_TUNER_4)) -#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \ - (1 << CEC_LOG_ADDR_PLAYBACK_2) | \ - (1 << CEC_LOG_ADDR_PLAYBACK_3)) -#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM) -#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \ - (1 << CEC_LOG_ADDR_BACKUP_2)) -#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC) -#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED) - -static inline bool cec_has_tv(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_TV; -} - -static inline bool cec_has_record(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD; -} - -static inline bool cec_has_tuner(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER; -} - -static inline bool cec_has_playback(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK; -} - -static inline bool cec_has_audiosystem(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM; -} - -static inline bool cec_has_backup(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP; -} - -static inline bool cec_has_specific(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC; -} - -static inline bool cec_is_unregistered(__u16 log_addr_mask) -{ - return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED; -} - -static inline bool cec_is_unconfigured(__u16 log_addr_mask) -{ - return log_addr_mask == 0; -} - -/* - * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID - * should be disabled (CEC_S_VENDOR_ID) - */ -#define CEC_VENDOR_ID_NONE 0xffffffff - -/* The message handling modes */ -/* Modes for initiator */ -#define CEC_MODE_NO_INITIATOR (0x0 << 0) -#define CEC_MODE_INITIATOR (0x1 << 0) -#define CEC_MODE_EXCL_INITIATOR (0x2 << 0) -#define CEC_MODE_INITIATOR_MSK 0x0f - -/* Modes for follower */ -#define CEC_MODE_NO_FOLLOWER (0x0 << 4) -#define CEC_MODE_FOLLOWER (0x1 << 4) -#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4) -#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4) -#define CEC_MODE_MONITOR (0xe << 4) -#define CEC_MODE_MONITOR_ALL (0xf << 4) -#define CEC_MODE_FOLLOWER_MSK 0xf0 - -/* Userspace has to configure the physical address */ -#define CEC_CAP_PHYS_ADDR (1 << 0) -/* Userspace has to configure the logical addresses */ -#define CEC_CAP_LOG_ADDRS (1 << 1) -/* Userspace can transmit messages (and thus become follower as well) */ -#define CEC_CAP_TRANSMIT (1 << 2) -/* - * Passthrough all messages instead of processing them. - */ -#define CEC_CAP_PASSTHROUGH (1 << 3) -/* Supports remote control */ -#define CEC_CAP_RC (1 << 4) -/* Hardware can monitor all messages, not just directed and broadcast. */ -#define CEC_CAP_MONITOR_ALL (1 << 5) - -/** - * struct cec_caps - CEC capabilities structure. - * @driver: name of the CEC device driver. - * @name: name of the CEC device. @driver + @name must be unique. - * @available_log_addrs: number of available logical addresses. - * @capabilities: capabilities of the CEC adapter. - * @version: version of the CEC adapter framework. 
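A small sketch of how the mask helpers above might be used on the log_addr_mask reported by the adapter (illustrative only; the role strings and function name are made up):

#include <linux/cec.h>

/* Map a claimed logical address mask to a human-readable role. */
static const char *describe_role(__u16 log_addr_mask)
{
	if (cec_is_unconfigured(log_addr_mask))
		return "unconfigured";
	if (cec_has_tv(log_addr_mask))
		return "TV";
	if (cec_has_playback(log_addr_mask))
		return "playback device";
	if (cec_has_audiosystem(log_addr_mask))
		return "audio system";
	if (cec_is_unregistered(log_addr_mask))
		return "unregistered";
	return "other";
}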
- */ -struct cec_caps { - char driver[32]; - char name[32]; - __u32 available_log_addrs; - __u32 capabilities; - __u32 version; -}; - -/** - * struct cec_log_addrs - CEC logical addresses structure. - * @log_addr: the claimed logical addresses. Set by the driver. - * @log_addr_mask: current logical address mask. Set by the driver. - * @cec_version: the CEC version that the adapter should implement. Set by the - * caller. - * @num_log_addrs: how many logical addresses should be claimed. Set by the - * caller. - * @vendor_id: the vendor ID of the device. Set by the caller. - * @flags: flags. - * @osd_name: the OSD name of the device. Set by the caller. - * @primary_device_type: the primary device type for each logical address. - * Set by the caller. - * @log_addr_type: the logical address types. Set by the caller. - * @all_device_types: CEC 2.0: all device types represented by the logical - * address. Set by the caller. - * @features: CEC 2.0: The logical address features. Set by the caller. - */ -struct cec_log_addrs { - __u8 log_addr[CEC_MAX_LOG_ADDRS]; - __u16 log_addr_mask; - __u8 cec_version; - __u8 num_log_addrs; - __u32 vendor_id; - __u32 flags; - char osd_name[15]; - __u8 primary_device_type[CEC_MAX_LOG_ADDRS]; - __u8 log_addr_type[CEC_MAX_LOG_ADDRS]; - - /* CEC 2.0 */ - __u8 all_device_types[CEC_MAX_LOG_ADDRS]; - __u8 features[CEC_MAX_LOG_ADDRS][12]; -}; - -/* Allow a fallback to unregistered */ -#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0) - -/* Events */ - -/* Event that occurs when the adapter state changes */ -#define CEC_EVENT_STATE_CHANGE 1 -/* - * This event is sent when messages are lost because the application - * didn't empty the message queue in time - */ -#define CEC_EVENT_LOST_MSGS 2 - -#define CEC_EVENT_FL_INITIAL_STATE (1 << 0) - -/** - * struct cec_event_state_change - used when the CEC adapter changes state. - * @phys_addr: the current physical address - * @log_addr_mask: the current logical address mask - */ -struct cec_event_state_change { - __u16 phys_addr; - __u16 log_addr_mask; -}; - -/** - * struct cec_event_lost_msgs - tells you how many messages were lost. - * @lost_msgs: how many messages were lost. - */ -struct cec_event_lost_msgs { - __u32 lost_msgs; -}; - -/** - * struct cec_event - CEC event structure - * @ts: the timestamp of when the event was sent. - * @event: the event. - * @flags: event flags. - * @state_change: the event payload for CEC_EVENT_STATE_CHANGE. - * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS. - * @raw: array to pad the union. - */ -struct cec_event { - __u64 ts; - __u32 event; - __u32 flags; - union { - struct cec_event_state_change state_change; - struct cec_event_lost_msgs lost_msgs; - __u32 raw[16]; - }; -}; - -/* ioctls */ - -/* Adapter capabilities */ -#define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps) - -/* - * phys_addr is either 0 (if this is the CEC root device) - * or a valid physical address obtained from the sink's EDID - * as read by this CEC device (if this is a source device) - * or a physical address obtained and modified from a sink - * EDID and used for a sink CEC device. - * If nothing is connected, then phys_addr is 0xffff. - * See HDMI 1.4b, section 8.7 (Physical Address). - * - * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled - * internally. - */ -#define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16) -#define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16) - -/* - * Configure the CEC adapter. It sets the device type and which - * logical types it will try to claim.
It will return which - * logical addresses it could actually claim. - * An error is returned if the adapter is disabled or if there - * is no physical address assigned. - */ - -#define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs) -#define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs) - -/* Transmit/receive a CEC command */ -#define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg) -#define CEC_RECEIVE _IOWR('a', 6, struct cec_msg) - -/* Dequeue CEC events */ -#define CEC_DQEVENT _IOWR('a', 7, struct cec_event) - -/* - * Get and set the message handling mode for this filehandle. - */ -#define CEC_G_MODE _IOR('a', 8, __u32) -#define CEC_S_MODE _IOW('a', 9, __u32) - -/* - * The remainder of this header defines all CEC messages and operands. - * The format matters since the cec-ctl utility parses it to generate - * code for implementing all these messages. - * - * Comments ending with 'Feature' group messages for each feature. - * If messages are part of multiple features, then the "Has also" - * comment is used to list the previously defined messages that are - * supported by the feature. - * - * Before operands are defined a comment is added that gives the - * name of the operand and in brackets the variable name of the - * corresponding argument in the cec-funcs.h function. - */ - -/* Messages */ - -/* One Touch Play Feature */ -#define CEC_MSG_ACTIVE_SOURCE 0x82 -#define CEC_MSG_IMAGE_VIEW_ON 0x04 -#define CEC_MSG_TEXT_VIEW_ON 0x0d - - -/* Routing Control Feature */ - -/* - * Has also: - * CEC_MSG_ACTIVE_SOURCE - */ - -#define CEC_MSG_INACTIVE_SOURCE 0x9d -#define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85 -#define CEC_MSG_ROUTING_CHANGE 0x80 -#define CEC_MSG_ROUTING_INFORMATION 0x81 -#define CEC_MSG_SET_STREAM_PATH 0x86 - - -/* Standby Feature */ -#define CEC_MSG_STANDBY 0x36 - - -/* One Touch Record Feature */ -#define CEC_MSG_RECORD_OFF 0x0b -#define CEC_MSG_RECORD_ON 0x09 -/* Record Source Type Operand (rec_src_type) */ -#define CEC_OP_RECORD_SRC_OWN 1 -#define CEC_OP_RECORD_SRC_DIGITAL 2 -#define CEC_OP_RECORD_SRC_ANALOG 3 -#define CEC_OP_RECORD_SRC_EXT_PLUG 4 -#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5 -/* Service Identification Method Operand (service_id_method) */ -#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0 -#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1 -/* Digital Service Broadcast System Operand (dig_bcast_system) */ -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19 -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a -#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b -/* Analogue Broadcast Type Operand (ana_bcast_type) */ -#define CEC_OP_ANA_BCAST_TYPE_CABLE 0 -#define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1 -#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2 -/* Broadcast System Operand (bcast_system) */ -#define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00 -#define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */ -#define CEC_OP_BCAST_SYSTEM_PAL_M 0x02 -#define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03 -#define CEC_OP_BCAST_SYSTEM_PAL_I 0x04 -#define
CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05 -#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06 -#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07 -#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08 -#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f -/* Channel Number Format Operand (channel_number_fmt) */ -#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01 -#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02 - -#define CEC_MSG_RECORD_STATUS 0x0a -/* Record Status Operand (rec_status) */ -#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01 -#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02 -#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03 -#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04 -#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05 -#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06 -#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07 -#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09 -#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a -#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b -#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c -#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d -#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e -#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10 -#define CEC_OP_RECORD_STATUS_PLAYING 0x11 -#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12 -#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13 -#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14 -#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15 -#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16 -#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17 -#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a -#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b -#define CEC_OP_RECORD_STATUS_OTHER 0x1f - -#define CEC_MSG_RECORD_TV_SCREEN 0x0f - - -/* Timer Programming Feature */ -#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33 -/* Recording Sequence Operand (recording_seq) */ -#define CEC_OP_REC_SEQ_SUNDAY 0x01 -#define CEC_OP_REC_SEQ_MONDAY 0x02 -#define CEC_OP_REC_SEQ_TUESDAY 0x04 -#define CEC_OP_REC_SEQ_WEDNESDAY 0x08 -#define CEC_OP_REC_SEQ_THURSDAY 0x10 -#define CEC_OP_REC_SEQ_FRIDAY 0x20 -#define CEC_OP_REC_SEQ_SATERDAY 0x40 -#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 - -#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 - -#define CEC_MSG_CLEAR_EXT_TIMER 0xa1 -/* External Source Specifier Operand (ext_src_spec) */ -#define CEC_OP_EXT_SRC_PLUG 0x04 -#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05 - -#define CEC_MSG_SET_ANALOGUE_TIMER 0x34 -#define CEC_MSG_SET_DIGITAL_TIMER 0x97 -#define CEC_MSG_SET_EXT_TIMER 0xa2 - -#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67 -#define CEC_MSG_TIMER_CLEARED_STATUS 0x43 -/* Timer Cleared Status Data Operand (timer_cleared_status) */ -#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00 -#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01 -#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02 -#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80 - -#define CEC_MSG_TIMER_STATUS 0x35 -/* Timer Overlap Warning Operand (timer_overlap_warning) */ -#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0 -#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1 -/* Media Info Operand (media_info) */ -#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0 -#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1 -#define CEC_OP_MEDIA_INFO_NO_MEDIA 2 -/* Programmed Indicator Operand (prog_indicator) */ -#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0 -#define CEC_OP_PROG_IND_PROGRAMMED 1 -/* Programmed Info Operand (prog_info) */ -#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08 -#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09 -#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b -#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a -/* Not Programmed Error Info Operand (prog_error) */ -#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01 -#define 
CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02 -#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03 -#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04 -#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05 -#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06 -#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07 -#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08 -#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09 -#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a -#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e - - -/* System Information Feature */ -#define CEC_MSG_CEC_VERSION 0x9e -/* CEC Version Operand (cec_version) */ -#define CEC_OP_CEC_VERSION_1_3A 4 -#define CEC_OP_CEC_VERSION_1_4 5 -#define CEC_OP_CEC_VERSION_2_0 6 - -#define CEC_MSG_GET_CEC_VERSION 0x9f -#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83 -#define CEC_MSG_GET_MENU_LANGUAGE 0x91 -#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84 -/* Primary Device Type Operand (prim_devtype) */ -#define CEC_OP_PRIM_DEVTYPE_TV 0 -#define CEC_OP_PRIM_DEVTYPE_RECORD 1 -#define CEC_OP_PRIM_DEVTYPE_TUNER 3 -#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4 -#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5 -#define CEC_OP_PRIM_DEVTYPE_SWITCH 6 -#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7 - -#define CEC_MSG_SET_MENU_LANGUAGE 0x32 -#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */ -/* All Device Types Operand (all_device_types) */ -#define CEC_OP_ALL_DEVTYPE_TV 0x80 -#define CEC_OP_ALL_DEVTYPE_RECORD 0x40 -#define CEC_OP_ALL_DEVTYPE_TUNER 0x20 -#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10 -#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08 -#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04 -/* - * And if you are wondering what happened to PROCESSOR devices: those should - * be mapped to a SWITCH. - */ - -/* Valid for RC Profile and Device Feature operands */ -#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */ -/* RC Profile Operand (rc_profile) */ -#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00 -#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02 -#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06 -#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a -#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e -#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50 -#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48 -#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44 -#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42 -#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41 -/* Device Feature Operand (dev_features) */ -#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40 -#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20 -#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10 -#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08 -#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04 -#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02 - -#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */ - - -/* Deck Control Feature */ -#define CEC_MSG_DECK_CONTROL 0x42 -/* Deck Control Mode Operand (deck_control_mode) */ -#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1 -#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2 -#define CEC_OP_DECK_CTL_MODE_STOP 3 -#define CEC_OP_DECK_CTL_MODE_EJECT 4 - -#define CEC_MSG_DECK_STATUS 0x1b -/* Deck Info Operand (deck_info) */ -#define CEC_OP_DECK_INFO_PLAY 0x11 -#define CEC_OP_DECK_INFO_RECORD 0x12 -#define CEC_OP_DECK_INFO_PLAY_REV 0x13 -#define CEC_OP_DECK_INFO_STILL 0x14 -#define CEC_OP_DECK_INFO_SLOW 0x15 -#define CEC_OP_DECK_INFO_SLOW_REV 0x16 -#define CEC_OP_DECK_INFO_FAST_FWD 0x17 -#define CEC_OP_DECK_INFO_FAST_REV 0x18 -#define CEC_OP_DECK_INFO_NO_MEDIA 0x19 -#define CEC_OP_DECK_INFO_STOP 0x1a -#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b -#define CEC_OP_DECK_INFO_SKIP_REV 0x1c -#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d -#define
CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e -#define CEC_OP_DECK_INFO_OTHER 0x1f - -#define CEC_MSG_GIVE_DECK_STATUS 0x1a -/* Status Request Operand (status_req) */ -#define CEC_OP_STATUS_REQ_ON 1 -#define CEC_OP_STATUS_REQ_OFF 2 -#define CEC_OP_STATUS_REQ_ONCE 3 - -#define CEC_MSG_PLAY 0x41 -/* Play Mode Operand (play_mode) */ -#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24 -#define CEC_OP_PLAY_MODE_PLAY_REV 0x20 -#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25 -#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05 -#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06 -#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07 -#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09 -#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a -#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b -#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15 -#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16 -#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17 -#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19 -#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a -#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b - - -/* Tuner Control Feature */ -#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08 -#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92 -#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 -#define CEC_MSG_TUNER_DEVICE_STATUS 0x07 -/* Recording Flag Operand (rec_flag) */ -#define CEC_OP_REC_FLAG_USED 0 -#define CEC_OP_REC_FLAG_NOT_USED 1 -/* Tuner Display Info Operand (tuner_display_info) */ -#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 -#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 -#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2 - -#define CEC_MSG_TUNER_STEP_DECREMENT 0x06 -#define CEC_MSG_TUNER_STEP_INCREMENT 0x05 - - -/* Vendor Specific Commands Feature */ - -/* - * Has also: - * CEC_MSG_CEC_VERSION - * CEC_MSG_GET_CEC_VERSION - */ -#define CEC_MSG_DEVICE_VENDOR_ID 0x87 -#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c -#define CEC_MSG_VENDOR_COMMAND 0x89 -#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0 -#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a -#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b - - -/* OSD Display Feature */ -#define CEC_MSG_SET_OSD_STRING 0x64 -/* Display Control Operand (disp_ctl) */ -#define CEC_OP_DISP_CTL_DEFAULT 0x00 -#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40 -#define CEC_OP_DISP_CTL_CLEAR 0x80 - - -/* Device OSD Transfer Feature */ -#define CEC_MSG_GIVE_OSD_NAME 0x46 -#define CEC_MSG_SET_OSD_NAME 0x47 - - -/* Device Menu Control Feature */ -#define CEC_MSG_MENU_REQUEST 0x8d -/* Menu Request Type Operand (menu_req) */ -#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00 -#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01 -#define CEC_OP_MENU_REQUEST_QUERY 0x02 - -#define CEC_MSG_MENU_STATUS 0x8e -/* Menu State Operand (menu_state) */ -#define CEC_OP_MENU_STATE_ACTIVATED 0x00 -#define CEC_OP_MENU_STATE_DEACTIVATED 0x01 - -#define CEC_MSG_USER_CONTROL_PRESSED 0x44 -/* UI Broadcast Type Operand (ui_bcast_type) */ -#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00 -#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01 -#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10 -#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20 -#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30 -#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90 -#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91 -#define CEC_OP_UI_BCAST_TYPE_IP 0xa0 -/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */ -#define 
CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10 -#define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20 -#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80 -#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90 -#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0 -#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1 -#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2 -#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3 -#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1 -#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2 -#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3 - -#define CEC_MSG_USER_CONTROL_RELEASED 0x45 - - -/* Remote Control Passthrough Feature */ - -/* - * Has also: - * CEC_MSG_USER_CONTROL_PRESSED - * CEC_MSG_USER_CONTROL_RELEASED - */ - - -/* Power Status Feature */ -#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f -#define CEC_MSG_REPORT_POWER_STATUS 0x90 -/* Power Status Operand (pwr_state) */ -#define CEC_OP_POWER_STATUS_ON 0 -#define CEC_OP_POWER_STATUS_STANDBY 1 -#define CEC_OP_POWER_STATUS_TO_ON 2 -#define CEC_OP_POWER_STATUS_TO_STANDBY 3 - - -/* General Protocol Messages */ -#define CEC_MSG_FEATURE_ABORT 0x00 -/* Abort Reason Operand (reason) */ -#define CEC_OP_ABORT_UNRECOGNIZED_OP 0 -#define CEC_OP_ABORT_INCORRECT_MODE 1 -#define CEC_OP_ABORT_NO_SOURCE 2 -#define CEC_OP_ABORT_INVALID_OP 3 -#define CEC_OP_ABORT_REFUSED 4 -#define CEC_OP_ABORT_UNDETERMINED 5 - -#define CEC_MSG_ABORT 0xff - - -/* System Audio Control Feature */ - -/* - * Has also: - * CEC_MSG_USER_CONTROL_PRESSED - * CEC_MSG_USER_CONTROL_RELEASED - */ -#define CEC_MSG_GIVE_AUDIO_STATUS 0x71 -#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d -#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a -/* Audio Mute Status Operand (aud_mute_status) */ -#define CEC_OP_AUD_MUTE_STATUS_OFF 0 -#define CEC_OP_AUD_MUTE_STATUS_ON 1 - -#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3 -#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4 -#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72 -/* System Audio Status Operand (sys_aud_status) */ -#define CEC_OP_SYS_AUD_STATUS_OFF 0 -#define CEC_OP_SYS_AUD_STATUS_ON 1 - -#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70 -#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e -/* Audio Format ID Operand (audio_format_id) */ -#define CEC_OP_AUD_FMT_ID_CEA861 0 -#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 - - -/* Audio Rate Control Feature */ -#define CEC_MSG_SET_AUDIO_RATE 0x9a -/* Audio Rate Operand (audio_rate) */ -#define CEC_OP_AUD_RATE_OFF 0 -#define CEC_OP_AUD_RATE_WIDE_STD 1 -#define CEC_OP_AUD_RATE_WIDE_FAST 2 -#define CEC_OP_AUD_RATE_WIDE_SLOW 3 -#define CEC_OP_AUD_RATE_NARROW_STD 4 -#define CEC_OP_AUD_RATE_NARROW_FAST 5 -#define CEC_OP_AUD_RATE_NARROW_SLOW 6 - - -/* Audio Return Channel Control Feature */ -#define CEC_MSG_INITIATE_ARC 0xc0 -#define CEC_MSG_REPORT_ARC_INITIATED 0xc1 -#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2 -#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3 -#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4 -#define CEC_MSG_TERMINATE_ARC 0xc5 - - -/* Dynamic Audio Lipsync Feature */ -/* Only for CEC 2.0 and up */ -#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7 -#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8 -/* Low Latency Mode Operand (low_latency_mode) */ -#define CEC_OP_LOW_LATENCY_MODE_OFF 0 -#define CEC_OP_LOW_LATENCY_MODE_ON 1 -/* Audio Output Compensated Operand (audio_out_compensated) */ -#define CEC_OP_AUD_OUT_COMPENSATED_NA 0 -#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1 -#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2 -#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3 - - -/* Capability Discovery and Control Feature */ -#define CEC_MSG_CDC_MESSAGE 0xf8 -/* 
Ethernet-over-HDMI: nobody ever does this... */ -#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00 -#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01 -/* HEC Functionality State Operand (hec_func_state) */ -#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0 -#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1 -#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2 -#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3 -/* Host Functionality State Operand (host_func_state) */ -#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0 -#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1 -#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2 -/* ENC Functionality State Operand (enc_func_state) */ -#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0 -#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1 -#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2 -/* CDC Error Code Operand (cdc_errcode) */ -#define CEC_OP_CDC_ERROR_CODE_NONE 0 -#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1 -#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2 -#define CEC_OP_CDC_ERROR_CODE_OTHER 3 -/* HEC Support Operand (hec_support) */ -#define CEC_OP_HEC_SUPPORT_NO 0 -#define CEC_OP_HEC_SUPPORT_YES 1 -/* HEC Activation Operand (hec_activation) */ -#define CEC_OP_HEC_ACTIVATION_ON 0 -#define CEC_OP_HEC_ACTIVATION_OFF 1 - -#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02 -#define CEC_MSG_CDC_HEC_SET_STATE 0x03 -/* HEC Set State Operand (hec_set_state) */ -#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0 -#define CEC_OP_HEC_SET_STATE_ACTIVATE 1 - -#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04 -#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05 -#define CEC_MSG_CDC_HEC_DISCOVER 0x06 -/* Hotplug Detect messages */ -#define CEC_MSG_CDC_HPD_SET_STATE 0x10 -/* HPD State Operand (hpd_state) */ -#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0 -#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1 -#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2 -#define CEC_OP_HPD_STATE_EDID_DISABLE 3 -#define CEC_OP_HPD_STATE_EDID_ENABLE 4 -#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5 -#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11 -/* HPD Error Code Operand (hpd_error) */ -#define CEC_OP_HPD_ERROR_NONE 0 -#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1 -#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2 -#define CEC_OP_HPD_ERROR_OTHER 3 -#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4 - -#endif diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 374bb1c4ef52..a6747789fe5c 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -64,7 +64,7 @@ struct ceph_auth_client_ops { int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, - struct ceph_authorizer *a, size_t len); + struct ceph_authorizer *a); void (*invalidate_authorizer)(struct ceph_auth_client *ac, int peer_type); @@ -118,8 +118,7 @@ extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *a); extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, - struct ceph_authorizer *a, - size_t len); + struct ceph_authorizer *a); extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index f96de8de4fa7..f4b2ee18f38c 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -653,6 +653,9 @@ enum { extern const char *ceph_cap_op_name(int op); +/* flags field in client cap messages (version >= 10) */ +#define CEPH_CLIENT_CAPS_SYNC (0x1) + /* * caps message, used for capability callbacks, 
acks, requests, etc. */ diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 87ed09f54800..8ed5dc505fbb 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h @@ -31,6 +31,10 @@ struct ceph_mdsmap { int m_num_data_pg_pools; u64 *m_data_pg_pools; u64 m_cas_pg_pool; + + bool m_enabled; + bool m_damaged; + int m_num_laggy; }; static inline struct ceph_entity_addr * @@ -59,5 +63,6 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); +extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); #endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 8dbd7879fdc6..c5c4c713e00f 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -1,7 +1,7 @@ #ifndef __FS_CEPH_MESSENGER_H #define __FS_CEPH_MESSENGER_H -#include <linux/blk_types.h> +#include <linux/bvec.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/net.h> @@ -30,7 +30,7 @@ struct ceph_connection_operations { struct ceph_auth_handshake *(*get_authorizer) ( struct ceph_connection *con, int *proto, int force_new); - int (*verify_authorizer_reply) (struct ceph_connection *con, int len); + int (*verify_authorizer_reply) (struct ceph_connection *con); int (*invalidate_authorizer)(struct ceph_connection *con); /* there was some error on the socket (disconnect, whatever) */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 96337b15a60d..03a6653d329a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -176,7 +176,7 @@ struct ceph_osd_request { struct kref r_kref; bool r_mempool; struct completion r_completion; - struct completion r_safe_completion; /* fsync waiter */ + struct completion r_done_completion; /* fsync waiter */ ceph_osdc_callback_t r_callback; ceph_osdc_unsafe_callback_t r_unsafe_callback; struct list_head r_unsafe_item; @@ -258,6 +258,8 @@ struct ceph_watch_item { struct ceph_entity_addr addr; }; +#define CEPH_LINGER_ID_START 0xffff000000000000ULL + struct ceph_osd_client { struct ceph_client *client; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 5b17de62c962..861b4677fc5b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -16,6 +16,7 @@ #include <linux/percpu-refcount.h> #include <linux/percpu-rwsem.h> #include <linux/workqueue.h> +#include <linux/bpf-cgroup.h> #ifdef CONFIG_CGROUPS @@ -300,6 +301,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + /* used to store eBPF programs */ + struct cgroup_bpf bpf; + /* ids of the ancestors at each level including self */ int ancestor_ids[]; }; diff --git a/include/linux/clk.h b/include/linux/clk.h index 123c02788807..e9d36b3e49de 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -17,8 +17,9 @@ #include <linux/notifier.h> struct device; - struct clk; +struct device_node; +struct of_phandle_args; /** * DOC: clk notifier callback types @@ -249,6 +250,23 @@ struct clk *clk_get(struct device *dev, const char *id); struct clk *devm_clk_get(struct device *dev, const char *id); /** + * devm_get_clk_from_child - lookup and obtain a managed reference to a + * clock producer from child node. 
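A consumer of the new devm_get_clk_from_child() helper might look roughly like this (a sketch only; the "port" child node name, the "bus" clock name and the foo_ prefix are hypothetical):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

/* Hypothetical probe fragment: the clock is described in a child node
 * of the device's DT node rather than in the device node itself. */
static int foo_enable_bus_clock(struct device *dev)
{
	struct device_node *child;
	struct clk *clk;

	child = of_get_child_by_name(dev->of_node, "port");
	if (!child)
		return -ENODEV;

	clk = devm_get_clk_from_child(dev, child, "bus");
	of_node_put(child);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Managed reference: no explicit clk_put() needed on unbind. */
	return clk_prepare_enable(clk);
}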
+ * @dev: device for clock "consumer" + * @np: pointer to clock consumer node + * @con_id: clock consumer ID + * + * This function parses the clocks, and uses them to look up the + * struct clk from the registered list of clock providers by using + * @np and @con_id + * + * The clock will automatically be freed when the device is unbound + * from the bus. + */ +struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id); + +/** * clk_enable - inform the system when the clock source should be running. * @clk: clock source * @@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id) +{ + return NULL; +} + static inline void clk_put(struct clk *clk) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} @@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -struct device_node; -struct of_phandle_args; - #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get(struct device_node *np, int index); struct clk *of_clk_get_by_name(struct device_node *np, const char *name); diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index ba6fa4148515..9ebf1f8243bb 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -20,10 +20,6 @@ struct device; struct device_node; struct generic_pm_domain; -void r8a7778_clocks_init(u32 mode); -void r8a7779_clocks_init(u32 mode); -void rcar_gen2_clocks_init(u32 mode); - void cpg_mstp_add_clk_domain(struct device_node *np); #ifdef CONFIG_CLK_RENESAS_CPG_MSTP int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 08398182f56e..e315d04a2fd9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -75,8 +75,8 @@ struct module; * structure. */ struct clocksource { - cycle_t (*read)(struct clocksource *cs); - cycle_t mask; + u64 (*read)(struct clocksource *cs); + u64 mask; u32 mult; u32 shift; u64 max_idle_ns; @@ -98,8 +98,8 @@ struct clocksource { #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ struct list_head wd_list; - cycle_t cs_last; - cycle_t wd_last; + u64 cs_last; + u64 wd_last; #endif struct module *owner; }; @@ -117,7 +117,7 @@ struct clocksource { #define CLOCK_SOURCE_RESELECT 0x100 /* simplify initialization of mask field */ -#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) +#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) { @@ -169,11 +169,14 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) * - * Converts cycles to nanoseconds, using the given mult and shift. + * Converts clocksource cycles to nanoseconds, using the given @mult and @shift. + * The code is optimized for performance and is not intended to work + * with absolute clocksource cycles (as those will easily overflow), + * but is only intended to be used with relative (delta) clocksource cycles. 
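The delta-only caveat can be illustrated by redoing the same arithmetic in a standalone form (made-up mult/shift values for a 1 GHz clocksource; plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same math as clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)cycles * mult) >> shift;
}

int main(void)
{
	/* 1 GHz: one cycle per nanosecond, so mult = 1 << shift. */
	uint32_t mult = 1 << 24, shift = 24;
	uint64_t t0 = 1000, t1 = 26000;	/* two raw counter readings */

	/* Convert the delta, never an absolute reading: a large absolute
	 * cycle count would overflow the 64-bit multiplication. */
	printf("%lld ns\n", (long long)cyc2ns(t1 - t0, mult, shift));
	return 0;
}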
* * XXX - This could use some mult_lxl_ll() asm optimization */ -static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) +static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) { return ((u64) cycles * mult) >> shift; } @@ -233,13 +236,13 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz extern int timekeeping_notify(struct clocksource *clock); -extern cycle_t clocksource_mmio_readl_up(struct clocksource *); -extern cycle_t clocksource_mmio_readl_down(struct clocksource *); -extern cycle_t clocksource_mmio_readw_up(struct clocksource *); -extern cycle_t clocksource_mmio_readw_down(struct clocksource *); +extern u64 clocksource_mmio_readl_up(struct clocksource *); +extern u64 clocksource_mmio_readl_down(struct clocksource *); +extern u64 clocksource_mmio_readw_up(struct clocksource *); +extern u64 clocksource_mmio_readw_down(struct clocksource *); extern int clocksource_mmio_init(void __iomem *, const char *, - unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); + unsigned long, int, unsigned, u64 (*)(struct clocksource *)); extern int clocksource_i8253_init(void); diff --git a/include/linux/cma.h b/include/linux/cma.h index 29f9e774ab76..6f0a91b37f68 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -1,6 +1,9 @@ #ifndef __CMA_H__ #define __CMA_H__ +#include <linux/init.h> +#include <linux/types.h> + /* * There is always at least global CMA area and a few optional * areas configured in kernel .config. diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 432f5c97e18f..0444b1336268 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -21,7 +21,7 @@ * clobbered. The issue is as follows: while the inline asm might * access any memory it wants, the compiler could have fit all of * @ptr into memory registers instead, and since @ptr never escaped - * from that, it proofed that the inline asm wasn't touching any of + * from that, it proved that the inline asm wasn't touching any of * it. This version works well with both compilers, i.e. we're telling * the compiler that the inline asm absolutely may see the contents * of @ptr. 
See also: https://llvm.org/bugs/show_bug.cgi?id=15495 @@ -263,7 +263,9 @@ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ -#if GCC_VERSION >= 50000 +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d9d6a9d77489..2319b8c108e8 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -35,14 +35,11 @@ #ifndef _CONFIGFS_H_ #define _CONFIGFS_H_ -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/kref.h> -#include <linux/mutex.h> -#include <linux/err.h> - -#include <linux/atomic.h> +#include <linux/stat.h> /* S_IRUGO */ +#include <linux/types.h> /* ssize_t */ +#include <linux/list.h> /* struct list_head */ +#include <linux/kref.h> /* struct kref */ +#include <linux/mutex.h> /* struct mutex */ #define CONFIGFS_ITEM_NAME_LEN 20 @@ -228,7 +225,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \ struct configfs_item_operations { void (*release)(struct config_item *); int (*allow_link)(struct config_item *src, struct config_item *target); - int (*drop_link)(struct config_item *src, struct config_item *target); + void (*drop_link)(struct config_item *src, struct config_item *target); }; struct configfs_group_operations { diff --git a/include/linux/console.h b/include/linux/console.h index 3672809234a7..9c26c6685587 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -28,9 +28,17 @@ struct tty_struct; #define VT100ID "\033[?1;2c" #define VT102ID "\033[?6c" +enum con_scroll { + SM_UP, + SM_DOWN, +}; + /** * struct consw - callbacks for consoles * + * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. + * Return true if no generic handling should be done. + * Invoked by csi_M and printing to the console. * @con_set_palette: sets the palette of the console to @table (optional) * @con_scrolldelta: the contents of the console should be scrolled by @lines. * Invoked by user. 
(optional) @@ -44,7 +52,9 @@ struct consw { void (*con_putc)(struct vc_data *, int, int, int); void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); void (*con_cursor)(struct vc_data *, int); - int (*con_scroll)(struct vc_data *, int, int, int, int); + bool (*con_scroll)(struct vc_data *, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines); int (*con_switch)(struct vc_data *); int (*con_blank)(struct vc_data *, int, int); int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); @@ -99,10 +109,6 @@ static inline int con_debug_leave(void) } #endif -/* scroll */ -#define SM_UP (1) -#define SM_DOWN (2) - /* cursor */ #define CM_DRAW (1) #define CM_ERASE (2) @@ -173,12 +179,6 @@ static inline void console_sysfs_notify(void) #endif extern bool console_suspend_enabled; -#ifdef CONFIG_OF -extern void console_set_by_of(void); -#else -static inline void console_set_by_of(void) {} -#endif - /* Suspend and resume console messages over PM events */ extern void suspend_console(void); extern void resume_console(void); diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d016a121a8c4..28ffa94aed6b 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -14,6 +14,7 @@ struct coredump_params; extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); +extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP extern void do_coredump(const siginfo_t *siginfo); #else diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b886dc17f2f3..21f9c74496e7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -57,9 +57,6 @@ struct notifier_block; #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ -#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ -#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ -#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug * lock is dropped */ @@ -80,87 +77,14 @@ struct notifier_block; #ifdef CONFIG_SMP extern bool cpuhp_tasks_frozen; -/* Need to know about CPUs going up/down? 
*/ -#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) -#define cpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = pri }; \ - register_cpu_notifier(&fn##_nb); \ -} - -#define __cpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = pri }; \ - __register_cpu_notifier(&fn##_nb); \ -} -#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ -#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) -#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) -#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ - -#ifdef CONFIG_HOTPLUG_CPU -extern int register_cpu_notifier(struct notifier_block *nb); -extern int __register_cpu_notifier(struct notifier_block *nb); -extern void unregister_cpu_notifier(struct notifier_block *nb); -extern void __unregister_cpu_notifier(struct notifier_block *nb); -#else - -#ifndef MODULE -extern int register_cpu_notifier(struct notifier_block *nb); -extern int __register_cpu_notifier(struct notifier_block *nb); -#else -static inline int register_cpu_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline int __register_cpu_notifier(struct notifier_block *nb) -{ - return 0; -} -#endif - -static inline void unregister_cpu_notifier(struct notifier_block *nb) -{ -} - -static inline void __unregister_cpu_notifier(struct notifier_block *nb) -{ -} -#endif - int cpu_up(unsigned int cpu); void notify_cpu_starting(unsigned int cpu); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); -#define cpu_notifier_register_begin cpu_maps_update_begin -#define cpu_notifier_register_done cpu_maps_update_done - #else /* CONFIG_SMP */ #define cpuhp_tasks_frozen 0 -#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) -#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) - -static inline int register_cpu_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline int __register_cpu_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline void unregister_cpu_notifier(struct notifier_block *nb) -{ -} - -static inline void __unregister_cpu_notifier(struct notifier_block *nb) -{ -} - static inline void cpu_maps_update_begin(void) { } @@ -169,14 +93,6 @@ static inline void cpu_maps_update_done(void) { } -static inline void cpu_notifier_register_begin(void) -{ -} - -static inline void cpu_notifier_register_done(void) -{ -} - #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; @@ -189,12 +105,6 @@ extern void get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); -#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) -#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) -#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) -#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb) -#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) -#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb) void clear_tasks_mm_cpumask(int cpu); int cpu_down(unsigned int cpu); @@ -206,13 +116,6 @@ static inline void cpu_hotplug_done(void) {} #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) -#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) -#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) -/* These aren't inline functions due to a GCC bug. 
*/ -#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) -#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) -#define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) -#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); }) #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PM_SLEEP_SMP @@ -245,6 +148,8 @@ void arch_cpu_idle_dead(void); int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); +void play_idle(unsigned long duration_ms); + #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); bool cpu_report_death(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32dc0cbd51ca..7e05c5e4e45c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -175,7 +175,7 @@ void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); -int cpufreq_update_policy(unsigned int cpu); +void cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); @@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL) static struct freq_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) +#define cpufreq_freq_attr_wo(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0200, NULL, store_##_name) + struct global_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index afe641c02dca..d936a0021839 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -16,9 +16,11 @@ enum cpuhp_state { CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, + CPUHP_X86_MCE_DEAD, CPUHP_VIRT_NET_DEAD, CPUHP_SLUB_DEAD, CPUHP_MM_WRITEBACK_DEAD, + CPUHP_MM_VMSTAT_DEAD, CPUHP_SOFTIRQ_DEAD, CPUHP_NET_MVNETA_DEAD, CPUHP_CPUIDLE_DEAD, @@ -30,6 +32,18 @@ enum cpuhp_state { CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, + CPUHP_FS_BUFF_DEAD, + CPUHP_PRINTK_DEAD, + CPUHP_MM_MEMCQ_DEAD, + CPUHP_PERCPU_CNT_DEAD, + CPUHP_RADIX_DEAD, + CPUHP_PAGE_ALLOC_DEAD, + CPUHP_NET_DEV_DEAD, + CPUHP_PCI_XGENE_DEAD, + CPUHP_IOMMU_INTEL_DEAD, + CPUHP_LUSTRE_CFS_DEAD, + CPUHP_SCSI_BNX2FC_DEAD, + CPUHP_SCSI_BNX2I_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -45,23 +59,31 @@ enum cpuhp_state { CPUHP_POWERPC_MMU_CTX_PREPARE, CPUHP_XEN_PREPARE, CPUHP_XEN_EVTCHN_PREPARE, - CPUHP_NOTIFY_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, CPUHP_BLK_MQ_PREPARE, + CPUHP_NET_FLOW_PREPARE, + CPUHP_TOPOLOGY_PREPARE, + CPUHP_NET_IUCV_PREPARE, + CPUHP_ARM_BL_PREPARE, + CPUHP_TRACE_RB_PREPARE, + CPUHP_MM_ZS_PREPARE, + CPUHP_MM_ZSWP_MEM_PREPARE, + CPUHP_MM_ZSWP_POOL_PREPARE, + CPUHP_KVM_PPC_BOOK3S_PREPARE, + CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_DEAD, - CPUHP_NOTF_ERR_INJ_PREPARE, CPUHP_MIPS_SOC_PREPARE, + CPUHP_BP_PREPARE_DYN, + CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, CPUHP_AP_RCUTREE_DYING, CPUHP_AP_IRQ_GIC_STARTING, - CPUHP_AP_IRQ_GICV3_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, - CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_PERF_X86_UNCORE_STARTING, @@ -80,7 +102,6 @@ enum cpuhp_state { CPUHP_AP_ARM_L2X0_STARTING, 
CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, - CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, @@ -94,9 +115,10 @@ enum cpuhp_state { CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_KVM_ARM_TIMER_STARTING, + /* Must be the last timer callback */ + CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, - CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, @@ -120,7 +142,6 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, - CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb31373c3478..da346f2817a8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -74,6 +74,7 @@ struct cpuidle_driver_kobj; struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; + unsigned int use_deepest_state:1; unsigned int cpu; int last_residency; @@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver( static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif -#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) +#ifdef CONFIG_CPU_IDLE extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev); +extern void cpuidle_use_deepest_state(bool enable); #else static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev) @@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } +static inline void cpuidle_use_deepest_state(bool enable) +{ +} #endif /* kernel/sched/idle.c */ @@ -235,8 +240,6 @@ struct cpuidle_governor { int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev); void (*reflect) (struct cpuidle_device *dev, int index); - - struct module *owner; }; #ifdef CONFIG_CPU_IDLE diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index da7fbf1cdd56..c717f5ea88cb 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -722,6 +722,11 @@ void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); void init_cpu_online(const struct cpumask *src); +static inline void reset_cpu_possible_mask(void) +{ + bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); +} + static inline void set_cpu_possible(unsigned int cpu, bool possible) { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cee5551625b..c0b0cf3d2d2f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -50,6 +50,8 @@ #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e @@ -60,6 +62,7 @@ #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c +#define 
CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 @@ -87,7 +90,7 @@ #define CRYPTO_ALG_TESTED 0x00000400 /* - * Set if the algorithm is an instance that is build from templates. + * Set if the algorithm is an instance that is built from templates. */ #define CRYPTO_ALG_INSTANCE 0x00000800 @@ -960,7 +963,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) * ablkcipher_request_set_callback() - set asynchronous callback function * @req: request handle * @flags: specify zero or an ORing of the flags - * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and * increase the wait queue beyond the initial maximum size; * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep * @compl: callback function pointer to be registered with the request handle @@ -977,7 +980,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) * cipher operation completes. * * The callback function is registered with the ablkcipher_request handle and - * must comply with the following template + * must comply with the following template:: * * void callback_function(struct crypto_async_request *req, int error) */ diff --git a/include/linux/dax.h b/include/linux/dax.h index add6c4bc568f..24ad71173995 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -8,25 +8,47 @@ struct iomap_ops; -/* We use lowest available exceptional entry bit for locking */ +/* + * We use lowest available bit in exceptional entry for locking, one bit for + * the entry size (PMD) and two more to tell us if the entry is a huge zero + * page (HZP) or an empty entry that is just used for locking. In total four + * special bits. + * + * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and + * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem + * block allocation. 
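A worked sketch of this layout, using the helpers defined just below (the sector number is illustrative): a locked PMD entry for sector 8 is built and decoded like so:

void *entry = dax_radix_locked_entry(8, RADIX_DAX_PMD);

/* the sector lives above the four special bits */
WARN_ON(dax_radix_sector(entry) != 8);
/* the PMD size bit and the lock bit are both set */
WARN_ON(!((unsigned long)entry & RADIX_DAX_PMD));
WARN_ON(!((unsigned long)entry & RADIX_DAX_ENTRY_LOCK));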
+ */ +#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) +#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) +#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) +#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) -ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, +static inline unsigned long dax_radix_sector(void *entry) +{ + return (unsigned long)entry >> RADIX_DAX_SHIFT; +} + +static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) +{ + return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | + ((unsigned long)sector << RADIX_DAX_SHIFT) | + RADIX_DAX_ENTRY_LOCK); +} + +ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); -ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, - get_block_t, dio_iodone_t, int flags); -int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); -int dax_truncate_page(struct inode *, loff_t from, get_block_t); -int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops); -int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); +int dax_invalidate_mapping_entry_sync(struct address_space *mapping, + pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, - pgoff_t index, bool wake_all); + pgoff_t index, void *entry, bool wake_all); #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); -void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length); #else @@ -35,12 +57,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev, { return ERR_PTR(-ENXIO); } -/* Shouldn't ever be called when dax is disabled. 
*/ -static inline void dax_unlock_mapping_entry(struct address_space *mapping, - pgoff_t index) -{ - BUG(); -} static inline int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length) { @@ -48,18 +64,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev, } #endif -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) -int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t); +#ifdef CONFIG_FS_DAX_PMD +static inline unsigned int dax_radix_order(void *entry) +{ + if ((unsigned long)entry & RADIX_DAX_PMD) + return PMD_SHIFT - PAGE_SHIFT; + return 0; +} +int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmd, unsigned int flags, struct iomap_ops *ops); #else -static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags, get_block_t gb) +static inline unsigned int dax_radix_order(void *entry) +{ + return 0; +} +static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags, + struct iomap_ops *ops) { return VM_FAULT_FALLBACK; } #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); -#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 5beed7b30561..c965e4469499 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -139,7 +139,7 @@ struct dentry_operations { void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); struct vfsmount *(*d_automount)(struct path *); - int (*d_manage)(struct dentry *, bool); + int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *, unsigned int); } ____cacheline_aligned; @@ -254,7 +254,7 @@ extern struct dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); /* test whether we have any submounts in a subdir tree */ -extern int have_submounts(struct dentry *); +extern int path_has_submounts(const struct path *); /* * This adds the entry to the hash queues. diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h index 5ac3bdd5cee6..699b6c499c4f 100644 --- a/include/linux/dcookies.h +++ b/include/linux/dcookies.h @@ -44,7 +44,7 @@ void dcookie_unregister(struct dcookie_user * user); * * Returns 0 on success, with *cookie filled in */ -int get_dcookie(struct path *path, unsigned long *cookie); +int get_dcookie(const struct path *path, unsigned long *cookie); #else @@ -58,7 +58,7 @@ static inline void dcookie_unregister(struct dcookie_user * user) return; } -static inline int get_dcookie(struct path *path, unsigned long *cookie) +static inline int get_dcookie(const struct path *path, unsigned long *cookie) { return -ENOSYS; } diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d3f0d1aec73..014cc564d1c4 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu; * Must only be called under the protection established by * debugfs_use_file_start(). 
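The DEFINE_DEBUGFS_ATTRIBUTE() block moved out of the CONFIG_DEBUG_FS guard below is the removal-safe counterpart of DEFINE_SIMPLE_ATTRIBUTE(); a sketch of the intended pairing with debugfs_create_file_unsafe() (variable and file names hypothetical):

static u32 my_threshold;

static int my_threshold_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

static int my_threshold_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(my_threshold_fops, my_threshold_get,
			 my_threshold_set, "%llu\n");

/* _unsafe: the attr read/write helpers already guard against removal */
debugfs_create_file_unsafe("threshold", 0644, parent, &my_threshold,
			   &my_threshold_fops);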
*/ -static inline const struct file_operations *debugfs_real_fops(struct file *filp) +static inline const struct file_operations * +debugfs_real_fops(const struct file *filp) __must_hold(&debugfs_srcu) { /* @@ -62,6 +63,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp) return filp->f_path.dentry->d_fsdata; } +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = generic_file_llseek, \ +} + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, umode_t mode, @@ -99,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, ssize_t debugfs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ -static int __fops ## _open(struct inode *inode, struct file *file) \ -{ \ - __simple_attr_check_format(__fmt, 0ull); \ - return simple_attr_open(inode, file, __get, __set, __fmt); \ -} \ -static const struct file_operations __fops = { \ - .owner = THIS_MODULE, \ - .open = __fops ## _open, \ - .release = simple_attr_release, \ - .read = debugfs_attr_read, \ - .write = debugfs_attr_write, \ - .llseek = generic_file_llseek, \ -} - struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -233,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu) { } -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ - static const struct file_operations __fops = { 0 } +static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t debugfs_attr_write(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 7adf6cc4b305..c35d0c0e0ada 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -20,7 +20,6 @@ #include <linux/devfreq.h> #include <linux/thermal.h> -#ifdef CONFIG_DEVFREQ_THERMAL /** * struct devfreq_cooling_power - Devfreq cooling power ops @@ -37,12 +36,16 @@ * @dyn_power_coeff * frequency * voltage^2 */ struct devfreq_cooling_power { - unsigned long (*get_static_power)(unsigned long voltage); - unsigned long (*get_dynamic_power)(unsigned long freq, + unsigned long (*get_static_power)(struct devfreq *devfreq, + unsigned long voltage); + unsigned long (*get_dynamic_power)(struct devfreq *devfreq, + unsigned long freq, unsigned long voltage); unsigned long dyn_power_coeff; }; +#ifdef CONFIG_DEVFREQ_THERMAL + struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power); diff --git a/include/linux/device.h b/include/linux/device.h index bc41e87a969b..491b4c0ca633 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -362,6 +362,7 @@ int 
subsys_virtual_register(struct bus_type *subsys, * @name: Name of the class. * @owner: The module owner. * @class_attrs: Default attributes of this class. + * @class_groups: Default attribute groups of this class. * @dev_groups: Default attributes of the devices that belong to the class. * @dev_kobj: The kobject that represents this class and links it into the hierarchy. * @dev_uevent: Called when a device is added, removed from this class, or a @@ -390,6 +391,7 @@ struct class { struct module *owner; struct class_attribute *class_attrs; + const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; @@ -465,6 +467,8 @@ struct class_attribute { struct class_attribute class_attr_##_name = __ATTR_RW(_name) #define CLASS_ATTR_RO(_name) \ struct class_attribute class_attr_##_name = __ATTR_RO(_name) +#define CLASS_ATTR_WO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_WO(_name) extern int __must_check class_create_file_ns(struct class *class, const struct class_attribute *attr, @@ -698,6 +702,25 @@ static inline int devm_add_action_or_reset(struct device *dev, return ret; } +/** + * devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @type: Type to allocate per-cpu memory for + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +#define devm_alloc_percpu(dev, type) \ + ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ + __alignof__(type))) + +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, + size_t align); +void devm_free_percpu(struct device *dev, void __percpu *pdata); + struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about @@ -708,6 +731,87 @@ struct device_dma_parameters { }; /** + * enum device_link_state - Device link states. + * @DL_STATE_NONE: The presence of the drivers is not being tracked. + * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. + * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. + * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). + * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. + * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. + */ +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE, + DL_STATE_CONSUMER_PROBE, + DL_STATE_ACTIVE, + DL_STATE_SUPPLIER_UNBIND, +}; + +/* + * Device link flags. + * + * STATELESS: The core won't track the presence of supplier/consumer drivers. + * AUTOREMOVE: Remove this link automatically on consumer driver unbind. + * PM_RUNTIME: If set, the runtime PM framework will use this link. + * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. + */ +#define DL_FLAG_STATELESS BIT(0) +#define DL_FLAG_AUTOREMOVE BIT(1) +#define DL_FLAG_PM_RUNTIME BIT(2) +#define DL_FLAG_RPM_ACTIVE BIT(3) + +/** + * struct device_link - Device link representation. + * @supplier: The device on the supplier end of the link. + * @s_node: Hook to the supplier device's list of links to consumers. + * @consumer: The device on the consumer end of the link. + * @c_node: Hook to the consumer device's list of links to suppliers. + * @status: The state of the link (with respect to the presence of drivers). + * @flags: Link flags.
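How a consumer driver might create such a link at probe time — a sketch with illustrative flags and hypothetical device pointers:

struct device_link *link;

link = device_link_add(consumer_dev, supplier_dev,
		       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
if (!link)
	return -EINVAL;	/* the link could not be created */
/* with DL_FLAG_AUTOREMOVE the core deletes the link on consumer
 * unbind, so no matching device_link_del() is needed here */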
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + */ +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + enum device_link_state status; + u32 flags; + bool rpm_active; +#ifdef CONFIG_SRCU + struct rcu_head rcu_head; +#endif +}; + +/** + * enum dl_dev_state - Device driver presence tracking information. + * @DL_DEV_NO_DRIVER: There is no driver attached to the device. + * @DL_DEV_PROBING: A driver is probing. + * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. + * @DL_DEV_UNBINDING: The driver is unbinding from the device. + */ +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING, + DL_DEV_DRIVER_BOUND, + DL_DEV_UNBINDING, +}; + +/** + * struct dev_links_info - Device data related to device links. + * @suppliers: List of links to supplier devices. + * @consumers: List of links to consumer devices. + * @status: Driver status information. + */ +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + enum dl_dev_state status; +}; + +/** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. * In most cases, a parent device is some sort of bus or host @@ -732,8 +836,9 @@ struct device_dma_parameters { * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. * @driver_data: Private pointer for driver specific info. + * @links: Links to suppliers and consumers of this device. * @power: For device power management. - * See Documentation/power/devices.txt for details. + * See Documentation/power/admin-guide/devices.rst for details. * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. @@ -799,6 +904,7 @@ struct device { core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set/get_drvdata */ + struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; @@ -1116,6 +1222,10 @@ extern void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); +/* Device links interface. 
*/ +struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags); +void device_link_del(struct device_link *link); #ifdef CONFIG_PRINTK diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index b91b023deffb..a52c6580cc9a 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -58,7 +58,7 @@ struct dm_io_notify { struct dm_io_client; struct dm_io_request { int bi_op; /* REQ_OP */ - int bi_op_flags; /* rq_flag_bits */ + int bi_op_flags; /* req_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,7 +30,7 @@ #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/wait.h> struct device; @@ -143,7 +143,7 @@ struct dma_buf { wait_queue_head_t poll; struct dma_buf_poll_cb_t { - struct fence_cb cb; + struct dma_fence_cb cb; wait_queue_head_t *poll; unsigned long active; diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h @@ -0,0 +1,86 @@ +/* + * fence-array: aggregates fences to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_ARRAY_H +#define __LINUX_DMA_FENCE_ARRAY_H + +#include <linux/dma-fence.h> + +/** + * struct dma_fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +/** + * struct dma_fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct dma_fence_array { + struct dma_fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct dma_fence **fences; +}; + +extern const struct dma_fence_ops dma_fence_array_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: fence to test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * to_dma_fence_array - cast a fence to a dma_fence_array + * @fence: fence to cast to a dma_fence_array + * + * Returns NULL if the fence is not a dma_fence_array, + * or the dma_fence_array otherwise.
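Putting the new array API together (dma_fence_array_create() is declared just below) — a sketch that aggregates two fences so the result signals only when both have; it assumes, as the in-tree users do, that the array takes over both the fences[] storage and the references:

struct dma_fence **fences;
struct dma_fence_array *array;

fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences)
	return -ENOMEM;
fences[0] = dma_fence_get(fence_a);
fences[1] = dma_fence_get(fence_b);

array = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
			       1, false /* signal_on_any */);
if (!array) {
	dma_fence_put(fences[0]);
	dma_fence_put(fences[1]);
	kfree(fences);
	return -ENOMEM;
}
/* &array->base is now usable wherever a plain struct dma_fence is */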
+ */ +static inline struct dma_fence_array * +to_dma_fence_array(struct dma_fence *fence) +{ + if (fence->ops != &dma_fence_array_ops) + return NULL; + + return container_of(fence, struct dma_fence_array, base); +} + +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_DMA_FENCE_ARRAY_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h new file mode 100644 index 000000000000..d51a7d23c358 --- /dev/null +++ b/include/linux/dma-fence.h @@ -0,0 +1,438 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_H +#define __LINUX_DMA_FENCE_H + +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/kref.h> +#include <linux/sched.h> +#include <linux/printk.h> +#include <linux/rcupdate.h> + +struct dma_fence; +struct dma_fence_ops; +struct dma_fence_cb; + +/** + * struct dma_fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: dma_fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * dma_fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of DMA_FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * dma_fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called + * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but dma_fence_signal was called right + * before this bit was set, it would have been able to set the + * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. + * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after dma_fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. 
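A minimal, hypothetical backend that follows these rules — a global lock, a driver timeline, and signaling from the completion path:

static DEFINE_SPINLOCK(mydrv_fence_lock);

static const char *mydrv_driver_name(struct dma_fence *f)
{
	return "mydrv";
}

static const char *mydrv_timeline_name(struct dma_fence *f)
{
	return "mydrv.ring0";
}

static bool mydrv_enable_signaling(struct dma_fence *f)
{
	/* e.g. unmask the completion interrupt here */
	return true;
}

static const struct dma_fence_ops mydrv_fence_ops = {
	.get_driver_name   = mydrv_driver_name,
	.get_timeline_name = mydrv_timeline_name,
	.enable_signaling  = mydrv_enable_signaling,
	.wait              = dma_fence_default_wait,
};

/* at submission: */
dma_fence_init(fence, &mydrv_fence_ops, &mydrv_fence_lock,
	       mydrv_context, ++mydrv_seqno);
/* from the completion interrupt: */
dma_fence_signal(fence);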
+ */ +struct dma_fence { + struct kref refcount; + const struct dma_fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + u64 context; + unsigned seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*dma_fence_func_t)(struct dma_fence *fence, + struct dma_fence_cb *cb); + +/** + * struct dma_fence_cb - callback for dma_fence_add_callback + * @node: used by dma_fence_add_callback to append this struct to fence::cb_list + * @func: dma_fence_func_t to call + * + * This struct will be initialized by dma_fence_add_callback, additional + * data can be passed along by embedding dma_fence_cb in another struct. + */ +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +/** + * struct dma_fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or dma_fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. + * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling dma_fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after dma_fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean dma_fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to dma_fence_default_wait for default implementation. + * the dma_fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled.
For example, a hardware + * lockup could be reported like that. + * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct dma_fence_ops { + const char * (*get_driver_name)(struct dma_fence *fence); + const char * (*get_timeline_name)(struct dma_fence *fence); + bool (*enable_signaling)(struct dma_fence *fence); + bool (*signaled)(struct dma_fence *fence); + signed long (*wait)(struct dma_fence *fence, + bool intr, signed long timeout); + void (*release)(struct dma_fence *fence); + + int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); + void (*fence_value_str)(struct dma_fence *fence, char *str, int size); + void (*timeline_value_str)(struct dma_fence *fence, + char *str, int size); +}; + +void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno); + +void dma_fence_release(struct kref *kref); +void dma_fence_free(struct dma_fence *fence); + +/** + * dma_fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void dma_fence_put(struct dma_fence *fence) +{ + if (fence) + kref_put(&fence->refcount, dma_fence_release); +} + +/** + * dma_fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * dma_fence_get_rcu - get a fence from a reservation_object_list with + * rcu read lock + * @fence: [in] fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + */ +static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence + * @fencep: [in] pointer to fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + * This function handles acquiring a reference to a fence that may be + * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), + * so long as the caller is using RCU on the pointer to the fence. + * + * An alternative mechanism is to employ a seqlock to protect a bunch of + * fences, such as used by struct reservation_object. When using a seqlock, + * the seqlock must be taken before and checked after a reference to the + * fence is acquired (as shown here). + * + * The caller is required to hold the RCU read lock. + */ +static inline struct dma_fence * +dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) +{ + do { + struct dma_fence *fence; + + fence = rcu_dereference(*fencep); + if (!fence || !dma_fence_get_rcu(fence)) + return NULL; + + /* The atomic_inc_not_zero() inside dma_fence_get_rcu() + * provides a full memory barrier upon success (such as now). + * This is paired with the write barrier from assigning + * to the __rcu protected fence pointer so that if that + * pointer still matches the current fence, we know we + * have successfully acquired a reference to it. If it no + * longer matches, we are holding a reference to some other + * reallocated pointer.
This is possible if the allocator + * is using a freelist like SLAB_DESTROY_BY_RCU where the + * fence remains valid for the RCU grace period, but it + * may be reallocated. When using such allocators, we are + * responsible for ensuring the reference we get is to + * the right fence, as below. + */ + if (fence == rcu_access_pointer(*fencep)) + return rcu_pointer_handoff(fence); + + dma_fence_put(fence); + } while (1); +} + +int dma_fence_signal(struct dma_fence *fence); +int dma_fence_signal_locked(struct dma_fence *fence); +signed long dma_fence_default_wait(struct dma_fence *fence, + bool intr, signed long timeout); +int dma_fence_add_callback(struct dma_fence *fence, + struct dma_fence_cb *cb, + dma_fence_func_t func); +bool dma_fence_remove_callback(struct dma_fence *fence, + struct dma_fence_cb *cb); +void dma_fence_enable_sw_signaling(struct dma_fence *fence); + +/** + * dma_fence_is_signaled_locked - Return an indication if the fence + * is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +dma_fence_is_signaled_locked(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * It's recommended for seqno fences to call dma_fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + */ +static inline bool +dma_fence_is_signaled(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool dma_fence_is_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return (int)(f1->seqno - f2->seqno) > 0; +} + +/** + * dma_fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. 
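Consuming a fence asynchronously with the callback interface declared above — a sketch with a hypothetical mydrv_job structure:

struct mydrv_job {
	struct work_struct work;
	struct dma_fence_cb cb;
};

static void mydrv_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct mydrv_job *job = container_of(cb, struct mydrv_job, cb);

	schedule_work(&job->work);
}

/* -ENOENT means the fence already signaled and the callback was not added */
ret = dma_fence_add_callback(fence, &job->cb, mydrv_fence_cb);
if (ret == -ENOENT)
	schedule_work(&job->work);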
+ */ +static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never + * have been set if enable_signaling wasn't called, and enabling that + * here is overkill. + */ + if (dma_fence_is_later(f1, f2)) + return dma_fence_is_signaled(f1) ? NULL : f1; + else + return dma_fence_is_signaled(f2) ? NULL : f2; +} + +signed long dma_fence_wait_timeout(struct dma_fence *, + bool intr, signed long timeout); +signed long dma_fence_wait_any_timeout(struct dma_fence **fences, + uint32_t count, + bool intr, signed long timeout, + uint32_t *idx); + +/** + * dma_fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) +{ + signed long ret; + + /* Since dma_fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +u64 dma_fence_context_alloc(unsigned num); + +#define DMA_FENCE_TRACE(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define DMA_FENCE_WARN(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define DMA_FENCE_ERR(f, fmt, args...) 
\ + do { \ + struct dma_fence *__ff = (f); \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 32c589062bd9..7f7e9a7e3839 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs); void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); +dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs); int iommu_dma_supported(struct device *dev, u64 mask); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 08528afdf58b..10c5a17b1f51 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg ops->unmap_sg(dev, sg, nents, dir, attrs); } -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, 0); + addr = ops->map_page(dev, page, offset, size, dir, attrs); debug_dma_map_page(dev, page, offset, size, dir, addr, false); return addr; } -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) +static inline void dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, 0); + ops->unmap_page(dev, addr, size, dir, attrs); debug_dma_unmap_page(dev, addr, size, dir, false); } @@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) +#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) +#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index cc535a478bae..feee6ec6a13b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -336,6 +336,12 @@ enum dma_slave_buswidth { * may or may not be applicable on memory sources. * @dst_maxburst: same as src_maxburst but for destination target * mutatis mutandis. 
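The two port-window fields documented next might be filled in like this (device and values illustrative):

struct dma_slave_config cfg = {
	.direction            = DMA_MEM_TO_DEV,
	.dst_addr             = fifo_phys,	/* device data window base */
	.dst_addr_width       = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst         = 16,
	.dst_port_window_size = 4,	/* 4-word register window */
};

int ret = dmaengine_slave_config(chan, &cfg);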
+ * @src_port_window_size: The length, in words, of the register area through + * which the data needs to be accessed on the device side. It is only used for + * devices which use an area instead of a single register to receive the data. + * Typically the DMA loops over this area in order to transfer the data. + * @dst_port_window_size: same as src_port_window_size but for the destination + * port. * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill * with 'true' if peripheral should be flow controller. Direction will be * selected at Runtime. @@ -363,6 +369,8 @@ struct dma_slave_config { enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; bool device_fc; unsigned int slave_id; }; diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index c934d3a96b5e..2896f93808ae 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -67,7 +67,7 @@ * genl_magic_func.h * generates an entry in the static genl_ops array, * and static register/unregister functions to - * genl_register_family_with_ops(). + * genl_register_family(). * * flags and handler: * GENL_op_init( .doit = x, .dumpit = y, .flags = something) diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 1f79b20918b1..4334106f44c3 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -50,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq); void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); -cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); +u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); #endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 9e0d78966552..07c52c0af62d 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -18,6 +18,8 @@ #include <linux/workqueue.h> #include <linux/debugfs.h> +#define EDAC_DEVICE_NAME_LEN 31 + struct device; #define EDAC_OPSTATE_INVAL -1 @@ -128,12 +130,21 @@ enum dev_type { * fatal (maybe it is on an unused memory area, * or the memory controller could recover from * it for example, by re-trying the operation). + * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable + * error whose handling is not urgent. This could + * be due to hardware data poisoning where the + * system can continue operation until the poisoned + * data is consumed. Preemptive measures may also + * be taken, e.g. offlining pages, etc. * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not * be recovered. + * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a fourth + * type of error: informational logs. */ enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED, HW_EVENT_ERR_UNCORRECTED, + HW_EVENT_ERR_DEFERRED, HW_EVENT_ERR_FATAL, HW_EVENT_ERR_INFO, }; @@ -145,6 +156,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) return "Corrected"; case HW_EVENT_ERR_UNCORRECTED: return "Uncorrected"; + case HW_EVENT_ERR_DEFERRED: + return "Deferred"; case HW_EVENT_ERR_FATAL: return "Fatal"; default: @@ -157,7 +170,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * enum mem_type - memory types.
For a more detailed reference, please see * http://en.wikipedia.org/wiki/DRAM * - * @MEM_EMPTY Empty csrow + * @MEM_EMPTY: Empty csrow * @MEM_RESERVED: Reserved csrow type * @MEM_UNKNOWN: Unknown csrow type * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. @@ -192,10 +205,11 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. - * @MEM_LRDDR3 Load-Reduced DDR3 memory. + * @MEM_LRDDR3: Load-Reduced DDR3 memory. * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. + * @MEM_LRDDR4: Load-Reduced DDR4 memory. */ enum mem_type { MEM_EMPTY = 0, @@ -218,6 +232,7 @@ enum mem_type { MEM_LRDDR3, MEM_DDR4, MEM_RDDR4, + MEM_LRDDR4, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -239,6 +254,7 @@ enum mem_type { #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) +#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) /** * enum edac-type - Error Detection and Correction capabilities and mode @@ -278,7 +294,7 @@ enum edac_type { /** * enum scrub_type - scrubbing capabilities - * @SCRUB_UNKNOWN Unknown if scrubber is available + * @SCRUB_UNKNOWN: Unknown if scrubber is available * @SCRUB_NONE: No scrubber * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing * @SCRUB_SW_SRC: Software scrub only errors @@ -287,7 +303,7 @@ enum edac_type { * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing * @SCRUB_HW_SRC: Hardware scrub only errors * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error - * SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable + * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable */ enum scrub_type { SCRUB_UNKNOWN = 0, @@ -320,114 +336,6 @@ enum scrub_type { #define OP_RUNNING_POLL_INTR 0x203 #define OP_OFFLINE 0x300 -/* - * Concepts used at the EDAC subsystem - * - * There are several things to be aware of that aren't at all obvious: - * - * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. - * - * These are some of the many terms that are thrown about that don't always - * mean what people think they mean (Inconceivable!). In the interest of - * creating a common ground for discussion, terms and their definitions - * will be established. - * - * Memory devices: The individual DRAM chips on a memory stick. These - * devices commonly output 4 and 8 bits each (x4, x8). - * Grouping several of these in parallel provides the - * number of bits that the memory controller expects: - * typically 72 bits, in order to provide 64 bits + - * 8 bits of ECC data. - * - * Memory Stick: A printed circuit board that aggregates multiple - * memory devices in parallel. In general, this is the - * Field Replaceable Unit (FRU) which gets replaced, in - * the case of excessive errors. Most often it is also - * called DIMM (Dual Inline Memory Module). - * - * Memory Socket: A physical connector on the motherboard that accepts - * a single memory stick. Also called as "slot" on several - * datasheets. - * - * Channel: A memory controller channel, responsible to communicate - * with a group of DIMMs. Each channel has its own - * independent control (command) and data bus, and can - * be used independently or grouped with other channels. - * - * Branch: It is typically the highest hierarchy on a - * Fully-Buffered DIMM memory controller. - * Typically, it contains two channels. 
- * Two channels at the same branch can be used in single - * mode or in lockstep mode. - * When lockstep is enabled, the cacheline is doubled, - * but it generally brings some performance penalty. - * Also, it is generally not possible to point to just one - * memory stick when an error occurs, as the error - * correction code is calculated using two DIMMs instead - * of one. Due to that, it is capable of correcting more - * errors than on single mode. - * - * Single-channel: The data accessed by the memory controller is contained - * into one dimm only. E. g. if the data is 64 bits-wide, - * the data flows to the CPU using one 64 bits parallel - * access. - * Typically used with SDR, DDR, DDR2 and DDR3 memories. - * FB-DIMM and RAMBUS use a different concept for channel, - * so this concept doesn't apply there. - * - * Double-channel: The data size accessed by the memory controller is - * interlaced into two dimms, accessed at the same time. - * E. g. if the DIMM is 64 bits-wide (72 bits with ECC), - * the data flows to the CPU using a 128 bits parallel - * access. - * - * Chip-select row: This is the name of the DRAM signal used to select the - * DRAM ranks to be accessed. Common chip-select rows for - * single channel are 64 bits, for dual channel 128 bits. - * It may not be visible by the memory controller, as some - * DIMM types have a memory buffer that can hide direct - * access to it from the Memory Controller. - * - * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. - * Motherboards commonly drive two chip-select pins to - * a memory stick. A single-ranked stick, will occupy - * only one of those rows. The other will be unused. - * - * Double-Ranked stick: A double-ranked stick has two chip-select rows which - * access different sets of memory devices. The two - * rows cannot be accessed concurrently. - * - * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. - * A double-sided stick has two chip-select rows which - * access different sets of memory devices. The two - * rows cannot be accessed concurrently. "Double-sided" - * is irrespective of the memory devices being mounted - * on both sides of the memory stick. - * - * Socket set: All of the memory sticks that are required for - * a single memory access or all of the memory sticks - * spanned by a chip-select row. A single socket set - * has two chip-select rows and if double-sided sticks - * are used these will occupy those chip-select rows. - * - * Bank: This term is avoided because it is unclear when - * needing to distinguish between chip-select rows and - * socket sets. - * - * Controller pages: - * - * Physical pages: - * - * Virtual pages: - * - * - * STRUCTURE ORGANIZATION AND CHOICES - * - * - * - * PS - I enjoyed writing all that about as much as you enjoyed reading it. - */ - /** * enum edac_mc_layer - memory controller hierarchy layer * @@ -452,7 +360,7 @@ enum edac_mc_layer_type { /** * struct edac_mc_layer - describes the memory controller hierarchy - * @layer: layer type + * @type: layer type * @size: number of components per layer. 
For example, * if the channel layer has two channels, size = 2 * @is_virt_csrow: This layer is part of the "csrow" when old API @@ -475,24 +383,28 @@ struct edac_mc_layer { #define EDAC_MAX_LAYERS 3 /** - * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array - * for the element given by [layer0,layer1,layer2] position + * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer + * array for the element given by [layer0,layer1,layer2] + * position * * @layers: a struct edac_mc_layer array, describing how many elements * were allocated for each layer - * @n_layers: Number of layers at the @layers array + * @nlayers: Number of layers at the @layers array * @layer0: layer0 position * @layer1: layer1 position. Unused if n_layers < 2 * @layer2: layer2 position. Unused if n_layers < 3 * - * For 1 layer, this macro returns &var[layer0] - &var + * For 1 layer, this macro returns "var[layer0] - var"; + * * For 2 layers, this macro is similar to allocate a bi-dimensional array - * and to return "&var[layer0][layer1] - &var" + * and to return "var[layer0][layer1] - var"; + * * For 3 layers, this macro is similar to allocate a tri-dimensional array - * and to return "&var[layer0][layer1][layer2] - &var" + * and to return "var[layer0][layer1][layer2] - var". * * A loop could be used here to make it more generic, but, as we only have * 3 layers, this is a little faster. + * * By design, layers can never be 0 or more than 3. If that ever happens, * a NULL is returned, causing an OOPS during the memory allocation routine, * with would point to the developer that he's doing something wrong. @@ -519,16 +431,18 @@ struct edac_mc_layer { * were allocated for each layer * @var: name of the var where we want to get the pointer * (like mci->dimms) - * @n_layers: Number of layers at the @layers array + * @nlayers: Number of layers at the @layers array * @layer0: layer0 position * @layer1: layer1 position. Unused if n_layers < 2 * @layer2: layer2 position. 
Unused if n_layers < 3 * - * For 1 layer, this macro returns &var[layer0] + * For 1 layer, this macro returns "var[layer0]"; + * * For 2 layers, this macro is similar to allocate a bi-dimensional array - * and to return "&var[layer0][layer1]" + * and to return "var[layer0][layer1]"; + * * For 3 layers, this macro is similar to allocate a tri-dimensional array - * and to return "&var[layer0][layer1][layer2]" + * and to return "var[layer0][layer1][layer2]"; */ #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ typeof(*var) __p; \ @@ -614,7 +528,7 @@ struct errcount_attribute_data { }; /** - * edac_raw_error_desc - Raw error report structure + * struct edac_raw_error_desc - Raw error report structure * @grain: minimum granularity for an error report, in bytes * @error_count: number of errors of the same type * @top_layer: top layer of the error (layer[0]) diff --git a/include/linux/efi.h b/include/linux/efi.h index 2d089487d2da..5b1af30ece55 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -103,6 +103,7 @@ typedef struct { #define EFI_PAGE_SHIFT 12 #define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT) +#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT) typedef struct { u32 type; @@ -443,6 +444,22 @@ typedef struct { #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 #define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 +typedef struct { + u32 version; + u32 get; + u32 set; + u32 del; + u32 get_all; +} apple_properties_protocol_32_t; + +typedef struct { + u64 version; + u64 get; + u64 set; + u64 del; + u64 get_all; +} apple_properties_protocol_64_t; + /* * Types and defines for EFI ResetSystem */ @@ -589,8 +606,10 @@ void efi_native_runtime_setup(void); #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) /* * This GUID is used to pass to the kernel proper the struct screen_info @@ -599,6 +618,7 @@ void efi_native_runtime_setup(void); */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) +#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) typedef struct { efi_guid_t guid; @@ -872,6 +892,7 @@ extern struct efi { unsigned long esrt; /* ESRT table */ unsigned long properties_table; /* properties table */ unsigned long mem_attr_table; /* memory attributes table */ + unsigned long rng_seed; /* UEFI firmware random seed */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -930,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, #endif extern void __iomem 
*efi_lookup_mapped_addr(u64 phys_addr); +extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries); extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); extern void __init efi_memmap_unmap(void); @@ -1145,6 +1167,26 @@ struct efi_generic_dev_path { u16 length; } __attribute ((packed)); +struct efi_dev_path { + u8 type; /* can be replaced with unnamed */ + u8 sub_type; /* struct efi_generic_dev_path; */ + u16 length; /* once we've moved to -std=c11 */ + union { + struct { + u32 hid; + u32 uid; + } acpi; + struct { + u8 fn; + u8 dev; + } pci; + }; +} __attribute ((packed)); + +#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER) +struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len); +#endif + static inline void memrange_efi_to_native(u64 *addr, u64 *npages) { *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); @@ -1493,4 +1535,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, struct efi_boot_memmap *map, void *priv, efi_exit_boot_map_processing priv_func); + +struct linux_efi_random_seed { + u32 size; + u8 bits[]; +}; + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index e7f358d2e5fc..b276e9ef0e0b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); +typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); @@ -108,6 +108,11 @@ struct elevator_type #define ELV_HASH_BITS 6 +void elv_rqhash_del(struct request_queue *q, struct request *rq); +void elv_rqhash_add(struct request_queue *q, struct request *rq); +void elv_rqhash_reposition(struct request_queue *q, struct request *rq); +struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); + /* * each queue has an elevator_queue associated with it */ @@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); -extern int elv_may_queue(struct request_queue *, int, int); +extern int elv_may_queue(struct request_queue *, unsigned int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 422630b8e588..cea41a124a80 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -52,10 +52,17 @@ #define VERSION_LEN 256 #define MAX_VOLUME_NAME 512 +#define MAX_PATH_LEN 64 +#define MAX_DEVICES 8 /* * For superblock */ +struct f2fs_device { + __u8 path[MAX_PATH_LEN]; + __le32 total_segments; +} __packed; + struct f2fs_super_block { __le32 magic; /* Magic Number */ __le16 major_ver; /* Major Version */ @@ -94,7 +101,8 @@ struct f2fs_super_block 
{ __le32 feature; /* defined features */ __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ - __u8 reserved[871]; /* valid reserved region */ + struct f2fs_device devs[MAX_DEVICES]; /* device list */ + __u8 reserved[327]; /* valid reserved region */ } __packed; /* diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h index 9a79f0106da1..32c22cfb238b 100644 --- a/include/linux/fddidevice.h +++ b/include/linux/fddidevice.h @@ -26,7 +26,6 @@ #ifdef __KERNEL__ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); -int fddi_change_mtu(struct net_device *dev, int new_mtu); struct net_device *alloc_fddidev(int sizeof_priv); #endif diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index a44794e508df..000000000000 --- a/include/linux/fence-array.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * fence-array: aggregates fence to be waited together - * - * Copyright (C) 2016 Collabora Ltd - * Copyright (C) 2016 Advanced Micro Devices, Inc. - * Authors: - * Gustavo Padovan <gustavo@padovan.org> - * Christian König <christian.koenig@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __LINUX_FENCE_ARRAY_H -#define __LINUX_FENCE_ARRAY_H - -#include <linux/fence.h> - -/** - * struct fence_array_cb - callback helper for fence array - * @cb: fence callback structure for signaling - * @array: reference to the parent fence array object - */ -struct fence_array_cb { - struct fence_cb cb; - struct fence_array *array; -}; - -/** - * struct fence_array - fence to represent an array of fences - * @base: fence base class - * @lock: spinlock for fence handling - * @num_fences: number of fences in the array - * @num_pending: fences in the array still pending - * @fences: array of the fences - */ -struct fence_array { - struct fence base; - - spinlock_t lock; - unsigned num_fences; - atomic_t num_pending; - struct fence **fences; -}; - -extern const struct fence_ops fence_array_ops; - -/** - * fence_is_array - check if a fence is from the array subsclass - * - * Return true if it is a fence_array and false otherwise. - */ -static inline bool fence_is_array(struct fence *fence) -{ - return fence->ops == &fence_array_ops; -} - -/** - * to_fence_array - cast a fence to a fence_array - * @fence: fence to cast to a fence_array - * - * Returns NULL if the fence is not a fence_array, - * or the fence_array otherwise. 
- */ -static inline struct fence_array *to_fence_array(struct fence *fence) -{ - if (fence->ops != &fence_array_ops) - return NULL; - - return container_of(fence, struct fence_array, base); -} - -struct fence_array *fence_array_create(int num_fences, struct fence **fences, - u64 context, unsigned seqno, - bool signal_on_any); - -#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h deleted file mode 100644 index 0d763053f97a..000000000000 --- a/include/linux/fence.h +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Fence mechanism for dma-buf to allow for asynchronous dma access - * - * Copyright (C) 2012 Canonical Ltd - * Copyright (C) 2012 Texas Instruments - * - * Authors: - * Rob Clark <robdclark@gmail.com> - * Maarten Lankhorst <maarten.lankhorst@canonical.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __LINUX_FENCE_H -#define __LINUX_FENCE_H - -#include <linux/err.h> -#include <linux/wait.h> -#include <linux/list.h> -#include <linux/bitops.h> -#include <linux/kref.h> -#include <linux/sched.h> -#include <linux/printk.h> -#include <linux/rcupdate.h> - -struct fence; -struct fence_ops; -struct fence_cb; - -/** - * struct fence - software synchronization primitive - * @refcount: refcount for this fence - * @ops: fence_ops associated with this fence - * @rcu: used for releasing fence with kfree_rcu - * @cb_list: list of all callbacks to call - * @lock: spin_lock_irqsave used for locking - * @context: execution context this fence belongs to, returned by - * fence_context_alloc() - * @seqno: the sequence number of this fence inside the execution context, - * can be compared to decide which fence would be signaled later. - * @flags: A mask of FENCE_FLAG_* defined below - * @timestamp: Timestamp when the fence was signaled. - * @status: Optional, only valid if < 0, must be set before calling - * fence_signal, indicates that the fence has completed with an error. - * - * the flags member must be manipulated and read using the appropriate - * atomic ops (bit_*), so taking the spinlock will not be needed most - * of the time. - * - * FENCE_FLAG_SIGNALED_BIT - fence is already signaled - * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* - * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the - * implementer of the fence for its own purposes. Can be used in different - * ways by different fence implementers, so do not rely on this. - * - * Since atomic bitops are used, this is not guaranteed to be the case. - * Particularly, if the bit was set, but fence_signal was called right - * before this bit was set, it would have been able to set the - * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. - * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting - * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that - * after fence_signal was called, any enable_signaling call will have either - * been completed, or never called at all. 
- */ -struct fence { - struct kref refcount; - const struct fence_ops *ops; - struct rcu_head rcu; - struct list_head cb_list; - spinlock_t *lock; - u64 context; - unsigned seqno; - unsigned long flags; - ktime_t timestamp; - int status; -}; - -enum fence_flag_bits { - FENCE_FLAG_SIGNALED_BIT, - FENCE_FLAG_ENABLE_SIGNAL_BIT, - FENCE_FLAG_USER_BITS, /* must always be last member */ -}; - -typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); - -/** - * struct fence_cb - callback for fence_add_callback - * @node: used by fence_add_callback to append this struct to fence::cb_list - * @func: fence_func_t to call - * - * This struct will be initialized by fence_add_callback, additional - * data can be passed along by embedding fence_cb in another struct. - */ -struct fence_cb { - struct list_head node; - fence_func_t func; -}; - -/** - * struct fence_ops - operations implemented for fence - * @get_driver_name: returns the driver name. - * @get_timeline_name: return the name of the context this fence belongs to. - * @enable_signaling: enable software signaling of fence. - * @signaled: [optional] peek whether the fence is signaled, can be null. - * @wait: custom wait implementation, or fence_default_wait. - * @release: [optional] called on destruction of fence, can be null - * @fill_driver_data: [optional] callback to fill in free-form debug info - * Returns amount of bytes filled, or -errno. - * @fence_value_str: [optional] fills in the value of the fence as a string - * @timeline_value_str: [optional] fills in the current value of the timeline - * as a string - * - * Notes on enable_signaling: - * For fence implementations that have the capability for hw->hw - * signaling, they can implement this op to enable the necessary - * irqs, or insert commands into cmdstream, etc. This is called - * in the first wait() or add_callback() path to let the fence - * implementation know that there is another driver waiting on - * the signal (ie. hw->sw case). - * - * This function can be called called from atomic context, but not - * from irq context, so normal spinlocks can be used. - * - * A return value of false indicates the fence already passed, - * or some failure occurred that made it impossible to enable - * signaling. True indicates successful enabling. - * - * fence->status may be set in enable_signaling, but only when false is - * returned. - * - * Calling fence_signal before enable_signaling is called allows - * for a tiny race window in which enable_signaling is called during, - * before, or after fence_signal. To fight this, it is recommended - * that before enable_signaling returns true an extra reference is - * taken on the fence, to be released when the fence is signaled. - * This will mean fence_signal will still be called twice, but - * the second time will be a noop since it was already signaled. - * - * Notes on signaled: - * May set fence->status if returning true. - * - * Notes on wait: - * Must not be NULL, set to fence_default_wait for default implementation. - * the fence_default_wait implementation should work for any fence, as long - * as enable_signaling works correctly. - * - * Must return -ERESTARTSYS if the wait is intr = true and the wait was - * interrupted, and remaining jiffies if fence has signaled, or 0 if wait - * timed out. Can also return other error values on custom implementations, - * which should be treated as if the fence is signaled. For example a hardware - * lockup could be reported like that. 
- * - * Notes on release: - * Can be NULL, this function allows additional commands to run on - * destruction of the fence. Can be called from irq context. - * If pointer is set to NULL, kfree will get called instead. - */ - -struct fence_ops { - const char * (*get_driver_name)(struct fence *fence); - const char * (*get_timeline_name)(struct fence *fence); - bool (*enable_signaling)(struct fence *fence); - bool (*signaled)(struct fence *fence); - signed long (*wait)(struct fence *fence, bool intr, signed long timeout); - void (*release)(struct fence *fence); - - int (*fill_driver_data)(struct fence *fence, void *data, int size); - void (*fence_value_str)(struct fence *fence, char *str, int size); - void (*timeline_value_str)(struct fence *fence, char *str, int size); -}; - -void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno); - -void fence_release(struct kref *kref); -void fence_free(struct fence *fence); - -/** - * fence_get - increases refcount of the fence - * @fence: [in] fence to increase refcount of - * - * Returns the same fence, with refcount increased by 1. - */ -static inline struct fence *fence_get(struct fence *fence) -{ - if (fence) - kref_get(&fence->refcount); - return fence; -} - -/** - * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock - * @fence: [in] fence to increase refcount of - * - * Function returns NULL if no refcount could be obtained, or the fence. - */ -static inline struct fence *fence_get_rcu(struct fence *fence) -{ - if (kref_get_unless_zero(&fence->refcount)) - return fence; - else - return NULL; -} - -/** - * fence_put - decreases refcount of the fence - * @fence: [in] fence to reduce refcount of - */ -static inline void fence_put(struct fence *fence) -{ - if (fence) - kref_put(&fence->refcount, fence_release); -} - -int fence_signal(struct fence *fence); -int fence_signal_locked(struct fence *fence); -signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); -int fence_add_callback(struct fence *fence, struct fence_cb *cb, - fence_func_t func); -bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); -void fence_enable_sw_signaling(struct fence *fence); - -/** - * fence_is_signaled_locked - Return an indication if the fence is signaled yet. - * @fence: [in] the fence to check - * - * Returns true if the fence was already signaled, false if not. Since this - * function doesn't enable signaling, it is not guaranteed to ever return - * true if fence_add_callback, fence_wait or fence_enable_sw_signaling - * haven't been called before. - * - * This function requires fence->lock to be held. - */ -static inline bool -fence_is_signaled_locked(struct fence *fence) -{ - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return true; - - if (fence->ops->signaled && fence->ops->signaled(fence)) { - fence_signal_locked(fence); - return true; - } - - return false; -} - -/** - * fence_is_signaled - Return an indication if the fence is signaled yet. - * @fence: [in] the fence to check - * - * Returns true if the fence was already signaled, false if not. Since this - * function doesn't enable signaling, it is not guaranteed to ever return - * true if fence_add_callback, fence_wait or fence_enable_sw_signaling - * haven't been called before. 
- * - * It's recommended for seqno fences to call fence_signal when the - * operation is complete, it makes it possible to prevent issues from - * wraparound between time of issue and time of use by checking the return - * value of this function before calling hardware-specific wait instructions. - */ -static inline bool -fence_is_signaled(struct fence *fence) -{ - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return true; - - if (fence->ops->signaled && fence->ops->signaled(fence)) { - fence_signal(fence); - return true; - } - - return false; -} - -/** - * fence_is_later - return if f1 is chronologically later than f2 - * @f1: [in] the first fence from the same context - * @f2: [in] the second fence from the same context - * - * Returns true if f1 is chronologically later than f2. Both fences must be - * from the same context, since a seqno is not re-used across contexts. - */ -static inline bool fence_is_later(struct fence *f1, struct fence *f2) -{ - if (WARN_ON(f1->context != f2->context)) - return false; - - return (int)(f1->seqno - f2->seqno) > 0; -} - -/** - * fence_later - return the chronologically later fence - * @f1: [in] the first fence from the same context - * @f2: [in] the second fence from the same context - * - * Returns NULL if both fences are signaled, otherwise the fence that would be - * signaled last. Both fences must be from the same context, since a seqno is - * not re-used across contexts. - */ -static inline struct fence *fence_later(struct fence *f1, struct fence *f2) -{ - if (WARN_ON(f1->context != f2->context)) - return NULL; - - /* - * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been - * set if enable_signaling wasn't called, and enabling that here is - * overkill. - */ - if (fence_is_later(f1, f2)) - return fence_is_signaled(f1) ? NULL : f1; - else - return fence_is_signaled(f2) ? NULL : f2; -} - -signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); -signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, - bool intr, signed long timeout); - -/** - * fence_wait - sleep until the fence gets signaled - * @fence: [in] the fence to wait on - * @intr: [in] if true, do an interruptible wait - * - * This function will return -ERESTARTSYS if interrupted by a signal, - * or 0 if the fence was signaled. Other error values may be - * returned on custom implementations. - * - * Performs a synchronous wait on this fence. It is assumed the caller - * directly or indirectly holds a reference to the fence, otherwise the - * fence might be freed before return, resulting in undefined behavior. - */ -static inline signed long fence_wait(struct fence *fence, bool intr) -{ - signed long ret; - - /* Since fence_wait_timeout cannot timeout with - * MAX_SCHEDULE_TIMEOUT, only valid return values are - * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. - */ - ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); - - return ret < 0 ? ret : 0; -} - -u64 fence_context_alloc(unsigned num); - -#define FENCE_TRACE(f, fmt, args...) \ - do { \ - struct fence *__ff = (f); \ - if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ - pr_info("f %llu#%u: " fmt, \ - __ff->context, __ff->seqno, ##args); \ - } while (0) - -#define FENCE_WARN(f, fmt, args...) \ - do { \ - struct fence *__ff = (f); \ - pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - -#define FENCE_ERR(f, fmt, args...) 
\ - do { \ - struct fence *__ff = (f); \ - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - -#endif /* __LINUX_FENCE_H */ diff --git a/include/linux/file.h b/include/linux/file.h index 7444f5feda12..61eb82cbafba 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -17,7 +17,7 @@ struct file_operations; struct vfsmount; struct dentry; struct path; -extern struct file *alloc_file(struct path *, fmode_t mode, +extern struct file *alloc_file(const struct path *, fmode_t mode, const struct file_operations *fop); static inline void fput_light(struct file *file, int fput_needed) diff --git a/include/linux/filter.h b/include/linux/filter.h index 1f09c521adfe..e4eb2546339a 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -14,6 +14,7 @@ #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/capability.h> +#include <linux/cryptohash.h> #include <net/sch_generic.h> @@ -56,6 +57,8 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 +#define BPF_TAG_SIZE 8 + /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -402,14 +405,16 @@ struct bpf_prog { u16 jited:1, /* Is our filter JIT'ed? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1; /* Do we need dst entry? */ + dst_needed:1, /* Do we need dst entry? */ + xdp_adjust_head:1; /* Adjusting pkt head? */ kmemcheck_bitfield_end(meta); - u32 len; /* Number of filter blocks */ enum bpf_prog_type type; /* Type of BPF program */ + u32 len; /* Number of filter blocks */ + u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ - unsigned int (*bpf_func)(const struct sk_buff *skb, - const struct bpf_insn *filter); + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); /* Instructions for interpreter */ union { struct sock_filter insns[0]; @@ -435,10 +440,11 @@ struct bpf_skb_data_end { struct xdp_buff { void *data; void *data_end; + void *data_hard_start; }; /* compute the linear packet data range [data, data_end) which - * will be accessed by cls_bpf and act_bpf programs + * will be accessed by cls_bpf, act_bpf and lwt programs */ static inline void bpf_compute_data_end(struct sk_buff *skb) { @@ -498,16 +504,27 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, return BPF_PROG_RUN(prog, skb); } -static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, - struct xdp_buff *xdp) +static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) { - u32 ret; + /* Caller needs to hold rcu_read_lock() (!), otherwise program + * can be released while still running, or map elements could be + * freed early while still having concurrent users. XDP fastpath + * already takes rcu_read_lock() when fetching the program, so + * it's not necessary here anymore. 
+ */ + return BPF_PROG_RUN(prog, xdp); +} - rcu_read_lock(); - ret = BPF_PROG_RUN(prog, (void *)xdp); - rcu_read_unlock(); +static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) +{ + return prog->len * sizeof(struct bpf_insn); +} - return ret; +static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) +{ + return round_up(bpf_prog_insn_size(prog) + + sizeof(__be64) + 1, SHA_MESSAGE_BYTES); } static inline unsigned int bpf_prog_size(unsigned int proglen) @@ -590,7 +607,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); -bool bpf_helper_changes_skb_data(void *func); +bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h new file mode 100644 index 000000000000..dba6e3c697c7 --- /dev/null +++ b/include/linux/fpga/fpga-bridge.h @@ -0,0 +1,60 @@ +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> + +#ifndef _LINUX_FPGA_BRIDGE_H +#define _LINUX_FPGA_BRIDGE_H + +struct fpga_bridge; + +/** + * struct fpga_bridge_ops - ops for low level FPGA bridge drivers + * @enable_show: returns the FPGA bridge's status + * @enable_set: set a FPGA bridge as enabled or disabled + * @fpga_bridge_remove: set FPGA into a specific state during driver remove + */ +struct fpga_bridge_ops { + int (*enable_show)(struct fpga_bridge *bridge); + int (*enable_set)(struct fpga_bridge *bridge, bool enable); + void (*fpga_bridge_remove)(struct fpga_bridge *bridge); +}; + +/** + * struct fpga_bridge - FPGA bridge structure + * @name: name of low level FPGA bridge + * @dev: FPGA bridge device + * @mutex: enforces exclusive reference to bridge + * @br_ops: pointer to struct of FPGA bridge ops + * @info: fpga image specific information + * @node: FPGA bridge list node + * @priv: low level driver private date + */ +struct fpga_bridge { + const char *name; + struct device dev; + struct mutex mutex; /* for exclusive reference to bridge */ + const struct fpga_bridge_ops *br_ops; + struct fpga_image_info *info; + struct list_head node; + void *priv; +}; + +#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) + +struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, + struct fpga_image_info *info); +void fpga_bridge_put(struct fpga_bridge *bridge); +int fpga_bridge_enable(struct fpga_bridge *bridge); +int fpga_bridge_disable(struct fpga_bridge *bridge); + +int fpga_bridges_enable(struct list_head *bridge_list); +int fpga_bridges_disable(struct list_head *bridge_list); +void fpga_bridges_put(struct list_head *bridge_list); +int fpga_bridge_get_to_list(struct device_node *np, + struct fpga_image_info *info, + struct list_head *bridge_list); + +int fpga_bridge_register(struct device *dev, const char *name, + const struct fpga_bridge_ops *br_ops, void *priv); +void fpga_bridge_unregister(struct device *dev); + +#endif /* _LINUX_FPGA_BRIDGE_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 0940bf45e2f2..16551d5eac36 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -65,11 +65,26 @@ enum fpga_mgr_states { /* * FPGA Manager flags * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting */ #define 
FPGA_MGR_PARTIAL_RECONFIG BIT(0) +#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) + +/** + * struct fpga_image_info - information specific to a FPGA image + * @flags: boolean flags as defined above + * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) + * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) + */ +struct fpga_image_info { + u32 flags; + u32 enable_timeout_us; + u32 disable_timeout_us; +}; /** * struct fpga_manager_ops - ops for low level fpga manager drivers + * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state * @write_init: prepare the FPGA to receive confuration data * @write: write count bytes of configuration data to the FPGA @@ -81,11 +96,14 @@ enum fpga_mgr_states { * called, so leaving them out is fine. */ struct fpga_manager_ops { + size_t initial_header_size; enum fpga_mgr_states (*state)(struct fpga_manager *mgr); - int (*write_init)(struct fpga_manager *mgr, u32 flags, + int (*write_init)(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *buf, size_t count); int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); - int (*write_complete)(struct fpga_manager *mgr, u32 flags); + int (*write_complete)(struct fpga_manager *mgr, + struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); }; @@ -109,14 +127,17 @@ struct fpga_manager { #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) -int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, +int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count); -int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, +int fpga_mgr_firmware_load(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *image_name); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); +struct fpga_manager *fpga_mgr_get(struct device *dev); + void fpga_mgr_put(struct fpga_manager *mgr); int fpga_mgr_register(struct device *dev, const char *name, diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index c46d2aa16d81..1d18af034554 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type) static inline void frontswap_init(unsigned type, unsigned long *map) { - if (frontswap_enabled()) - __frontswap_init(type, map); +#ifdef CONFIG_FRONTSWAP + __frontswap_init(type, map); +#endif } #endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 16d2b6e874d6..2ba074328894 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -28,7 +28,6 @@ #include <linux/uidgid.h> #include <linux/lockdep.h> #include <linux/percpu-rwsem.h> -#include <linux/blk_types.h> #include <linux/workqueue.h> #include <linux/percpu-rwsem.h> #include <linux/delayed_call.h> @@ -38,6 +37,7 @@ struct backing_dev_info; struct bdi_writeback; +struct bio; struct export_operations; struct hd_geometry; struct iovec; @@ -152,58 +152,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define CHECK_IOVEC_ONLY -1 /* - * The below are the various read and write flags that we support. Some of - * them include behavioral modifiers that send information down to the - * block layer and IO scheduler. They should be used along with a req_op. 
- * Terminology: - * - * The block layer uses device plugging to defer IO a little bit, in - * the hope that we will see more IO very shortly. This increases - * coalescing of adjacent IO and thus reduces the number of IOs we - * have to send to the device. It also allows for better queuing, - * if the IO isn't mergeable. If the caller is going to be waiting - * for the IO, then he must ensure that the device is unplugged so - * that the IO is dispatched to the driver. - * - * All IO is handled async in Linux. This is fine for background - * writes, but for reads or writes that someone waits for completion - * on, we want to notify the block layer and IO scheduler so that they - * know about it. That allows them to make better scheduling - * decisions. So when the below references 'sync' and 'async', it - * is referencing this priority hint. - * - * With that in mind, the available types are: - * - * READ A normal read operation. Device will be plugged. - * READ_SYNC A synchronous read. Device is not plugged, caller can - * immediately wait on this read without caring about - * unplugging. - * WRITE A normal async write. Device will be plugged. - * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down - * the hint that someone will be waiting on this IO - * shortly. The write equivalent of READ_SYNC. - * WRITE_ODIRECT Special case write for O_DIRECT only. - * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. - * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on - * non-volatile media on completion. - * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded - * by a cache flush and data is guaranteed to be on - * non-volatile media on completion. - * - */ -#define RW_MASK REQ_OP_WRITE - -#define READ REQ_OP_READ -#define WRITE REQ_OP_WRITE - -#define READ_SYNC REQ_SYNC -#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) -#define WRITE_ODIRECT REQ_SYNC -#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) -#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) -#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) - -/* * Attribute flags. These should be or-ed together to figure out what * has been changed! 
*/ @@ -321,6 +269,7 @@ struct writeback_control; #define IOCB_HIPRI (1 << 3) #define IOCB_DSYNC (1 << 4) #define IOCB_SYNC (1 << 5) +#define IOCB_WRITE (1 << 6) struct kiocb { struct file *ki_filp; @@ -594,6 +543,7 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_LOOKUP 0x0002 #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 +#define IOP_DEFAULT_READLINK 0x0010 /* * Keep mostly read-only and often accessed (especially for @@ -1709,7 +1659,6 @@ struct file_operations { int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); int (*fsync) (struct file *, loff_t, loff_t, int datasync); - int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); @@ -1778,11 +1727,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *, unsigned long, loff_t *, int); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); +extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, + struct inode *inode_out, loff_t pos_out, + u64 *len, bool is_dedupe); extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, u64 len); +extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dest, loff_t destoff, + loff_t len, bool *is_same); extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + u64 len) +{ + int ret; + + sb_start_write(file_inode(file_out)->i_sb); + ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); + sb_end_write(file_inode(file_out)->i_sb); + + return ret; +} + struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); @@ -2123,11 +2091,11 @@ extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(const char *, const char __user *, const char *, unsigned long, void *); -extern struct vfsmount *collect_mounts(struct path *); +extern struct vfsmount *collect_mounts(const struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, struct vfsmount *); -extern int vfs_statfs(struct path *, struct kstatfs *); +extern int vfs_statfs(const struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); extern int vfs_ustat(dev_t, struct kstatfs *); @@ -2499,19 +2467,6 @@ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK -static inline bool op_is_write(unsigned int op) -{ - return op == REQ_OP_READ ? false : true; -} - -/* - * return data direction, READ or WRITE - */ -static inline int bio_data_dir(struct bio *bio) -{ - return op_is_write(bio_op(bio)) ? 
WRITE : READ; -} - extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); extern int revalidate_disk(struct gendisk *); @@ -2709,7 +2664,7 @@ extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ extern bool is_subdir(struct dentry *, struct dentry *); -extern bool path_is_under(struct path *, struct path *); +extern bool path_is_under(const struct path *, const struct path *); extern char *file_path(struct file *, char *, int); @@ -2782,7 +2737,6 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK -extern blk_qc_t submit_bio(struct bio *); extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); @@ -2914,7 +2868,6 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len, extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -extern int generic_readlink(struct dentry *, char __user *, int); extern void generic_fillattr(struct inode *, struct kstat *); int vfs_getattr_nosec(struct path *path, struct kstat *stat); extern int vfs_getattr(struct path *, struct kstat *); @@ -2935,6 +2888,7 @@ extern int vfs_lstat(const char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); extern int vfs_fstatat(int , const char __user *, struct kstat *, int); extern const char *vfs_get_link(struct dentry *, struct delayed_call *); +extern int vfs_readlink(struct dentry *, char __user *, int); extern int __generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -2949,8 +2903,10 @@ extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); extern struct super_block *get_super(struct block_device *); extern struct super_block *get_super_thawed(struct block_device *); +extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev); extern struct super_block *get_active_super(struct block_device *bdev); extern void drop_super(struct super_block *sb); +extern void drop_super_exclusive(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); extern void iterate_supers_type(struct file_system_type *, void (*)(struct super_block *, void *), void *); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index ff8b11b26f31..c074b670aa99 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -18,73 +18,9 @@ #include <crypto/skcipher.h> #include <uapi/linux/fs.h> -#define FS_KEY_DERIVATION_NONCE_SIZE 16 -#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 - -#define FS_POLICY_FLAGS_PAD_4 0x00 -#define FS_POLICY_FLAGS_PAD_8 0x01 -#define FS_POLICY_FLAGS_PAD_16 0x02 -#define FS_POLICY_FLAGS_PAD_32 0x03 -#define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAGS_VALID 0x03 - -/* Encryption algorithms */ -#define FS_ENCRYPTION_MODE_INVALID 0 -#define FS_ENCRYPTION_MODE_AES_256_XTS 1 -#define FS_ENCRYPTION_MODE_AES_256_GCM 2 -#define FS_ENCRYPTION_MODE_AES_256_CBC 3 -#define FS_ENCRYPTION_MODE_AES_256_CTS 4 - -/** - * Encryption context for inode - * - * Protector format: - * 1 byte: Protector format (1 = this version) - * 1 byte: File contents encryption mode - * 1 byte: File names encryption mode - * 1 byte: Flags - * 8 bytes: Master Key descriptor - * 16 bytes: Encryption Key 
derivation nonce - */ -struct fscrypt_context { - u8 format; - u8 contents_encryption_mode; - u8 filenames_encryption_mode; - u8 flags; - u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; -} __packed; - -/* Encryption parameters */ -#define FS_XTS_TWEAK_SIZE 16 -#define FS_AES_128_ECB_KEY_SIZE 16 -#define FS_AES_256_GCM_KEY_SIZE 32 -#define FS_AES_256_CBC_KEY_SIZE 32 -#define FS_AES_256_CTS_KEY_SIZE 32 -#define FS_AES_256_XTS_KEY_SIZE 64 -#define FS_MAX_KEY_SIZE 64 - -#define FS_KEY_DESC_PREFIX "fscrypt:" -#define FS_KEY_DESC_PREFIX_SIZE 8 - -/* This is passed in from userspace into the kernel keyring */ -struct fscrypt_key { - u32 mode; - u8 raw[FS_MAX_KEY_SIZE]; - u32 size; -} __packed; - -struct fscrypt_info { - u8 ci_data_mode; - u8 ci_filename_mode; - u8 ci_flags; - struct crypto_skcipher *ci_ctfm; - struct key *ci_keyring_key; - u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; -}; +#define FS_CRYPTO_BLOCK_SIZE 16 -#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 -#define FS_WRITE_PATH_FL 0x00000002 +struct fscrypt_info; struct fscrypt_ctx { union { @@ -102,19 +38,6 @@ struct fscrypt_ctx { u8 mode; /* Encryption mode for tfm */ }; -struct fscrypt_completion_result { - struct completion completion; - int res; -}; - -#define DECLARE_FS_COMPLETION_RESULT(ecr) \ - struct fscrypt_completion_result ecr = { \ - COMPLETION_INITIALIZER((ecr).completion), 0 } - -#define FS_FNAME_NUM_SCATTER_ENTRIES 4 -#define FS_CRYPTO_BLOCK_SIZE 16 -#define FS_FNAME_CRYPTO_DIGEST_SIZE 32 - /** * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. @@ -154,9 +77,15 @@ struct fscrypt_name { #define fname_len(p) ((p)->disk_name.len) /* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* * crypto opertions for filesystems */ struct fscrypt_operations { + unsigned int flags; int (*get_context)(struct inode *, void *, size_t); int (*key_prefix)(struct inode *, u8 **); int (*prepare_context)(struct inode *); @@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page) #endif } -static inline int fscrypt_has_encryption_key(struct inode *inode) +static inline int fscrypt_has_encryption_key(const struct inode *inode) { #if IS_ENABLED(CONFIG_FS_ENCRYPTION) return (inode->i_crypt_info != NULL); @@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry) #if IS_ENABLED(CONFIG_FS_ENCRYPTION) /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; -int fscrypt_initialize(void); - -extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); +extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); -extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); -extern int fscrypt_decrypt_page(struct page *); +extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, + unsigned int, unsigned int, + u64, gfp_t); +extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, + unsigned int, u64); extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); extern void fscrypt_pullback_bio_page(struct page **, bool); extern void fscrypt_restore_control_page(struct page *); -extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, +extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, unsigned int); /* policy.c */ -extern int fscrypt_process_policy(struct file *, const struct 
fscrypt_policy *); -extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); +extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); +extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); /* keyinfo.c */ -extern int get_crypt_info(struct inode *); extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); @@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); extern int fscrypt_setup_filename(struct inode *, const struct qstr *, int lookup, struct fscrypt_name *); extern void fscrypt_free_filename(struct fscrypt_name *); -extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); -extern int fscrypt_fname_alloc_buffer(struct inode *, u32, +extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); +extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, @@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, #endif /* crypto.c */ -static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, +static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i, gfp_t f) { return ERR_PTR(-EOPNOTSUPP); @@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) return; } -static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, - struct page *p, gfp_t f) +static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i, + struct page *p, + unsigned int len, + unsigned int offs, + u64 lblk_num, gfp_t f) { return ERR_PTR(-EOPNOTSUPP); } -static inline int fscrypt_notsupp_decrypt_page(struct page *p) +static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p, + unsigned int len, unsigned int offs, + u64 lblk_num) { return -EOPNOTSUPP; } @@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p) return; } -static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, +static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p, sector_t s, unsigned int f) { return -EOPNOTSUPP; } /* policy.c */ -static inline int fscrypt_notsupp_process_policy(struct file *f, - const struct fscrypt_policy *p) +static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f, + const void __user *arg) { return -EOPNOTSUPP; } -static inline int fscrypt_notsupp_get_policy(struct inode *i, - struct fscrypt_policy *p) +static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f, + void __user *arg) { return -EOPNOTSUPP; } diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 649e9171a9b3..3efa3b861d44 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -29,83 +29,112 @@ * #ifdefs. 
*/ struct ccsr_guts { - __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ - __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ - __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ - __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ - __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ - __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and + * Control Register + */ + u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ u8 res018[0x20 - 0x18]; - __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u32 porcir; /* 0x.0020 - POR Configuration Information + * Register + */ u8 res024[0x30 - 0x24]; - __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u32 gpiocr; /* 0x.0030 - GPIO Control Register */ u8 res034[0x40 - 0x34]; - __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data + * Register + */ u8 res044[0x50 - 0x44]; - __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u32 gpindr; /* 0x.0050 - General-Purpose Input Data + * Register + */ u8 res054[0x60 - 0x54]; - __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ - __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ - __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u32 pmuxcr; /* 0x.0060 - Alternate Function Signal + * Multiplex Control + */ + u32 pmuxcr2; /* 0x.0064 - Alternate function signal + * multiplex control 2 + */ + u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ u8 res06c[0x70 - 0x6c]; - __be32 devdisr; /* 0x.0070 - Device Disable Control */ + u32 devdisr; /* 0x.0070 - Device Disable Control */ #define CCSR_GUTS_DEVDISR_TB1 0x00001000 #define CCSR_GUTS_DEVDISR_TB0 0x00004000 - __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ u8 res078[0x7c - 0x78]; - __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ - __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ - __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ - __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ - __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ - __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ - __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ - __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ - __be32 autorstsr; /* 0x.009c - Automatic reset status register */ - __be32 pvr; /* 0x.00a0 - Processor Version Register */ - __be32 svr; /* 0x.00a4 - System Version Register */ + u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control + * Register + */ + u32 powmgtcsr; /* 0x.0080 - Power Management Status and + * Control Register + */ + u32 pmrccr; /* 0x.0084 - Power Management Reset Counter + * Configuration Register + */ + u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter + * Configuration Register + */ + u32 pmcdr; /* 0x.008c - 4Power management clock disable + * register + */ + u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register 
*/ + u32 rstrscr; /* 0x.0094 - Reset Request Status and + * Control Register + */ + u32 ectrstcr; /* 0x.0098 - Exception reset control register */ + u32 autorstsr; /* 0x.009c - Automatic reset status register */ + u32 pvr; /* 0x.00a0 - Processor Version Register */ + u32 svr; /* 0x.00a4 - System Version Register */ u8 res0a8[0xb0 - 0xa8]; - __be32 rstcr; /* 0x.00b0 - Reset Control Register */ + u32 rstcr; /* 0x.00b0 - Reset Control Register */ u8 res0b4[0xc0 - 0xb4]; - __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register + u32 iovselsr; /* 0x.00c0 - I/O voltage select status register Called 'elbcvselcr' on 86xx SOCs */ u8 res0c4[0x100 - 0xc4]; - __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers There are 16 registers */ u8 res140[0x224 - 0x140]; - __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ - __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ u8 res22c[0x604 - 0x22c]; - __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ u8 res608[0x800 - 0x608]; - __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ u8 res804[0x900 - 0x804]; - __be32 ircr; /* 0x.0900 - Infrared Control Register */ + u32 ircr; /* 0x.0900 - Infrared Control Register */ u8 res904[0x908 - 0x904]; - __be32 dmacr; /* 0x.0908 - DMA Control Register */ + u32 dmacr; /* 0x.0908 - DMA Control Register */ u8 res90c[0x914 - 0x90c]; - __be32 elbccr; /* 0x.0914 - eLBC Control Register */ + u32 elbccr; /* 0x.0914 - eLBC Control Register */ u8 res918[0xb20 - 0x918]; - __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ - __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ - __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ u8 resb2c[0xe00 - 0xb2c]; - __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ u8 rese04[0xe10 - 0xe04]; - __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ u8 rese14[0xe20 - 0xe14]; - __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ - __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override + * register + */ u8 rese28[0xf04 - 0xe28]; - __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ - __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ u8 resf0c[0xf2c - 0xf0c]; - __be32 itcr; /* 0x.0f2c - Internal transaction control register */ + u32 itcr; /* 0x.0f2c - Internal transaction control + * register + */ u8 resf30[0xf40 - 0xf30]; - __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ - __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ + u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ } __attribute__ ((packed)); +u32 fsl_guts_get_svr(void); /* Alternate function signal multiplex 
control */ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index f2912914141a..60cef8227534 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -100,6 +100,7 @@ struct fsl_usb2_platform_data { unsigned already_suspended:1; unsigned has_fsl_erratum_a007792:1; unsigned has_fsl_erratum_a005275:1; + unsigned has_fsl_erratum_a005697:1; unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index b8bcc058e031..b43d3f5bd9ea 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -17,7 +17,7 @@ #include <linux/bug.h> /* Notify this dentry's parent about a child's events. */ -static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) +static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) { if (!dentry) dentry = path->dentry; @@ -28,7 +28,7 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3 /* simple call site for access decisions */ static inline int fsnotify_perm(struct file *file, int mask) { - struct path *path = &file->f_path; + const struct path *path = &file->f_path; /* * Do not use file_inode() here or anywhere in this file to get the * inode. That would break *notity on overlayfs. @@ -176,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) */ static inline void fsnotify_access(struct file *file) { - struct path *path = &file->f_path; + const struct path *path = &file->f_path; struct inode *inode = path->dentry->d_inode; __u32 mask = FS_ACCESS; @@ -194,7 +194,7 @@ static inline void fsnotify_access(struct file *file) */ static inline void fsnotify_modify(struct file *file) { - struct path *path = &file->f_path; + const struct path *path = &file->f_path; struct inode *inode = path->dentry->d_inode; __u32 mask = FS_MODIFY; @@ -212,7 +212,7 @@ static inline void fsnotify_modify(struct file *file) */ static inline void fsnotify_open(struct file *file) { - struct path *path = &file->f_path; + const struct path *path = &file->f_path; struct inode *inode = path->dentry->d_inode; __u32 mask = FS_OPEN; @@ -228,7 +228,7 @@ static inline void fsnotify_open(struct file *file) */ static inline void fsnotify_close(struct file *file) { - struct path *path = &file->f_path; + const struct path *path = &file->f_path; struct inode *inode = path->dentry->d_inode; fmode_t mode = file->f_mode; __u32 mask = (mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 79467b239fcf..487246546ebe 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -96,7 +96,7 @@ struct fsnotify_ops { struct inode *inode, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, - u32 mask, void *data, int data_type, + u32 mask, const void *data, int data_type, const unsigned char *file_name, u32 cookie); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); @@ -245,9 +245,9 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, +extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const unsigned char *name, u32 cookie); -extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask); +extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern u32 fsnotify_get_cookie(void); @@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); /* find (and take a reference) to a mark associated with group and vfsmount */ extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); -/* copy the values from old into new */ -extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old); /* set the ignored_mask of a mark */ extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); /* set the mask of a mark (might pin the object into memory */ @@ -357,13 +355,13 @@ extern void fsnotify_init_event(struct fsnotify_event *event, #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, +static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const unsigned char *name, u32 cookie) { return 0; } -static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) +static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) { return 0; } diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b3d34d3e0e7e..3633e8beff39 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); void ftrace_free_filter(struct ftrace_ops *ops); +void ftrace_ops_set_global_filter(struct ftrace_ops *ops); int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); @@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) +#define ftrace_ops_set_global_filter(ops) do { } while (0) static inline ssize_t ftrace_filter_write(struct file *file, const char 
__user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } @@ -945,6 +947,10 @@ extern int __disable_trace_on_warning; #define INIT_TRACE_RECURSION .trace_recursion = 0, #endif +int tracepoint_printk_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + #else /* CONFIG_TRACING */ static inline void disable_trace_on_warning(void) { } #endif /* CONFIG_TRACING */ diff --git a/include/linux/futex.h b/include/linux/futex.h index 6435f46d6e13..7c5b694864cd 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -1,14 +1,14 @@ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H +#include <linux/ktime.h> #include <uapi/linux/futex.h> struct inode; struct mm_struct; struct task_struct; -union ktime; -long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); extern int diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 851671742790..8bd28ce6d76e 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -17,8 +17,9 @@ enum fwnode_type { FWNODE_OF, FWNODE_ACPI, FWNODE_ACPI_DATA, + FWNODE_ACPI_STATIC, FWNODE_PDATA, - FWNODE_IRQCHIP, + FWNODE_IRQCHIP }; struct fwnode_handle { diff --git a/include/linux/genhd.h b/include/linux/genhd.h index e0341af6950e..76f39754e7b0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -146,15 +146,6 @@ enum { DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ }; -#define BLK_SCSI_MAX_CMDS (256) -#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) - -struct blk_scsi_cmd_filter { - unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; - unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; - struct kobject kobj; -}; - struct disk_part_tbl { struct rcu_head rcu_head; int len; diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 667c31101b8b..377257d8f7e3 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = { * {{{2 */ #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) -static struct genl_family ZZZ_genl_family __read_mostly = { - .id = GENL_ID_GENERATE, - .name = __stringify(GENL_MAGIC_FAMILY), - .version = GENL_MAGIC_VERSION, -#ifdef GENL_MAGIC_FAMILY_HDRSZ - .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), -#endif - .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, -}; - +static struct genl_family ZZZ_genl_family; /* * Magic: define multicast groups * Magic: define multicast group registration helper @@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ #undef GENL_mc_group #define GENL_mc_group(group) +static struct genl_family ZZZ_genl_family __ro_after_init = { + .name = __stringify(GENL_MAGIC_FAMILY), + .version = GENL_MAGIC_VERSION, +#ifdef GENL_MAGIC_FAMILY_HDRSZ + .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), +#endif + .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, + .ops = ZZZ_genl_ops, + .n_ops = ARRAY_SIZE(ZZZ_genl_ops), + .mcgrps = ZZZ_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps), + .module = THIS_MODULE, +}; + int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) { - return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ - ZZZ_genl_ops, \ - ZZZ_genl_mcgrps); + return genl_register_family(&ZZZ_genl_family); } void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 
f8041f9de31e..0fe0b6295ab5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -38,9 +38,8 @@ struct vm_area_struct; #define ___GFP_ACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u -#define ___GFP_OTHER_NODE 0x800000u -#define ___GFP_WRITE 0x1000000u -#define ___GFP_KSWAPD_RECLAIM 0x2000000u +#define ___GFP_WRITE 0x800000u +#define ___GFP_KSWAPD_RECLAIM 0x1000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -172,11 +171,6 @@ struct vm_area_struct; * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of * distinguishing in the source between false positives and allocations that * cannot be supported (e.g. page tables). - * - * __GFP_OTHER_NODE is for allocations that are on a remote node but that - * should not be accounted for as a remote allocation in vmstat. A - * typical user would be khugepaged collapsing a huge page on a remote - * node. */ #define __GFP_COLD ((__force gfp_t)___GFP_COLD) #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) @@ -184,10 +178,9 @@ struct vm_area_struct; #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) -#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT 26 +#define __GFP_BITS_SHIFT 25 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* @@ -506,9 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold); extern void free_hot_cold_page_list(struct list_head *list, bool cold); struct page_frag_cache; -extern void *__alloc_page_frag(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask); -extern void __free_page_frag(void *addr); +extern void __page_frag_cache_drain(struct page *page, unsigned int count); +extern void *page_frag_alloc(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask); +extern void page_frag_free(void *addr); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 24e2cc56beb1..c2748accea71 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -82,8 +82,6 @@ enum single_ended_mode { * implies that if the chip supports IRQs, these IRQs need to be threaded * as the chip access may sleep when e.g. reading out the IRQ status * registers. - * @irq_not_threaded: flag must be set if @can_sleep is set but the - * IRQs don't need to be threaded * @read_reg: reader function for generic GPIO * @write_reg: writer function for generic GPIO * @pin2mask: some generic GPIO controllers work with the big-endian bits @@ -91,7 +89,7 @@ enum single_ended_mode { * bit. This callback assigns the right bit mask. * @reg_dat: data (in) register for generic GPIO * @reg_set: output set register (out=high) for generic GPIO - * @reg_clk: output clear register (out=low) for generic GPIO + * @reg_clr: output clear register (out=low) for generic GPIO * @reg_dir: direction setting register for generic GPIO * @bgpio_bits: number of register bits used for a generic GPIO i.e. 
* <register width> * 8 @@ -109,8 +107,10 @@ enum single_ended_mode { * for GPIO IRQs, provided by GPIO driver * @irq_default_type: default IRQ triggering type applied during GPIO driver * initialization, provided by GPIO driver - * @irq_parent: GPIO IRQ chip parent/bank linux irq number, - * provided by GPIO driver + * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number, + * provided by GPIO driver for chained interrupt (not for nested + * interrupts). + * @irq_nested: True if set the interrupt handling is nested. * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all * bits set to one * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to @@ -166,7 +166,6 @@ struct gpio_chip { u16 ngpio; const char *const *names; bool can_sleep; - bool irq_not_threaded; #if IS_ENABLED(CONFIG_GPIO_GENERIC) unsigned long (*read_reg)(void __iomem *reg); @@ -192,7 +191,8 @@ struct gpio_chip { unsigned int irq_base; irq_flow_handler_t irq_handler; unsigned int irq_default_type; - int irq_parent; + int irq_chained_parent; + bool irq_nested; bool irq_need_valid_mask; unsigned long *irq_valid_mask; struct lock_class_key *lock_key; @@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, int parent_irq, irq_flow_handler_t parent_handler); +void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + int parent_irq); + int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type, + bool nested, struct lock_class_key *lock_key); +/* FIXME: I assume threaded IRQchips do not have the lockdep problem */ +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, + handler, type, true, NULL); +} + #ifdef CONFIG_LOCKDEP #define gpiochip_irqchip_add(...) \ ( \ ({ \ static struct lock_class_key _key; \ - _gpiochip_irqchip_add(__VA_ARGS__, &_key); \ + _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \ }) \ ) #else #define gpiochip_irqchip_add(...) 
\ - _gpiochip_irqchip_add(__VA_ARGS__, NULL) + _gpiochip_irqchip_add(__VA_ARGS__, false, NULL) #endif #endif /* CONFIG_GPIOLIB_IRQCHIP */ diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index ee2d8c6f9130..0b71024c082c 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -2,7 +2,6 @@ #define _GPIO_KEYS_H struct device; -struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -18,7 +17,6 @@ struct gpio_desc; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys - * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -31,7 +29,6 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; - struct gpio_desc *gpiod; }; /** @@ -46,7 +43,7 @@ struct gpio_keys_button { * @name: input device name */ struct gpio_keys_platform_data { - struct gpio_keys_button *buttons; + const struct gpio_keys_button *buttons; int nbuttons; unsigned int poll_interval; unsigned int rep:1; diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index e31bcd4c7859..97585d9679f3 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb) int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); -/* May be used by hardware driver */ -int hdlc_change_mtu(struct net_device *dev, int new_mtu); /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index e9744202fa29..edbb4fc674ed 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -78,6 +78,8 @@ enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE, HDMI_PICTURE_ASPECT_4_3, HDMI_PICTURE_ASPECT_16_9, + HDMI_PICTURE_ASPECT_64_27, + HDMI_PICTURE_ASPECT_256_135, HDMI_PICTURE_ASPECT_RESERVED, }; diff --git a/include/linux/hid.h b/include/linux/hid.h index b2ec82712baa..28f38e2b8f30 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -231,7 +231,11 @@ struct hid_item { #define HID_DG_TAP 0x000d0035 #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 #define HID_DG_PROGRAMCHANGEKEY 0x000d003a +#define HID_DG_BATTERYSTRENGTH 0x000d003b #define HID_DG_INVERT 0x000d003c +#define HID_DG_TILT_X 0x000d003d +#define HID_DG_TILT_Y 0x000d003e +#define HID_DG_TWIST 0x000d0041 #define HID_DG_TIPSWITCH 0x000d0042 #define HID_DG_TIPSWITCH2 0x000d0043 #define HID_DG_BARRELSWITCH 0x000d0044 @@ -479,6 +483,7 @@ struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; + bool registered; }; enum hid_type { diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index 8ec23fb0b412..402f99e328d4 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h @@ -32,7 +32,6 @@ struct hippi_cb { }; __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); -int hippi_change_mtu(struct net_device *dev, int new_mtu); int hippi_mac_addr(struct net_device *dev, void *p); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); struct net_device *alloc_hippi_dev(int sizeof_priv); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 5e00f80b1535..cdab81ba29f8 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t 
static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) { - timer->node.expires.tv64 = tv64; - timer->_softexpires.tv64 = tv64; + timer->node.expires = tv64; + timer->_softexpires = tv64; } static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) @@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) { - return timer->node.expires.tv64; + return timer->node.expires; } static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) { - return timer->_softexpires.tv64; + return timer->_softexpires; } static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) @@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void); * this resolution values. */ # define HIGH_RES_NSEC 1 -# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } +# define KTIME_HIGH_RES (HIGH_RES_NSEC) # define MONOTONIC_RES_NSEC HIGH_RES_NSEC # define KTIME_MONOTONIC_RES KTIME_HIGH_RES @@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) * hrtimer_start_range_ns() to prevent short timeouts. */ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) - rem.tv64 -= hrtimer_resolution; + rem -= hrtimer_resolution; return rem; } diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b9f65d99873..97e478d6b690 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,12 +1,12 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct fault_env *fe); +extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); -extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, @@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd); + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); @@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) +static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long address, bool freeze, struct page *page) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} @@ -210,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) +static inline int 
do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 34a0dc18f327..bee0827766a3 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -30,8 +30,7 @@ * Must not be NULL. *OBSOLETE* * @read: New API. drivers can fill up to max bytes of data * into the buffer. The buffer is aligned for any type - * and max is guaranteed to be >= to that alignment - * (either 4 or 8 depending on architecture). + * and max is a multiple of 4 and >= 32 bytes. * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream * (per mill). diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 9d2f8bde7d12..78d59dba563e 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -298,8 +298,8 @@ enum hwmon_pwm_attributes { * Channel number * The function returns the file permissions. * If the return value is 0, no attribute will be created. - * @read: Read callback. Optional. If not provided, attributes - * will not be readable. + * @read: Read callback for data attributes. Mandatory if readable + * data attributes are present. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -308,8 +308,19 @@ enum hwmon_pwm_attributes { * Channel number * @val: Pointer to returned value * The function returns 0 on success or a negative error number. - * @write: Write callback. Optional. If not provided, attributes - * will not be writable. + * @read_string: + * Read callback for string attributes. Mandatory if string + * attributes are present. + * Parameters are: + * @dev: Pointer to hardware monitoring device + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * @str: Pointer to returned string + * The function returns 0 on success or a negative error number. + * @write: Write callback for data attributes. Mandatory if writeable + * data attributes are present. 
* Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type @@ -324,6 +335,8 @@ struct hwmon_ops { u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val); + int (*read_string)(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, char **str); int (*write)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val); }; @@ -349,7 +362,9 @@ struct hwmon_chip_info { const struct hwmon_channel_info **info; }; +/* hwmon_device_register() is deprecated */ struct device *hwmon_device_register(struct device *dev); + struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, @@ -362,12 +377,12 @@ struct device * hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *info, - const struct attribute_group **groups); + const struct attribute_group **extra_groups); struct device * devm_hwmon_device_register_with_info(struct device *dev, - const char *name, void *drvdata, - const struct hwmon_chip_info *info, - const struct attribute_group **groups); + const char *name, void *drvdata, + const struct hwmon_chip_info *info, + const struct attribute_group **extra_groups); void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6824556d37ed..42fe43fb0c80 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -696,7 +696,7 @@ enum vmbus_device_type { HV_FCOPY, HV_BACKUP, HV_DM, - HV_UNKOWN, + HV_UNKNOWN, }; struct vmbus_device { @@ -1119,6 +1119,12 @@ struct hv_driver { struct device_driver driver; + /* dynamic device GUID's */ + struct { + spinlock_t lock; + struct list_head list; + } dynids; + int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); @@ -1169,13 +1175,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); -static inline const char *vmbus_dev_name(const struct hv_device *device_obj) -{ - const struct kobject *kobj = &device_obj->device.kobj; - - return kobj->name; -} - void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, @@ -1454,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel); void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); +void vmbus_setevent(struct vmbus_channel *channel); /* * Negotiated version with the Host. */ @@ -1486,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) * there is room for the producer to send the pending packet. */ -static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +static inline void hv_signal_on_read(struct vmbus_channel *channel) { u32 cur_write_sz; u32 pending_sz; + struct hv_ring_buffer_info *rbi = &channel->inbound; /* * Issue a full memory barrier before making the signaling decision. @@ -1507,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); /* If the other end is not blocked on write don't bother. 
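A minimal sketch of a driver filling the hwmon_ops callbacks described above, with the new read_string callback serving a label; all foo_* names are hypothetical, only the callback signatures come from this header:

#include <linux/errno.h>
#include <linux/hwmon.h>

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return 0444;				/* everything read-only */
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;			/* canned millidegree Celsius value */
		return 0;
	}
	return -EOPNOTSUPP;
}

static int foo_read_string(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, char **str)
{
	static char label[] = "foo-sensor";

	if (type == hwmon_temp && attr == hwmon_temp_label) {
		*str = label;			/* string attribute, per read_string */
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible	= foo_is_visible,
	.read		= foo_read,
	.read_string	= foo_read_string,
};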
*/ if (pending_sz == 0) - return false; + return; cur_write_sz = hv_get_bytes_to_write(rbi); if (cur_write_sz >= pending_sz) - return true; + vmbus_setevent(channel); - return false; + return; } /* @@ -1526,31 +1527,23 @@ static inline struct vmpacket_descriptor * get_next_pkt_raw(struct vmbus_channel *channel) { struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; + u32 priv_read_loc = ring_info->priv_read_index; void *ring_buffer = hv_get_ring_buffer(ring_info); - struct vmpacket_descriptor *cur_desc; - u32 packetlen; u32 dsize = ring_info->ring_datasize; - u32 delta = read_loc - ring_info->ring_buffer->read_index; + /* + * delta is the difference between what is available to read and + * what was already consumed in place. We commit read index after + * the whole batch is processed. + */ + u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? + priv_read_loc - ring_info->ring_buffer->read_index : + (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) return NULL; - if ((read_loc + sizeof(*cur_desc)) > dsize) - return NULL; - - cur_desc = ring_buffer + read_loc; - packetlen = cur_desc->len8 << 3; - - /* - * If the packet under consideration is wrapping around, - * return failure. - */ - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) - return NULL; - - return cur_desc; + return ring_buffer + priv_read_loc; } /* @@ -1562,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, struct vmpacket_descriptor *desc) { struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 read_loc = ring_info->priv_read_index; u32 packetlen = desc->len8 << 3; u32 dsize = ring_info->ring_datasize; - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) - BUG(); /* * Include the packet trailer. */ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; + ring_info->priv_read_index %= dsize; } /* @@ -1596,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel) virt_rmb(); ring_info->ring_buffer->read_index = ring_info->priv_read_index; - if (hv_need_to_signal_on_read(ring_info)) - vmbus_set_event(channel); + hv_signal_on_read(channel); } diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index c2e3324f9468..a1385023a29b 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -50,31 +50,4 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); -/** - * smbus_host_notify - internal structure used by the Host Notify mechanism. - * @adapter: the I2C adapter associated with this struct - * @work: worker used to schedule the IRQ in the slave device - * @lock: spinlock to check if a notification is already pending - * @pending: flag set when a notification is pending (any new notification will - * be rejected if pending is true) - * @payload: the actual payload of the Host Notify event - * @addr: the address of the slave device which raised the notification - * - * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does - * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a - * managed resource to clean this up when the adapter get released. 
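The smbus_host_notify machinery deleted here is superseded by a single per-adapter entry point, i2c_handle_smbus_host_notify(), declared in the i2c.h hunk further down. A sketch of how a bus driver's interrupt handler might forward a Host Notify event; the bar_* names and the status register layout are invented for illustration:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct bar_i2c {
	struct i2c_adapter adap;
	void __iomem *regs;			/* hypothetical MMIO region */
};

static irqreturn_t bar_irq(int irq, void *dev_id)
{
	struct bar_i2c *bus = dev_id;
	unsigned short addr;

	/* Invented register: low 7 bits hold the notifying slave address. */
	addr = readl(bus->regs) & 0x7f;

	/* The i2c core maps this onto the client's Host Notify IRQ. */
	i2c_handle_smbus_host_notify(&bus->adap, addr);

	return IRQ_HANDLED;
}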
- */ -struct smbus_host_notify { - struct i2c_adapter *adapter; - struct work_struct work; - spinlock_t lock; - bool pending; - u16 payload; - u8 addr; -}; - -struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap); -int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, - unsigned short addr, unsigned int data); - #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 6422eef428c4..4b45ec46161f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,6 +30,7 @@ #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +#include <linux/irqdomain.h> /* for Host Notify IRQ */ #include <linux/of.h> /* for struct device_node */ #include <linux/swab.h> /* for swab16 */ #include <uapi/linux/i2c.h> @@ -135,7 +136,8 @@ enum i2c_alert_protocol { * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) * @attach_adapter: Callback for bus addition (deprecated) - * @probe: Callback for device binding + * @probe: Callback for device binding - soon to be deprecated + * @probe_new: New callback for device binding * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown * @alert: Alert callback, for example for the SMBus alert protocol @@ -178,6 +180,11 @@ struct i2c_driver { int (*probe)(struct i2c_client *, const struct i2c_device_id *); int (*remove)(struct i2c_client *); + /* New driver model interface to aid the seamless removal of the + * current probe()'s, more commonly unused than used second parameter. + */ + int (*probe_new)(struct i2c_client *); + /* driver model interfaces that don't relate to enumeration */ void (*shutdown)(struct i2c_client *); @@ -243,6 +250,8 @@ struct i2c_client { extern struct i2c_client *i2c_verify_client(struct device *dev); extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); +extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, + const struct i2c_client *client); static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) { @@ -567,6 +576,8 @@ struct i2c_adapter { struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; + + struct irq_domain *host_notify_domain; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) @@ -654,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter) #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ /* Must equal I2C_M_TEN below */ #define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ +#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ /* Must match I2C_M_STOP|IGNORE_NAK */ @@ -739,6 +751,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 
1 : 0); } +int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct @@ -774,6 +787,10 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) /* must call i2c_put_adapter() when done with returned i2c_adapter device */ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); +extern const struct of_device_id +*i2c_of_match_device(const struct of_device_id *matches, + struct i2c_client *client); + #else static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) @@ -790,6 +807,14 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node { return NULL; } + +static inline const struct of_device_id +*i2c_of_match_device(const struct of_device_id *matches, + struct i2c_client *client) +{ + return NULL; +} + #endif /* CONFIG_OF */ #if IS_ENABLED(CONFIG_ACPI) diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h new file mode 100644 index 000000000000..b08dcb183fca --- /dev/null +++ b/include/linux/i2c/mlxcpld.h @@ -0,0 +1,52 @@ +/* + * mlxcpld.h - Mellanox I2C multiplexer support in CPLD + * + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Michael Shych <michaels@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
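To show what the probe_new callback added in the i2c.h hunk above buys a driver: the id argument disappears, and drivers that still need the matched entry can use the newly exported i2c_match_id(). A minimal sketch with a hypothetical baz driver:

#include <linux/i2c.h>
#include <linux/module.h>

static int baz_probe(struct i2c_client *client)
{
	/* No i2c_device_id argument; most drivers never used it. */
	return 0;
}

static const struct i2c_device_id baz_ids[] = {
	{ "baz", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, baz_ids);

static struct i2c_driver baz_driver = {
	.driver		= { .name = "baz" },
	.probe_new	= baz_probe,		/* instead of .probe */
	.id_table	= baz_ids,
};
module_i2c_driver(baz_driver);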
+ */ + +#ifndef _LINUX_I2C_MLXCPLD_H +#define _LINUX_I2C_MLXCPLD_H + +/* Platform data for the CPLD I2C multiplexers */ + +/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info + * @adap_ids - adapter array + * @num_adaps - number of adapters + * @sel_reg_addr - mux select register offset in CPLD space + */ +struct mlxcpld_mux_plat_data { + int *adap_ids; + int num_adaps; + int sel_reg_addr; +}; + +#endif /* _LINUX_I2C_MLXCPLD_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 083d61e92706..3c01b89aed67 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -18,12 +18,11 @@ #include <linux/rcupdate.h> /* - * We want shallower trees and thus more bits covered at each layer. 8 - * bits gives us large enough first layer for most use cases and maximum - * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and - * 1k on 32bit. + * Using 6 bits at each layer allows us to allocate 7 layers out of each page. + * 8 bits only gave us 3 layers out of every pair of pages, which is less + * efficient except for trees with a largest element between 192-255 inclusive. */ -#define IDR_BITS 8 +#define IDR_BITS 6 #define IDR_SIZE (1 << IDR_BITS) #define IDR_MASK ((1 << IDR_BITS)-1) @@ -56,6 +55,32 @@ struct idr { #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) /** + * idr_get_cursor - Return the current position of the cyclic allocator + * @idr: idr handle + * + * The value returned is the value that will be next returned from + * idr_alloc_cyclic() if it is free (otherwise the search will start from + * this position). + */ +static inline unsigned int idr_get_cursor(struct idr *idr) +{ + return READ_ONCE(idr->cur); +} + +/** + * idr_set_cursor - Set the current position of the cyclic allocator + * @idr: idr handle + * @val: new position + * + * The next call to idr_alloc_cyclic() will return @val if it is free + * (otherwise the search will start from this position). 
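One intended use of these cursor helpers (idr_set_cursor() itself is defined just below) is saving and restoring the cyclic allocator position, e.g. across a checkpoint/restore cycle. A minimal sketch with a hypothetical foo_idr:

#include <linux/idr.h>

static DEFINE_IDR(foo_idr);

static unsigned int foo_checkpoint_pos(void)
{
	return idr_get_cursor(&foo_idr);	/* next id idr_alloc_cyclic() tries */
}

static void foo_restore_pos(unsigned int pos)
{
	idr_set_cursor(&foo_idr, pos);
}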
+ */ +static inline void idr_set_cursor(struct idr *idr, unsigned int val) +{ + WRITE_ONCE(idr->cur, val); +} + +/** * DOC: idr sync * idr synchronization (stolen from radix-tree.h) * @@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } +static inline bool ida_is_empty(struct ida *ida) +{ + return idr_is_empty(&ida->idr); +} + void __init idr_init_cache(void); #endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a80516fd65c8..fe849329511a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation { #define WLAN_AUTH_SHARED_KEY 1 #define WLAN_AUTH_FT 2 #define WLAN_AUTH_SAE 3 +#define WLAN_AUTH_FILS_SK 4 +#define WLAN_AUTH_FILS_SK_PFS 5 +#define WLAN_AUTH_FILS_PK 6 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 @@ -1960,6 +1963,26 @@ enum ieee80211_eid { WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_CAG_NUMBER = 237, + WLAN_EID_AP_CSN = 239, + WLAN_EID_FILS_INDICATION = 240, + WLAN_EID_DILS = 241, + WLAN_EID_FRAGMENT = 242, + WLAN_EID_EXTENSION = 255 +}; + +/* Element ID Extensions for Element ID 255 */ +enum ieee80211_eid_ext { + WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, + WLAN_EID_EXT_FILS_REQ_PARAMS = 2, + WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, + WLAN_EID_EXT_FILS_SESSION = 4, + WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, + WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, + WLAN_EID_EXT_KEY_DELIVERY = 7, + WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, + WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, + WLAN_EID_EXT_FILS_NONCE = 13, }; /* Action category code */ @@ -2073,6 +2096,9 @@ enum ieee80211_key_len { #define IEEE80211_GCMP_MIC_LEN 16 #define IEEE80211_GCMP_PN_LEN 6 +#define FILS_NONCE_LEN 16 +#define FILS_MAX_KEK_LEN 64 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index f563907ed776..3355efc89781 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev) return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; } } + +static inline bool dev_is_mac_header_xmit(const struct net_device *dev) +{ + switch (dev->type) { + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + return false; + default: + return true; + } +} + #endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3319d97d789d..8d5fcd6284ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -399,22 +399,6 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb->vlan_tci = 0; return skb; } -/* - * vlan_hwaccel_push_inside - pushes vlan tag to the payload - * @skb: skbuff to tag - * - * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the - * VLAN tag from @skb->vlan_tci inside to the payload. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. 
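The WLAN_EID_EXTENSION values introduced in the ieee80211.h hunk above work by dispatching on the first payload octet of element 255. A sketch of the check, assuming the usual element layout (ID, length, then the extension ID) and a hypothetical helper name:

#include <linux/ieee80211.h>

static bool elem_is_fils_session(const u8 *elem, size_t len)
{
	/* elem[0] = element ID, elem[1] = payload length,
	 * elem[2] = element ID extension for EID 255. */
	return len >= 3 && elem[0] == WLAN_EID_EXTENSION &&
	       elem[1] >= 1 && elem[2] == WLAN_EID_EXT_FILS_SESSION;
}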
- */ -static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) -{ - if (skb_vlan_tag_present(skb)) - skb = __vlan_hwaccel_push_inside(skb); - return skb; -} /** * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index e7fdec4db9da..5ba430cc9a87 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_OFFSET), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = (_si), \ .scan_type = { \ .sign = 'u', \ diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 228bd44efa4c..497f2b3a5a62 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -116,6 +116,16 @@ struct st_sensor_bdu { }; /** + * struct st_sensor_das - ST sensor device data alignment selection + * @addr: address of the register. + * @mask: mask to write the das flag for left alignment. + */ +struct st_sensor_das { + u8 addr; + u8 mask; +}; + +/** * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt * @addr: address of the register. * @mask_int1: mask to enable/disable IRQ on INT1 pin. @@ -185,6 +195,7 @@ struct st_sensor_transfer_function { * @enable_axis: Enable one or more axis of the sensor. * @fs: Full scale register and full scale list available. * @bdu: Block data update register. + * @das: Data Alignment Selection register. * @drdy_irq: Data ready register of the sensor. * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. * @bootime: samples to discard when sensor passing from power-down to power-up. @@ -200,6 +211,7 @@ struct st_sensor_settings { struct st_sensor_axis enable_axis; struct st_sensor_fullscale fs; struct st_sensor_bdu bdu; + struct st_sensor_das das; struct st_sensor_data_ready_irq drdy_irq; bool multi_read_bit; unsigned int bootime; diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 9edccfba1ffb..47eeec3218b5 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val); int iio_write_channel_raw(struct iio_channel *chan, int val); /** + * iio_read_max_channel_raw() - read maximum available raw value from a given + * channel, i.e. the maximum possible value. + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units are required. + */ +int iio_read_max_channel_raw(struct iio_channel *chan, int *val); + +/** + * iio_read_avail_channel_raw() - read available raw values from a given channel + * @chan: The channel being queried. + * @vals: Available values read back. + * @length: Number of entries in vals. + * + * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. + * + * For ranges, three vals are always returned; min, step and max. + * For lists, all the possible values are enumerated. + * + * Note raw available values from iio channels are in adc counts and + * hence scale will need to be applied if standard units are required. 
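A consumer-side sketch of iio_read_avail_channel_raw(), declared immediately below: the return value distinguishes a min/step/max range from an enumerated list. The qux_* name and the pr_info() reporting are illustrative only:

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
#include <linux/printk.h>

static int qux_dump_avail(struct iio_channel *chan)
{
	const int *vals;
	int len, ret, i;

	ret = iio_read_avail_channel_raw(chan, &vals, &len);
	if (ret < 0)
		return ret;

	if (ret == IIO_AVAIL_RANGE)		/* vals = { min, step, max } */
		pr_info("raw range [%d..%d], step %d\n",
			vals[0], vals[2], vals[1]);
	else					/* IIO_AVAIL_LIST */
		for (i = 0; i < len; i++)
			pr_info("raw value: %d\n", vals[i]);

	return 0;
}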
+ */ +int iio_read_avail_channel_raw(struct iio_channel *chan, + const int **vals, int *length); + +/** * iio_get_channel_type() - get the type of a channel * @channel: The channel being queried. * @type: The type of the channel. @@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel, enum iio_chan_type *type); /** + * iio_read_channel_offset() - read the offset value for a channel + * @chan: The channel being queried. + * @val: First part of value read back. + * @val2: Second part of value read back. + * + * Note returns a description of what is in val and val2, such + * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_offset(struct iio_channel *chan, int *val, + int *val2); + +/** * iio_read_channel_scale() - read the scale value for a channel * @chan: The channel being queried. * @val: First part of value read back. diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index 91530e6611e9..628b2cf54c50 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h @@ -9,8 +9,18 @@ #ifndef IIO_DAC_MCP4725_H_ #define IIO_DAC_MCP4725_H_ +/** + * struct mcp4725_platform_data - MCP4725/6 DAC specific data. + * @use_vref: Whether an external reference voltage on Vref pin should be used. + * Additional vref-supply must be specified when used. + * @vref_buffered: Controls buffering of the external reference voltage. + * + * Vref related settings are available only on MCP4756. See + * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. + */ struct mcp4725_platform_data { - u16 vref_mv; + bool use_vref; + bool vref_buffered; }; #endif /* IIO_DAC_MCP4725_H_ */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b4a0679e4a49..3f5ea2e9a39e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -225,12 +225,22 @@ struct iio_event_spec { * endianness: little or big endian * @info_mask_separate: What information is to be exported that is specific to * this channel. + * @info_mask_separate_available: What availability information is to be + * exported that is specific to this channel. * @info_mask_shared_by_type: What information is to be exported that is shared * by all channels of the same type. + * @info_mask_shared_by_type_available: What availability information is to be + * exported that is shared by all channels of the same + * type. * @info_mask_shared_by_dir: What information is to be exported that is shared * by all channels of the same direction. + * @info_mask_shared_by_dir_available: What availability information is to be + * exported that is shared by all channels of the same + * direction. * @info_mask_shared_by_all: What information is to be exported that is shared * by all channels. + * @info_mask_shared_by_all_available: What availability information is to be + * exported that is shared by all channels. * @event_spec: Array of events which should be registered for this * channel. * @num_event_specs: Size of the event_spec array. 
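A sketch of how a channel might advertise availability through the new *_available masks documented above (the fields themselves are added in the next hunk); the quux_* name and the attribute choices are hypothetical:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

static const struct iio_chan_spec quux_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
		/* exposes a scale_available attribute via read_avail */
		.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
		/* exposes sampling_frequency_available via read_avail */
		.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	},
};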
@@ -269,9 +279,13 @@ struct iio_chan_spec { enum iio_endian endianness; } scan_type; long info_mask_separate; + long info_mask_separate_available; long info_mask_shared_by_type; + long info_mask_shared_by_type_available; long info_mask_shared_by_dir; + long info_mask_shared_by_dir_available; long info_mask_shared_by_all; + long info_mask_shared_by_all_available; const struct iio_event_spec *event_spec; unsigned int num_event_specs; const struct iio_chan_spec_ext_info *ext_info; @@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, (chan->info_mask_shared_by_all & BIT(type)); } +/** + * iio_channel_has_available() - Checks if a channel has an available attribute + * @chan: The channel to be queried + * @type: Type of the available attribute to be checked + * + * Returns true if the channel supports reporting available values for the + * given attribute type, false otherwise. + */ +static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, + enum iio_chan_info_enum type) +{ + return (chan->info_mask_separate_available & BIT(type)) | + (chan->info_mask_shared_by_type_available & BIT(type)) | + (chan->info_mask_shared_by_dir_available & BIT(type)) | + (chan->info_mask_shared_by_all_available & BIT(type)); +} + #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ .type = IIO_TIMESTAMP, \ .channel = -1, \ @@ -349,6 +380,14 @@ struct iio_dev; * max_len specifies maximum number of elements * vals pointer can contain. val_len is used to return * length of valid elements in vals. + * @read_avail: function to return the available values from the device. + * mask specifies which value. Note 0 means the available + * values for the channel in question. Return value + * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is + * returned in vals. The type of the vals are returned in + * type and the number of vals is returned in length. For + * ranges, there are always three vals returned; min, step + * and max. For lists, all possible values are enumerated. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. 
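On the provider side, a read_avail implementation matching the callback documented above (the member itself lands in struct iio_info just below) might return a fixed frequency list; the quux_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>

static const int quux_samp_freqs[] = { 100, 200, 400, 800 };

static int quux_read_avail(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   const int **vals, int *type, int *length,
			   long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = quux_samp_freqs;
		*type = IIO_VAL_INT;
		*length = ARRAY_SIZE(quux_samp_freqs);
		return IIO_AVAIL_LIST;		/* a list, not a min/step/max range */
	default:
		return -EINVAL;
	}
}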
* @write_raw_get_fmt: callback function to query the expected @@ -381,7 +420,7 @@ struct iio_dev; **/ struct iio_info { struct module *driver_module; - struct attribute_group *event_attrs; + const struct attribute_group *event_attrs; const struct attribute_group *attrs; int (*read_raw)(struct iio_dev *indio_dev, @@ -397,6 +436,13 @@ struct iio_info { int *val_len, long mask); + int (*read_avail)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + int (*write_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index 9cd8f747212f..ce9426c507fd 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -55,10 +55,34 @@ struct iio_const_attr { { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .address = _addr } +#define IIO_ATTR_RO(_name, _addr) \ + { .dev_attr = __ATTR_RO(_name), \ + .address = _addr } + +#define IIO_ATTR_WO(_name, _addr) \ + { .dev_attr = __ATTR_WO(_name), \ + .address = _addr } + +#define IIO_ATTR_RW(_name, _addr) \ + { .dev_attr = __ATTR_RW(_name), \ + .address = _addr } + #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR(_name, _mode, _show, _store, _addr) +#define IIO_DEVICE_ATTR_RO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RO(_name, _addr) + +#define IIO_DEVICE_ATTR_WO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_WO(_name, _addr) + +#define IIO_DEVICE_ATTR_RW(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RW(_name, _addr) + #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_vname \ = IIO_ATTR(_name, _mode, _show, _store, _addr) diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 4f1154f7a33c..ea08302f2d7b 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig); */ bool iio_trigger_using_own(struct iio_dev *indio_dev); +int iio_trigger_validate_own_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); #else struct iio_trigger; diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 32b579525004..2aa7b6384d64 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -29,4 +29,9 @@ enum iio_event_info { #define IIO_VAL_FRACTIONAL 10 #define IIO_VAL_FRACTIONAL_LOG2 11 +enum iio_available_type { + IIO_AVAIL_LIST, + IIO_AVAIL_RANGE, +}; + #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 0eb7c2e7f0d6..7f6952f8d6aa 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -11,6 +11,7 @@ #define _LINUX_IMA_H #include <linux/fs.h> +#include <linux/kexec.h> struct linux_binprm; #ifdef CONFIG_IMA @@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); +#ifdef CONFIG_IMA_KEXEC +extern void ima_add_kexec_buffer(struct kimage *image); +#endif + #else static inline int ima_bprm_check(struct linux_binprm *bprm) { @@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry) #endif /* CONFIG_IMA */ +#ifndef CONFIG_IMA_KEXEC +struct kimage; + +static inline void ima_add_kexec_buffer(struct kimage *image) +{} +#endif + #ifdef CONFIG_IMA_APPRAISE extern void 
ima_inode_post_setattr(struct dentry *dentry); extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, diff --git a/include/linux/init.h b/include/linux/init.h index e30104ceb86d..885c3e6d0f9d 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -126,6 +126,9 @@ void prepare_namespace(void); void __init load_default_modules(void); int __init init_rootfs(void); +#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX) +extern bool rodata_enabled; +#endif #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2d9b650047a5..d49e26c6cdc7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -429,6 +429,7 @@ struct intel_iommu { struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ struct idr pasid_idr; + u32 pasid_max; #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 72f0721f75e7..53144e78a369 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -232,6 +232,18 @@ struct irq_affinity_notify { void (*release)(struct kref *ref); }; +/** + * struct irq_affinity - Description for automatic irq affinity assignements + * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of + * the MSI(-X) vector space + * @post_vectors: Don't apply affinity to @post_vectors at end of + * the MSI(-X) vector space + */ +struct irq_affinity { + int pre_vectors; + int post_vectors; +}; + #if defined(CONFIG_SMP) extern cpumask_var_t irq_default_affinity; @@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec); -int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec); +struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); +int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) } static inline struct cpumask * -irq_create_affinity_masks(const struct cpumask *affinity, int nvec) +irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) { return NULL; } static inline int -irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) +irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) { return maxvec; } diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 7892f55a1866..a4c94b86401e 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -49,6 +49,8 @@ struct iomap { #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. 
FIEMAP */ +#define IOMAP_FAULT (1 << 3) /* mapping for page fault */ +#define IOMAP_DIRECT (1 << 4) /* direct I/O */ struct iomap_ops { /* @@ -82,4 +84,14 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, struct iomap_ops *ops); +/* + * Flags for direct I/O ->end_io: + */ +#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ +#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ +typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, + unsigned flags); +ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, + struct iomap_ops *ops, iomap_dio_end_io_t end_io); + #endif /* LINUX_IOMAP_H */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 436dc21318af..0ff5111f6959 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -253,6 +253,7 @@ extern void iommu_group_remove_device(struct device *dev); extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)); extern struct iommu_group *iommu_group_get(struct device *dev); +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); extern void iommu_group_put(struct iommu_group *group); extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); @@ -351,6 +352,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); +void iommu_register_instance(struct fwnode_handle *fwnode, + const struct iommu_ops *ops); +const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode); #else /* CONFIG_IOMMU_API */ @@ -580,6 +584,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, return -ENODEV; } +static inline void iommu_register_instance(struct fwnode_handle *fwnode, + const struct iommu_ops *ops) +{ +} + +static inline +const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) +{ + return NULL; +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7e9a789be5e0..671d014e6429 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -64,6 +64,11 @@ struct ipv6_devconf { } stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; + __s32 seg6_enabled; +#ifdef CONFIG_IPV6_SEG6_HMAC + __s32 seg6_require_hmac; +#endif + __u32 enhanced_dad; struct ctl_table_header *sysctl_header; }; @@ -123,12 +128,12 @@ struct inet6_skb_parm { }; #if defined(CONFIG_NET_L3_MASTER_DEV) -static inline bool skb_l3mdev_slave(__u16 flags) +static inline bool ipv6_l3mdev_skb(__u16 flags) { return flags & IP6SKB_L3SLAVE; } #else -static inline bool skb_l3mdev_slave(__u16 flags) +static inline bool ipv6_l3mdev_skb(__u16 flags) { return false; } @@ -139,11 +144,22 @@ static inline bool skb_l3mdev_slave(__u16 flags) static inline int inet6_iif(const struct sk_buff *skb) { - bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); + bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags); return l3_slave ? 
skb->skb_iif : IP6CB(skb)->iif; } +/* can not be used in TCP layer after tcp_v6_fill_cb */ +static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if defined(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return true; +#endif + return false; +} + struct tcp6_request_sock { struct tcp_request_sock tcp6rsk_tcp; }; @@ -218,8 +234,9 @@ struct ipv6_pinfo { rxflow:1, rxtclass:1, rxpmtu:1, - rxorigdstaddr:1; - /* 2 bits hole */ + rxorigdstaddr:1, + recvfragsize:1; + /* 1 bits hole */ } bits; __u16 all; } rxopt; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index b7e34313cdfe..e808f8ae6f14 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -239,7 +239,7 @@ #define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_HWCOLLCNT_SHIFT 24 -#define GITS_CBASER_VALID (1UL << 63) +#define GITS_CBASER_VALID (1ULL << 63) #define GITS_CBASER_SHAREABILITY_SHIFT (10) #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) @@ -265,7 +265,7 @@ #define GITS_BASER_NR_REGS 8 -#define GITS_BASER_VALID (1UL << 63) +#define GITS_BASER_VALID (1ULL << 63) #define GITS_BASER_INDIRECT (1ULL << 62) #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) @@ -295,10 +295,10 @@ #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) #define GITS_BASER_PAGE_SIZE_SHIFT (8) -#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) -#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) -#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) -#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_PAGES_SHIFT (0) #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 81f930b0bca9..7b49c71c968b 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -259,11 +259,11 @@ extern void gic_init(unsigned long gic_base_addr, unsigned long gic_addrspace_size, unsigned int cpu_vec, unsigned int irqbase); extern void gic_clocksource_init(unsigned int); -extern cycle_t gic_read_count(void); +extern u64 gic_read_count(void); extern unsigned int gic_get_count_width(void); -extern cycle_t gic_read_compare(void); -extern void gic_write_compare(cycle_t cnt); -extern void gic_write_cpu_compare(cycle_t cnt, int cpu); +extern u64 gic_read_compare(void); +extern void gic_write_compare(u64 cnt); +extern void gic_write_cpu_compare(u64 cnt, int cpu); extern void gic_start_count(void); extern void gic_stop_count(void); extern int gic_get_c0_compare_int(void); diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index 0fc6ff276221..8d80fdc68647 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h @@ -500,6 +500,6 @@ typedef struct { * */ extern int register_isdn(isdn_if*); -#include <asm/uaccess.h> +#include <linux/uaccess.h> #endif /* __ISDNIF_H__ */ diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index 089f70f83e97..23da3af459fe 100644 --- 
a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -14,6 +14,7 @@ struct static_key_deferred { #ifdef HAVE_JUMP_LABEL extern void static_key_slow_dec_deferred(struct static_key_deferred *key); +extern void static_key_deferred_flush(struct static_key_deferred *key); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); @@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) STATIC_KEY_CHECK_USE(); static_key_slow_dec(&key->key); } +static inline void static_key_deferred_flush(struct static_key_deferred *key) +{ + STATIC_KEY_CHECK_USE(); +} static inline void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 410decacff8f..68bd88223417 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -77,7 +77,6 @@ extern int kdb_poll_idx; * number whenever the kernel debugger is entered. */ extern int kdb_initial_cpu; -extern atomic_t kdb_event; /* Types and messages used for dynamically added kdb shell commands */ @@ -162,6 +161,7 @@ enum kdb_msgsrc { }; extern int kdb_trap_printk; +extern int kdb_printf_cpu; extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index bc6ed52a39b9..cb09238f6d32 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -45,11 +45,16 @@ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) +/* @a is a power of 2 value */ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) +/* generic data direction definitions */ +#define READ 0 +#define WRITE 1 + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) #define u64_to_user_ptr(x) ( \ @@ -506,6 +511,15 @@ extern enum system_states { #define TAINT_UNSIGNED_MODULE 13 #define TAINT_SOFTLOCKUP 14 #define TAINT_LIVEPATCH 15 +#define TAINT_FLAGS_COUNT 16 + +struct taint_flag { + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ + bool module; /* also show as a per-module taint flag */ +}; + +extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 44fda64ad434..00f776816aa3 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -extern void account_user_time(struct task_struct *, cputime_t, cputime_t); -extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_user_time(struct task_struct *, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_steal_time(cputime_t); extern void account_idle_time(cputime_t); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 406c33dcae13..d419d0e51fe5 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -148,7 +148,36 @@ struct kexec_file_ops { kexec_verify_sig_t *verify_sig; #endif }; -#endif + +/** + * struct kexec_buf - parameters for finding a place for a buffer in memory + 
* @image: kexec image in which memory to search. + * @buffer: Contents which will be copied to the allocated memory. + * @bufsz: Size of @buffer. + * @mem: On return will have address of the buffer in memory. + * @memsz: Size for the buffer in memory. + * @buf_align: Minimum alignment needed. + * @buf_min: The buffer can't be placed below this address. + * @buf_max: The buffer can't be placed above this address. + * @top_down: Allocate from top of memory. + */ +struct kexec_buf { + struct kimage *image; + void *buffer; + unsigned long bufsz; + unsigned long mem; + unsigned long memsz; + unsigned long buf_align; + unsigned long buf_min; + unsigned long buf_max; + bool top_down; +}; + +int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(u64, u64, void *)); +extern int kexec_add_buffer(struct kexec_buf *kbuf); +int kexec_locate_mem_hole(struct kexec_buf *kbuf); +#endif /* CONFIG_KEXEC_FILE */ struct kimage { kimage_entry_t head; @@ -212,11 +241,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry, struct kexec_segment __user *segments, unsigned long flags); extern int kernel_kexec(void); -extern int kexec_add_buffer(struct kimage *image, char *buffer, - unsigned long bufsz, unsigned long memsz, - unsigned long buf_align, unsigned long buf_min, - unsigned long buf_max, bool top_down, - unsigned long *load_addr); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); extern int kexec_load_purgatory(struct kimage *image, unsigned long min, @@ -259,12 +283,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) #define VMCOREINFO_CONFIG(name) \ vmcoreinfo_append_str("CONFIG_%s=y\n", #name) -#define VMCOREINFO_PAGE_OFFSET(value) \ - vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMALLOC_START(value) \ - vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMEMMAP_START(value) \ - vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; diff --git a/include/linux/kthread.h b/include/linux/kthread.h index a6e82a69c363..4fec8b775895 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), __k; \ }) +void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); @@ -174,7 +175,7 @@ __printf(2, 3) struct kthread_worker * kthread_create_worker(unsigned int flags, const char namefmt[], ...); -struct kthread_worker * +__printf(3, 4) struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, const char namefmt[], ...); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 0fb7ffb1775f..0c8bd45c8206 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -24,21 +24,8 @@ #include <linux/time.h> #include <linux/jiffies.h> -/* - * ktime_t: - * - * A single 64-bit variable is used to store the hrtimers - * internal representation of time values in scalar nanoseconds. The - * design plays out best on 64-bit CPUs, where most conversions are - * NOPs and most arithmetic ktime_t operations are plain arithmetic - * operations. 
- * - */ -union ktime { - s64 tv64; -}; - -typedef union ktime ktime_t; /* Kill this */ +/* Nanosecond scalar representation for kernel time values */ +typedef s64 ktime_t; /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value @@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) { if (unlikely(secs >= KTIME_SEC_MAX)) - return (ktime_t){ .tv64 = KTIME_MAX }; + return KTIME_MAX; - return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; + return secs * NSEC_PER_SEC + (s64)nsecs; } /* Subtract two ktime_t variables. rem = lhs -rhs: */ -#define ktime_sub(lhs, rhs) \ - ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) +#define ktime_sub(lhs, rhs) ((lhs) - (rhs)) /* Add two ktime_t variables. res = lhs + rhs: */ -#define ktime_add(lhs, rhs) \ - ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) +#define ktime_add(lhs, rhs) ((lhs) + (rhs)) /* * Same as ktime_add(), but avoids undefined behaviour on overflow; however, * this means that you must check the result for overflow yourself. */ -#define ktime_add_unsafe(lhs, rhs) \ - ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) +#define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs)) /* * Add a ktime_t variable and a scalar nanosecond value. * res = kt + nsval: */ -#define ktime_add_ns(kt, nsval) \ - ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) +#define ktime_add_ns(kt, nsval) ((kt) + (nsval)) /* * Subtract a scalar nanosecond from a ktime_t variable * res = kt - nsval: */ -#define ktime_sub_ns(kt, nsval) \ - ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) +#define ktime_sub_ns(kt, nsval) ((kt) - (nsval)) /* convert a timespec to ktime_t format: */ static inline ktime_t timespec_to_ktime(struct timespec ts) @@ -103,31 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) } /* Map the ktime_t to timespec conversion to ns_to_timespec function */ -#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) +#define ktime_to_timespec(kt) ns_to_timespec((kt)) /* Map the ktime_t to timespec conversion to ns_to_timespec function */ -#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) +#define ktime_to_timespec64(kt) ns_to_timespec64((kt)) /* Map the ktime_t to timeval conversion to ns_to_timeval function */ -#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) +#define ktime_to_timeval(kt) ns_to_timeval((kt)) /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ -#define ktime_to_ns(kt) ((kt).tv64) - - -/** - * ktime_equal - Compares two ktime_t variables to see if they are equal - * @cmp1: comparable1 - * @cmp2: comparable2 - * - * Compare two ktime_t variables. - * - * Return: 1 if equal. - */ -static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) -{ - return cmp1.tv64 == cmp2.tv64; -} +#define ktime_to_ns(kt) (kt) /** * ktime_compare - Compares two ktime_t variables for less, greater or equal @@ -141,9 +108,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) */ static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) { - if (cmp1.tv64 < cmp2.tv64) + if (cmp1 < cmp2) return -1; - if (cmp1.tv64 > cmp2.tv64) + if (cmp1 > cmp2) return 1; return 0; } @@ -182,7 +149,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) */ BUG_ON(div < 0); if (__builtin_constant_p(div) && !(div >> 32)) { - s64 ns = kt.tv64; + s64 ns = kt; u64 tmp = ns < 0 ? 
-ns : ns; do_div(tmp, div); @@ -199,7 +166,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) * so catch them on 64bit as well. */ WARN_ON(div < 0); - return kt.tv64 / div; + return kt / div; } #endif @@ -256,7 +223,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, struct timespec *ts) { - if (kt.tv64) { + if (kt) { *ts = ktime_to_timespec(kt); return true; } else { @@ -275,7 +242,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, struct timespec64 *ts) { - if (kt.tv64) { + if (kt) { *ts = ktime_to_timespec64(kt); return true; } else { @@ -290,20 +257,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, * this resolution values. */ #define LOW_RES_NSEC TICK_NSEC -#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } +#define KTIME_LOW_RES (LOW_RES_NSEC) static inline ktime_t ns_to_ktime(u64 ns) { - static const ktime_t ktime_zero = { .tv64 = 0 }; - - return ktime_add_ns(ktime_zero, ns); + return ns; } static inline ktime_t ms_to_ktime(u64 ms) { - static const ktime_t ktime_zero = { .tv64 = 0 }; - - return ktime_add_ms(ktime_zero, ms); + return ms * NSEC_PER_MSEC; } # include <linux/timekeeping.h> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 01c0b9cc3915..1c5190dab2c1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -224,7 +224,6 @@ struct kvm_vcpu { int fpu_active; int guest_fpu_loaded, guest_xcr0_loaded; - unsigned char fpu_counter; struct swait_queue_head wq; struct pid *pid; int sigset_active; @@ -439,6 +438,9 @@ struct kvm { pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_debug_ratelimited(fmt, ...) \ + pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ + ## __VA_ARGS__) #define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) @@ -450,6 +452,9 @@ struct kvm { #define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ + kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ + ## __VA_ARGS__) #define vcpu_err(vcpu, fmt, ...) 
\ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) @@ -645,6 +650,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); +int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, int offset, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); @@ -1107,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) extern bool kvm_rebooting; +extern unsigned int halt_poll_ns; +extern unsigned int halt_poll_ns_grow; +extern unsigned int halt_poll_ns_shrink; + struct kvm_device { struct kvm_device_ops *ops; struct kvm *kvm; diff --git a/include/linux/leds.h b/include/linux/leds.h index ddfcb2df3656..569cb531094c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -42,16 +42,20 @@ struct led_classdev { #define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) -#define LED_BLINK_SW (1 << 17) -#define LED_BLINK_ONESHOT (1 << 18) -#define LED_BLINK_ONESHOT_STOP (1 << 19) -#define LED_BLINK_INVERT (1 << 20) -#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) -#define LED_BLINK_DISABLE (1 << 22) -#define LED_SYSFS_DISABLE (1 << 23) -#define LED_DEV_CAP_FLASH (1 << 24) -#define LED_HW_PLUGGABLE (1 << 25) -#define LED_PANIC_INDICATOR (1 << 26) +#define LED_SYSFS_DISABLE (1 << 17) +#define LED_DEV_CAP_FLASH (1 << 18) +#define LED_HW_PLUGGABLE (1 << 19) +#define LED_PANIC_INDICATOR (1 << 20) + + /* set_brightness_work / blink_timer flags, atomic, private. */ + unsigned long work_flags; + +#define LED_BLINK_SW 0 +#define LED_BLINK_ONESHOT 1 +#define LED_BLINK_ONESHOT_STOP 2 +#define LED_BLINK_INVERT 3 +#define LED_BLINK_BRIGHTNESS_CHANGE 4 +#define LED_BLINK_DISABLE 5 /* Set LED brightness level * Must not sleep. 
Use brightness_set_blocking for drivers @@ -89,6 +93,7 @@ struct led_classdev { unsigned long blink_delay_on, blink_delay_off; struct timer_list blink_timer; int blink_brightness; + int new_blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); struct work_struct set_brightness_work; diff --git a/include/linux/libata.h b/include/linux/libata.h index 616eef4d81ea..c170be548b7f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -166,6 +166,8 @@ enum { ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ + ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ + ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), @@ -342,7 +344,9 @@ enum { ATA_SHIFT_PIO = 0, ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, /* size of buffer to pad xfers ending on unaligned boundaries */ ATA_DMA_PAD_SZ = 4, @@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) extern struct device_attribute dev_attr_link_power_management_policy; extern struct device_attribute dev_attr_unload_heads; +extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index f4947fda11e7..8458c5351e56 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -143,7 +143,7 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, const struct nd_cmd_desc *desc, int idx, void *buf); u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, const struct nd_cmd_desc *desc, int idx, const u32 *in_field, - const u32 *out_field); + const u32 *out_field, unsigned long remainder); int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count); struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index d190786e4ad8..7c273bbc5351 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -47,6 +47,7 @@ struct ppa_addr { struct nvm_rq; struct nvm_id; struct nvm_dev; +struct nvm_tgt_dev; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); @@ -107,6 +108,8 @@ enum { NVM_RSP_NOT_CHANGEABLE = 0x1, NVM_RSP_ERR_FAILWRITE = 0x40ff, NVM_RSP_ERR_EMPTYPAGE = 0x42ff, + NVM_RSP_ERR_FAILECC = 0x4281, + NVM_RSP_WARN_HIGHECC = 0x4700, /* Device opcodes */ NVM_OP_HBREAD = 0x02, @@ -208,7 +211,7 @@ struct nvm_id { struct nvm_target { struct list_head list; - struct nvm_dev *dev; + struct nvm_tgt_dev *dev; struct nvm_tgt_type *type; struct gendisk *disk; }; @@ -228,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { struct nvm_tgt_instance *ins; - struct nvm_dev *dev; + struct nvm_tgt_dev *dev; struct bio *bio; @@ -263,35 +266,12 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) return rqdata + 1; } -struct nvm_block; - -struct nvm_lun { - int id; - - int lun_id; - int chnl_id; - - spinlock_t lock; - - unsigned int nr_free_blocks; /* Number of unused blocks */ - struct 
nvm_block *blocks; -}; - enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; -struct nvm_block { - struct list_head list; - struct nvm_lun *lun; - unsigned long id; - - void *priv; - int state; -}; - /* system block cpu representation */ struct nvm_sb_info { unsigned long seqnr; @@ -301,22 +281,12 @@ struct nvm_sb_info { struct ppa_addr fs_ppa; }; -struct nvm_dev { - struct nvm_dev_ops *ops; - - struct list_head devices; - - /* Media manager */ - struct nvmm_type *mt; - void *mp; - - /* System blocks */ - struct nvm_sb_info sb; - - /* Device information */ +/* Device generic information */ +struct nvm_geo { int nr_chnls; + int nr_luns; + int luns_per_chnl; /* -1 if channels are not symmetric */ int nr_planes; - int luns_per_chnl; int sec_per_pg; /* only sectors for a single page */ int pgs_per_blk; int blks_per_lun; @@ -336,14 +306,44 @@ struct nvm_dev { int sec_per_pl; /* all sectors across planes */ int sec_per_blk; int sec_per_lun; +}; + +struct nvm_tgt_dev { + /* Device information */ + struct nvm_geo geo; + + /* Base ppas for target LUNs */ + struct ppa_addr *luns; + + sector_t total_secs; + + struct nvm_id identity; + struct request_queue *q; + + struct nvm_dev *parent; + void *map; +}; + +struct nvm_dev { + struct nvm_dev_ops *ops; + + struct list_head devices; + + /* Media manager */ + struct nvmm_type *mt; + void *mp; + + /* System blocks */ + struct nvm_sb_info sb; + + /* Device information */ + struct nvm_geo geo; /* lower page table */ int lps_per_blk; int *lptbl; - unsigned long total_blocks; unsigned long total_secs; - int nr_luns; unsigned long *lun_map; void *dma_pool; @@ -352,26 +352,57 @@ struct nvm_dev { /* Backend device */ struct request_queue *q; - struct device dev; - struct device *parent_dev; char name[DISK_NAME_LEN]; void *private_data; + void *rmap; + struct mutex mlock; spinlock_t lock; }; +static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, + u64 pba) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = pba; + + l.ppa = 0; + + div_u64_rem(ppa, geo->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, geo->sec_per_pg); + div_u64_rem(ppa, geo->pgs_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, geo->pgs_per_blk); + div_u64_rem(ppa, geo->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, geo->blks_per_lun); + div_u64_rem(ppa, geo->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, geo->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, struct ppa_addr r) { + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; - l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; - l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; - l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; - l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; - l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; - l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; + l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; + l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; + l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; + l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; + l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; + l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; return l; } @@ -379,24 +410,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, struct ppa_addr r) { + struct nvm_geo *geo = &dev->geo; struct ppa_addr 
l; l.ppa = 0; /* * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. */ - l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & - (((1 << dev->ppaf.blk_len) - 1)); - l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & - (((1 << dev->ppaf.pg_len) - 1)); - l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & - (((1 << dev->ppaf.sect_len) - 1)); - l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & - (((1 << dev->ppaf.pln_len) - 1)); - l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & - (((1 << dev->ppaf.lun_len) - 1)); - l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & - (((1 << dev->ppaf.ch_len) - 1)); + l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & + (((1 << geo->ppaf.blk_len) - 1)); + l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & + (((1 << geo->ppaf.pg_len) - 1)); + l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & + (((1 << geo->ppaf.sect_len) - 1)); + l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & + (((1 << geo->ppaf.pln_len) - 1)); + l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & + (((1 << geo->ppaf.lun_len) - 1)); + l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & + (((1 << geo->ppaf.ch_len) - 1)); return l; } @@ -411,18 +443,13 @@ static inline void ppa_set_empty(struct ppa_addr *ppa_addr) ppa_addr->ppa = ADDR_EMPTY; } -static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, - struct nvm_block *blk) +static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) { - struct ppa_addr ppa; - struct nvm_lun *lun = blk->lun; - - ppa.ppa = 0; - ppa.g.blk = blk->id % dev->blks_per_lun; - ppa.g.lun = lun->lun_id; - ppa.g.ch = lun->chnl_id; + if (ppa_empty(ppa1) || ppa_empty(ppa2)) + return 0; - return ppa; + return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) && + (ppa1.g.blk == ppa2.g.blk)); } static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) @@ -432,7 +459,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); +typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); typedef void (nvm_tgt_exit_fn)(void *); struct nvm_tgt_type { @@ -465,23 +492,18 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *); typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); -typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, - struct nvm_lun *, unsigned long); -typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); -typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, - unsigned long); -typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); -typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); -typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); -typedef void (nvmm_release_lun)(struct nvm_dev *, int); -typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); - +typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *); +typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int); typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); typedef void (nvmm_put_area_fn)(struct nvm_dev *, 
sector_t); +typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *, + struct ppa_addr, int); +typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int); + +enum { + TRANS_TGT_TO_DEV = 0x0, + TRANS_DEV_TO_TGT = 0x1, +}; struct nvmm_type { const char *name; @@ -493,54 +515,41 @@ struct nvmm_type { nvmm_create_tgt_fn *create_tgt; nvmm_remove_tgt_fn *remove_tgt; - /* Block administration callbacks */ - nvmm_get_blk_fn *get_blk; - nvmm_put_blk_fn *put_blk; - nvmm_open_blk_fn *open_blk; - nvmm_close_blk_fn *close_blk; - nvmm_flush_blk_fn *flush_blk; - nvmm_submit_io_fn *submit_io; nvmm_erase_blk_fn *erase_blk; - /* Bad block mgmt */ - nvmm_mark_blk_fn *mark_blk; - - /* Configuration management */ - nvmm_get_lun_fn *get_lun; - nvmm_reserve_lun *reserve_lun; - nvmm_release_lun *release_lun; - - /* Statistics */ - nvmm_lun_info_print_fn *lun_info_print; - nvmm_get_area_fn *get_area; nvmm_put_area_fn *put_area; + nvmm_trans_ppa_fn *trans_ppa; + nvmm_part_to_tgt_fn *part_to_tgt; + struct list_head list; }; extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); -extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, - unsigned long); -extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); - extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); -void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); - -extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int); +extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, + int, int); +extern int nvm_max_phys_sects(struct nvm_tgt_dev *); +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); -extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); -extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int); +extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); +extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, + void *); +extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); +extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); extern void nvm_end_io(struct nvm_rq *, int); extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); @@ -548,6 +557,7 @@ extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); +extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); /* sysblk.c */ #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ @@ -569,10 +579,10 @@ extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_dev_factory(struct nvm_dev *, int flags); -#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ - for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ +#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \ + for ((chid) = 0, (ppa).ppa = 0; (chid) < 
(geo)->nr_chnls; \ (chid)++, (ppa).g.ch = (chid)) \ - for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ + for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \ (lunid)++, (ppa).g.lun = (lunid)) #else /* CONFIG_NVM */ diff --git a/include/linux/list.h b/include/linux/list.h index 5809e9a2de5b..d1039ecaf94f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list) list->prev = list; } +#ifdef CONFIG_DEBUG_LIST +extern bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next); +extern bool __list_del_entry_valid(struct list_head *entry); +#else +static inline bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + return true; +} +static inline bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif + /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ -#ifndef CONFIG_DEBUG_LIST static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { + if (!__list_add_valid(new, prev, next)) + return; + next->prev = new; new->next = next; new->prev = prev; WRITE_ONCE(prev->next, new); } -#else -extern void __list_add(struct list_head *new, - struct list_head *prev, - struct list_head *next); -#endif /** * list_add - add a new entry @@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ -#ifndef CONFIG_DEBUG_LIST static inline void __list_del_entry(struct list_head *entry) { + if (!__list_del_entry_valid(entry)) + return; + __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } -#else -extern void __list_del_entry(struct list_head *entry); -extern void list_del(struct list_head *entry); -#endif /** * list_replace - replace old entry by new one diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c1458fede1f9..1e327bb80838 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -338,9 +338,18 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); -#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) +/* + * Same "read" as for lock_acquire(), except -1 means any. 
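+ * + * Illustrative sketch, not part of this patch: with the typed helpers added below, and assuming 'sem' is an rw_semaphore (or any lock with a lockdep map), a caller can assert the kind of hold it expects: + * + *	lockdep_assert_held_read(&sem);		(acquired for read) + *	lockdep_assert_held_exclusive(&sem);	(acquired for write)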
+ */ +extern int lock_is_held_type(struct lockdep_map *lock, int read); + +static inline int lock_is_held(struct lockdep_map *lock) +{ + return lock_is_held_type(lock, -1); +} -extern int lock_is_held(struct lockdep_map *lock); +#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) +#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) extern void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, @@ -372,6 +381,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) +#define lockdep_assert_held_exclusive(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ + } while (0) + +#define lockdep_assert_held_read(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ + } while (0) + #define lockdep_assert_held_once(l) do { \ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ } while (0) @@ -428,7 +445,11 @@ struct lock_class_key { }; #define lockdep_depth(tsk) (0) +#define lockdep_is_held_type(l, r) (1) + #define lockdep_assert_held(l) do { (void)(l); } while (0) +#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) +#define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_recursing(tsk) (0) diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 2931aa43dab1..0d3f14fd2621 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -82,6 +82,7 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, } #endif +#ifdef CONFIG_MVEBU_MBUS int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); @@ -97,5 +98,12 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, size_t mbus_size, phys_addr_t sdram_phys_base, size_t sdram_size); int mvebu_mbus_dt_init(bool is_coherent); +#else +static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, + u8 *attr) +{ + return -EINVAL; +} +#endif /* CONFIG_MVEBU_MBUS */ #endif /* __LINUX_MBUS_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index a585b4b5fa0e..0661af17a758 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -16,6 +16,7 @@ #include <asm/mc146818rtc.h> /* register access macros */ #include <linux/bcd.h> #include <linux/delay.h> +#include <linux/pm-trace.h> #ifdef __KERNEL__ #include <linux/spinlock.h> /* spinlock_t */ diff --git a/include/linux/mdev.h b/include/linux/mdev.h new file mode 100644 index 000000000000..b6e048e1045f --- /dev/null +++ b/include/linux/mdev.h @@ -0,0 +1,138 @@ +/* + * Mediated device definition + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia <cjia@nvidia.com> + * Kirti Wankhede <kwankhede@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MDEV_H +#define MDEV_H + +struct mdev_device; + +/** + * struct mdev_parent_ops - Structure to be registered for each parent device to + * register the device with the mdev module. + * + * @owner: The module owner. + * @dev_attr_groups: Attributes of the parent device. + * @mdev_attr_groups: Attributes of the mediated device. 
+ * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. + * @create: Called to allocate basic resources in parent device's + * driver for a particular mediated device. It is + * mandatory to provide create ops. + * @kobj: kobject of type for which 'create' is called. + * @mdev: mdev_device structure of the mediated device + * that is being created + * Returns integer: success (0) or error (< 0) + * @remove: Called to free resources in parent device's driver for + * a mediated device. It is mandatory to provide 'remove' + * ops. + * @mdev: mdev_device structure which is being + * destroyed + * Returns integer: success (0) or error (< 0) + * @open: Open mediated device. + * @mdev: mediated device. + * Returns integer: success (0) or error (< 0) + * @release: release mediated device + * @mdev: mediated device. + * @read: Read emulation callback + * @mdev: mediated device structure + * @buf: read buffer + * @count: number of bytes to read + * @ppos: address. + * Returns number of bytes read on success or error. + * @write: Write emulation callback + * @mdev: mediated device structure + * @buf: write buffer + * @count: number of bytes to be written + * @ppos: address. + * Returns number of bytes written on success or error. + * @ioctl: IOCTL callback + * @mdev: mediated device structure + * @cmd: ioctl command + * @arg: arguments to ioctl + * @mmap: mmap callback + * @mdev: mediated device structure + * @vma: vma structure + * Parent devices that support mediated devices should be registered with the + * mdev module along with an mdev_parent_ops structure. + **/ +struct mdev_parent_ops { + struct module *owner; + const struct attribute_group **dev_attr_groups; + const struct attribute_group **mdev_attr_groups; + struct attribute_group **supported_type_groups; + + int (*create)(struct kobject *kobj, struct mdev_device *mdev); + int (*remove)(struct mdev_device *mdev); + int (*open)(struct mdev_device *mdev); + void (*release)(struct mdev_device *mdev); + ssize_t (*read)(struct mdev_device *mdev, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, + size_t count, loff_t *ppos); + long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, + unsigned long arg); + int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); +}; + +/* interface for exporting mdev supported type attributes */ +struct mdev_type_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); + ssize_t (*store)(struct kobject *kobj, struct device *dev, + const char *buf, size_t count); +}; + +#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ +struct mdev_type_attribute mdev_type_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) +#define MDEV_TYPE_ATTR_RW(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) +#define MDEV_TYPE_ATTR_RO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) +#define MDEV_TYPE_ATTR_WO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) + +/** + * struct mdev_driver - Mediated device driver + * @name: driver name + * @probe: called when new device created + * @remove: called when device removed + * @driver: device driver structure + * + **/ +struct mdev_driver { + const char *name; + int (*probe)(struct device *dev); + void (*remove)(struct device *dev); + struct device_driver driver; +}; + +#define to_mdev_driver(drv) container_of(drv, struct 
mdev_driver, driver) + +extern void *mdev_get_drvdata(struct mdev_device *mdev); +extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); +extern uuid_le mdev_uuid(struct mdev_device *mdev); + +extern struct bus_type mdev_bus_type; + +extern int mdev_register_device(struct device *dev, + const struct mdev_parent_ops *ops); +extern void mdev_unregister_device(struct device *dev); + +extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner); +extern void mdev_unregister_driver(struct mdev_driver *drv); + +extern struct device *mdev_parent_dev(struct mdev_device *mdev); +extern struct device *mdev_dev(struct mdev_device *mdev); +extern struct mdev_device *mdev_from_dev(struct device *dev); + +#endif /* MDEV_H */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index e746919530f5..a0d274fe08f1 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -8,8 +8,7 @@ struct mei_cl_device; struct mei_device; -typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, - u32 events, void *context); +typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); /** * struct mei_cl_device - MEI device handle @@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, * @me_cl: me client * @cl: mei client * @name: device name - * @event_work: async work to execute event callback - * @event_cb: Drivers register this callback to get asynchronous ME - * events (e.g. Rx buffer pending) notifications. - * @event_context: event callback run context - * @events_mask: Events bit mask requested by driver. - * @events: Events bitmask sent to the driver. + * @rx_work: async work to execute Rx event callback + * @rx_cb: Drivers register this callback to get asynchronous ME + * Rx buffer pending notifications. + * @notif_work: async work to execute FW notif event callback + * @notif_cb: Drivers register this callback to get asynchronous ME + * FW notification pending notifications. * * @do_match: whether device can be matched with a driver * @is_added: device is already scanned @@ -44,11 +43,10 @@ struct mei_cl_device { struct mei_cl *cl; char name[MEI_CL_NAME_SIZE]; - struct work_struct event_work; - mei_cldev_event_cb_t event_cb; - void *event_context; - unsigned long events_mask; - unsigned long events; + struct work_struct rx_work; + mei_cldev_cb_t rx_cb; + struct work_struct notif_work; + mei_cldev_cb_t notif_cb; unsigned int do_match:1; unsigned int is_added:1; @@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); +/** + * module_mei_cl_driver - Helper macro for registering mei cl driver + * + * @__mei_cldrv: mei_cl_driver structure + * + * Helper macro for mei cl drivers which do not do anything special in module + * init/exit, for eliminating boilerplate code. 
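+ * + * A minimal usage sketch (illustrative only; the id table, callbacks and driver name are hypothetical): + * + *	static struct mei_cl_driver my_cl_driver = { + *		.id_table = my_id_table, + *		.name = "my_cl_driver", + *		.probe = my_probe, + *		.remove = my_remove, + *	}; + *	module_mei_cl_driver(my_cl_driver);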
+ */ +#define module_mei_cl_driver(__mei_cldrv) \ + module_driver(__mei_cldrv, \ + mei_cldev_driver_register,\ + mei_cldev_driver_unregister) -int mei_cldev_register_event_cb(struct mei_cl_device *cldev, - unsigned long event_mask, - mei_cldev_event_cb_t read_cb, void *context); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, + size_t length); -#define MEI_CL_EVENT_RX 0 -#define MEI_CL_EVENT_TX 1 -#define MEI_CL_EVENT_NOTIF 2 +int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); +int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, + mei_cldev_cb_t notif_cb); const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); u8 mei_cldev_ver(const struct mei_cl_device *cldev); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 61d20c17f3b7..254698856b8f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter { */ struct mem_cgroup_per_node { struct lruvec lruvec; - unsigned long lru_size[NR_LRU_LISTS]; + unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; @@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, - int nr_pages); + int zid, int nr_pages); unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask); @@ -441,9 +441,23 @@ static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { struct mem_cgroup_per_node *mz; + unsigned long nr_pages = 0; + int zid; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return mz->lru_size[lru]; + for (zid = 0; zid < MAX_NR_ZONES; zid++) + nr_pages += mz->lru_zone_size[zid][lru]; + return nr_pages; +} + +static inline +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, + enum lru_list lru, int zone_idx) +{ + struct mem_cgroup_per_node *mz; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + return mz->lru_zone_size[zone_idx][lru]; } void mem_cgroup_handle_over_high(void); @@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { return 0; } +static inline +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, + enum lru_list lru, int zone_idx) +{ + return 0; +} static inline unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5e5b2969d931..5f4d8281832b 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -7,6 +7,7 @@ #include <linux/mmzone.h> +#include <linux/dax.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/spinlock.h> @@ -177,6 +178,13 @@ static inline bool vma_migratable(struct vm_area_struct *vma) if (vma->vm_flags & (VM_IO | VM_PFNMAP)) return false; + /* + * DAX device mappings require predictable access latency, so avoid + * incurring periodic faults. 
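+ * (One such source of periodic faults is automatic NUMA balancing, which unmaps ranges of migratable VMAs to trigger hinting faults; returning false here keeps DAX VMAs out of that periodic scan.)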
+ */ + if (vma_is_dax(vma)) + return false; + #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION if (vma->vm_flags & VM_HUGETLB) return false; diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index fec597fb34cb..a4860bc9b73d 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -115,6 +115,8 @@ enum { #define AXP806_CLDO2_V_CTRL 0x25 #define AXP806_CLDO3_V_CTRL 0x26 #define AXP806_VREF_TEMP_WARN_L 0xf3 +#define AXP806_BUS_ADDR_EXT 0xfe +#define AXP806_REG_ADDR_EXT 0xff /* Interrupt */ #define AXP152_IRQ1_EN 0x40 @@ -226,6 +228,10 @@ enum { #define AXP20X_OCV_MAX 0xf /* AXP22X specific registers */ +#define AXP22X_PMIC_ADC_H 0x56 +#define AXP22X_PMIC_ADC_L 0x57 +#define AXP22X_TS_ADC_H 0x58 +#define AXP22X_TS_ADC_L 0x59 #define AXP22X_BATLOW_THRES1 0xe6 /* AXP288 specific registers */ diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 76f7ef4d3a0d..f62043a75f43 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -148,6 +148,15 @@ struct cros_ec_device { int event_size; }; +/** + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information + * + * @sensor_num: Id of the sensor, as reported by the EC. + */ +struct cros_ec_sensor_platform { + u8 sensor_num; +}; + /* struct cros_ec_platform - ChromeOS EC platform information * * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) @@ -175,6 +184,7 @@ struct cros_ec_dev { struct cros_ec_device *ec_dev; struct device *dev; u16 cmd_offset; + u32 features[2]; }; /** diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 76728ff37d01..1683003603f3 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -713,6 +713,90 @@ struct ec_response_get_set_value { /* More than one command can use these structs to get/set parameters. */ #define EC_CMD_GSV_PAUSE_IN_S5 0x0c +/*****************************************************************************/ +/* List the features supported by the firmware */ +#define EC_CMD_GET_FEATURES 0x0d + +/* Supported features */ +enum ec_feature_code { + /* + * This image contains a limited set of features. Another image + * in RW partition may support more features. + */ + EC_FEATURE_LIMITED = 0, + /* + * Commands for probing/reading/writing/erasing the flash in the + * EC are present. + */ + EC_FEATURE_FLASH = 1, + /* + * Can control the fan speed directly. + */ + EC_FEATURE_PWM_FAN = 2, + /* + * Can control the intensity of the keyboard backlight. + */ + EC_FEATURE_PWM_KEYB = 3, + /* + * Support Google lightbar, introduced on Pixel. + */ + EC_FEATURE_LIGHTBAR = 4, + /* Control of LEDs */ + EC_FEATURE_LED = 5, + /* Exposes an interface to control gyro and sensors. + * The host goes through the EC to access these sensors. + * In addition, the EC may provide composite sensors, like lid angle. + */ + EC_FEATURE_MOTION_SENSE = 6, + /* The keyboard is controlled by the EC */ + EC_FEATURE_KEYB = 7, + /* The AP can use part of the EC flash as persistent storage. */ + EC_FEATURE_PSTORE = 8, + /* The EC monitors BIOS port 80h, and can return POST codes. */ + EC_FEATURE_PORT80 = 9, + /* + * Thermal management: include TMP specific commands. + * Higher level than direct fan control. 
+ */ + EC_FEATURE_THERMAL = 10, + /* Can switch the screen backlight on/off */ + EC_FEATURE_BKLIGHT_SWITCH = 11, + /* Can switch the wifi module on/off */ + EC_FEATURE_WIFI_SWITCH = 12, + /* Monitor host events, for example through SMI or SCI */ + EC_FEATURE_HOST_EVENTS = 13, + /* The EC exposes GPIO commands to control/monitor connected devices. */ + EC_FEATURE_GPIO = 14, + /* The EC can send i2c messages to downstream devices. */ + EC_FEATURE_I2C = 15, + /* Commands to control the charger are included */ + EC_FEATURE_CHARGER = 16, + /* Simple battery support. */ + EC_FEATURE_BATTERY = 17, + /* + * Support Smart battery protocol + * (Common Smart Battery System Interface Specification) + */ + EC_FEATURE_SMART_BATTERY = 18, + /* EC can detect when the host hangs. */ + EC_FEATURE_HANG_DETECT = 19, + /* Report power information, for pit only */ + EC_FEATURE_PMU = 20, + /* Another Cros EC device is present downstream of this one */ + EC_FEATURE_SUB_MCU = 21, + /* Support USB Power delivery (PD) commands */ + EC_FEATURE_USB_PD = 22, + /* Control USB multiplexer, for audio through USB port for instance. */ + EC_FEATURE_USB_MUX = 23, + /* Motion Sensor code has an internal software FIFO */ + EC_FEATURE_MOTION_SENSE_FIFO = 24, +}; + +#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) +#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) +struct ec_response_get_features { + uint32_t flags[2]; +} __packed; /*****************************************************************************/ /* Flash commands */ @@ -1315,6 +1399,24 @@ enum motionsense_command { */ MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + /* + * Returns data for a single sensor. + */ + MOTIONSENSE_CMD_DATA = 6, + + /* + * Perform low level calibration. On sensors that support it, ask to + * do offset calibration. + */ + MOTIONSENSE_CMD_PERFORM_CALIB = 10, + + /* + * Sensor Offset command is a setter/getter command for the offset used + * for calibration. The offsets can be calculated by the host, or via + * PERFORM_CALIB command. + */ + MOTIONSENSE_CMD_SENSOR_OFFSET = 11, + /* Number of motionsense sub-commands. */ MOTIONSENSE_NUM_CMDS }; @@ -1335,12 +1437,18 @@ enum motionsensor_id { enum motionsensor_type { MOTIONSENSE_TYPE_ACCEL = 0, MOTIONSENSE_TYPE_GYRO = 1, + MOTIONSENSE_TYPE_MAG = 2, + MOTIONSENSE_TYPE_PROX = 3, + MOTIONSENSE_TYPE_LIGHT = 4, + MOTIONSENSE_TYPE_ACTIVITY = 5, + MOTIONSENSE_TYPE_MAX }; /* List of motion sensor locations. */ enum motionsensor_location { MOTIONSENSE_LOC_BASE = 0, MOTIONSENSE_LOC_LID = 1, + MOTIONSENSE_LOC_MAX, }; /* List of motion sensor chips. */ @@ -1361,6 +1469,31 @@ enum motionsensor_chip { */ #define EC_MOTION_SENSE_NO_VALUE -1 +#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 + +/* Set Calibration information */ +#define MOTION_SENSE_SET_OFFSET 1 + +struct ec_response_motion_sensor_data { + /* Flags for each sensor. */ + uint8_t flags; + /* Sensor number the data comes from */ + uint8_t sensor_num; + /* Each sensor is up to 3-axis. */ + union { + int16_t data[3]; + struct { + uint16_t rsvd; + uint32_t timestamp; + } __packed; + struct { + uint8_t activity; /* motionsensor_activity */ + uint8_t state; + int16_t add_info[2]; + }; + }; +} __packed; + struct ec_params_motion_sense { uint8_t cmd; union { @@ -1378,9 +1511,37 @@ struct ec_params_motion_sense { int16_t data; } ec_rate, kb_wake_angle; + /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ + struct { + uint8_t sensor_num; + + /* + * bit 0: If set (MOTION_SENSE_SET_OFFSET), set + * the calibration information in the EC. 
+ * If unset, just retrieve calibration information. + */ + uint16_t flags; + + /* + * Temperature at calibration, in units of 0.01 C + * 0x8000: invalid / unknown. + * 0x0: 0C + * 0x7fff: +327.67C + */ + int16_t temp; + + /* + * Offset for calibration. + * Unit: + * Accelerometer: 1/1024 g + * Gyro: 1/1024 deg/s + * Compass: 1/16 uT + */ + int16_t offset[3]; + } __packed sensor_offset; + /* Used for MOTIONSENSE_CMD_INFO. */ struct { - /* Should be element of enum motionsensor_id. */ uint8_t sensor_num; } info; @@ -1410,11 +1571,14 @@ struct ec_response_motion_sense { /* Flags representing the motion sensor module. */ uint8_t module_flags; - /* Flags for each sensor in enum motionsensor_id. */ - uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT]; + /* Number of sensors managed directly by the EC. */ + uint8_t sensor_count; - /* Array of all sensor data. Each sensor is 3-axis. */ - int16_t data[3*EC_MOTION_SENSOR_COUNT]; + /* + * Sensor data is truncated if response_max is too small + * for holding all the data. + */ + struct ec_response_motion_sensor_data sensor[0]; } dump; /* Used for MOTIONSENSE_CMD_INFO. */ @@ -1429,6 +1593,9 @@ struct ec_response_motion_sense { uint8_t chip; } info; + /* Used for MOTIONSENSE_CMD_DATA */ + struct ec_response_motion_sensor_data data; + /* * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, * MOTIONSENSE_CMD_SENSOR_RANGE, and @@ -1438,6 +1605,12 @@ struct ec_response_motion_sense { /* Current value of the parameter queried. */ int32_t ret; } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + + /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ + struct { + int16_t temp; + int16_t offset[3]; + } sensor_offset, perform_calib; }; } __packed; diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 8e1cdbef3dad..2c0127cb06c5 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -28,8 +28,6 @@ #include <linux/mfd/core.h> #include <linux/platform_data/edma.h> -#include <mach/hardware.h> - struct regmap; /* @@ -99,8 +97,6 @@ struct davinci_vcif { dma_addr_t dma_rx_addr; }; -struct davinci_vc; - struct davinci_vc { /* Device data */ struct device *dev; diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index cf619dbeace2..956caa0628f5 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -26,6 +26,7 @@ struct intel_soc_pmic { struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; struct regmap_irq_chip_data *irq_chip_data_level2; + struct regmap_irq_chip_data *irq_chip_data_tmu; struct device *dev; }; diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h index 3ca0af07fc78..ad2a9a852aea 100644 --- a/include/linux/mfd/max77620.h +++ b/include/linux/mfd/max77620.h @@ -180,6 +180,7 @@ #define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) #define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 #define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) +#define MAX20024_SD_CFG1_MPOK_MASK BIT(1) #define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) #define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 #define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) @@ -187,6 +188,7 @@ /* LDO_CNFG2 */ #define MAX77620_LDO_POWER_MODE_MASK 0xC0 #define MAX77620_LDO_POWER_MODE_SHIFT 6 +#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2) #define MAX77620_LDO_CFG2_ADE_MASK BIT(1) #define MAX77620_LDO_CFG2_ADE_DISABLE 0 #define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 6d435a3c06bc..83701ef7d3c7 
100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -290,6 +290,7 @@ enum rk818_reg { #define SWITCH2_EN BIT(6) #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) +#define DEV_OFF BIT(0) #define VB_LO_ACT BIT(4) #define VB_LO_SEL_3500MV (7 << 0) diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index cadc6543909d..e5a6cdeb77db 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -58,10 +58,13 @@ #define RN5T618_DC3CTL2 0x31 #define RN5T618_DC4CTL 0x32 #define RN5T618_DC4CTL2 0x33 +#define RN5T618_DC5CTL 0x34 +#define RN5T618_DC5CTL2 0x35 #define RN5T618_DC1DAC 0x36 #define RN5T618_DC2DAC 0x37 #define RN5T618_DC3DAC 0x38 #define RN5T618_DC4DAC 0x39 +#define RN5T618_DC5DAC 0x3a #define RN5T618_DC1DAC_SLP 0x3b #define RN5T618_DC2DAC_SLP 0x3c #define RN5T618_DC3DAC_SLP 0x3d @@ -77,6 +80,11 @@ #define RN5T618_LDO3DAC 0x4e #define RN5T618_LDO4DAC 0x4f #define RN5T618_LDO5DAC 0x50 +#define RN5T618_LDO6DAC 0x51 +#define RN5T618_LDO7DAC 0x52 +#define RN5T618_LDO8DAC 0x53 +#define RN5T618_LDO9DAC 0x54 +#define RN5T618_LDO10DAC 0x55 #define RN5T618_LDORTCDAC 0x56 #define RN5T618_LDORTC2DAC 0x57 #define RN5T618_LDO1DAC_SLP 0x58 @@ -231,6 +239,7 @@ enum { enum { RN5T567 = 0, RN5T618, + RC5T619, }; struct rn5t618 { diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h new file mode 100644 index 000000000000..d7a29f246d64 --- /dev/null +++ b/include/linux/mfd/sun4i-gpadc.h @@ -0,0 +1,94 @@ +/* Header of ADC MFD core driver for sunxi platforms + * + * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ + +#ifndef __SUN4I_GPADC__H__ +#define __SUN4I_GPADC__H__ + +#define SUN4I_GPADC_CTRL0 0x00 + +#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY(x) ((GENMASK(7, 0) & (x)) << 24) +#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY_MODE BIT(23) +#define SUN4I_GPADC_CTRL0_ADC_CLK_SELECT BIT(22) +#define SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(x) ((GENMASK(1, 0) & (x)) << 20) +#define SUN4I_GPADC_CTRL0_FS_DIV(x) ((GENMASK(3, 0) & (x)) << 16) +#define SUN4I_GPADC_CTRL0_T_ACQ(x) (GENMASK(15, 0) & (x)) + +#define SUN4I_GPADC_CTRL1 0x04 + +#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE(x) ((GENMASK(7, 0) & (x)) << 12) +#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE_EN BIT(9) +#define SUN4I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(6) +#define SUN4I_GPADC_CTRL1_TP_DUAL_EN BIT(5) +#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4) +#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3) +#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x)) + +/* TP_CTRL1 bits for sun6i SOCs */ +#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7) +#define SUN6I_GPADC_CTRL1_TP_DUAL_EN BIT(6) +#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5) +#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4) +#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x)) + +#define SUN4I_GPADC_CTRL2 0x08 + +#define SUN4I_GPADC_CTRL2_TP_SENSITIVE_ADJUST(x) ((GENMASK(3, 0) & (x)) << 28) +#define SUN4I_GPADC_CTRL2_TP_MODE_SELECT(x) ((GENMASK(1, 0) & (x)) << 26) +#define SUN4I_GPADC_CTRL2_PRE_MEA_EN BIT(24) +#define SUN4I_GPADC_CTRL2_PRE_MEA_THRE_CNT(x) (GENMASK(23, 0) & (x)) + +#define SUN4I_GPADC_CTRL3 0x0c + +#define SUN4I_GPADC_CTRL3_FILTER_EN BIT(2) +#define SUN4I_GPADC_CTRL3_FILTER_TYPE(x) (GENMASK(1, 0) & (x)) + +#define SUN4I_GPADC_TPR 0x18 + +#define SUN4I_GPADC_TPR_TEMP_ENABLE BIT(16) +#define SUN4I_GPADC_TPR_TEMP_PERIOD(x) (GENMASK(15, 0) & (x)) + +#define SUN4I_GPADC_INT_FIFOC 0x10 + +#define SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN BIT(18) +#define SUN4I_GPADC_INT_FIFOC_TP_OVERRUN_IRQ_EN BIT(17) +#define SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN BIT(16) +#define SUN4I_GPADC_INT_FIFOC_TP_DATA_XY_CHANGE BIT(13) +#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(x) ((GENMASK(4, 0) & (x)) << 8) +#define SUN4I_GPADC_INT_FIFOC_TP_DATA_DRQ_EN BIT(7) +#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH BIT(4) +#define SUN4I_GPADC_INT_FIFOC_TP_UP_IRQ_EN BIT(1) +#define SUN4I_GPADC_INT_FIFOC_TP_DOWN_IRQ_EN BIT(0) + +#define SUN4I_GPADC_INT_FIFOS 0x14 + +#define SUN4I_GPADC_INT_FIFOS_TEMP_DATA_PENDING BIT(18) +#define SUN4I_GPADC_INT_FIFOS_FIFO_OVERRUN_PENDING BIT(17) +#define SUN4I_GPADC_INT_FIFOS_FIFO_DATA_PENDING BIT(16) +#define SUN4I_GPADC_INT_FIFOS_TP_IDLE_FLG BIT(2) +#define SUN4I_GPADC_INT_FIFOS_TP_UP_PENDING BIT(1) +#define SUN4I_GPADC_INT_FIFOS_TP_DOWN_PENDING BIT(0) + +#define SUN4I_GPADC_CDAT 0x1c +#define SUN4I_GPADC_TEMP_DATA 0x20 +#define SUN4I_GPADC_DATA 0x24 + +#define SUN4I_GPADC_IRQ_FIFO_DATA 0 +#define SUN4I_GPADC_IRQ_TEMP_DATA 1 + +/* 10s delay before suspending the IP */ +#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000 + +struct sun4i_gpadc_dev { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irqc; + void __iomem *base; +}; + +#endif diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 7f55b8b41032..b9a53e013bff 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -23,6 +23,8 @@ #define REG_IRQENABLE 0x02C #define REG_IRQCLR 0x030 #define REG_IRQWAKEUP 0x034 +#define REG_DMAENABLE_SET 0x038 +#define REG_DMAENABLE_CLEAR 0x03c #define REG_CTRL 0x040 #define 
REG_ADCFSM 0x044 #define REG_CLKDIV 0x04C @@ -36,6 +38,7 @@ #define REG_FIFO0THR 0xE8 #define REG_FIFO1CNT 0xF0 #define REG_FIFO1THR 0xF4 +#define REG_DMA1REQ 0xF8 #define REG_FIFO0 0x100 #define REG_FIFO1 0x200 @@ -126,6 +129,10 @@ #define FIFOREAD_DATA_MASK (0xfff << 0) #define FIFOREAD_CHNLID_MASK (0xf << 16) +/* DMA ENABLE/CLEAR Register */ +#define DMA_FIFO0 BIT(0) +#define DMA_FIFO1 BIT(1) + /* Sequencer Status */ #define SEQ_STATUS BIT(5) #define CHARGE_STEP 0x11 @@ -155,6 +162,7 @@ struct ti_tscadc_dev { struct device *dev; struct regmap *regmap; void __iomem *tscadc_base; + phys_addr_t tscadc_phys_base; int irq; int used_cells; /* 1-2 */ int tsc_wires; diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 7a26286db895..fba44abd05ba 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -100,6 +100,11 @@ #define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) /* + * Some controllers have a 32-bit wide data port register + */ +#define TMIO_MMC_32BIT_DATA_PORT (1 << 9) + +/* * Some controllers allow setting the SDx actual clock */ #define TMIO_MMC_CLK_ACTUAL (1 << 10) diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index 4ccda8969639..eac285756b37 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -73,13 +73,15 @@ #define TPS65217_PPATH_AC_CURRENT_MASK 0x0C #define TPS65217_PPATH_USB_CURRENT_MASK 0x03 -#define TPS65217_INT_RESERVEDM BIT(7) #define TPS65217_INT_PBM BIT(6) #define TPS65217_INT_ACM BIT(5) #define TPS65217_INT_USBM BIT(4) #define TPS65217_INT_PBI BIT(2) #define TPS65217_INT_ACI BIT(1) #define TPS65217_INT_USBI BIT(0) +#define TPS65217_INT_SHIFT 4 +#define TPS65217_INT_MASK (TPS65217_INT_PBM | TPS65217_INT_ACM | \ + TPS65217_INT_USBM) #define TPS65217_CHGCONFIG0_TREG BIT(7) #define TPS65217_CHGCONFIG0_DPPM BIT(6) @@ -234,12 +236,11 @@ struct tps65217_bl_pdata { int dft_brightness; }; -enum tps65217_irq_type { - TPS65217_IRQ_PB, - TPS65217_IRQ_AC, - TPS65217_IRQ_USB, - TPS65217_NUM_IRQ -}; +/* Interrupt numbers */ +#define TPS65217_IRQ_USB 0 +#define TPS65217_IRQ_AC 1 +#define TPS65217_IRQ_PB 2 +#define TPS65217_NUM_IRQ 3 /** * struct tps65217_board - packages regulator init data diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index d1db9527fab5..bccd2d68b1e3 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -282,10 +282,9 @@ struct tps65218 { struct regulator_desc desc[TPS65218_NUM_REGULATOR]; struct tps_info *info[TPS65218_NUM_REGULATOR]; struct regmap *regmap; + u8 *strobes; }; -int tps65218_reg_read(struct tps65218 *tps, unsigned int reg, - unsigned int *val); int tps65218_reg_write(struct tps65218 *tps, unsigned int reg, unsigned int val, unsigned int level); int tps65218_set_bits(struct tps65218 *tps, unsigned int reg, diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 1a603701550e..b25d0297ba88 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -319,21 +319,7 @@ struct tps65912 { struct regmap_irq_chip_data *irq_data; }; -static const struct regmap_range tps65912_yes_ranges[] = { - regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5), -}; - -static const struct regmap_access_table tps65912_volatile_table = { - .yes_ranges = tps65912_yes_ranges, - .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges), -}; - -static const struct regmap_config tps65912_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - .cache_type = REGCACHE_RBTREE, - .volatile_table = &tps65912_volatile_table, -}; +extern
const struct regmap_config tps65912_regmap_config; int tps65912_device_init(struct tps65912 *tps); int tps65912_device_exit(struct tps65912 *tps); diff --git a/include/linux/mii.h b/include/linux/mii.h index 47492c9631b3..1629a0c32679 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -31,7 +31,11 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_ethtool_get_link_ksettings( + struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_ethtool_set_link_ksettings( + struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); extern int mii_check_gmii_support(struct mii_if_info *mii); extern void mii_check_link (struct mii_if_info *mii); extern unsigned int mii_check_media (struct mii_if_info *mii, diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 722698a43d79..ed30d5d713e3 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -32,6 +32,7 @@ #define STORE_QUEUE_MINOR 155 /* unused */ #define I2O_MINOR 166 #define MICROCODE_MINOR 184 +#define IRNET_MINOR 187 #define VFIO_MINOR 196 #define TUN_MINOR 200 #define CUSE_MINOR 203 @@ -72,6 +73,13 @@ extern int misc_register(struct miscdevice *misc); extern void misc_deregister(struct miscdevice *misc); /* + * Helper macro for drivers that don't do anything special in the initcall. + * This helps in eliminating boilerplate code. + */ +#define builtin_misc_device(__misc_device) \ + builtin_driver(__misc_device, misc_register) + +/* * Helper macro for drivers that don't do anything special in module init / exit * call. This helps in eliminating boilerplate code. 
*/ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f6a164297358..6533c16e27ad 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -476,7 +476,6 @@ enum { enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, - MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ @@ -1385,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val); int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv); int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, bool *vlan_offload_disabled); +void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, + struct _rule_hw *eth_header); int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); @@ -1399,7 +1400,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); -int mlx4_test_interrupts(struct mlx4_dev *dev); +int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); +int mlx4_test_async(struct mlx4_dev *dev); int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, const u32 offset[], u32 value[], size_t array_len, u8 port); @@ -1460,7 +1462,7 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn); -cycle_t mlx4_read_clock(struct mlx4_dev *dev); +u64 mlx4_read_clock(struct mlx4_dev *dev); struct mlx4_active_ports { DECLARE_BITMAP(ports, MLX4_MAX_PORTS); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 58276144ba81..52b437431c6a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -277,6 +277,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, @@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change { __be32 rsvd1[6]; } __packed; +struct mlx5_eqe_port_module { + u8 reserved_at_0[1]; + u8 module; + u8 reserved_at_2[1]; + u8 module_status; + u8 reserved_at_4[2]; + u8 error_type; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -565,6 +575,7 @@ union ev_data { struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_page_fault page_fault; struct mlx5_eqe_vport_change vport_change; + struct mlx5_eqe_port_module port_module; } __packed; struct mlx5_eqe { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 85c4786427e4..735b36335f29 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -104,6 +104,8 @@ enum { enum { MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, + MLX5_REG_DCBX_PARAM = 0x4020, + MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -123,6 +125,11 @@ enum { MLX5_REG_MLCR = 0x902b, }; +enum mlx5_dcbx_oper_mode { + MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, + MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, @@ -208,7 +215,7 @@ struct mlx5_cmd_first { struct mlx5_cmd_msg { struct list_head 
list; - struct cache_ent *cache; + struct cmd_msg_cache *parent; u32 len; struct mlx5_cmd_first first; struct mlx5_cmd_mailbox *next; @@ -228,17 +235,17 @@ struct mlx5_cmd_debug { u16 outlen; }; -struct cache_ent { +struct cmd_msg_cache { /* protect block chain allocations */ spinlock_t lock; struct list_head head; + unsigned int max_inbox_size; + unsigned int num_ent; }; -struct cmd_msg_cache { - struct cache_ent large; - struct cache_ent med; - +enum { + MLX5_NUM_COMMAND_CACHES = 5, }; struct mlx5_cmd_stats { @@ -281,7 +288,7 @@ struct mlx5_cmd { struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct pci_pool *pool; struct mlx5_cmd_debug dbg; - struct cmd_msg_cache cache; + struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; }; @@ -310,6 +317,13 @@ struct mlx5_buf { u8 page_shift; }; +struct mlx5_frag_buf { + struct mlx5_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + struct mlx5_eq_tasklet { struct list_head list; struct list_head process_list; @@ -418,8 +432,12 @@ struct mlx5_core_health { u32 prev; int miss_counter; bool sick; + /* wq spinlock to synchronize draining */ + spinlock_t wq_lock; struct workqueue_struct *wq; + unsigned long flags; struct work_struct work; + struct delayed_work recover_work; }; struct mlx5_cq_table { @@ -494,6 +512,31 @@ struct mlx5_rl_table { struct mlx5_rl_entry *rl_entry; }; +enum port_module_event_status_type { + MLX5_MODULE_STATUS_PLUGGED = 0x1, + MLX5_MODULE_STATUS_UNPLUGGED = 0x2, + MLX5_MODULE_STATUS_ERROR = 0x3, + MLX5_MODULE_STATUS_NUM = 0x3, +}; + +enum port_module_event_error_type { + MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, + MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, + MLX5_MODULE_EVENT_ERROR_BUS_STUCK, + MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, + MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, + MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, + MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, + MLX5_MODULE_EVENT_ERROR_BAD_CABLE, + MLX5_MODULE_EVENT_ERROR_UNKNOWN, + MLX5_MODULE_EVENT_ERROR_NUM, +}; + +struct mlx5_port_module_event_stats { + u64 status_counters[MLX5_MODULE_STATUS_NUM]; + u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -555,6 +598,8 @@ struct mlx5_priv { unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; + + struct mlx5_port_module_event_stats pme_stats; }; enum mlx5_device_state { @@ -626,10 +671,6 @@ struct mlx5_db { }; enum { - MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, -}; - -enum { MLX5_COMP_EQ_SIZE = 1024, }; @@ -638,13 +679,6 @@ enum { MLX5_PTYS_EN = 1 << 2, }; -struct mlx5_db_pgdir { - struct list_head list; - DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); - __be32 *db_page; - dma_addr_t db_dma; -}; - typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { @@ -789,10 +823,14 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_drain_health_wq(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); +int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, + 
struct mlx5_frag_buf *buf, int node); +void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, @@ -837,6 +875,7 @@ void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 93ebc5e21334..949b24b6c479 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -42,6 +42,10 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, }; +enum { + MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), +}; + #define LEFTOVERS_RULE_NUM 2 static inline void build_leftovers_ft_param(int *priority, int *n_ent, @@ -69,8 +73,8 @@ enum mlx5_flow_namespace_type { struct mlx5_flow_table; struct mlx5_flow_group; -struct mlx5_flow_rule; struct mlx5_flow_namespace; +struct mlx5_flow_handle; struct mlx5_flow_spec { u8 match_criteria_enable; @@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, int max_num_groups, - u32 level); + u32 level, + u32 flags); struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - u32 level); + u32 level, + u32 flags); struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, @@ -124,21 +130,28 @@ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); +struct mlx5_flow_act { + u32 action; + u32 flow_tag; + u32 encap_id; +}; + /* Single destination per rule. * Group ID is implied by the match criteria. 
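The replacement API declared just below folds the action, flow tag and encap id into the new struct mlx5_flow_act, takes an array of destinations, and returns a struct mlx5_flow_handle in place of the old struct mlx5_flow_rule. As a hedged illustration only (not code from this patch), a single-destination forwarding rule could look like the sketch here; MLX5_FLOW_CONTEXT_ACTION_FWD_DEST and MLX5_FS_DEFAULT_FLOW_TAG are assumed to be provided elsewhere by these headers, and the function name is invented:

static int hypothetical_add_fwd_rule(struct mlx5_flow_table *ft,
                                     struct mlx5_flow_spec *spec,
                                     struct mlx5_flow_destination *dest,
                                     struct mlx5_flow_handle **handle)
{
        /* The action and tag now travel in mlx5_flow_act, not as bare u32s. */
        struct mlx5_flow_act flow_act = {
                .action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
        };

        *handle = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        return PTR_ERR_OR_ZERO(*handle);
}

A handle obtained this way is released with mlx5_del_flow_rules() rather than the old mlx5_del_flow_rule().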
*/ -struct mlx5_flow_rule * -mlx5_add_flow_rule(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, - u32 action, - u32 flow_tag, - struct mlx5_flow_destination *dest); -void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); - -int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, - struct mlx5_flow_destination *dest); - -struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); +struct mlx5_flow_handle * +mlx5_add_flow_rules(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num); +void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); + +int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, + struct mlx5_flow_destination *new_dest, + struct mlx5_flow_destination *old_dest); + +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6045d4d58065..a852e9db6f0d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -83,6 +83,7 @@ enum { MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, + MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -145,6 +146,12 @@ enum { MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, + MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, + MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, + MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, + MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, + MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, + MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, @@ -537,13 +544,27 @@ struct mlx5_ifc_e_switch_cap_bits { struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; - u8 reserved_0[0x1f]; - u8 reserved_1[0x20]; + u8 esw_scheduling[0x1]; + u8 reserved_at_2[0x1e]; + + u8 reserved_at_20[0x20]; + u8 packet_pacing_max_rate[0x20]; + u8 packet_pacing_min_rate[0x20]; - u8 reserved_2[0x10]; + + u8 reserved_at_80[0x10]; u8 packet_pacing_rate_table_size[0x10]; - u8 reserved_3[0x760]; + + u8 esw_element_type[0x10]; + u8 esw_tsar_type[0x10]; + + u8 reserved_at_c0[0x10]; + u8 max_qos_para_vport[0x10]; + + u8 max_tsar_bw_share[0x20]; + + u8 reserved_at_100[0x700]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { @@ -556,7 +577,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 reserved_at_10[0x2]; + u8 multi_pkt_send_wqe[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; @@ -804,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 early_vf_enable[0x1]; u8 reserved_at_1a9[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_at_1af[0x2]; + u8 port_module_event[0x1]; + u8 reserved_at_1b0[0x1]; u8 ports_check[0x1]; u8 reserved_at_1b2[0x1]; u8 disable_link_up[0x1]; @@ -888,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_at_261[0x1]; + u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; @@ -2333,6 +2355,30 @@ struct mlx5_ifc_sqc_bits { 
struct mlx5_ifc_wq_bits wq; }; +enum { + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, + SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, +}; + +struct mlx5_ifc_scheduling_context_bits { + u8 element_type[0x8]; + u8 reserved_at_8[0x18]; + + u8 element_attributes[0x20]; + + u8 parent_element_id[0x20]; + + u8 reserved_at_60[0x40]; + + u8 bw_share[0x20]; + + u8 max_average_bw[0x20]; + + u8 reserved_at_e0[0x120]; +}; + struct mlx5_ifc_rqtc_bits { u8 reserved_at_0[0xa0]; @@ -2844,7 +2890,7 @@ struct mlx5_ifc_xrqc_bits { struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; - u8 reserved_at_180[0x200]; + u8 reserved_at_180[0x880]; struct mlx5_ifc_wq_bits wq; }; @@ -2920,6 +2966,29 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; + u8 reserved_at_10[0x10]; +}; + struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3540,6 +3609,39 @@ struct mlx5_ifc_query_special_contexts_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_query_scheduling_element_out_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + +enum { + SCHEDULING_HIERARCHY_E_SWITCH = 0x2, +}; + +struct mlx5_ifc_query_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3904,6 +4006,25 @@ struct mlx5_ifc_query_issi_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_set_driver_version_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_driver_version_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + u8 driver_version[64][0x8]; +}; + struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4725,6 +4846,43 @@ struct mlx5_ifc_modify_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; +struct mlx5_ifc_modify_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +enum { + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1, + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2, +}; + +struct mlx5_ifc_modify_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x20]; + + u8 modify_bitmask[0x20]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + 
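The new SCHEDULING_ELEMENT commands and the scheduling_context/vport_element layouts above compose in the usual mlx5_ifc fashion. A minimal sketch, assuming the driver's existing MLX5_SET()/MLX5_GET()/MLX5_ADDR_OF() accessors and mlx5_cmd_exec(); the function name is hypothetical and error handling is reduced to the bare minimum:

static int hypothetical_create_vport_sched_elem(struct mlx5_core_dev *dev,
                                                u16 vport_num, u32 bw_share,
                                                u32 *element_id)
{
        u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
        void *ctx, *attr;
        int err;

        MLX5_SET(create_scheduling_element_in, in, opcode,
                 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
        MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
                 SCHEDULING_HIERARCHY_E_SWITCH);

        ctx = MLX5_ADDR_OF(create_scheduling_element_in, in, scheduling_context);
        MLX5_SET(scheduling_context, ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
        MLX5_SET(scheduling_context, ctx, bw_share, bw_share);

        /* element_attributes is interpreted per element_type; for a
         * vport element it carries the vport number. */
        attr = MLX5_ADDR_OF(scheduling_context, ctx, element_attributes);
        MLX5_SET(vport_element, attr, vport_number, vport_num);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *element_id = MLX5_GET(create_scheduling_element_out, out,
                                       scheduling_element_id);
        return err;
}

The returned scheduling_element_id is what the MODIFY and DESTROY variants of the command take back.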
struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5390,6 +5548,30 @@ struct mlx5_ifc_destroy_sq_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_destroy_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +struct mlx5_ifc_destroy_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6017,6 +6199,36 @@ struct mlx5_ifc_create_sq_in_bits { struct mlx5_ifc_sqc_bits ctx; }; +struct mlx5_ifc_create_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_create_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index b3065acd20b4..e527732fb31b 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -94,6 +94,9 @@ enum mlx5e_link_mode { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) +#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF +#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); @@ -138,8 +141,12 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, + u8 prio, u8 *tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, + u8 tc, u8 *bw_pct); int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); @@ -155,4 +162,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data); +int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); +int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index 33c97dc900f8..1cde0fd53f90 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -55,7 +55,7 @@ struct mlx5_srq_attr { u32 lwm; u32 user_index; u64 db_record; - u64 *pas; + __be64 *pas; }; struct mlx5_core_dev; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 451b0bde9083..ec35157ea725 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -36,6 +36,12 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/device.h> +enum { + MLX5_CAP_INLINE_MODE_L2, + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, + 
MLX5_CAP_INLINE_MODE_NOT_REQUIRED, +}; + u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); @@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); -void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline); +int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, diff --git a/include/linux/mm.h b/include/linux/mm.h index a92c8d73aeaf..b84615b0f64c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -292,36 +292,23 @@ extern pgprot_t protection_map[16]; * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { + struct vm_area_struct *vma; /* Target VMA */ unsigned int flags; /* FAULT_FLAG_xxx flags */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ - void __user *virtual_address; /* Faulting virtual address */ + unsigned long address; /* Faulting virtual address */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' */ + pte_t orig_pte; /* Value of PTE at the time of fault */ - struct page *cow_page; /* Handler may choose to COW */ + struct page *cow_page; /* Page handler may use for COW fault */ + struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by * VM_FAULT_ERROR). */ - void *entry; /* ->fault handler can alternatively - * return locked DAX entry. In that - * case handler should return - * VM_FAULT_DAX_LOCKED and fill in - * entry here. - */ -}; - -/* - * Page fault context: passes though page fault handler instead of endless list - * of function arguments. - */ -struct fault_env { - struct vm_area_struct *vma; /* Target VMA */ - unsigned long address; /* Faulting virtual address */ - unsigned int flags; /* FAULT_FLAG_xxx flags */ - pmd_t *pmd; /* Pointer to pmd entry matching - * the 'address' - */ + /* These three entries are valid only while holding ptl lock */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page * table hasn't been allocated. 
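With struct fault_env folded into struct vm_fault above, a fault handler reads the faulting address from vmf->address (a plain unsigned long now) instead of the removed void __user *virtual_address, and the target VMA rides along in vmf->vma. A minimal hedged sketch of a ->fault implementation against the consolidated structure; hypothetical_find_page() is an invented stand-in for a driver's own page lookup:

static int hypothetical_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;

        /* pgoff is still the logical page offset based on the VMA. */
        page = hypothetical_find_page(vma->vm_file, vmf->pgoff);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;       /* core wires the page into the page tables */
        return 0;
}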
@@ -351,7 +338,7 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct fault_env *fe, + void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become @@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, +int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page); +int finish_fault(struct vm_fault *vmf); +int finish_mkwrite_fault(struct vm_fault *vmf); #endif /* @@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ -#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ +#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ @@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_pte_pmd(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, @@ -1270,19 +1261,18 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); +extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); -long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, int write, @@ -1768,6 +1758,8 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } +extern void __init pagecache_init(void); + extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); @@ -2097,7 +2089,7 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern 
int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct fault_env *fe, +extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 71613e8a720f..41d376e7116d 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, { __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG - mem_cgroup_update_lru_size(lruvec, lru, nr_pages); + mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4a8acedf4b7d..808751d7b737 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -473,6 +473,7 @@ struct mm_struct { */ struct task_struct __rcu *owner; #endif + struct user_namespace *user_ns; /* store ref to file /proc/<pid>/exe symlink points to */ struct file __rcu *exe_file; @@ -508,10 +509,6 @@ struct mm_struct { bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; -#ifdef CONFIG_X86_INTEL_MPX - /* address of the bounds directory */ - void __user *bd_addr; -#endif #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 73fad83acbcb..95d69d498296 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -89,6 +89,8 @@ struct mmc_ext_csd { unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; bool ffu_capable; /* Firmware upgrade support */ + bool cmdq_support; /* Command Queue supported */ + unsigned int cmdq_depth; /* Command Queue depth */ #define MMC_FIRMWARE_LEN 8 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ @@ -207,18 +209,6 @@ struct sdio_func_tuple; #define SDIO_MAX_FUNCS 7 -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, - MMC_BLK_NEW_REQUEST, -}; - /* The number of MMC physical partitions. These consist of: * boot partitions (2), general purpose partitions (4) and * RPMB partition (1) in MMC v4.4. 
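The new cmdq_support/cmdq_depth fields above are meant to be filled from the EXT_CSD_CMDQ_* byte offsets and masks added to mmc.h further down in this series. A hedged sketch of that decode step; the function name is invented, and the +1 assumes the depth byte follows the usual eMMC N-means-N+1 encoding:

static void hypothetical_decode_cmdq(struct mmc_card *card, u8 *ext_csd)
{
        card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
                                     EXT_CSD_CMDQ_SUPPORTED;
        if (card->ext_csd.cmdq_support)
                /* depth field assumed to encode N-1 for N queued tasks */
                card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
                                            EXT_CSD_CMDQ_DEPTH_MASK) + 1;
}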
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 2b953eb8ceae..e33cc748dcfe 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -15,6 +15,18 @@ struct request; struct mmc_data; struct mmc_request; +enum mmc_blk_status { + MMC_BLK_SUCCESS = 0, + MMC_BLK_PARTIAL, + MMC_BLK_CMD_ERR, + MMC_BLK_RETRY, + MMC_BLK_ABORT, + MMC_BLK_DATA_ERR, + MMC_BLK_ECC_ERR, + MMC_BLK_NOMEDIUM, + MMC_BLK_NEW_REQUEST, +}; + struct mmc_command { u32 opcode; u32 arg; @@ -150,7 +162,8 @@ struct mmc_async_req; extern int mmc_stop_bkops(struct mmc_card *); extern int mmc_read_bkops_status(struct mmc_card *); extern struct mmc_async_req *mmc_start_req(struct mmc_host *, - struct mmc_async_req *, int *); + struct mmc_async_req *, + enum mmc_blk_status *); extern int mmc_interrupt_hpi(struct mmc_card *); extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); extern void mmc_wait_for_req_done(struct mmc_host *host, @@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); +extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index f5af2bd35e7f..15db6f83f53f 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -39,6 +39,12 @@ enum { EVENT_DATA_ERROR, }; +enum dw_mci_cookie { + COOKIE_UNMAPPED, + COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */ + COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */ +}; + struct mmc_data; enum { diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0b2439441cc8..8bc884121465 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -93,8 +93,7 @@ struct mmc_host_ops { */ void (*post_req)(struct mmc_host *host, struct mmc_request *req, int err); - void (*pre_req)(struct mmc_host *host, struct mmc_request *req, - bool is_first_req); + void (*pre_req)(struct mmc_host *host, struct mmc_request *req); void (*request)(struct mmc_host *host, struct mmc_request *req); /* @@ -173,7 +172,7 @@ struct mmc_async_req { * Check error status of completed mmc request. * Returns 0 on success, otherwise non-zero. 
*/ - int (*err_check) (struct mmc_card *, struct mmc_async_req *); + enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); }; /** @@ -198,14 +197,12 @@ struct mmc_slot { * @is_new_req wake up reason was new request * @is_waiting_last_req mmc context waiting for single running request * @wait wait queue - * @lock lock to protect data fields */ struct mmc_context_info { bool is_done_rcv; bool is_new_req; bool is_waiting_last_req; wait_queue_head_t wait; - spinlock_t lock; }; struct regulator; @@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host) MMC_CAP_UHS_DDR50); } -static inline int mmc_host_packed_wr(struct mmc_host *host) -{ - return host->caps2 & MMC_CAP2_PACKED_WR; -} - static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || @@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host) host->retune_now = 1; } +static inline bool mmc_can_retune(struct mmc_host *host) +{ + return host->can_retune == 1; +} + void mmc_retune_pause(struct mmc_host *host); void mmc_retune_unpause(struct mmc_host *host); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index c376209c70ef..672730acc705 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -84,6 +84,13 @@ #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ + /* class 11 */ +#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ +#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ +#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ +#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ +#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ + static inline bool mmc_op_multi(u32 opcode) { return opcode == MMC_WRITE_MULTIPLE_BLOCK || @@ -272,6 +279,7 @@ struct _mmc_csd { * EXT_CSD fields */ +#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ #define EXT_CSD_FLUSH_CACHE 32 /* W */ #define EXT_CSD_CACHE_CTRL 33 /* R/W */ #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ @@ -331,6 +339,8 @@ struct _mmc_csd { #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ +#define EXT_CSD_CMDQ_DEPTH 307 /* RO */ +#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ @@ -438,6 +448,13 @@ struct _mmc_csd { #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 /* + * Command Queue + */ +#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) +#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) +#define EXT_CSD_CMDQ_SUPPORTED BIT(0) + +/* * MMC_SWITCH access modes */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 3945a8c9d3cb..a7972cd3bc14 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); void mmc_gpiod_request_cd_irq(struct mmc_host *host); +bool mmc_can_gpio_cd(struct mmc_host *host); #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 0f088f3a2fed..36d9896fbc1e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -246,7 +246,7 @@ struct lruvec { #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) /* LRU Isolation modes. 
*/ -typedef unsigned __bitwise__ isolate_mode_t; +typedef unsigned __bitwise isolate_mode_t; enum zone_watermarks { WMARK_MIN, diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ed84c07f6a51..8a57f0b1242d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -175,7 +175,8 @@ struct ap_device_id { kernel_ulong_t driver_info; }; -#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 +#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 +#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 /* s390 css bus devices (subchannels) */ struct css_device_id { diff --git a/include/linux/module.h b/include/linux/module.h index 0c3207d26ac0..7c84273d60b9 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -399,7 +399,7 @@ struct module { /* Arch-specific module values */ struct mod_arch_specific arch; - unsigned int taints; /* same bits as kernel:tainted */ + unsigned long taints; /* same bits as kernel:taint_flags */ #ifdef CONFIG_GENERIC_BUG /* Support for BUG */ @@ -412,7 +412,7 @@ struct module { /* Protected by RCU and/or module_mutex: use rcu_dereference() */ struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; - + /* Section attributes */ struct module_sect_attrs *sect_attrs; diff --git a/include/linux/mount.h b/include/linux/mount.h index 1172cce949a4..c6f55158d5e5 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -79,12 +79,12 @@ extern void mnt_drop_write(struct vfsmount *mnt); extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); -extern struct vfsmount *mnt_clone_internal(struct path *path); +extern struct vfsmount *mnt_clone_internal(const struct path *path); extern int __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); struct path; -extern struct vfsmount *clone_private_mount(struct path *path); +extern struct vfsmount *clone_private_mount(const struct path *path); struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, @@ -98,4 +98,6 @@ extern dev_t name_to_dev_t(const char *name); extern unsigned int sysctl_mount_max; +extern bool path_is_mountpoint(const struct path *path); + #endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index c5d3d5024fc8..c5f3a012ae62 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -142,6 +142,12 @@ enum nand_ecc_algo { */ #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) #define NAND_ECC_MAXIMIZE BIT(1) +/* + * If your controller already sends the required NAND commands when + * reading or writing a page, then the framework is not supposed to + * send READ0 and SEQIN/PAGEPROG respectively. + */ +#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2) /* Bit mask for flags passed to do_nand_read_ecc */ #define NAND_GET_DEVICE 0x80 @@ -186,6 +192,7 @@ enum nand_ecc_algo { /* Macros to identify the above */ #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) +#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE) /* Non chip related options */ /* This option skips the bbt scan during initialization. 
*/ @@ -210,6 +217,16 @@ enum nand_ecc_algo { */ #define NAND_USE_BOUNCE_BUFFER 0x00100000 +/* + * In case your controller is implementing ->cmd_ctrl() and is relying on the + * default ->cmdfunc() implementation, you may want to let the core handle the + * tCCS delay which is required when a column change (RNDIN or RNDOUT) is + * requested. + * If your controller already takes care of this delay, you don't need to set + * this flag. + */ +#define NAND_WAIT_TCCS 0x00200000 + /* Options set by nand scan */ /* Nand scan has allocated controller struct */ #define NAND_CONTROLLER_ALLOC 0x80000000 @@ -558,6 +575,11 @@ struct nand_ecc_ctrl { int page); }; +static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc) +{ + return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS); +} + /** * struct nand_buffers - buffer structure for read/write * @ecccalc: buffer pointer for calculated ECC, size is oobsize. @@ -584,6 +606,10 @@ struct nand_buffers { * * All these timings are expressed in picoseconds. * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time * @tALH_min: ALE hold time * @tADL_min: ALE to data loading time * @tALS_min: ALE setup time @@ -621,6 +647,10 @@ struct nand_buffers { * @tWW_min: WP# transition to WE# low */ struct nand_sdr_timings { + u32 tBERS_max; + u32 tCCS_min; + u32 tPROG_max; + u32 tR_max; u32 tALH_min; u32 tADL_min; u32 tALS_min; @@ -1184,7 +1214,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, int page); /* Reset and initialize a NAND device */ -int nand_reset(struct nand_chip *chip); +int nand_reset(struct nand_chip *chip, int chipnr); /* Free resources held by the NAND device */ void nand_cleanup(struct nand_chip *chip); diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h deleted file mode 100644 index 4ac8b1977b73..000000000000 --- a/include/linux/mutex-debug.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __LINUX_MUTEX_DEBUG_H -#define __LINUX_MUTEX_DEBUG_H - -#include <linux/linkage.h> -#include <linux/lockdep.h> -#include <linux/debug_locks.h> - -/* - * Mutexes - debugging helpers: - */ - -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .magic = &lockname - -#define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ -} while (0) - -extern void mutex_destroy(struct mutex *lock); - -#endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 2cb7531e7d7a..b97870f2debd 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -18,6 +18,7 @@ #include <linux/atomic.h> #include <asm/processor.h> #include <linux/osq_lock.h> +#include <linux/debug_locks.h> /* * Simple, straightforward mutexes with strict semantics: @@ -48,16 +49,12 @@ * locks and tasks (and only those tasks) */ struct mutex { - /* 1: unlocked, 0: locked, negative: locked, possible waiters */ - atomic_t count; + atomic_long_t owner; spinlock_t wait_lock; - struct list_head wait_list; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) - struct task_struct *owner; -#endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif + struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES void *magic; #endif @@ -66,6 +63,11 @@ struct mutex { #endif }; +static inline struct task_struct *__mutex_owner(struct mutex *lock) +{ + return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03); +} + /* * This is the control structure 
for tasks blocked on mutex, * which resides on the blocked task's kernel stack: @@ -79,9 +81,20 @@ struct mutex_waiter { }; #ifdef CONFIG_DEBUG_MUTEXES -# include <linux/mutex-debug.h> + +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname + +extern void mutex_destroy(struct mutex *lock); + #else + # define __DEBUG_MUTEX_INITIALIZER(lockname) + +static inline void mutex_destroy(struct mutex *lock) {} + +#endif + /** * mutex_init - initialize the mutex * @mutex: the mutex to be initialized @@ -90,14 +103,12 @@ struct mutex_waiter { * * It is not allowed to initialize an already locked mutex. */ -# define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - __mutex_init((mutex), #mutex, &__key); \ +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ } while (0) -static inline void mutex_destroy(struct mutex *lock) {} -#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ @@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {} #endif #define __MUTEX_INITIALIZER(lockname) \ - { .count = ATOMIC_INIT(1) \ + { .owner = ATOMIC_LONG_INIT(0) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ static inline int mutex_is_locked(struct mutex *lock) { - return atomic_read(&lock->count) != 1; + /* + * XXX think about spin_is_locked + */ + return __mutex_owner(lock) != NULL; } /* @@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); +/* + * These values are chosen such that FAIL and SUCCESS match the + * values of the regular mutex_trylock(). + */ +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; + +/** + * mutex_trylock_recursive - trylock variant that allows recursive locking + * @lock: mutex to be locked + * + * This function should not be used, _ever_. It is purely for hysterical GEM + * raisins, and once those are gone this will be removed. + * + * Returns: + * MUTEX_TRYLOCK_FAILED - trylock failed, + * MUTEX_TRYLOCK_SUCCESS - lock acquired, + * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. 
+ */ +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock) +{ + if (unlikely(__mutex_owner(lock) == current)) + return MUTEX_TRYLOCK_RECURSIVE; + + return mutex_trylock(lock); +} + #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 136ae6bbe81e..9bde9558b596 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -192,6 +192,7 @@ struct net_device_stats { #ifdef CONFIG_RPS #include <linux/static_key.h> extern struct static_key rps_needed; +extern struct static_key rfs_needed; #endif struct neighbour; @@ -316,7 +317,6 @@ struct napi_struct { unsigned int gro_count; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL - spinlock_t poll_lock; int poll_owner; #endif struct net_device *dev; @@ -334,6 +334,16 @@ enum { NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ +}; + +enum { + NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), + NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { @@ -453,32 +463,22 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -void __napi_complete(struct napi_struct *n); -void napi_complete_done(struct napi_struct *n, int work_done); +bool __napi_complete(struct napi_struct *n); +bool napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: NAPI context * * Mark NAPI processing as complete. * Consider using napi_complete_done() instead. + * Return false if device should avoid rearming interrupts. */ -static inline void napi_complete(struct napi_struct *n) +static inline bool napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); } /** - * napi_hash_add - add a NAPI to global hashtable - * @napi: NAPI context - * - * Generate a new napi_id and store a @napi under it in napi_hash. - * Used for busy polling (CONFIG_NET_RX_BUSY_POLL). - * Note: This is normally automatically done from netif_napi_add(), - * so might disappear in a future Linux version. - */ -void napi_hash_add(struct napi_struct *napi); - -/** * napi_hash_del - remove a NAPI from global table * @napi: NAPI context * @@ -732,8 +732,8 @@ struct xps_dev_maps { struct rcu_head rcu; struct xps_map __rcu *cpu_map[0]; }; -#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ - (nr_cpu_ids * sizeof(struct xps_map *))) +#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ + (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 @@ -803,6 +803,7 @@ struct tc_to_netdev { struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; }; + bool egress_dev; }; /* These structures hold the attributes of xdp state that are being passed @@ -926,7 +927,7 @@ struct netdev_xdp { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * bool (*ndo_has_offload_stats)(int attr_id) + * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) * Return true if this device supports offload stats of this attr_id. 
* * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, @@ -1166,7 +1167,7 @@ struct net_device_ops { struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, struct rtnl_link_stats64 *storage); - bool (*ndo_has_offload_stats)(int attr_id); + bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, void *attr_data); @@ -1456,7 +1457,6 @@ enum netdev_priv_flags { * @ptype_specific: Device-specific, protocol-specific packet handlers * * @adj_list: Directly linked devices, like slaves for bonding - * @all_adj_list: All linked devices, *including* neighbours * @features: Currently active device features * @hw_features: User-changeable features * @@ -1506,6 +1506,8 @@ enum netdev_priv_flags { * @if_port: Selectable AUI, TP, ... * @dma: DMA channel * @mtu: Interface MTU value + * @min_mtu: Interface Minimum MTU value + * @max_mtu: Interface Maximum MTU value * @type: Interface hardware type * @hard_header_len: Maximum hardware header length. * @@ -1619,7 +1621,7 @@ enum netdev_priv_flags { * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device * @tc_to_txq: XXX: need comments on this one - * @prio_tc_map XXX: need comments on this one + * @prio_tc_map: XXX: need comments on this one * * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * @@ -1673,11 +1675,6 @@ struct net_device { struct list_head lower; } adj_list; - struct { - struct list_head upper; - struct list_head lower; - } all_adj_list; - netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; @@ -1726,6 +1723,8 @@ struct net_device { unsigned char dma; unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; @@ -1922,34 +1921,10 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) return 0; } -static inline -void netdev_reset_tc(struct net_device *dev) -{ - dev->num_tc = 0; - memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); - memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); -} - -static inline -int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) -{ - if (tc >= dev->num_tc) - return -EINVAL; - - dev->tc_to_txq[tc].count = count; - dev->tc_to_txq[tc].offset = offset; - return 0; -} - -static inline -int netdev_set_num_tc(struct net_device *dev, u8 num_tc) -{ - if (num_tc > TC_MAX_QUEUE) - return -EINVAL; - - dev->num_tc = num_tc; - return 0; -} +int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); +void netdev_reset_tc(struct net_device *dev); +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); +int netdev_set_num_tc(struct net_device *dev, u8 num_tc); static inline int netdev_get_num_tc(struct net_device *dev) @@ -2169,7 +2144,10 @@ struct napi_gro_cb { /* Used to determine if flush_id can be ignored */ u8 is_atomic:1; - /* 5 bit hole */ + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; + + /* 1 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2180,6 +2158,40 @@ struct napi_gro_cb { #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) +#define GRO_RECURSION_LIMIT 15 +static inline int gro_recursion_inc_test(struct sk_buff *skb) +{ + return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; +} + +typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); 
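The call_gro_receive() wrapper defined next is how an encapsulation's gro_receive handler is expected to chain to the next protocol layer: each hop bumps recursion_counter, and once it reaches GRO_RECURSION_LIMIT the packet is merely flagged for flush and the chain stops rather than recursing without bound. A hedged usage sketch with invented handler names:

static struct sk_buff **hypothetical_tunnel_gro_receive(struct sk_buff **head,
                                                        struct sk_buff *skb)
{
        /* ...validate and pull the tunnel header here... */

        /* Hand the inner packet to the next layer through the
         * recursion-limited wrapper instead of calling it directly. */
        return call_gro_receive(hypothetical_inner_gro_receive, head, skb);
}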
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb, + struct sk_buff **head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb); +} + +typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, + struct sk_buff *); +static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(sk, head, skb); +} + struct packet_type { __be16 type; /* This is really htons(ether_type). */ struct net_device *dev; /* NULL is wildcarded here */ @@ -2465,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) return NAPI_GRO_CB(skb)->frag0_len < hlen; } +static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) +{ + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; +} + static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, unsigned int offset) { if (!pskb_may_pull(skb, hlen)) return NULL; - NAPI_GRO_CB(skb)->frag0 = NULL; - NAPI_GRO_CB(skb)->frag0_len = 0; + skb_gro_frag0_invalidate(skb); return skb->data + offset; } @@ -2649,71 +2666,6 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } -struct skb_csum_offl_spec { - __u16 ipv4_okay:1, - ipv6_okay:1, - encap_okay:1, - ip_options_okay:1, - ext_hdrs_okay:1, - tcp_okay:1, - udp_okay:1, - sctp_okay:1, - vlan_okay:1, - no_encapped_ipv6:1, - no_not_encapped:1; -}; - -bool __skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help); - -static inline bool skb_csum_offload_chk(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec, - bool *csum_encapped, - bool csum_help) -{ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return false; - - return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); -} - -static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, - const struct skb_csum_offl_spec *spec) -{ - bool csum_encapped; - - return skb_csum_offload_chk(skb, spec, &csum_encapped, true); -} - -static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .ipv6_okay = 1, - .vlan_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - -static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) -{ - static const struct skb_csum_offl_spec csum_offl_spec = { - .ipv4_okay = 1, - .ip_options_okay = 1, - .tcp_okay = 1, - .udp_okay = 1, - .vlan_okay = 1, - }; - - return skb_csum_offload_chk_help(skb, &csum_offl_spec); -} - static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3308,7 +3260,7 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -int dev_change_xdp_fd(struct net_device *dev, int fd); +int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device 
*dev, struct netdev_queue *txq, int *ret); @@ -3317,6 +3269,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); +static __always_inline int ____dev_forward_skb(struct net_device *dev, + struct sk_buff *skb) +{ + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->priority = 0; + return 0; +} + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; @@ -3502,6 +3469,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) txq->xmit_lock_owner = cpu; } +static inline bool __netif_tx_acquire(struct netdev_queue *txq) +{ + __acquire(&txq->_xmit_lock); + return true; +} + +static inline void __netif_tx_release(struct netdev_queue *txq) +{ + __release(&txq->_xmit_lock); +} + static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -3603,17 +3581,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) #define HARD_TX_LOCK(dev, txq, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_lock(txq, cpu); \ + } else { \ + __netif_tx_acquire(txq); \ } \ } #define HARD_TX_TRYLOCK(dev, txq) \ (((dev->features & NETIF_F_LLTX) == 0) ? \ __netif_tx_trylock(txq) : \ - true ) + __netif_tx_acquire(txq)) #define HARD_TX_UNLOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_unlock(txq); \ + } else { \ + __netif_tx_release(txq); \ } \ } @@ -3832,12 +3814,13 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, updev; \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) -/* iterate through upper list, must be called under RCU read lock */ -#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ - for (iter = &(dev)->all_adj_list.upper, \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ - updev; \ - updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) +int netdev_walk_all_upper_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *upper_dev, + void *data), + void *data); + +bool netdev_has_upper_dev_all_rcu(struct net_device *dev, + struct net_device *upper_dev); void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); @@ -3870,17 +3853,14 @@ struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, struct list_head **iter); -#define netdev_for_each_all_lower_dev(dev, ldev, iter) \ - for (iter = (dev)->all_adj_list.lower.next, \ - ldev = netdev_all_lower_get_next(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next(dev, &(iter))) - -#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ - for (iter = (dev)->all_adj_list.lower.next, \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ - ldev; \ - ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) +int netdev_walk_all_lower_dev(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); +int netdev_walk_all_lower_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); @@ -3957,19 +3937,6 @@ static inline bool can_checksum_protocol(netdev_features_t features, } } -/* Map 
an ethertype into IP protocol if possible */ -static inline int eproto_to_ipproto(int eproto) -{ - switch (eproto) { - case htons(ETH_P_IP): - return IPPROTO_IP; - case htons(ETH_P_IPV6): - return IPPROTO_IPV6; - default: - return -1; - } -} - #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev); #else diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index abc7fdcb9eb1..a4b97be30b28 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -49,13 +49,11 @@ struct sock; struct nf_hook_state { unsigned int hook; - int thresh; u_int8_t pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; - struct nf_hook_entry __rcu *hook_entries; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; @@ -77,14 +75,42 @@ struct nf_hook_ops { struct nf_hook_entry { struct nf_hook_entry __rcu *next; - struct nf_hook_ops ops; + nf_hookfn *hook; + void *priv; const struct nf_hook_ops *orig_ops; }; +static inline void +nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops) +{ + entry->next = NULL; + entry->hook = ops->hook; + entry->priv = ops->priv; + entry->orig_ops = ops; +} + +static inline int +nf_hook_entry_priority(const struct nf_hook_entry *entry) +{ + return entry->orig_ops->priority; +} + +static inline int +nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, + struct nf_hook_state *state) +{ + return entry->hook(entry->priv, skb, state); +} + +static inline const struct nf_hook_ops * +nf_hook_entry_ops(const struct nf_hook_entry *entry) +{ + return entry->orig_ops; +} + static inline void nf_hook_state_init(struct nf_hook_state *p, - struct nf_hook_entry *hook_entry, unsigned int hook, - int thresh, u_int8_t pf, + u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, @@ -92,13 +118,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; - p->thresh = thresh; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; - RCU_INIT_POINTER(p->hook_entries, hook_entry); p->okfn = okfn; } @@ -152,23 +176,20 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif -int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, + struct nf_hook_entry *entry); /** - * nf_hook_thresh - call a netfilter hook + * nf_hook - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. 
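For illustration only, a minimal sketch of the caller side of this contract, assuming the IPv4 receive path as the caller (ip_rcv_finish is its okfn in net/ipv4/ip_input.c; the wrapper name is hypothetical):

/* Sketch: hand an ingress packet to the PRE_ROUTING chain. A return of
 * 1 from the chain means "accepted" and the caller must run the okfn
 * itself; any other value means the chain consumed the skb.
 */
static int example_ipv4_rcv(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);

        return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
                       skb, dev, NULL, ip_rcv_finish);
}

With this patch, NF_HOOK() itself performs the ret == 1 / okfn step that NF_HOOK_THRESH used to perform.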
*/ -static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, - struct net *net, - struct sock *sk, - struct sk_buff *skb, - struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct net *, struct sock *, struct sk_buff *), - int thresh) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entry *hook_head; int ret = 1; @@ -185,24 +206,16 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, if (hook_head) { struct nf_hook_state state; - nf_hook_state_init(&state, hook_head, hook, thresh, - pf, indev, outdev, sk, net, okfn); + nf_hook_state_init(&state, hook, pf, indev, outdev, + sk, net, okfn); - ret = nf_hook_slow(skb, &state); + ret = nf_hook_slow(skb, &state, hook_head); } rcu_read_unlock(); return ret; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, - struct sock *sk, struct sk_buff *skb, - struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct net *, struct sock *, struct sk_buff *)) -{ - return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); -} - /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). @@ -221,19 +234,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, - struct sk_buff *skb, struct net_device *in, - struct net_device *out, - int (*okfn)(struct net *, struct sock *, struct sk_buff *), - int thresh) -{ - int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); - if (ret == 1) - ret = okfn(net, sk, skb); - return ret; -} - -static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), @@ -242,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) ret = okfn(net, sk, skb); return ret; } @@ -252,7 +252,10 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); + int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); + if (ret == 1) + ret = okfn(net, sk, skb); + return ret; } /* Call setsockopt() */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 83b9a2e0d8d4..8e42253e5d4d 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -79,10 +79,12 @@ enum ip_set_ext_id { IPSET_EXT_ID_MAX, }; +struct ip_set; + /* Extension type */ struct ip_set_ext_type { /* Destroy extension private data (can be NULL) */ - void (*destroy)(void *ext); + void (*destroy)(struct ip_set *set, void *ext); enum ip_set_extension type; enum ipset_cadt_flags flag; /* Size and minimal alignment */ @@ -92,17 +94,6 @@ struct ip_set_ext_type { extern const struct ip_set_ext_type ip_set_extensions[]; -struct 
ip_set_ext { - u64 packets; - u64 bytes; - u32 timeout; - u32 skbmark; - u32 skbmarkmask; - u32 skbprio; - u16 skbqueue; - char *comment; -}; - struct ip_set_counter { atomic64_t bytes; atomic64_t packets; @@ -122,6 +113,15 @@ struct ip_set_skbinfo { u32 skbmarkmask; u32 skbprio; u16 skbqueue; + u16 __pad; +}; + +struct ip_set_ext { + struct ip_set_skbinfo skbinfo; + u64 packets; + u64 bytes; + char *comment; + u32 timeout; }; struct ip_set; @@ -252,6 +252,10 @@ struct ip_set { u8 flags; /* Default timeout value, if enabled */ u32 timeout; + /* Number of elements (vs timeout) */ + u32 elements; + /* Size of the dynamic extensions (vs timeout) */ + size_t ext_size; /* Element data size */ size_t dsize; /* Offsets to extensions in elements */ @@ -268,7 +272,7 @@ ip_set_ext_destroy(struct ip_set *set, void *data) */ if (SET_WITH_COMMENT(set)) ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( - ext_comment(data, set)); + set, ext_comment(data, set)); } static inline int @@ -294,104 +298,6 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); } -static inline void -ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) -{ - atomic64_add((long long)bytes, &(counter)->bytes); -} - -static inline void -ip_set_add_packets(u64 packets, struct ip_set_counter *counter) -{ - atomic64_add((long long)packets, &(counter)->packets); -} - -static inline u64 -ip_set_get_bytes(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->bytes); -} - -static inline u64 -ip_set_get_packets(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->packets); -} - -static inline void -ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - if (ext->packets != ULLONG_MAX && - !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { - ip_set_add_bytes(ext->bytes, counter); - ip_set_add_packets(ext->packets, counter); - } - if (flags & IPSET_FLAG_MATCH_COUNTERS) { - mext->packets = ip_set_get_packets(counter); - mext->bytes = ip_set_get_bytes(counter); - } -} - -static inline void -ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - mext->skbmark = skbinfo->skbmark; - mext->skbmarkmask = skbinfo->skbmarkmask; - mext->skbprio = skbinfo->skbprio; - mext->skbqueue = skbinfo->skbqueue; -} -static inline bool -ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) -{ - /* Send nonzero parameters only */ - return ((skbinfo->skbmark || skbinfo->skbmarkmask) && - nla_put_net64(skb, IPSET_ATTR_SKBMARK, - cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask), - IPSET_ATTR_PAD)) || - (skbinfo->skbprio && - nla_put_net32(skb, IPSET_ATTR_SKBPRIO, - cpu_to_be32(skbinfo->skbprio))) || - (skbinfo->skbqueue && - nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, - cpu_to_be16(skbinfo->skbqueue))); -} - -static inline void -ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext) -{ - skbinfo->skbmark = ext->skbmark; - skbinfo->skbmarkmask = ext->skbmarkmask; - skbinfo->skbprio = ext->skbprio; - skbinfo->skbqueue = ext->skbqueue; -} - -static inline bool -ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) -{ - return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter)), - IPSET_ATTR_PAD) || - nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter)), - IPSET_ATTR_PAD); 
-} - -static inline void -ip_set_init_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext) -{ - if (ext->bytes != ULLONG_MAX) - atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); - if (ext->packets != ULLONG_MAX) - atomic64_set(&(counter)->packets, (long long)(ext->packets)); -} - /* Netlink CB args */ enum { IPSET_CB_NET = 0, /* net namespace */ @@ -431,6 +337,8 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, size_t align); extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); +extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, + const void *e, bool active); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) @@ -546,10 +454,8 @@ bitmap_bytes(u32 a, u32 b) #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_comment.h> - -int -ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, - const void *e, bool active); +#include <linux/netfilter/ipset/ip_set_counter.h> +#include <linux/netfilter/ipset/ip_set_skbinfo.h> #define IP_SET_INIT_KEXT(skb, opt, set) \ { .bytes = (skb)->len, .packets = 1, \ diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h index 5e4662a71e01..366d6c0ea04f 100644 --- a/include/linux/netfilter/ipset/ip_set_bitmap.h +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -6,8 +6,8 @@ #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF enum { + IPSET_ADD_STORE_PLAIN_TIMEOUT = -1, IPSET_ADD_FAILED = 1, - IPSET_ADD_STORE_PLAIN_TIMEOUT, IPSET_ADD_START_STORED_TIMEOUT, }; diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 8d0248525957..8e2bab1e8e90 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -20,13 +20,14 @@ ip_set_comment_uget(struct nlattr *tb) * The kadt functions don't use the comment extensions in any way. */ static inline void -ip_set_init_comment(struct ip_set_comment *comment, +ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, const struct ip_set_ext *ext) { struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); size_t len = ext->comment ? strlen(ext->comment) : 0; if (unlikely(c)) { + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } @@ -34,16 +35,17 @@ ip_set_init_comment(struct ip_set_comment *comment, return; if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) len = IPSET_MAX_COMMENT_SIZE; - c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); if (unlikely(!c)) return; strlcpy(c->str, ext->comment, len + 1); + set->ext_size += sizeof(*c) + strlen(c->str) + 1; rcu_assign_pointer(comment->c, c); } /* Used only when dumping a set, protected by rcu_read_lock_bh() */ static inline int -ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) +ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); @@ -58,13 +60,14 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) * of the set data anymore. 
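The set->ext_size accounting introduced here follows a single invariant: subtract before freeing, add after a successful allocation. A condensed, hypothetical sketch of that pattern (example_replace_ext is not part of the patch):

/* Illustration of the ext_size invariant used by the comment extension:
 * set->ext_size always reflects the bytes currently held by dynamically
 * allocated extension data.
 */
static void example_replace_ext(struct ip_set *set, void **slot,
                                size_t old_len, size_t new_len)
{
        void *n = kmalloc(new_len, GFP_ATOMIC);

        if (!n)
                return;
        if (*slot)
                set->ext_size -= old_len;       /* account the free */
        kfree(*slot);
        *slot = n;
        set->ext_size += new_len;               /* account the alloc */
}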
*/ static inline void -ip_set_comment_free(struct ip_set_comment *comment) +ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) { struct ip_set_comment_rcu *c; c = rcu_dereference_protected(comment->c, 1); if (unlikely(!c)) return; + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h new file mode 100644 index 000000000000..bb6fba480118 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -0,0 +1,75 @@ +#ifndef _IP_SET_COUNTER_H +#define _IP_SET_COUNTER_H + +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->packets); +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } + if (flags & IPSET_FLAG_MATCH_COUNTERS) { + mext->packets = ip_set_get_packets(counter); + mext->bytes = ip_set_get_bytes(counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_COUNTER_H */ diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h new file mode 100644 index 000000000000..29d7ef2bc3fa --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h @@ -0,0 +1,46 @@ +#ifndef _IP_SET_SKBINFO_H +#define _IP_SET_SKBINFO_H + +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
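For reference, a sketch of how a set type consumes the counter helpers just moved into this header, mirroring how the existing kadt paths call them (the surrounding function and element name are illustrative):

/* Illustrative match path: update/test the element's counter extension
 * with the helpers above. SET_WITH_COUNTER() and ext_counter() are the
 * existing ip_set accessors; example_kadt_match() is hypothetical.
 */
static int example_kadt_match(struct ip_set *set, void *elem,
                              const struct ip_set_ext *ext,
                              struct ip_set_ext *mext, u32 flags)
{
        if (SET_WITH_COUNTER(set))
                ip_set_update_counter(ext_counter(elem, set),
                                      ext, mext, flags);
        return 1;
}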
+ */ + +#ifdef __KERNEL__ + +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbinfo = *skbinfo; +} + +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + *skbinfo = ext->skbinfo; +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_SKBINFO_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 1d6a935c1ac5..bfb3531fd88a 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -40,7 +40,7 @@ ip_set_timeout_uget(struct nlattr *tb) } static inline bool -ip_set_timeout_expired(unsigned long *t) +ip_set_timeout_expired(const unsigned long *t) { return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); } @@ -63,7 +63,7 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) } static inline u32 -ip_set_timeout_get(unsigned long *timeout) +ip_set_timeout_get(const unsigned long *timeout) { return *timeout == IPSET_ELEM_PERMANENT ? 0 : jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index 40dcc82058d1..ff721d7325cf 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -25,7 +25,7 @@ enum ct_dccp_roles { #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) #ifdef __KERNEL__ -#include <net/netfilter/nf_conntrack_tuple.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> struct nf_ct_dccp { u_int8_t role[IP_CT_DIR_MAX]; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 2ad1a2b289b5..5117e4d2ddfa 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/static_key.h> +#include <linux/netfilter.h> #include <uapi/linux/netfilter/x_tables.h> /* Test a struct->invflags and a boolean for inequality */ @@ -17,14 +18,9 @@ * @target: the target extension * @matchinfo: per-match data * @targetinfo: per-target data - * @net network namespace through which the action was invoked - * @in: input netdevice - * @out: output netdevice + * @state: pointer to hook state this packet came from * @fragoff: packet is a fragment, this is the data offset * @thoff: position of transport header relative to skb->data - * @hook: hook number given packet came from - * @family: Actual NFPROTO_* through which the function is invoked - * (helpful when match->family == NFPROTO_UNSPEC) * * Fields written to by extensions: * @@ -38,15 +34,47 @@ struct xt_action_param { union { const void *matchinfo, *targinfo; }; - struct net *net; - const struct net_device *in, *out; + const struct nf_hook_state *state; int fragoff; unsigned int thoff; - unsigned int hooknum; - u_int8_t family; bool hotdrop; }; +static inline struct net *xt_net(const 
struct xt_action_param *par) +{ + return par->state->net; +} + +static inline struct net_device *xt_in(const struct xt_action_param *par) +{ + return par->state->in; +} + +static inline const char *xt_inname(const struct xt_action_param *par) +{ + return par->state->in->name; +} + +static inline struct net_device *xt_out(const struct xt_action_param *par) +{ + return par->state->out; +} + +static inline const char *xt_outname(const struct xt_action_param *par) +{ + return par->state->out->name; +} + +static inline unsigned int xt_hooknum(const struct xt_action_param *par) +{ + return par->state->hook; +} + +static inline u_int8_t xt_family(const struct xt_action_param *par) +{ + return par->state->pf; +} + /** * struct xt_mtchk_param - parameters for match extensions' * checkentry functions @@ -375,38 +403,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a, return ret; } +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char __percpu *mem; +}; -/* On SMP, ip(6)t_entry->counters.pcnt holds address of the - * real (percpu) counter. On !SMP, its just the packet count, - * so nothing needs to be done there. - * - * xt_percpu_counter_alloc returns the address of the percpu - * counter, or 0 on !SMP. We force an alignment of 16 bytes - * so that bytes/packets share a common cache line. - * - * Hence caller must use IS_ERR_VALUE to check for error, this - * allows us to return 0 for single core systems without forcing - * callers to deal with SMP vs. NONSMP issues. - */ -static inline unsigned long xt_percpu_counter_alloc(void) -{ - if (nr_cpu_ids > 1) { - void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), - sizeof(struct xt_counters)); - - if (res == NULL) - return -ENOMEM; - - return (__force unsigned long) res; - } - - return 0; -} -static inline void xt_percpu_counter_free(u64 pcnt) -{ - if (nr_cpu_ids > 1) - free_percpu((void __percpu *) (unsigned long) pcnt); -} +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, + struct xt_counters *counter); +void xt_percpu_counter_free(struct xt_counters *cnt); static inline struct xt_counters * xt_get_this_cpu_counter(struct xt_counters *cnt) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 33e37fb41d5d..59476061de86 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -19,6 +19,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb) { struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress); struct nf_hook_state state; + int ret; /* Must recheck the ingress hook head, in the event it became NULL * after the check in nf_hook_ingress_active evaluated to true. 
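A sketch of what the conversion to these accessors looks like inside a match extension: code that previously read par->net, par->family or par->hooknum now goes through the embedded hook state (the match itself is hypothetical):

/* Illustrative match: the old direct dereferences become accessor
 * calls on the hook state carried in xt_action_param.
 */
static bool example_mt(const struct sk_buff *skb,
                       struct xt_action_param *par)
{
        struct net *net = xt_net(par);

        if (xt_family(par) == NFPROTO_IPV4 &&
            xt_hooknum(par) == NF_INET_LOCAL_IN)
                return net_eq(net, &init_net);
        return false;
}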
@@ -26,10 +27,14 @@ static inline int nf_hook_ingress(struct sk_buff *skb) if (unlikely(!e)) return 0; - nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN, + nf_hook_state_init(&state, NF_NETDEV_INGRESS, NFPROTO_NETDEV, skb->dev, NULL, NULL, dev_net(skb->dev), NULL); - return nf_hook_slow(skb, &state); + ret = nf_hook_slow(skb, &state, e); + if (ret == 0) + return -1; + + return ret; } static inline void nf_hook_ingress_init(struct net_device *dev) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index b25ee9ffdbe6..1828900c9411 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) struct net_device *dev = napi->dev; if (dev && dev->npinfo) { - spin_lock(&napi->poll_lock); - napi->poll_owner = smp_processor_id(); + int owner = smp_processor_id(); + + while (cmpxchg(&napi->poll_owner, -1, owner) != -1) + cpu_relax(); + return napi; } return NULL; @@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have) { struct napi_struct *napi = have; - if (napi) { - napi->poll_owner = -1; - spin_unlock(&napi->poll_lock); - } + if (napi) + smp_store_release(&napi->poll_owner, -1); } static inline bool netpoll_tx_running(struct net_device *dev) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9094faf0699d..bca536341d1a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -440,6 +440,7 @@ enum lock_type4 { #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) +#define FATTR4_WORD2_MODE_UMASK (1UL << 17) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 810124b33327..f1da8c8dd473 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -55,22 +55,18 @@ struct nfs_access_entry { struct rcu_head rcu_head; }; -struct nfs_lockowner { - fl_owner_t l_owner; - pid_t l_pid; -}; - struct nfs_lock_context { atomic_t count; struct list_head list; struct nfs_open_context *open_context; - struct nfs_lockowner lockowner; + fl_owner_t lockowner; atomic_t io_count; }; struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; + fl_owner_t flock_owner; struct dentry *dentry; struct rpc_cred *cred; struct nfs4_state *state; @@ -344,11 +340,10 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); -extern int nfs_attribute_timeout(struct inode *inode); extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); -extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_setattr(struct dentry *, struct iattr *); @@ -358,7 +353,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context 
*nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); -extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern void nfs_file_clear_open_context(struct file *flip); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index beb1e10f446e..348f7c158084 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -216,6 +216,20 @@ struct nfs4_get_lease_time_res { struct nfs_fsinfo *lr_fsinfo; }; +struct xdr_stream; +struct nfs4_xdr_opaque_data; + +struct nfs4_xdr_opaque_ops { + void (*encode)(struct xdr_stream *, const void *args, + const struct nfs4_xdr_opaque_data *); + void (*free)(struct nfs4_xdr_opaque_data *); +}; + +struct nfs4_xdr_opaque_data { + const struct nfs4_xdr_opaque_ops *ops; + void *data; +}; + #define PNFS_LAYOUT_MAXSIZE 4096 struct nfs4_layoutdriver_data { @@ -306,6 +320,7 @@ struct nfs4_layoutreturn_args { struct pnfs_layout_range range; nfs4_stateid stateid; __u32 layout_type; + struct nfs4_xdr_opaque_data *ld_private; }; struct nfs4_layoutreturn_res { @@ -321,6 +336,7 @@ struct nfs4_layoutreturn { struct nfs_client *clp; struct inode *inode; int rpc_status; + struct nfs4_xdr_opaque_data ld_private; }; #define PNFS_LAYOUTSTATS_MAXSIZE 256 @@ -341,8 +357,7 @@ struct nfs42_layoutstat_devinfo { __u64 write_count; __u64 write_bytes; __u32 layout_type; - layoutstats_encode_t layoutstats_encode; - void *layout_private; + struct nfs4_xdr_opaque_data ld_private; }; struct nfs42_layoutstat_args { @@ -418,6 +433,7 @@ struct nfs_openargs { enum open_claim_type4 claim; enum createmode4 createmode; const struct nfs4_label *label; + umode_t umask; }; struct nfs_openres { @@ -469,6 +485,7 @@ struct nfs_closeargs { fmode_t fmode; u32 share_access; const u32 * bitmask; + struct nfs4_layoutreturn_args *lr_args; }; struct nfs_closeres { @@ -477,6 +494,8 @@ struct nfs_closeres { struct nfs_fattr * fattr; struct nfs_seqid * seqid; const struct nfs_server *server; + struct nfs4_layoutreturn_res *lr_res; + int lr_ret; }; /* * * Arguments to the lock,lockt, and locku call. @@ -549,12 +568,15 @@ struct nfs4_delegreturnargs { const struct nfs_fh *fhandle; const nfs4_stateid *stateid; const u32 * bitmask; + struct nfs4_layoutreturn_args *lr_args; }; struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; struct nfs_server *server; + struct nfs4_layoutreturn_res *lr_res; + int lr_ret; }; /* @@ -937,6 +959,7 @@ struct nfs4_create_arg { const struct nfs_fh * dir_fh; const u32 * bitmask; const struct nfs4_label *label; + umode_t umask; }; struct nfs4_create_res { diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a78c35cff1ae..aacca824a6ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,23 @@ #include <linux/sched.h> #include <asm/irq.h> +/* + * The run state of the lockup detectors is controlled by the content of the + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. 
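To illustrate the nfs4_xdr_opaque_data plumbing above, a sketch of the callbacks a layout driver might attach to ld_private (all example_* names are hypothetical):

/* Hypothetical layout driver glue: ld_private carries driver-private
 * data through the layoutreturn XDR encode path via these callbacks.
 */
static void example_ld_encode(struct xdr_stream *xdr, const void *args,
                              const struct nfs4_xdr_opaque_data *opaque)
{
        /* encode opaque->data into the layoutreturn body here */
}

static void example_ld_free(struct nfs4_xdr_opaque_data *opaque)
{
        kfree(opaque->data);
}

static const struct nfs4_xdr_opaque_ops example_ld_ops = {
        .encode = example_ld_encode,
        .free   = example_ld_free,
};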
+ * + * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' + * are variables that are only used as an 'interface' between the parameters + * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The + * 'watchdog_thresh' variable is handled differently because its value is not + * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' + * is equal to zero. + */ +#define NMI_WATCHDOG_ENABLED_BIT 0 +#define SOFT_WATCHDOG_ENABLED_BIT 1 +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * @@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled; extern int soft_watchdog_enabled; extern int watchdog_user_enabled; extern int watchdog_thresh; +extern unsigned long watchdog_enabled; extern unsigned long *watchdog_cpumask_bits; +#ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; +#else +#define sysctl_softlockup_all_cpu_backtrace 0 +#define sysctl_hardlockup_all_cpu_backtrace 0 +#endif +extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 6f47562d477b..de87ceac110e 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) } /** - * ntb_mw_count() - get the number of scratchpads + * ntb_spad_count() - get the number of scratchpads * @ntb: NTB device context. * * Hardware and topology may support a different number of scratchpads. @@ -968,6 +968,9 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, */ static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx) { + if (!ntb->ops->peer_spad_read) + return 0; + return ntb->ops->peer_spad_read(ntb, idx); } diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h new file mode 100644 index 000000000000..f21471f7ee40 --- /dev/null +++ b/include/linux/nvme-fc-driver.h @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2016, Avago Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _NVME_FC_DRIVER_H +#define _NVME_FC_DRIVER_H 1 + + +/* + * ********************** LLDD FC-NVME Host API ******************** + * + * For FC LLDDs that are the NVME Host role. 
+ * + * ****************************************************************** + */ + + + +/* FC Port role bitmask - can merge with FC Port Roles in fc transport */ +#define FC_PORT_ROLE_NVME_INITIATOR 0x10 +#define FC_PORT_ROLE_NVME_TARGET 0x20 +#define FC_PORT_ROLE_NVME_DISCOVERY 0x40 + + +/** + * struct nvme_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Host role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvme_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_role; + u32 port_id; +}; + + +/** + * struct nvmefc_ls_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FC-4 LS + * request and obtain a response. + * + * Values set by the NVME-FC layer prior to calling the LLDD ls_req + * entrypoint. + * @rqstaddr: pointer to request buffer + * @rqstdma: PCI DMA address of request buffer + * @rqstlen: Length, in bytes, of request buffer + * @rspaddr: pointer to response buffer + * @rspdma: PCI DMA address of response buffer + * @rsplen: Length, in bytes, of response buffer + * @timeout: Maximum amount of time, in seconds, to wait for the LS response. + * If timeout exceeded, LLDD to abort LS exchange and complete + * LS request with error status. + * @private: pointer to memory allocated alongside the ls request structure + * that is specifically for the LLDD to use while processing the + * request. The length of the buffer corresponds to the + * lsrqst_priv_sz value specified in the nvme_fc_port_template + * supplied by the LLDD. + * @done: The callback routine the LLDD is to invoke upon completion of + * the LS request. req argument is the pointer to the original LS + * request structure. Status argument must be 0 upon success, a + * negative errno on failure (example: -ENXIO). + */ +struct nvmefc_ls_req { + void *rqstaddr; + dma_addr_t rqstdma; + u32 rqstlen; + void *rspaddr; + dma_addr_t rspdma; + u32 rsplen; + u32 timeout; + + void *private; + + void (*done)(struct nvmefc_ls_req *req, int status); + +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +enum nvmefc_fcp_datadir { + NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ + NVMEFC_FCP_WRITE, + NVMEFC_FCP_READ, +}; + + +#define NVME_FC_MAX_SEGMENTS 256 + +/** + * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FCP IO operation. + * + * Values set by the NVME-FC layer prior to calling the LLDD fcp_io + * entrypoint. 
+ * @cmdaddr: pointer to the FCP CMD IU buffer + * @rspaddr: pointer to the FCP RSP IU buffer + * @cmddma: PCI DMA address of the FCP CMD IU buffer + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * @cmdlen: Length, in bytes, of the FCP CMD IU buffer + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer + * @sg_table: scatter/gather structure for payload data + * @first_sgl: memory for 1st scatter/gather list segment for payload data + * @sg_cnt: number of elements in the scatter/gather list + * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx) + * @sqid: The nvme SQID the command is being issued on + * @done: The callback routine the LLDD is to invoke upon completion of + * the FCP operation. req argument is the pointer to the original + * FCP IO operation. + * @private: pointer to memory allocated alongside the FCP operation + * request structure that is specifically for the LLDD to use + * while processing the operation. The length of the buffer + * corresponds to the fcprqst_priv_sz value specified in the + * nvme_fc_port_template supplied by the LLDD. + * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of payload data, in bytes, that was + * transferred. Should equal payload_length on success. + * @rcv_rsplen: length, in bytes, of the FCP RSP IU received. + * @status: Completion status of the FCP operation. Must be 0 upon success, + * NVME_SC_FC_xxx value upon failure. Note: this is NOT a + * reflection of the NVME CQE completion status. Only the status + * of the FCP operation at the NVME-FC level. + */ +struct nvmefc_fcp_req { + void *cmdaddr; + void *rspaddr; + dma_addr_t cmddma; + dma_addr_t rspdma; + u16 cmdlen; + u16 rsplen; + + u32 payload_length; + struct sg_table sg_table; + struct scatterlist *first_sgl; + int sg_cnt; + enum nvmefc_fcp_datadir io_dir; + + __le16 sqid; + + void (*done)(struct nvmefc_fcp_req *req); + + void *private; + + u32 transferred_length; + u16 rcv_rsplen; + u32 status; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/* + * Direct copy of fc_port_state enum. For later merging + */ +enum nvme_fc_obj_state { + FC_OBJSTATE_UNKNOWN, + FC_OBJSTATE_NOTPRESENT, + FC_OBJSTATE_ONLINE, + FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */ + FC_OBJSTATE_BLOCKED, + FC_OBJSTATE_BYPASSED, + FC_OBJSTATE_DIAGNOSTICS, + FC_OBJSTATE_LINKDOWN, + FC_OBJSTATE_ERROR, + FC_OBJSTATE_LOOPBACK, + FC_OBJSTATE_DELETED, +}; + + +/** + * struct nvme_fc_local_port - structure used between NVME-FC transport and + * a LLDD to reference a local NVME host port. + * Allocated/created by the nvme_fc_register_localport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport host port number + * @port_role: What NVME roles are supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the local_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link state. LLDD + * may reference fields directly to change them. 
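A sketch of the completion half of the nvmefc_fcp_req contract from the LLDD side, with the hardware specifics elided (the helper and its argument sources are illustrative):

/* Illustrative LLDD completion: fill the status fields described above,
 * then invoke done(). 'hw_sts' stands in for whatever the adapter
 * reported for the exchange.
 */
static void example_lldd_fcp_done(struct nvmefc_fcp_req *req,
                                  u32 bytes_xfrd, u16 rsp_len, u32 hw_sts)
{
        req->transferred_length = bytes_xfrd;
        req->rcv_rsplen = rsp_len;
        req->status = hw_sts;           /* 0 or an NVME_SC_FC_xxx value */
        req->done(req);
}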
Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvme_fc_local_port { + /* static/read-only fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_remote_port - structure used between NVME-FC transport and + * a LLDD to reference a remote NVME subsystem port. + * Allocated/created by the nvme_fc_register_remoteport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport remote subsystem port number + * @port_role: What NVME roles are supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @localport: pointer to the NVME-FC local host port the subsystem is + * connected to. + * @private: pointer to memory allocated alongside the remote port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the remote_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link or login + * state. LLDD may reference fields directly to change them. Initialized by + * the port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the remote port. Valid values are + * ONLINE or UNKNOWN. + */ +struct nvme_fc_remote_port { + /* static fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + + struct nvme_fc_local_port *localport; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_port_template - structure containing static entrypoints and + * operational parameters for an LLDD that supports NVME host + * behavior. Passed by reference in port registrations. + * NVME-FC transport remembers template reference and may + * access it during runtime operation. + * + * Host/Initiator Transport Entrypoints/Parameters: + * + * @localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_unregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the localport has been deleted. + * Entrypoint is Mandatory. + * + * @remoteport_delete: The LLDD initiates deletion of a remoteport via + * nvme_fc_unregister_remoteport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the remoteport has been deleted. + * Entrypoint is Mandatory. + * + * @create_queue: Upon creating a host<->controller association, queues are + * created such that they can be affinitized to cpus/cores. This + * callback notifies the LLDD that a controller queue is being + * created. The LLDD may choose to allocate an associated hw queue + * or map it onto a shared hw queue. 
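A minimal, hypothetical create_queue that maps the controller queue onto a pre-existing hw queue and returns it as the opaque handle (example_hw_queue and example_lport_hwq are stand-ins for LLDD internals):

struct example_hw_queue {
        u16 depth;
        /* adapter-specific state */
};

struct example_hw_queue *example_lport_hwq(struct nvme_fc_local_port *lport,
                                           unsigned int qidx);

/* Illustrative create_queue: the handle stored via *handle is given
 * back on every fcp_io posted to this controller queue.
 */
static int example_create_queue(struct nvme_fc_local_port *lport,
                                unsigned int qidx, u16 qsize, void **handle)
{
        struct example_hw_queue *hwq = example_lport_hwq(lport, qidx);

        if (!hwq || qsize > hwq->depth)
                return -EINVAL;
        *handle = hwq;
        return 0;
}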
Upon return from the call, the + * LLDD specifies a handle that will be given back to it for any + * command that is posted to the controller queue. The handle can + * be used by the LLDD to map quickly to the proper hw queue for + * command execution. The mask of cpus that will map to this queue + * at the block-level is also passed in. The LLDD should use the + * queue id and/or cpu masks to ensure proper affinitization of the + * controller queue to the hw queue. + * Entrypoint is Optional. + * + * @delete_queue: This is the inverse of the create_queue. During + * host<->controller association teardown, this routine is called + * when a controller queue is being terminated. Any association with + * a hw queue should be terminated. If there is a unique hw queue, the + * hw queue should be torn down. + * Entrypoint is Optional. + * + * @poll_queue: Called to poll for the completion of an io on a blk queue. + * Entrypoint is Optional. + * + * @ls_req: Called to issue a FC-NVME FC-4 LS service request. + * The nvmefc_ls_req structure will fully describe the buffers for + * the request payload and where to place the response payload. The + * LLDD is to allocate an exchange, issue the LS request, obtain the + * LS response, and call the "done" routine specified in the request + * structure (argument to done is the ls request structure itself). + * Entrypoint is Mandatory. + * + * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for + * an admin queue or an i/o queue. The nvmefc_fcp_req structure will + * fully describe the io: the buffer containing the FC-NVME CMD IU + * (which contains the SQE), the sg list for the payload if applicable, + * and the buffer to place the FC-NVME RSP IU into. The LLDD will + * complete the i/o, indicating the amount of data transferred or + * any transport error, and call the "done" routine specified in the + * request structure (argument to done is the fcp request structure + * itself). + * Entrypoint is Mandatory. + * + * @ls_abort: called to request the LLDD to abort the indicated ls request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the ls request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @fcp_abort: called to request the LLDD to abort the indicated fcp request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the fcp request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. Typical value is 0xFFFFFFFF to split across + * 4Gig address boundaries + * + * @local_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a localport is allocated. 
The additional memory + * area is solely for the use of the LLDD and its location is specified by + * the localport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @remote_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a remoteport is allocated. The additional memory + * area is solely for the use of the LLDD and its location is specified by + * the remoteport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a ls request structure is allocated. The additional + * memory area is solely for the use of the LLDD and its location is + * specified by the ls_request->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc nvme layer to allocate on the LLDD's + * behalf whenever a fcp request structure is allocated. The additional + * memory area is solely for the use of the LLDD and its location is + * specified by the fcp_request->private pointer. + * Value is Mandatory. Allowed to be zero. + */ +struct nvme_fc_port_template { + /* initiator-based functions */ + void (*localport_delete)(struct nvme_fc_local_port *); + void (*remoteport_delete)(struct nvme_fc_remote_port *); + int (*create_queue)(struct nvme_fc_local_port *, + unsigned int qidx, u16 qsize, + void **handle); + void (*delete_queue)(struct nvme_fc_local_port *, + unsigned int qidx, void *handle); + void (*poll_queue)(struct nvme_fc_local_port *, void *handle); + int (*ls_req)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + int (*fcp_io)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + void (*ls_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + void (*fcp_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + /* sizes of additional private data for data structures */ + u32 local_priv_sz; + u32 remote_priv_sz; + u32 lsrqst_priv_sz; + u32 fcprqst_priv_sz; +}; + + +/* + * Initiator/Host functions + */ + +int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *template, + struct device *dev, + struct nvme_fc_local_port **lport_p); + +int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport); + +int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, + struct nvme_fc_port_info *pinfo, + struct nvme_fc_remote_port **rport_p); + +int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport); + + + +/* + * *************** LLDD FC-NVME Target/Subsystem API *************** + * + * For FC LLDDs that are the NVME Subsystem role + * + * ****************************************************************** + */ + +/** + * struct nvmet_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Subsystem role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * + * Initialization values for dynamic port fields: + * @port_id: FC 
N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvmet_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_id; +}; + + +/** + * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context for + * a FC-NVME Link Service (LS). + * + * The structure is allocated by the LLDD whenever a LS Request is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure + * will be passed back to the LLDD when the response is to be transmitted. + * The LLDD is to use the address to map back to the LLDD exchange structure + * which maintains information such as the targetport the LS was received + * on, the remote FC NVME initiator that sent the LS, and any FC exchange + * context. Upon completion of the LS response transmit, the address of the + * structure will be passed back to the LS rsp done() routine, allowing the + * nvmet-fc layer to release dma resources. Upon completion of the done() + * routine, no further access will be made by the nvmet-fc layer and the + * LLDD can de-allocate the structure. + * + * Field initialization: + * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc + * layer will fully set the fields in order to specify the response + * payload buffer and its length as well as the done routine to be called + * upon completion of the transmit. The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp + * entrypoint. + * @rspbuf: pointer to the LS response buffer + * @rspdma: PCI DMA address of the LS response buffer + * @rsplen: Length, in bytes, of the LS response buffer + * @done: The callback routine the LLDD is to invoke upon completion of + * transmitting the LS response. req argument is the pointer to + * the original ls request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to access + * this pointer. + */ +struct nvmefc_tgt_ls_req { + void *rspbuf; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_ls_req *req); + void *nvmet_fc_private; /* LLDD is not to access !! */ +}; + +/* Operations that NVME-FC layer may request the LLDD to perform for FCP */ +enum { + NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ + NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */ + NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send + * rsp as well + */ + NVMET_FCOP_RSP = 4, /* send rsp frame */ + NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */ + NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */ + NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */ +}; + +/** + * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context and + * the specific FC-NVME IU operation(s) to perform + * for a FC-NVME FCP IO. + * + * Structure used between LLDD and nvmet-fc layer to represent the exchange + * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related + * memory transfers, and its associated cqe transfer). + * + * The structure is allocated by the LLDD whenever a FCP CMD IU is received + * from the FC link. 
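A sketch of the target-side LS response flow just described, assuming the xmt_ls_rsp entrypoint receives the target port and the LS request (example_xmt is a stand-in for the adapter's send primitive):

int example_xmt(struct nvmet_fc_target_port *tgtport, dma_addr_t dma,
                u16 len);

/* Illustrative xmt_ls_rsp: transmit the prebuilt response described by
 * rspdma/rsplen; in a real LLDD, done() runs from the transmit-complete
 * path rather than inline as shown here.
 */
static int example_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                              struct nvmefc_tgt_ls_req *lsreq)
{
        int ret = example_xmt(tgtport, lsreq->rspdma, lsreq->rsplen);

        if (ret)
                return ret;     /* nvmet-fc keeps ownership on error */
        lsreq->done(lsreq);
        return 0;
}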
The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure + * will be passed back to the LLDD for the data operations and transmit of + * the response. The LLDD is to use the address to map back to the LLDD + * exchange structure which maintains information such as the targetport + * the FCP I/O was received on, the remote FC NVME initiator that sent the + * FCP I/O, and any FC exchange context. Upon completion of the FCP target + * operation, the address of the structure will be passed back to the FCP + * op done() routine, allowing the nvmet-fc layer to release dma resources. + * Upon completion of the done() routine for either RSP or ABORT ops, no + * further access will be made by the nvmet-fc layer and the LLDD can + * de-allocate the structure. + * + * Field initialization: + * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for an FCP target operation, the nvmet-fc + * layer will fully set the fields in order to specify the scatter/gather + * list, the transfer length, as well as the done routine to be called + * upon completion of the operation. The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !! + * + * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op + * entrypoint. + * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx) + * @hwqid: Specifies the hw queue index (0..N-1, where N is the + * max_hw_queues value from the LLDD's nvmet_fc_target_template) + * that the operation is to use. + * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred. + * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops. + * @timeout: amount of time, in seconds, to wait for a response from the NVME + * host. A value of 0 is an infinite wait. + * Valid only for the following ops: + * WRITEDATA: caps the wait for data reception + * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used) + * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload + * that is to be transferred. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @ba_rjt: Contains the BA_RJT payload that is to be transferred. + * Valid only for the NVMET_FCOP_BA_RJT op. + * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @sg_cnt: Number of valid entries in the scatter/gather list. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @rspaddr: pointer to the FCP RSP IU buffer to be transmitted + * Used by RSP and READDATA_RSP ops + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @done: The callback routine the LLDD is to invoke upon completion of + * the operation. req argument is the pointer to the original + * FCP subsystem op request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to + * reference this field. + * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of DATA_OUT payload data received by 
If not a WRITEDATA operation, value must + * be set to 0. Should equal transfer_length on success. + * @fcp_error: status of the FCP operation. Must be 0 on success; on failure + * must be an NVME_SC_FC_xxxx value. + */ +struct nvmefc_tgt_fcp_req { + u8 op; + u16 hwqid; + u32 offset; + u32 timeout; + u32 transfer_length; + struct fc_ba_rjt ba_rjt; + struct scatterlist sg[NVME_FC_MAX_SEGMENTS]; + int sg_cnt; + void *rspaddr; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_fcp_req *); + + void *nvmet_fc_private; /* LLDD is not to access !! */ + + u32 transferred_length; + int fcp_error; +}; + + +/* Target Features (Bit fields) LLDD supports */ +enum { + NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0), + /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which + * sends (the last) Read Data sequence followed by the RSP + * sequence in one LLDD operation. Errors during Data + * sequence transmit must not allow RSP sequence to be sent. + */ + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), + /* Bit 1: When 0, the LLDD will deliver FCP CMD + * on the CPU it should be affinitized to. Thus work will + * be scheduled on the cpu received on. When 1, the LLDD + * may not deliver the CMD on the CPU it should be worked + * on. The transport should pick a cpu to schedule the work + * on. + */ +}; + + +/** + * struct nvmet_fc_target_port - structure used between NVME-FC transport and + * an LLDD to reference a local NVME subsystem port. + * Allocated/created by the nvmet_fc_register_targetport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport subsystem port number + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the target_priv_sz + * value specified in the nvmet_fc_target_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change based on link state. LLDD + * may reference fields directly to change them. Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvmet_fc_target_port { + /* static/read-only fields */ + u32 port_num; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvmet_fc_target_template - structure containing static entrypoints + * and operational parameters for an LLDD that supports NVME + * subsystem behavior. Passed by reference in port + * registrations. NVME-FC transport remembers template + * reference and may access it during runtime operation. + * + * Subsystem/Target Transport Entrypoints/Parameters: + * + * @targetport_delete: The LLDD initiates deletion of a targetport via + * nvmet_fc_unregister_targetport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the targetport has been deleted. + * Entrypoint is Mandatory. + * + * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange + * structure specified in the nvmet_fc_rcv_ls_req() call made when + * the LS request was received. The structure will fully describe + * the buffers for the response payload and the dma address of the + * payload. The LLDD is to transmit the response (or return a non-zero + * errno status), and upon completion of the transmit, call the + * "done" routine specified in the nvmefc_tgt_ls_req structure + * (argument to done is the ls request structure itself). + * After calling the done routine, the LLDD shall consider the + * LS handling complete and the nvmefc_tgt_ls_req structure may + * be freed/released. + * Entrypoint is Mandatory. + * + * @fcp_op: Called to perform a data transfer, transmit a response, or + * abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same + * LLDD-supplied exchange structure specified in the + * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received. + * The op field in the structure shall indicate the operation for + * the LLDD to perform relative to the io. + * NVMET_FCOP_READDATA operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferably 1). Note: the fc-nvme layer + * may call the READDATA operation multiple times for longer + * payloads. + * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the + * payload data (described by sglist) from the host via 1 or + * more FC sequences (preferably 1). The LLDD is to generate + * the XFER_RDY IU(s) corresponding to the data being requested. + * Note: the FC-NVME layer may call the WRITEDATA operation + * multiple times for longer payloads. + * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferably 1). If an error occurs during + * payload data transmission, the LLDD is to set the + * nvmefc_tgt_fcp_req fcp_error and transferred_length field, then + * consider the operation complete. On error, the LLDD is not to + * transmit the FCP_RSP iu. If all payload data is transferred + * successfully, the LLDD is to update the nvmefc_tgt_fcp_req + * transferred_length field and may subsequently transmit the + * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen). + * The LLDD is to await FCP_CONF reception to confirm the RSP + * reception by the host. The LLDD may retransmit the FCP_RSP iu + * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon + * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req + * fcp_error field and consider the operation complete. + * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload + * (described by rspbuf, rspdma, rsplen). The LLDD is to await + * FCP_CONF reception to confirm the RSP reception by the host. + * The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME. + * Upon reception of FCP_CONF, or upon FCP_CONF failure, the + * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and + * consider the operation complete. + * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange + * corresponding to the fcp operation. The LLDD shall send + * ABTS and follow FC exchange abort-multi rules, including + * ABTS retries and possible logout. + * Upon completing the indicated operation, the LLDD is to set the + * status fields for the operation (transferred_length and fcp_error + * status) in the request, then call the "done" routine + * indicated in the fcp request.
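A minimal completion sketch for the status/done contract just described (everything except the nvmefc_tgt_fcp_req fields is hypothetical):

	static void my_lldd_fcp_op_complete(struct nvmefc_tgt_fcp_req *fcpreq,
					    u32 bytes_moved, int error)
	{
		/* status must be set before done() is invoked */
		fcpreq->transferred_length = bytes_moved;
		fcpreq->fcp_error = error;	/* 0 or an NVME_SC_FC_xxx value */
		fcpreq->done(fcpreq);
	}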
Upon return from the "done" + * routine for either an NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation, + * the fc-nvme layer will no longer reference the fcp request, + * allowing the LLDD to free/release the fcp request. + * Note: when calling the done routine for READDATA or WRITEDATA + * operations, the fc-nvme layer may immediately convert, in the same + * thread and before returning to the LLDD, the fcp operation to + * the next operation for the fcp io and call the LLDD's fcp_op + * entrypoint again. If fields in the fcp request are to be accessed + * after the done call, the LLDD should save their values prior to + * calling the done routine, and inspect the saved values after the + * done routine. + * Returns 0 on success, -<errno> on failure (Ex: -EIO) + * Entrypoint is Mandatory. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. Typical value is 0xFFFFFFFF to split across + * 4Gig address boundaries + * + * @target_features: The LLDD sets bits in this field to correspond to + * optional features that are supported by the LLDD. + * Refer to the NVMET_FCTGTFEAT_xxx values. + * Value is Mandatory. Allowed to be zero. + * + * @target_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like the fc-nvme layer to allocate on the LLDD's + * behalf whenever a targetport is allocated. The additional memory + * area is solely for the use of the LLDD and its location is specified + * by the targetport->private pointer. + * Value is Mandatory. Allowed to be zero. + */ +struct nvmet_fc_target_template { + void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); + int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *tls_req); + int (*fcp_op)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + u32 target_features; + + u32 target_priv_sz; +}; + + +int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, + struct nvmet_fc_target_template *template, + struct device *dev, + struct nvmet_fc_target_port **tgtport_p); + +int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); + +int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *lsreq, + void *lsreqbuf, u32 lsreqbuf_len); + +int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq, + void *cmdiubuf, u32 cmdiubuf_len); + +#endif /* _NVME_FC_DRIVER_H */ diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h new file mode 100644 index 000000000000..4b45226bd604 --- /dev/null +++ b/include/linux/nvme-fc.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. + * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + * + */ + +/* + * This file contains definitions relative to FC-NVME r1.11 and a few + * newer items + */ + +#ifndef _NVME_FC_H +#define _NVME_FC_H 1 + + +#define NVME_CMD_SCSI_ID 0xFD +#define NVME_CMD_FC_ID FC_TYPE_NVME + +/* FC-NVME Cmd IU Flags */ +#define FCNVME_CMD_FLAGS_DIRMASK 0x03 +#define FCNVME_CMD_FLAGS_WRITE 0x01 +#define FCNVME_CMD_FLAGS_READ 0x02 + +struct nvme_fc_cmd_iu { + __u8 scsi_id; + __u8 fc_id; + __be16 iu_len; + __u8 rsvd4[3]; + __u8 flags; + __be64 connection_id; + __be32 csn; + __be32 data_len; + struct nvme_command sqe; + __be32 rsvd88[2]; +}; + +#define NVME_FC_SIZEOF_ZEROS_RSP 12 + +struct nvme_fc_ersp_iu { + __u8 rsvd0[2]; + __be16 iu_len; + __be32 rsn; + __be32 xfrd_len; + __be32 rsvd12; + struct nvme_completion cqe; + /* for now - no additional payload */ +}; + + +/* FC-NVME r1.03/16-119v0 NVME Link Services */ +enum { + FCNVME_LS_RSVD = 0, + FCNVME_LS_RJT = 1, + FCNVME_LS_ACC = 2, + FCNVME_LS_CREATE_ASSOCIATION = 3, + FCNVME_LS_CREATE_CONNECTION = 4, + FCNVME_LS_DISCONNECT = 5, +}; + +/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */ +enum { + FCNVME_LSDESC_RSVD = 0x0, + FCNVME_LSDESC_RQST = 0x1, + FCNVME_LSDESC_RJT = 0x2, + FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3, + FCNVME_LSDESC_CREATE_CONN_CMD = 0x4, + FCNVME_LSDESC_DISCONN_CMD = 0x5, + FCNVME_LSDESC_CONN_ID = 0x6, + FCNVME_LSDESC_ASSOC_ID = 0x7, +}; + + +/* ********** start of Link Service Descriptors ********** */ + + +/* + * fills in length of a descriptor. Structure minus descriptor header + */ +static inline __be32 fcnvme_lsdesc_len(size_t sz) +{ + return cpu_to_be32(sz - (2 * sizeof(u32))); +} + + +struct fcnvme_ls_rqst_w0 { + u8 ls_cmd; /* FCNVME_LS_xxx */ + u8 zeros[3]; +}; + +/* FCNVME_LSDESC_RQST */ +struct fcnvme_lsdesc_rqst { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + struct fcnvme_ls_rqst_w0 w0; + __be32 rsvd12; +}; + + + + +/* FCNVME_LSDESC_RJT */ +struct fcnvme_lsdesc_rjt { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8; + + /* + * Reject reason and explanation codes are generic + * to ELS's from LS-3.
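Illustrative use of the fcnvme_lsdesc_len() helper above when building this reject descriptor (the rjt pointer is hypothetical):

	rjt->desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));

i.e. desc_len counts the bytes that follow the two-word descriptor header.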
+ */ + u8 reason_code; + u8 reason_explanation; + + u8 vendor; + __be32 rsvd12; +}; + + +#define FCNVME_ASSOC_HOSTID_LEN 64 +#define FCNVME_ASSOC_HOSTNQN_LEN 256 +#define FCNVME_ASSOC_SUBNQN_LEN 256 + +/* FCNVME_LSDESC_CREATE_ASSOC_CMD */ +struct fcnvme_lsdesc_cr_assoc_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 cntlid; + __be16 sqsize; + __be32 rsvd52; + u8 hostid[FCNVME_ASSOC_HOSTID_LEN]; + u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN]; + u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN]; + u8 rsvd632[384]; +}; + +/* FCNVME_LSDESC_CREATE_CONN_CMD */ +struct fcnvme_lsdesc_cr_conn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 qid; + __be16 sqsize; + __be32 rsvd52; +}; + +/* Disconnect Scope Values */ +enum { + FCNVME_DISCONN_ASSOCIATION = 0, + FCNVME_DISCONN_CONNECTION = 1, +}; + +/* FCNVME_LSDESC_DISCONN_CMD */ +struct fcnvme_lsdesc_disconn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8[3]; + /* note: scope is really a 1 bit field */ + u8 scope; /* FCNVME_DISCONN_xxx */ + __be32 rsvd12; + __be64 id; +}; + +/* FCNVME_LSDESC_CONN_ID */ +struct fcnvme_lsdesc_conn_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 connection_id; +}; + +/* FCNVME_LSDESC_ASSOC_ID */ +struct fcnvme_lsdesc_assoc_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 association_id; +}; + +/* r_ctl values */ +enum { + FCNVME_RS_RCTL_DATA = 1, + FCNVME_RS_RCTL_XFER_RDY = 5, + FCNVME_RS_RCTL_RSP = 8, +}; + + +/* ********** start of Link Services ********** */ + + +/* FCNVME_LS_RJT */ +struct fcnvme_ls_rjt { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + struct fcnvme_lsdesc_rjt rjt; +}; + +/* FCNVME_LS_ACC */ +struct fcnvme_ls_acc_hdr { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + /* Followed by cmd-specific ACC descriptors, see next definitions */ +}; + +/* FCNVME_LS_CREATE_ASSOCIATION */ +struct fcnvme_ls_cr_assoc_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd; +}; + +struct fcnvme_ls_cr_assoc_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_conn_id connectid; +}; + + +/* FCNVME_LS_CREATE_CONNECTION */ +struct fcnvme_ls_cr_conn_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_cr_conn_cmd connect_cmd; +}; + +struct fcnvme_ls_cr_conn_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_conn_id connectid; +}; + +/* FCNVME_LS_DISCONNECT */ +struct fcnvme_ls_disconnect_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_disconn_cmd discon_cmd; +}; + +struct fcnvme_ls_disconnect_acc { + struct fcnvme_ls_acc_hdr hdr; +}; + + +/* + * Yet to be defined in FC-NVME: + */ +#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */ + + +#endif /* _NVME_FC_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index fc3c24206593..3d1c6f1b15c9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -242,6 +242,7 @@ enum { NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, + 
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_VWC_PRESENT = 1 << 0, }; @@ -558,6 +559,23 @@ struct nvme_dsm_range { __le64 slba; }; +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -857,6 +875,7 @@ struct nvme_command { struct nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; @@ -947,6 +966,7 @@ enum { NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, + NVME_SC_ONCS_NOT_SUPPORTED = 0x183, /* * I/O Command Set Specific - Fabrics commands: @@ -973,17 +993,30 @@ enum { NVME_SC_UNWRITTEN_BLOCK = 0x287, NVME_SC_DNR = 0x4000, + + + /* + * FC Transport-specific error status values for NVME commands + * + * Transport-specific status code values must be in the range 0xB0..0xBF + */ + + /* Generic FC failure - catchall */ + NVME_SC_FC_TRANSPORT_ERROR = 0x00B0, + + /* I/O failure due to FC ABTS'd */ + NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1, }; struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: */ - union { - __le16 result16; - __le32 result; - __le64 result64; - }; + union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; + } result; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ diff --git a/include/linux/of.h b/include/linux/of.h index 299aeb192727..d72f01009297 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node * Overlay support */ +enum of_overlay_notify_action { + OF_OVERLAY_PRE_APPLY, + OF_OVERLAY_POST_APPLY, + OF_OVERLAY_PRE_REMOVE, + OF_OVERLAY_POST_REMOVE, +}; + +struct of_overlay_notify_data { + struct device_node *overlay; + struct device_node *target; +}; + #ifdef CONFIG_OF_OVERLAY /* ID based overlays; the API for external users */ @@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree); int of_overlay_destroy(int id); int of_overlay_destroy_all(void); +int of_overlay_notifier_register(struct notifier_block *nb); +int of_overlay_notifier_unregister(struct notifier_block *nb); + #else static inline int of_overlay_create(struct device_node *tree) @@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void) return -ENOTSUPP; } +static inline int of_overlay_notifier_register(struct notifier_block *nb) +{ + return 0; +} + +static inline int of_overlay_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} + #endif #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 4341f32516d8..271b3fdf0070 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -71,6 +71,7 @@ extern int early_init_dt_scan_chosen_stdout(void); extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); extern void early_init_dt_add_memory_arch(u64 base, u64 size); +extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool 
no_map); extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index e80b9c762a03..6a7fc5051099 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, #endif /* CONFIG_OF_IOMMU */ -void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); -const struct iommu_ops *of_iommu_get_ops(struct device_node *np); +static inline void of_iommu_set_ops(struct device_node *np, + const struct iommu_ops *ops) +{ + iommu_register_instance(&np->fwnode, ops); +} + +static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np) +{ + return iommu_get_instance(&np->fwnode); +} extern struct of_device_id __iommu_of_table; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 2ab233661ae5..a58cca8bcb29 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); extern int of_phy_register_fixed_link(struct device_node *np); +extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); #else /* CONFIG_OF */ @@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np) { return -ENOSYS; } +static inline void of_phy_deregister_fixed_link(struct device_node *np) +{ +} static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 7fd5cfce9140..0e0974eceb80 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np); int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); +int of_pci_get_max_link_speed(struct device_node *node); void of_pci_check_probe_only(void); int of_pci_map_rid(struct device_node *np, u32 rid, const char *map_name, const char *map_mask_name, @@ -62,6 +63,12 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid, return -EINVAL; } +static inline int +of_pci_get_max_link_speed(struct device_node *node) +{ + return -EINVAL; +} + static inline void of_pci_check_probe_only(void) { } #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 74e4dda91238..6b5818d6de32 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -79,6 +79,7 @@ enum pageflags { PG_dirty, PG_lru, PG_active, + PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_slab, PG_owner_priv_1, /* Owner use. 
If pagecache, fs may use */ PG_arch_1, @@ -87,7 +88,6 @@ enum pageflags { PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ PG_head, /* A head page */ - PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ @@ -110,6 +110,9 @@ enum pageflags { /* Filesystems */ PG_checked = PG_owner_priv_1, + /* SwapBacked */ + PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ + /* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes * when those inodes are being locally cached. @@ -167,6 +170,9 @@ static __always_inline int PageCompound(struct page *page) * for compound page all operations related to the page flag applied to * head page. * + * PF_ONLY_HEAD: + * for compound page, callers only ever operate on the head page. + * * PF_NO_TAIL: * modifications of the page flag must be done on small or head pages, * checks can be done on tail pages too. @@ -176,6 +182,9 @@ static __always_inline int PageCompound(struct page *page) */ #define PF_ANY(page, enforce) page #define PF_HEAD(page, enforce) compound_head(page) +#define PF_ONLY_HEAD(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(PageTail(page), page); \ + page;}) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ compound_head(page);}) @@ -253,6 +262,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) +PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) @@ -314,7 +324,13 @@ PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP -PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) +static __always_inline int PageSwapCache(struct page *page) +{ + return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags); +} +SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) +CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) #else PAGEFLAG_FALSE(SwapCache) #endif @@ -701,12 +717,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. */ -#define PAGE_FLAGS_CHECK_AT_FREE \ - (1UL << PG_lru | 1UL << PG_locked | \ - 1UL << PG_private | 1UL << PG_private_2 | \ - 1UL << PG_writeback | 1UL << PG_reserved | \ - 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ - 1UL << PG_unevictable | __PG_MLOCKED) +#define PAGE_FLAGS_CHECK_AT_FREE \ + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator.
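Since PG_swapcache is now an alias of PG_owner_priv_1 qualified by PG_swapbacked, code putting a page into the swap cache is expected to set both bits; a minimal sketch of the invariant:

	SetPageSwapBacked(page);	/* PageSwapCache() only reports true for swap-backed pages */
	SetPageSwapCache(page);		/* sets PG_owner_priv_1, aliased above as PG_swapcache */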
@@ -735,6 +751,7 @@ static inline int page_has_private(struct page *page) #undef PF_ANY #undef PF_HEAD +#undef PF_ONLY_HEAD #undef PF_NO_TAIL #undef PF_NO_COMPOUND #endif /* !__GENERATING_BOUNDS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index dd15d39e1985..324c8dbad1e1 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -9,7 +9,7 @@ #include <linux/list.h> #include <linux/highmem.h> #include <linux/compiler.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ @@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* - * Get the offset in PAGE_SIZE. - * (TODO: hugepage should have ->index in PAGE_SIZE) + * Get the index of the page within the radix-tree + * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE) */ -static inline pgoff_t page_to_pgoff(struct page *page) +static inline pgoff_t page_to_index(struct page *page) { pgoff_t pgoff; - if (unlikely(PageHeadHuge(page))) - return page->index << compound_order(page); - if (likely(!PageTransTail(page))) return page->index; @@ -397,6 +394,18 @@ static inline pgoff_t page_to_pgoff(struct page *page) } /* + * Get the offset in PAGE_SIZE. + * (TODO: hugepage should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + + return page_to_index(page); +} + +/* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) @@ -477,22 +486,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, * and for filesystems which need to wait on PG_private.
*/ extern void wait_on_page_bit(struct page *page, int bit_nr); - extern int wait_on_page_bit_killable(struct page *page, int bit_nr); -extern int wait_on_page_bit_killable_timeout(struct page *page, - int bit_nr, unsigned long timeout); - -static inline int wait_on_page_locked_killable(struct page *page) -{ - if (!PageLocked(page)) - return 0; - return wait_on_page_bit_killable(compound_head(page), PG_locked); -} +extern void wake_up_page_bit(struct page *page, int bit_nr); -extern wait_queue_head_t *page_waitqueue(struct page *page); static inline void wake_up_page(struct page *page, int bit) { - __wake_up_bit(page_waitqueue(page), &page->flags, bit); + if (!PageWaiters(page)) + return; + wake_up_page_bit(page, bit); } /* @@ -508,6 +509,13 @@ static inline void wait_on_page_locked(struct page *page) wait_on_page_bit(compound_head(page), PG_locked); } +static inline int wait_on_page_locked_killable(struct page *page) +{ + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); +} + /* * Wait for a page to complete writeback */ diff --git a/include/linux/parser.h b/include/linux/parser.h index 39d5b7955b23..884c1e6eb3fe 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -27,6 +27,7 @@ typedef struct { int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); +int match_u64(substring_t *, u64 *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 7d63a66e8ed4..7a4e83a8c89c 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -24,7 +24,9 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) } extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); -extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res); +struct pci_ecam_ops; +extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, + struct pci_ecam_ops **ecam_ops); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 7adad206b1f4..f0d2b9451270 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -59,6 +59,15 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, /* default ECAM ops */ extern struct pci_ecam_ops pci_generic_ecam_ops; +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ +extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ +extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ +extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ +extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ +extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ +#endif + #ifdef CONFIG_PCI_HOST_GENERIC /* for DT-based PCI controllers that support ECAM */ int pci_host_common_probe(struct platform_device *pdev, diff --git a/include/linux/pci.h b/include/linux/pci.h index 0e49f70dbd9b..e2d1a124216a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -244,6 +244,7 @@ struct pci_cap_saved_state { struct pci_cap_saved_data cap; }; +struct irq_affinity; struct pcie_link_state; struct pci_vpd; struct pci_sriov; @@ -332,7 +333,6 @@ struct pci_dev { * directly, 
use the values stored here. They might be different! */ unsigned int irq; - struct cpumask *irq_affinity; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ bool match_driver; /* Skip attaching driver */ @@ -420,9 +420,13 @@ static inline int pci_channel_offline(struct pci_dev *pdev) struct pci_host_bridge { struct device dev; struct pci_bus *bus; /* root bus */ + struct pci_ops *ops; + void *sysdata; + int busnr; struct list_head windows; /* resource_entry */ void (*release_fn)(struct pci_host_bridge *); void *release_data; + struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* for entire hierarchy */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, @@ -430,10 +434,23 @@ struct pci_host_bridge { resource_size_t start, resource_size_t size, resource_size_t align); + unsigned long private[0] ____cacheline_aligned; }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) +static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge) +{ + return (void *)bridge->private; +} + +static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv) +{ + return container_of(priv, struct pci_host_bridge, private); +} + +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); +int pci_register_host_bridge(struct pci_host_bridge *bridge); struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); void pci_set_host_bridge_release(struct pci_host_bridge *bridge, @@ -1310,8 +1327,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, return rc; return 0; } -int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags); +int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *affd); + void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); @@ -1339,14 +1358,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev, static inline int pci_enable_msix_exact(struct pci_dev *dev, struct msix_entry *entries, int nvec) { return -ENOSYS; } -static inline int pci_alloc_irq_vectors(struct pci_dev *dev, - unsigned int min_vecs, unsigned int max_vecs, - unsigned int flags) + +static inline int +pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *aff_desc) { if (min_vecs > 1) return -EINVAL; return 1; } + static inline void pci_free_irq_vectors(struct pci_dev *dev) { } @@ -1364,6 +1386,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, } #endif +static inline int +pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, + NULL); +} + #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; extern bool pcie_ports_auto; @@ -1928,6 +1958,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) +{ + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + if (!dev->bus->self) + break; + dev = dev->bus->self; + } + return NULL; +} + void 
pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 8c7895061121..2e855afa0212 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -176,6 +176,7 @@ struct hotplug_params { #ifdef CONFIG_ACPI #include <linux/acpi.h> int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); +bool pciehp_is_native(struct pci_dev *pdev); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); int acpi_pci_detect_ejectable(acpi_handle handle); @@ -185,5 +186,6 @@ static inline int pci_get_hp_params(struct pci_dev *dev, { return -ENODEV; } +static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; } #endif #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c58752fe16c4..73dda0edcb97 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -23,8 +23,10 @@ #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 #define PCI_CLASS_STORAGE_SAS 0x0107 +#define PCI_CLASS_STORAGE_EXPRESS 0x010802 #define PCI_CLASS_STORAGE_OTHER 0x0180 + #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 @@ -2251,17 +2253,35 @@ #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 #define PCI_VENDOR_ID_VMWARE 0x15ad +#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 #define PCI_VENDOR_ID_MELLANOX 0x15b3 -#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 +#define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 -#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 -#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 -#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c -#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e #define PCI_VENDOR_ID_DFI 0x15bd diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4741ecdb9817..78ed8105e64d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); +extern int 
perf_event_account_interrupt(struct perf_event *event); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, diff --git a/include/linux/phy.h b/include/linux/phy.h index e25f1830fbcf..f7d95f644eed 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -25,6 +25,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> +#include <linux/phy_led_triggers.h> #include <linux/atomic.h> @@ -85,6 +86,21 @@ typedef enum { } phy_interface_t; /** + * phy_supported_speeds - return all speeds currently supported by a phy device + * @phy: The phy device to return supported speeds of. + * @speeds: buffer to store supported speeds in. + * @size: size of speeds buffer. + * + * Description: Returns the number of supported speeds, and + * fills the speeds buffer with the supported speeds. If the speeds buffer is + * too small to contain all currently supported speeds, will return as + * many speeds as can fit. + */ +unsigned int phy_supported_speeds(struct phy_device *phy, + unsigned int *speeds, + unsigned int size); + +/** * It maps 'enum phy_interface_t' found in include/linux/phy.h * into the device tree binding of 'phy-mode', so that Ethernet * device driver can get phy interface from device tree. @@ -343,7 +359,7 @@ struct phy_c45_device_ids { * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine - * phy_queue: A work_queue for the interrupt + * phy_queue: A work_queue for the phy_mac_interrupt * attached_dev: The attached enet driver's device instance ptr * adjust_link: Callback for the enet controller to respond to * changes in the link state. @@ -401,10 +417,19 @@ struct phy_device { u32 advertising; u32 lp_advertising; + /* Energy efficient ethernet modes which should be prohibited */ + u32 eee_broken_modes; + int autoneg; int link_timeout; +#ifdef CONFIG_LED_TRIGGER_PHY + struct phy_led_trigger *phy_led_triggers; + unsigned int phy_num_led_triggers; + struct phy_led_trigger *last_triggered; +#endif + /* * Interrupt number for this PHY * -1 means no interrupt @@ -425,6 +450,7 @@ struct phy_device { struct net_device *attached_dev; u8 mdix; + u8 mdix_ctrl; void (*adjust_link)(struct net_device *dev); }; @@ -589,6 +615,13 @@ struct phy_driver { void (*get_strings)(struct phy_device *dev, u8 *data); void (*get_stats)(struct phy_device *dev, struct ethtool_stats *stats, u64 *data); + + /* Get and Set PHY tunables */ + int (*get_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, void *data); + int (*set_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, + const void *data); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -764,6 +797,7 @@ void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); +int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); @@ -802,7 +836,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); -void phy_change(struct work_struct *work); +void phy_change(struct phy_device *phydev); +void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device
*phydev, int new_link); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); @@ -825,6 +860,10 @@ int phy_register_fixup_for_id(const char *bus_id, int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); +int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); +int phy_unregister_fixup_for_id(const char *bus_id); +int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); + int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); @@ -836,6 +875,7 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd); int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); +int phy_ethtool_nway_reset(struct net_device *ndev); int __init mdio_bus_init(void); void mdio_bus_exit(void); diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h index 9d18e9f948e9..35c070ea6ea3 100644 --- a/include/linux/phy/phy-qcom-ufs.h +++ b/include/linux/phy/phy-qcom-ufs.h @@ -18,22 +18,6 @@ #include "phy.h" /** - * ufs_qcom_phy_enable_ref_clk() - Enable the phy - * ref clock. - * @phy: reference to a generic phy - * - * returns 0 for success, and non-zero for error. - */ -int ufs_qcom_phy_enable_ref_clk(struct phy *phy); - -/** - * ufs_qcom_phy_disable_ref_clk() - Disable the phy - * ref clock. - * @phy: reference to a generic phy. - */ -void ufs_qcom_phy_disable_ref_clk(struct phy *phy); - -/** * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device * ref clock. * @phy: reference to a generic phy. @@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); */ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); -int ufs_qcom_phy_enable_iface_clk(struct phy *phy); -void ufs_qcom_phy_disable_iface_clk(struct phy *phy); int ufs_qcom_phy_start_serdes(struct phy *phy); int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index ee1bed7dbfc6..78bb0d7f6b11 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) return -ENOSYS; } +static inline int phy_reset(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + static inline int phy_get_bus_width(struct phy *phy) { return -ENOSYS; diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h new file mode 100644 index 000000000000..a2daea0a37d2 --- /dev/null +++ b/include/linux/phy_led_triggers.h @@ -0,0 +1,51 @@ +/* Copyright (C) 2016 National Instruments Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __PHY_LED_TRIGGERS +#define __PHY_LED_TRIGGERS + +struct phy_device; + +#ifdef CONFIG_LED_TRIGGER_PHY + +#include <linux/leds.h> + +#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 +#define PHY_MII_BUS_ID_SIZE (20 - 3) + +#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ + FIELD_SIZEOF(struct mdio_device, addr)+\ + PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) + +struct phy_led_trigger { + struct led_trigger trigger; + char name[PHY_LINK_LED_TRIGGER_NAME_SIZE]; + unsigned int speed; +}; + + +extern int phy_led_triggers_register(struct phy_device *phy); +extern void phy_led_triggers_unregister(struct phy_device *phy); +extern void phy_led_trigger_change_speed(struct phy_device *phy); + +#else + +static inline int phy_led_triggers_register(struct phy_device *phy) +{ + return 0; +} +static inline void phy_led_triggers_unregister(struct phy_device *phy) { } +static inline void phy_led_trigger_change_speed(struct phy_device *phy) { } + +#endif + +#endif diff --git a/include/linux/pim.h b/include/linux/pim.h index e1d756f81348..0e81b2778ae0 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -1,6 +1,7 @@ #ifndef __LINUX_PIM_H #define __LINUX_PIM_H +#include <linux/skbuff.h> #include <asm/byteorder.h> /* Message types - V1 */ @@ -9,24 +10,86 @@ /* Message types - V2 */ #define PIM_VERSION 2 -#define PIM_REGISTER 1 + +/* RFC7761, sec 4.9: + * Type + * Types for specific PIM messages. PIM Types are: + * + * Message Type Destination + * --------------------------------------------------------------------- + * 0 = Hello Multicast to ALL-PIM-ROUTERS + * 1 = Register Unicast to RP + * 2 = Register-Stop Unicast to source of Register + * packet + * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS + * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS + * 5 = Assert Multicast to ALL-PIM-ROUTERS + * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S) + * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft + * packet + * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR + */ +enum { + PIM_TYPE_HELLO, + PIM_TYPE_REGISTER, + PIM_TYPE_REGISTER_STOP, + PIM_TYPE_JOIN_PRUNE, + PIM_TYPE_BOOTSTRAP, + PIM_TYPE_ASSERT, + PIM_TYPE_GRAFT, + PIM_TYPE_GRAFT_ACK, + PIM_TYPE_CANDIDATE_RP_ADV +}; #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) -static inline bool ipmr_pimsm_enabled(void) -{ - return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); -} +/* RFC7761, sec 4.9: + * The PIM header common to all PIM messages is: + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |PIM Ver| Type | Reserved | Checksum | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ -struct pimreghdr -{ +struct pimreghdr { __u8 type; __u8 reserved; __be16 csum; __be32 flags; }; -struct sk_buff; -extern int pim_rcv_v1(struct sk_buff *); +int pim_rcv_v1(struct sk_buff *skb); + +static inline bool ipmr_pimsm_enabled(void) +{ + return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); +} + +static inline struct pimhdr *pim_hdr(const struct sk_buff *skb) +{ + return (struct pimhdr *)skb_transport_header(skb); +} + +static inline u8 pim_hdr_version(const struct pimhdr *pimhdr) +{ + return pimhdr->type >> 4; +} + +static inline u8 pim_hdr_type(const struct pimhdr *pimhdr) +{ + return pimhdr->type & 0xf; +} + +/* check if the address 
is 224.0.0.13, RFC7761 sec 4.3.1 */ +static inline bool pim_ipv4_all_pim_routers(__be32 addr) +{ + return addr == htonl(0xE000000D); +} #endif diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 5f0e11e7354c..e69e415d0d98 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -14,6 +14,7 @@ #include <linux/device.h> #define DW_DMA_MAX_NR_MASTERS 4 +#define DW_DMA_MAX_NR_CHANNELS 8 /** * struct dw_dma_slave - Controller-specific information about a slave @@ -40,19 +41,18 @@ struct dw_dma_slave { * @is_private: The device channels should be marked as private and not for * by the general purpose DMA channel allocator. * @is_memcpy: The device channels do support memory-to-memory transfers. - * @is_nollp: The device channels does not support multi block transfers. * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) + * @multi_block: Multi block transfers supported by hardware per channel. */ struct dw_dma_platform_data { unsigned int nr_channels; bool is_private; bool is_memcpy; - bool is_nollp; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; @@ -62,6 +62,7 @@ struct dw_dma_platform_data { unsigned int block_size; unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; + unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h deleted file mode 100644 index 0a03b0944411..000000000000 --- a/include/linux/platform_data/drv260x-pdata.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Platform data for DRV260X haptics driver family - * - * Author: Dan Murphy <dmurphy@ti.com> - * - * Copyright: (C) 2014 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef _LINUX_DRV260X_PDATA_H -#define _LINUX_DRV260X_PDATA_H - -struct drv260x_platform_data { - u32 library_selection; - u32 mode; - u32 vib_rated_voltage; - u32 vib_overdrive_voltage; -}; - -#endif diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h index 21b15f6fee25..7815d50c26ff 100644 --- a/include/linux/platform_data/macb.h +++ b/include/linux/platform_data/macb.h @@ -8,6 +8,8 @@ #ifndef __MACB_PDATA_H__ #define __MACB_PDATA_H__ +#include <linux/clk.h> + /** * struct macb_platform_data - platform data for MACB Ethernet * @phy_mask: phy mask passed when register the MDIO bus @@ -15,12 +17,16 @@ * @phy_irq_pin: PHY IRQ * @is_rmii: using RMII interface? 
* @rev_eth_addr: reverse Ethernet address byte order + * @pclk: platform clock + * @hclk: AHB clock */ struct macb_platform_data { u32 phy_mask; int phy_irq_pin; u8 is_rmii; u8 rev_eth_addr; + struct clk *pclk; + struct clk *hclk; }; #endif /* __MACB_PDATA_H__ */ diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h new file mode 100644 index 000000000000..e4cfcffaa6f4 --- /dev/null +++ b/include/linux/platform_data/mlxcpld-hotplug.h @@ -0,0 +1,99 @@ +/* + * include/linux/platform_data/mlxcpld-hotplug.h + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H +#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H + +/** + * struct mlxcpld_hotplug_device - I2C device data: + * @adapter: I2C device adapter; + * @client: I2C device client; + * @brdinfo: device board information; + * @bus: I2C bus, where device is attached; + * + * Structure represents I2C hotplug device static data (board topology) and + * dynamic data (related kernel objects handles). 
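A hypothetical static instantiation (bus number and I2C address are made up for illustration):

	static struct mlxcpld_hotplug_device my_psu_devs[] = {
		{
			.brdinfo = { I2C_BOARD_INFO("24c02", 0x51) },
			.bus = 10,	/* adapter the PSU EEPROM sits on */
		},
	};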
+ */ +struct mlxcpld_hotplug_device { + struct i2c_adapter *adapter; + struct i2c_client *client; + struct i2c_board_info brdinfo; + u16 bus; +}; + +/** + * struct mlxcpld_hotplug_platform_data - device platform data: + * @top_aggr_offset: offset of top aggregation interrupt register; + * @top_aggr_mask: top aggregation interrupt common mask; + * @top_aggr_psu_mask: top aggregation interrupt PSU mask; + * @psu_reg_offset: offset of PSU interrupt register; + * @psu_mask: PSU interrupt mask; + * @psu_count: number of equipped replaceable PSUs; + * @psu: pointer to PSU devices data array; + * @top_aggr_pwr_mask: top aggregation interrupt power mask; + * @pwr_reg_offset: offset of power interrupt register + * @pwr_mask: power interrupt mask; + * @pwr_count: number of power sources; + * @pwr: pointer to power devices data array; + * @top_aggr_fan_mask: top aggregation interrupt FAN mask; + * @fan_reg_offset: offset of FAN interrupt register; + * @fan_mask: FAN interrupt mask; + * @fan_count: number of equipped replaceable FANs; + * @fan: pointer to FAN devices data array; + * + * Structure represents board platform data, related to system hotplug events, + * like FAN, PSU, power cable insertion and removing. This data provides the + * number of hot-pluggable devices and hardware description for event handling. + */ +struct mlxcpld_hotplug_platform_data { + u16 top_aggr_offset; + u8 top_aggr_mask; + u8 top_aggr_psu_mask; + u16 psu_reg_offset; + u8 psu_mask; + u8 psu_count; + struct mlxcpld_hotplug_device *psu; + u8 top_aggr_pwr_mask; + u16 pwr_reg_offset; + u8 pwr_mask; + u8 pwr_count; + struct mlxcpld_hotplug_device *pwr; + u8 top_aggr_fan_mask; + u16 fan_reg_offset; + u8 fan_mask; + u8 fan_count; + struct mlxcpld_hotplug_device *fan; +}; + +#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index c55e42ee57fa..f01659026b26 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -12,9 +12,10 @@ #ifndef __MTD_NAND_S3C2410_H #define __MTD_NAND_S3C2410_H +#include <linux/mtd/nand.h> + /** * struct s3c2410_nand_set - define a set of one or more nand chips - * @disable_ecc: Entirely disable ECC - Dangerous * @flash_bbt: Openmoko u-boot can create a Bad Block Table * Setting this flag will allow the kernel to * look for it at boot time and also skip the NAND @@ -31,7 +32,6 @@ * a warning at boot time. 
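A minimal example set under these rules (values are illustrative only):

	static struct s3c2410_nand_set my_set = {
		.flash_bbt	= 1,	/* honour the u-boot bad block table */
		.name		= "nand",
	};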
*/ struct s3c2410_nand_set { - unsigned int disable_ecc:1; unsigned int flash_bbt:1; unsigned int options; @@ -40,6 +40,7 @@ struct s3c2410_nand_set { char *name; int *nr_map; struct mtd_partition *partitions; + struct device_node *of_node; }; struct s3c2410_platform_nand { @@ -51,6 +52,8 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; + nand_ecc_modes_t ecc_mode; + int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 5c1e21c87270..da79774078a7 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -40,9 +40,6 @@ struct s3c64xx_spi_info { int num_cs; bool no_cs; int (*cfg_gpio)(void); - dma_filter_fn filter; - void *dma_tx; - void *dma_rx; }; /** diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h index e0bc4abe69c2..0926e99f2e8f 100644 --- a/include/linux/platform_data/usb-davinci.h +++ b/include/linux/platform_data/usb-davinci.h @@ -11,29 +11,6 @@ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H -/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */ -#define CFGCHIP2_PHYCLKGD (1 << 17) -#define CFGCHIP2_VBUSSENSE (1 << 16) -#define CFGCHIP2_RESET (1 << 15) -#define CFGCHIP2_OTGMODE (3 << 13) -#define CFGCHIP2_NO_OVERRIDE (0 << 13) -#define CFGCHIP2_FORCE_HOST (1 << 13) -#define CFGCHIP2_FORCE_DEVICE (2 << 13) -#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13) -#define CFGCHIP2_USB1PHYCLKMUX (1 << 12) -#define CFGCHIP2_USB2PHYCLKMUX (1 << 11) -#define CFGCHIP2_PHYPWRDN (1 << 10) -#define CFGCHIP2_OTGPWRDN (1 << 9) -#define CFGCHIP2_DATPOL (1 << 8) -#define CFGCHIP2_USB1SUSPENDM (1 << 7) -#define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */ -#define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */ -#define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus comparator */ -#define CFGCHIP2_REFFREQ (0xf << 0) -#define CFGCHIP2_REFFREQ_12MHZ (1 << 0) -#define CFGCHIP2_REFFREQ_24MHZ (2 << 0) -#define CFGCHIP2_REFFREQ_48MHZ (3 << 0) - struct da8xx_ohci_root_hub; typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h index ecbde7a5548e..7b78793f07d7 100644 --- a/include/linux/pm-trace.h +++ b/include/linux/pm-trace.h @@ -1,11 +1,17 @@ #ifndef PM_TRACE_H #define PM_TRACE_H +#include <linux/types.h> #ifdef CONFIG_PM_TRACE #include <asm/pm-trace.h> -#include <linux/types.h> extern int pm_trace_enabled; +extern bool pm_trace_rtc_abused; + +static inline bool pm_trace_rtc_valid(void) +{ + return !pm_trace_rtc_abused; +} static inline int pm_trace_is_enabled(void) { @@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size); #else +static inline bool pm_trace_rtc_valid(void) { return true; } static inline int pm_trace_is_enabled(void) { return 0; } #define TRACE_DEVICE(dev) do { } while (0) diff --git a/include/linux/pm.h b/include/linux/pm.h index 06eb353182ab..f926af41e122 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -258,7 +258,7 @@ typedef struct pm_message { * example, if it detects that a child was unplugged while the system was * asleep). * - * Refer to Documentation/power/devices.txt for more information about the role + * Refer to Documentation/power/admin-guide/devices.rst for more information about the role * of the above callbacks in the system suspend process. * * There also are callbacks related to runtime power management of devices. 
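
The pm_trace_rtc_valid() helper added above exists because the PM-trace machinery stores a hash of the last suspend step in the RTC, destroying wall-clock time until it is rewritten. A minimal sketch of the guard an RTC driver can add to its read path (the callback itself is illustrative, not taken from any particular driver):

#include <linux/pm-trace.h>
#include <linux/rtc.h>

static int example_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* The RTC currently holds a PM-trace hash, not the time of day. */
	if (!pm_trace_rtc_valid())
		return -EIO;

	/* ... read the hardware and fill *tm ... */
	return 0;
}
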
@@ -559,6 +559,7 @@ struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup:1; unsigned int async_suspend:1; + bool in_dpm_list:1; /* Owned by the PM core */ bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; @@ -596,6 +597,7 @@ struct dev_pm_info { unsigned int use_autosuspend:1; unsigned int timer_autosuspends:1; unsigned int memalloc_noio:1; + unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a09fe5c009c8..81ece61075df 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -15,11 +15,11 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/notifier.h> +#include <linux/spinlock.h> /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ - -#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ +#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -40,15 +40,18 @@ struct gpd_dev_ops { struct genpd_power_state { s64 power_off_latency_ns; s64 power_on_latency_ns; + s64 residency_ns; + struct fwnode_handle *fwnode; }; +struct genpd_lock_ops; + struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head master_links; /* Links with PM domain as a master */ struct list_head slave_links; /* Links with PM domain as a slave */ struct list_head dev_list; /* List of devices */ - struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; struct fwnode_handle *provider; /* Identity of the domain provider */ @@ -70,9 +73,18 @@ struct generic_pm_domain { void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ - struct genpd_power_state states[GENPD_MAX_NUM_STATES]; + struct genpd_power_state *states; unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ + void *free; /* Free the state that was allocated for default */ + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + unsigned long lock_flags; + }; + }; }; @@ -205,6 +217,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args, extern int of_genpd_add_subdomain(struct of_phandle_args *parent, struct of_phandle_args *new_subdomain); extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); +extern int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n); int genpd_dev_pm_attach(struct device *dev); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -234,6 +248,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, return -ENODEV; } +static inline int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + return -ENODEV; +} + static inline int genpd_dev_pm_attach(struct device *dev) { return -ENODEV; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index bca26157f5b6..0edd88f93904 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -17,13 +17,65 @@ #include <linux/err.h> #include <linux/notifier.h> +struct clk; +struct regulator; struct dev_pm_opp; struct device; 
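
A rough sketch of how a power-domain provider could combine the two additions above: parse the domain idle states from DT with of_genpd_parse_idle_states() and mark the domain GENPD_FLAG_IRQ_SAFE so genpd takes the new spinlock path. The example_ names are hypothetical and error handling is minimal.

#include <linux/of.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain example_pd;

static int example_pd_init(struct device_node *np)
{
	struct genpd_power_state *states;
	int ret, nr_states;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	example_pd.states = states;
	example_pd.state_count = nr_states;
	/* Power on/off may be called with interrupts disabled. */
	example_pd.flags |= GENPD_FLAG_IRQ_SAFE;

	return pm_genpd_init(&example_pd, NULL, false);
}
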
+struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; +/** + * struct dev_pm_opp_supply - Power supply voltage/current values + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * + * This structure stores the voltage/current values for a single power supply. + */ +struct dev_pm_opp_supply { + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; +}; + +/** + * struct dev_pm_opp_info - OPP freq/voltage/current values + * @rate: Target clk rate in hz + * @supplies: Array of voltage/current values for all power supplies + * + * This structure stores the freq/voltage/current values for a single OPP. + */ +struct dev_pm_opp_info { + unsigned long rate; + struct dev_pm_opp_supply *supplies; +}; + +/** + * struct dev_pm_set_opp_data - Set OPP data + * @old_opp: Old OPP info + * @new_opp: New OPP info + * @regulators: Array of regulator pointers + * @regulator_count: Number of regulators + * @clk: Pointer to clk + * @dev: Pointer to the struct device + * + * This structure contains all information required for setting an OPP. + */ +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + #if defined(CONFIG_PM_OPP) unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); @@ -62,8 +114,10 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, void dev_pm_opp_put_supported_hw(struct device *dev); int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); -int dev_pm_opp_set_regulator(struct device *dev, const char *name); -void dev_pm_opp_put_regulator(struct device *dev); +struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); +void dev_pm_opp_put_regulators(struct opp_table *opp_table); +int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); +void dev_pm_opp_register_put_opp_helper(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -163,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev, static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} +static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} + static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { return -ENOTSUPP; @@ -170,12 +232,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) static inline void dev_pm_opp_put_prop_name(struct device *dev) {} -static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) +static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { - return -ENOTSUPP; + 
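
The declarations above replace the old single-regulator API with a multi-supply one and let a driver install its own OPP transition handler. A hedged sketch of using the two together; the supply names are assumptions, only the first regulator is programmed for brevity, and a real handler must order supply and clock changes per the hardware's rules.

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

static int example_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new = data->new_opp.supplies;
	int ret;

	ret = regulator_set_voltage_triplet(data->regulators[0],
					    new[0].u_volt_min, new[0].u_volt,
					    new[0].u_volt_max);
	if (ret)
		return ret;

	return clk_set_rate(data->clk, data->new_opp.rate);
}

static int example_opp_setup(struct device *dev)
{
	static const char * const names[] = { "vdd-core", "vdd-sram" };
	struct opp_table *table;

	table = dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
	if (IS_ERR(table))
		return PTR_ERR(table);

	return dev_pm_opp_register_set_opp_helper(dev, example_set_opp);
}
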
return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_regulator(struct device *dev) {} +static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2e14d2667b6c..ca4823e675e2 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -55,18 +55,17 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +extern void pm_runtime_clean_up_links(struct device *dev); +extern void pm_runtime_get_suppliers(struct device *dev); +extern void pm_runtime_put_suppliers(struct device *dev); +extern void pm_runtime_new_link(struct device *dev); +extern void pm_runtime_drop_link(struct device *dev); static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } -static inline bool pm_children_suspended(struct device *dev) -{ - return dev->power.ignore_children - || !atomic_read(&dev->power.child_count); -} - static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); @@ -162,7 +161,6 @@ static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} -static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } @@ -186,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} +static inline void pm_runtime_clean_up_links(struct device *dev) {} +static inline void pm_runtime_get_suppliers(struct device *dev) {} +static inline void pm_runtime_put_suppliers(struct device *dev) {} +static inline void pm_runtime_new_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device *dev) {} #endif /* !CONFIG_PM */ @@ -265,9 +268,9 @@ static inline int pm_runtime_set_active(struct device *dev) return __pm_runtime_set_status(dev, RPM_ACTIVE); } -static inline void pm_runtime_set_suspended(struct device *dev) +static inline int pm_runtime_set_suspended(struct device *dev) { - __pm_runtime_set_status(dev, RPM_SUSPENDED); + return __pm_runtime_set_status(dev, RPM_SUSPENDED); } static inline void pm_runtime_disable(struct device *dev) diff --git a/include/linux/poll.h b/include/linux/poll.h index 37b057b63b46..a46d6755035e 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -8,7 +8,7 @@ #include <linux/string.h> #include <linux/fs.h> #include <linux/sysctl.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <uapi/linux/poll.h> extern struct ctl_table epoll_table[]; /* for sysctl */ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e30deb046156..bed9557b69e7 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -4,7 +4,8 @@ enum bq27xxx_chip { BQ27000 = 1, /* bq27000, bq27200 */ BQ27010, /* bq27010, bq27210 */ - BQ27500, /* 
bq27500, bq27510, bq27520 */ + BQ27500, /* bq27500 */ + BQ27510, /* bq27510, bq27520 */ BQ27530, /* bq27530, bq27531 */ BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ BQ27545, /* bq27545 */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..7eeceac52dea 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -65,19 +65,24 @@ /* * Are we doing bottom half or hardware interrupt processing? - * Are we in a softirq context? Interrupt context? - * in_softirq - Are we currently processing softirq or have bh disabled? - * in_serving_softirq - Are we currently processing softirq? + * + * in_irq() - We're in (hard) IRQ context + * in_softirq() - We have BH disabled, or are processing softirqs + * in_interrupt() - We're in NMI, IRQ, or softirq context, or have BH disabled + * in_serving_softirq() - We're in softirq context + * in_nmi() - We're in NMI context + * in_task() - We're in task context + * + * Note: because in_softirq() is also true when BH is merely disabled, + * in_softirq() and in_interrupt() really should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - -/* - * Are we in NMI context? - */ -#define in_nmi() (preempt_count() & NMI_MASK) +#define in_nmi() (preempt_count() & NMI_MASK) +#define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); diff --git a/include/linux/printk.h b/include/linux/printk.h index eac1af8502bb..3472cc6b7a60 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -10,6 +10,8 @@ extern const char linux_banner[]; extern const char linux_proc_banner[]; +#define PRINTK_MAX_SINGLE_HEADER_LEN 2 + static inline int printk_get_level(const char *buffer) { if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { @@ -31,6 +33,14 @@ static inline const char *printk_skip_level(const char *buffer) return buffer; } +static inline const char *printk_skip_headers(const char *buffer) +{ + while (printk_get_level(buffer)) + buffer = printk_skip_level(buffer); + + return buffer; +} + #define CONSOLE_EXT_LOG_MAX 8192 /* printk's without a loglevel use this. */ @@ -40,10 +50,15 @@ static inline const char *printk_skip_level(const char *buffer) #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ #define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ -#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ +/* + * The default used to be hard-coded at 7; it can now be set from the + * kernel config.
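
The rewritten comment block pairs naturally with the new in_task() test: code that may be reached from any context can pick safe behaviour directly instead of reasoning about the individual counters. A generic sketch, not taken from the tree:

#include <linux/preempt.h>
#include <linux/slab.h>

static void *example_alloc(size_t len)
{
	/*
	 * Sleeping allocations are only legal in plain task context;
	 * NMI, hardirq and softirq contexts must not block.
	 */
	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;

	return kmalloc(len, gfp);
}
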
+ */ +#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT + extern int console_printk[]; #define console_loglevel (console_printk[0]) diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b97bf2ef996e..2d2bf592d9db 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, struct proc_dir_entry *, void *); extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, struct proc_dir_entry *); +struct proc_dir_entry *proc_create_mount_point(const char *name); extern struct proc_dir_entry *proc_create_data(const char *, umode_t, struct proc_dir_entry *, @@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) { return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} +static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, @@ -82,4 +84,8 @@ static inline struct proc_dir_entry *proc_net_mkdir( return proc_mkdir_data(name, 0, parent, net); } +struct ns_common; +int open_related_ns(struct ns_common *ns, + struct ns_common *(*get_ns)(struct ns_common *ns)); + #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 92013cc9cc8c..0da29cae009b 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); +struct pstore_ftrace_record { + unsigned long ip; + unsigned long parent_ip; + u64 ts; +}; + +/* + * ftrace related stuff: Both backends and frontends need these so expose + * them here. + */ + +#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) +#define PSTORE_CPU_IN_IP 0x1 +#elif NR_CPUS <= 4 && defined(CONFIG_ARM) +#define PSTORE_CPU_IN_IP 0x3 +#endif + +#define TS_CPU_SHIFT 8 +#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1) + +/* + * If CPU number can be stored in IP, store it there, otherwise store it in + * the time stamp. This means more timestamp resolution is available when + * the CPU can be stored in the IP. 
+ */ +#ifdef PSTORE_CPU_IN_IP +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ip |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ip & PSTORE_CPU_IN_IP; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = val; +} +#else +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ts &= ~(TS_CPU_MASK); + rec->ts |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ts & TS_CPU_MASK; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts >> TS_CPU_SHIFT; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT); +} +#endif + #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index c668c861c96c..9395f06e8372 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -24,6 +24,13 @@ #include <linux/list.h> #include <linux/types.h> +/* + * Choose whether access to the RAM zone requires locking or not. If a zone + * can be written to from different CPUs like with ftrace for example, then + * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. + */ +#define PRZ_FLAG_NO_LOCK BIT(0) + struct persistent_ram_buffer; struct rs_control; @@ -40,6 +47,8 @@ struct persistent_ram_zone { void *vaddr; struct persistent_ram_buffer *buffer; size_t buffer_size; + u32 flags; + raw_spinlock_t buffer_lock; /* ECC correction */ char *par_buffer; @@ -55,7 +64,7 @@ struct persistent_ram_zone { struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype); + unsigned int memtype, u32 flags); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, * @mem_address physical memory address to contain ramoops */ +#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) + struct ramoops_platform_data { unsigned long mem_size; phys_addr_t mem_address; @@ -86,6 +97,7 @@ struct ramoops_platform_data { unsigned long ftrace_size; unsigned long pmsg_size; int dump_oops; + u32 flags; struct persistent_ram_ecc_info ecc_info; }; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 5ad54fc66cf0..a026bfd089db 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -58,7 +58,14 @@ struct system_device_crosststamp; * * clock operations * + * @adjfine: Adjusts the frequency of the hardware clock. + * parameter scaled_ppm: Desired frequency offset from + * nominal frequency in parts per million, but with a + * 16 bit binary fractional field. + * * @adjfreq: Adjusts the frequency of the hardware clock. + * This method is deprecated. New drivers should implement + * the @adjfine method instead. 
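
Putting the accessors above together, a pstore frontend can fill a record without caring whether the CPU number lands in ->ip or in the low bits of ->ts; each helper preserves the other's bits, so the call order does not matter. The function name is illustrative:

#include <linux/pstore.h>

static void example_fill_record(struct pstore_ftrace_record *rec,
				unsigned long ip, unsigned long parent_ip,
				u64 now, unsigned int cpu)
{
	rec->ip = ip;
	rec->parent_ip = parent_ip;
	pstore_ftrace_write_timestamp(rec, now);
	pstore_ftrace_encode_cpu(rec, cpu);
}
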
* parameter delta: Desired frequency offset from nominal frequency * in parts per billion * @@ -108,6 +115,7 @@ struct ptp_clock_info { int n_pins; int pps; struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); @@ -122,30 +130,6 @@ struct ptp_clock_info { struct ptp_clock; -/** - * ptp_clock_register() - register a PTP hardware clock driver - * - * @info: Structure describing the new clock. - * @parent: Pointer to the parent device of the new clock. - * - * Returns a valid pointer on success or PTR_ERR on failure. If PHC - * support is missing at the configuration level, this function - * returns NULL, and drivers are expected to gracefully handle that - * case separately. - */ - -extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, - struct device *parent); - -/** - * ptp_clock_unregister() - unregister a PTP hardware clock driver - * - * @ptp: The clock to remove from service. - */ - -extern int ptp_clock_unregister(struct ptp_clock *ptp); - - enum ptp_clock_events { PTP_CLOCK_ALARM, PTP_CLOCK_EXTTS, @@ -171,6 +155,31 @@ struct ptp_clock_event { }; }; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + +/** + * ptp_clock_register() - register a PTP hardware clock driver + * + * @info: Structure describing the new clock. + * @parent: Pointer to the parent device of the new clock. + * + * Returns a valid pointer on success or PTR_ERR on failure. If PHC + * support is missing at the configuration level, this function + * returns NULL, and drivers are expected to gracefully handle that + * case separately. + */ + +extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent); + +/** + * ptp_clock_unregister() - unregister a PTP hardware clock driver + * + * @ptp: The clock to remove from service. + */ + +extern int ptp_clock_unregister(struct ptp_clock *ptp); + /** * ptp_clock_event() - notify the PTP layer about an event * @@ -202,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp); int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan); +#else +static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent) +{ return NULL; } +static inline int ptp_clock_unregister(struct ptp_clock *ptp) +{ return 0; } +static inline void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event) +{ } +static inline int ptp_clock_index(struct ptp_clock *ptp) +{ return -1; } +static inline int ptp_find_pin(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan) +{ return -1; } +#endif + #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 504c98a278d4..e0e539321ab9 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -8,6 +8,9 @@ #include <linux/pid_namespace.h> /* For task_active_pid_ns. 
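
Since scaled_ppm carries a 16-bit binary fraction (1 ppm equals 1 << 16), a driver that already has a parts-per-billion path can implement the new callback by converting: 1000 / 2^16 reduces to 125 / 2^13. A sketch under that assumption; example_hw_set_ppb() stands in for the driver's existing programming routine.

#include <linux/ptp_clock_kernel.h>

/* Stand-in for the driver's existing parts-per-billion path. */
static int example_hw_set_ppb(struct ptp_clock_info *ptp, s64 ppb);

static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	s64 ppb = (s64)scaled_ppm * 125;

	ppb >>= 13;	/* 1000 / 2^16 == 125 / 2^13 */
	return example_hw_set_ppb(ptp, ppb);
}
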
*/ #include <uapi/linux/ptrace.h> +extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); + /* * Ptrace flags * @@ -19,7 +22,6 @@ #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ -#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */ #define PT_OPT_FLAG_SHIFT 3 /* PT_TRACE_* event enable flags */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 72d88cf3ca25..37dfba101c6c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; -struct qed_chain_pbl { - /* Base address of a pre-allocated buffer for pbl */ - dma_addr_t p_phys_table; - void *p_virt_table; - - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. - */ - void **pp_virt_addr_tbl; - - /* Index to current used page by producer/consumer */ - union { - struct qed_chain_pbl_u16 pbl16; - struct qed_chain_pbl_u32 pbl32; - } u; -}; - struct qed_chain_u16 { /* Cyclic index of next element to produce/consume */ u16 prod_idx; @@ -86,46 +69,78 @@ struct qed_chain_u32 { }; struct qed_chain { - void *p_virt_addr; - dma_addr_t p_phys_addr; - void *p_prod_elem; - void *p_cons_elem; + /* fastpath portion of the chain - required for commands such + * as produce / consume. + */ + /* Point to next element to produce/consume */ + void *p_prod_elem; + void *p_cons_elem; + + /* Fastpath portions of the PBL [if exists] */ + struct { + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. + */ + void **pp_virt_addr_tbl; - enum qed_chain_mode mode; - enum qed_chain_use_mode intended_use; /* used to produce/consume */ - enum qed_chain_cnt_type cnt_type; + union { + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; union { struct qed_chain_u16 chain16; struct qed_chain_u32 chain32; } u; + /* Capacity counts only usable elements */ + u32 capacity; u32 page_cnt; - /* Number of elements - capacity is for usable elements only, - * while size will contain total number of elements [for entire chain]. + enum qed_chain_mode mode; + + /* Elements information for fast calculations */ + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; + + u8 cnt_type; + + /* Slowpath of the chain - required for initialization and destruction, + * but isn't involved in regular functionality. */ - u32 capacity; + + /* Base address of a pre-allocated buffer for pbl */ + struct { + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; + + /* Address of first page of the chain - the address is required + * for fastpath operation [consume/produce] but only for the SINGLE + * flavour which isn't considered fastpath [== SPQ].
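
A small worked example of the new fast-calculation fields, assuming 8-byte elements and PBL mode: a 0x1000-byte page then holds 512 elements, none are sacrificed for a next-pointer, and next_page_mask works out to 512 & 511 == 0.

#include <linux/qed/qed_chain.h>

static u32 example_chain_capacity(u32 page_cnt)
{
	const u16 elem_size = 8;

	/* usable_per_page == elem_per_page == 512 in PBL mode */
	return USABLE_ELEMS_PER_PAGE(elem_size, QED_CHAIN_MODE_PBL) * page_cnt;
}
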
+ */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + + /* Total number of elements [for entire chain] */ u32 size; - /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_unusable; - u16 usable_per_page; - u16 elem_size; - u16 next_page_mask; - struct qed_chain_pbl pbl; + u8 intended_use; }; #define QED_CHAIN_PBL_ENTRY_SIZE (8) #define QED_CHAIN_PAGE_SIZE (0x1000) #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \ - (1 + ((sizeof(struct qed_chain_next) - 1) / \ - (elem_size))) : 0) +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ + (elem_size))) : 0) #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ ((u32)(ELEMS_PER_PAGE(elem_size) - \ @@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) return p_chain->usable_per_page; } -static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { return p_chain->elem_unusable; } @@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { - return p_chain->pbl.p_phys_table; + return p_chain->pbl_sp.p_phys_table; } /** @@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) static inline void qed_chain_advance_page(struct qed_chain *p_chain, void **p_next_elem, void *idx_to_inc, void *page_to_inc) - { struct qed_chain_next *p_next = NULL; u32 page_index = 0; + switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: p_next = *p_next_elem; @@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain16.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain16.prod_idx; - p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) if ((p_chain->u.chain32.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain32.prod_idx; - p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; + p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } @@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain16.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain16.cons_idx; - p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; + p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } @@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) if ((p_chain->u.chain32.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain32.cons_idx; - p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; - qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, 
p_cons_idx, p_cons_page_idx); } p_chain->u.chain32.cons_idx++; @@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) u32 reset_val = p_chain->page_cnt - 1; if (is_chain_u16(p_chain)) { - p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; - p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; + p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; + p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; } else { - p_chain->pbl.u.pbl32.prod_page_idx = reset_val; - p_chain->pbl.u.pbl32.cons_page_idx = reset_val; + p_chain->pbl.c.u32.prod_page_idx = reset_val; + p_chain->pbl.c.u32.cons_page_idx = reset_val; } } switch (p_chain->intended_use) { - case QED_CHAIN_USE_TO_CONSUME_PRODUCE: - case QED_CHAIN_USE_TO_PRODUCE: - /* Do nothing */ - break; - case QED_CHAIN_USE_TO_CONSUME: /* produce empty elements */ for (i = 0; i < p_chain->capacity; i++) qed_chain_recycle_consumed(p_chain); break; + + case QED_CHAIN_USE_TO_CONSUME_PRODUCE: + case QED_CHAIN_USE_TO_PRODUCE: + default: + /* Do nothing */ + break; } } @@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->p_virt_addr = NULL; p_chain->p_phys_addr = 0; p_chain->elem_size = elem_size; - p_chain->intended_use = intended_use; + p_chain->intended_use = (u8)intended_use; p_chain->mode = mode; - p_chain->cnt_type = cnt_type; + p_chain->cnt_type = (u8)cnt_type; - p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->elem_per_page_mask); @@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt; - p_chain->pbl.p_phys_table = 0; - p_chain->pbl.p_virt_table = NULL; + p_chain->pbl_sp.p_phys_table = 0; + p_chain->pbl_sp.p_virt_table = NULL; p_chain->pbl.pp_virt_addr_tbl = NULL; } @@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, dma_addr_t p_phys_pbl, void **pp_virt_addr_tbl) { - p_chain->pbl.p_phys_table = p_phys_pbl; - p_chain->pbl.p_virt_table = p_virt_pbl; + p_chain->pbl_sp.p_phys_table = p_phys_pbl; + p_chain->pbl_sp.p_virt_table = p_virt_pbl; p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 33c24ebc9b7f..7a52f7c58c37 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -15,6 +15,29 @@ #include <linux/qed/qed_if.h> #include <linux/qed/qed_iov_if.h> +struct qed_queue_start_common_params { + /* Should always be relative to entity sending this. 
*/ + u8 vport_id; + u16 queue_id; + + /* Relative, but relevant only for PFs */ + u8 stats_id; + + /* These are always absolute */ + u16 sb; + u8 sb_idx; +}; + +struct qed_rxq_start_ret_params { + void __iomem *p_prod; + void *p_handle; +}; + +struct qed_txq_start_ret_params { + void __iomem *p_doorbell; + void *p_handle; +}; + struct qed_dev_eth_info { struct qed_dev_info common; @@ -22,7 +45,8 @@ struct qed_dev_eth_info { u8 num_tc; u8 port_mac[ETH_ALEN]; - u8 num_vlan_filters; + u16 num_vlan_filters; + u16 num_mac_filters; /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; @@ -55,18 +79,6 @@ struct qed_start_vport_params { bool clear_stats; }; -struct qed_stop_rxq_params { - u8 rss_id; - u8 rx_queue_id; - u8 vport_id; - bool eq_completion_only; -}; - -struct qed_stop_txq_params { - u8 rss_id; - u8 tx_queue_id; -}; - enum qed_filter_rx_mode_type { QED_FILTER_RX_MODE_TYPE_REGULAR, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, @@ -111,15 +123,6 @@ struct qed_filter_params { union qed_filter_type_params filter; }; -struct qed_queue_start_common_params { - u8 rss_id; - u8 queue_id; - u8 vport_id; - u16 sb; - u16 sb_idx; - u16 vf_qid; -}; - struct qed_tunn_params { u16 vxlan_port; u8 update_vxlan_port; @@ -129,7 +132,7 @@ struct qed_tunn_params { struct qed_eth_cb_ops { struct qed_common_cb_ops common; - void (*force_mac) (void *dev, u8 *mac); + void (*force_mac) (void *dev, u8 *mac, bool forced); }; #ifdef CONFIG_DCB @@ -219,24 +222,24 @@ struct qed_eth_ops { struct qed_update_vport_params *params); int (*q_rx_start)(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, - void __iomem **pp_prod); + struct qed_rxq_start_ret_params *ret_params); - int (*q_rx_stop)(struct qed_dev *cdev, - struct qed_stop_rxq_params *params); + int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*q_tx_start)(struct qed_dev *cdev, + u8 rss_num, struct qed_queue_start_common_params *params, dma_addr_t pbl_addr, u16 pbl_size, - void __iomem **pp_doorbell); + struct qed_txq_start_ret_params *ret_params); - int (*q_tx_stop)(struct qed_dev *cdev, - struct qed_stop_txq_params *params); + int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*filter_config)(struct qed_dev *cdev, struct qed_filter_params *params); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f9ae903bbb84..4b454f4f5b25 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -146,6 +146,7 @@ enum qed_led_mode { #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) #define QED_COALESCE_MAX 0xFF +#define QED_DEFAULT_RX_USECS 12 /* forward */ struct qed_dev; @@ -165,6 +166,7 @@ struct qed_iscsi_pf_params { u32 max_cwnd; u16 cq_num_entries; u16 cmdq_num_entries; + u32 two_msl_timer; u16 dup_ack_threshold; u16 tx_sws_timer; u16 min_rto; @@ -266,11 +268,15 @@ struct qed_dev_info { u8 mf_mode; bool tx_switching; bool rdma_supported; + u16 mtu; + + bool wol_support; }; enum qed_sb_type { QED_SB_TYPE_L2_QUEUE, QED_SB_TYPE_CNQ, + QED_SB_TYPE_STORAGE, }; enum qed_protocol { @@ -400,6 +406,15 @@ struct qed_selftest_ops { * @return 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); + +/** + * @brief selftest_nvram - Perform nvram test + * + * @param cdev + * + * @return 0 on success, error otherwise. 
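
A hedged sketch of driving the reshaped q_rx_start(): the identifying inputs move into qed_queue_start_common_params and both outputs now come back through qed_rxq_start_ret_params instead of a bare producer pointer. The example_ names and parameter choices are invented.

#include <linux/qed/qed_eth_if.h>

struct example_rxq {
	void __iomem *prod_reg;
	void *qed_handle;
};

static int example_start_rxq(const struct qed_eth_ops *ops,
			     struct qed_dev *cdev, u8 rss_num,
			     struct example_rxq *rxq, u16 bd_max_bytes,
			     dma_addr_t bd_chain, dma_addr_t cqe_pbl,
			     u16 cqe_pbl_size)
{
	struct qed_queue_start_common_params params = {
		.vport_id = 0,
		.queue_id = rss_num,	/* relative to this PF/VF */
		.stats_id = 0,
		.sb = rss_num,		/* absolute status block id */
		.sb_idx = 0,
	};
	struct qed_rxq_start_ret_params ret;
	int rc;

	rc = ops->q_rx_start(cdev, rss_num, &params, bd_max_bytes,
			     bd_chain, cqe_pbl, cqe_pbl_size, &ret);
	if (rc)
		return rc;

	rxq->prod_reg = ret.p_prod;
	rxq->qed_handle = ret.p_handle;
	return 0;
}
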
+ */ + int (*selftest_nvram) (struct qed_dev *cdev); }; struct qed_common_ops { @@ -553,6 +568,41 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); + +/** + * @brief update_drv_state - API to inform the change in the driver state. + * + * @param cdev + * @param active + * + */ + int (*update_drv_state)(struct qed_dev *cdev, bool active); + +/** + * @brief update_mac - API to inform the change in the mac address + * + * @param cdev + * @param mac + * + */ + int (*update_mac)(struct qed_dev *cdev, u8 *mac); + +/** + * @brief update_mtu - API to inform the change in the mtu + * + * @param cdev + * @param mtu + * + */ + int (*update_mtu)(struct qed_dev *cdev, u16 mtu); + +/** + * @brief update_wol - update of changes in the WoL configuration + * + * @param cdev + * @param enabled - true iff WoL should be enabled. + */ + int (*update_wol) (struct qed_dev *cdev, bool enabled); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h new file mode 100644 index 000000000000..d27912480cb3 --- /dev/null +++ b/include/linux/qed/qed_iscsi_if.h @@ -0,0 +1,229 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_ISCSI_IF_H +#define _QED_ISCSI_IF_H +#include <linux/types.h> +#include <linux/qed/qed_if.h> + +typedef int (*iscsi_event_cb_t) (void *context, + u8 fw_event_code, void *fw_handle); +struct qed_iscsi_stats { + u64 iscsi_rx_bytes_cnt; + u64 iscsi_rx_packet_cnt; + u64 iscsi_rx_new_ooo_isle_events_cnt; + u32 iscsi_cmdq_threshold_cnt; + u32 iscsi_rq_threshold_cnt; + u32 iscsi_immq_threshold_cnt; + + u64 iscsi_rx_dropped_pdus_task_not_valid; + + u64 iscsi_rx_data_pdu_cnt; + u64 iscsi_rx_r2t_pdu_cnt; + u64 iscsi_rx_total_pdu_cnt; + + u64 iscsi_tx_go_to_slow_start_event_cnt; + u64 iscsi_tx_fast_retransmit_event_cnt; + + u64 iscsi_tx_data_pdu_cnt; + u64 iscsi_tx_r2t_pdu_cnt; + u64 iscsi_tx_total_pdu_cnt; + + u64 iscsi_tx_bytes_cnt; + u64 iscsi_tx_packet_cnt; +}; + +struct qed_dev_iscsi_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; +}; + +struct qed_iscsi_id_params { + u8 mac[ETH_ALEN]; + u32 ip[4]; + u16 port; +}; + +struct qed_iscsi_params_offload { + u8 layer_code; + dma_addr_t sq_pbl_addr; + u32 initial_ack; + + struct qed_iscsi_id_params src; + struct qed_iscsi_id_params dst; + u16 vlan_id; + u8 tcp_flags; + u8 ip_version; + u8 default_cq; + + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_time; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u32 ts_ticks_per_second; + u16 da_timeout_value; + u8 ack_frequency; +}; + +struct qed_iscsi_params_update { + u8 update_flag; +#define QED_ISCSI_CONN_HD_EN BIT(0) +#define QED_ISCSI_CONN_DD_EN BIT(1) +#define QED_ISCSI_CONN_INITIAL_R2T BIT(2) +#define 
QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3) + + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; +}; + +#define MAX_TID_BLOCKS_ISCSI (512) +struct qed_iscsi_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_ISCSI]; +}; + +struct qed_iscsi_cb_ops { + struct qed_common_cb_ops common; +}; + +/** + * struct qed_iscsi_ops - qed iSCSI operations. + * @common: common operations pointer + * @ll2: light L2 operations pointer + * @fill_dev_info: fills iSCSI specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value. + * @register_ops: register iscsi operations + * @param cdev + * @param ops - specified using qed_iscsi_cb_ops + * @param cookie - driver private + * @start: starts iscsi in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * @return 0 on success, otherwise error value. + * @stop: stops iscsi in FW + * @param cdev + * @return 0 on success, otherwise error value. + * @acquire_conn: acquire a new iscsi connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * @return 0 on success, otherwise error value. + * @release_conn: release a previously acquired iscsi connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @update_conn: updates an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @clear_sq: clear all tasks in sq + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @get_stats: iSCSI related statistics + * @param cdev + * @param stats - pointer to struct that will be filled + * with stats + * @return 0 on success, error otherwise.
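
The lifecycle documented above (acquire, offload, optionally update, then destroy and release) suggests a bring-up path roughly like the following sketch; error handling is pared down and the function name is hypothetical.

#include <linux/qed/qed_iscsi_if.h>

static int example_bring_up_conn(const struct qed_iscsi_ops *ops,
				 struct qed_dev *cdev,
				 struct qed_iscsi_params_offload *conn_info,
				 u32 *handle)
{
	void __iomem *doorbell;
	u32 fw_cid;
	int rc;

	rc = ops->acquire_conn(cdev, handle, &fw_cid, &doorbell);
	if (rc)
		return rc;

	rc = ops->offload_conn(cdev, *handle, conn_info);
	if (rc)
		ops->release_conn(cdev, *handle);

	return rc;
}
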
+ */ +struct qed_iscsi_ops { + const struct qed_common_ops *common; + + const struct qed_ll2_ops *ll2; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie); + + int (*start)(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, iscsi_event_cb_t async_event_cb); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info); + + int (*update_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info); + + int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); + + int (*clear_sq)(struct qed_dev *cdev, u32 handle); + + int (*get_stats)(struct qed_dev *cdev, + struct qed_iscsi_stats *stats); +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void); +void qed_put_iscsi_ops(void); +#endif diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h index 99fbe6d55acb..f48d64b0e2fb 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_roce.h @@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv); bool qede_roce_supported(struct qede_dev *dev); -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +#if IS_ENABLED(CONFIG_QED_RDMA) int qede_roce_dev_add(struct qede_dev *dev); void qede_roce_dev_event_open(struct qede_dev *dev); void qede_roce_dev_event_close(struct qede_dev *dev); diff --git a/include/linux/quota.h b/include/linux/quota.h index 55107a8ff887..3434eef2a5aa 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -431,7 +431,7 @@ struct qc_info { /* Operations handling requests from userspace */ struct quotactl_ops { - int (*quota_on)(struct super_block *, int, int, struct path *); + int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); @@ -520,7 +520,6 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev, struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ struct mutex dqio_mutex; /* lock device while I/O in progress */ - struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f00fa86ac966..799a63d0e1a8 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -90,7 +90,7 @@ int dquot_file_open(struct inode *inode, struct file *file); int dquot_enable(struct inode *inode, int type, int format_id, unsigned int flags); int dquot_quota_on(struct super_block *sb, int type, int format_id, - struct path *path); + const struct path *path); int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int dquot_quota_off(struct super_block *sb, int type); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index af3581b8a451..52bda854593b 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -80,26 +80,25 @@ static inline bool 
radix_tree_is_internal_node(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* Internally used bits of node->count */ -#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) -#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) - +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is an exceptional entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @exceptional is the count of every element in ->slots which is + * either radix_tree_exceptional_entry() or is a sibling entry for an + * exceptional entry. + */ struct radix_tree_node { - unsigned char shift; /* Bits remaining in each slot */ - unsigned char offset; /* Slot offset in parent */ - unsigned int count; + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char exceptional; /* Exceptional entry count */ + struct radix_tree_node *parent; /* Used when ascending tree */ + void *private_data; /* For tree user */ union { - struct { - /* Used when ascending tree */ - struct radix_tree_node *parent; - /* For tree user */ - void *private_data; - }; - /* Used when freeing node */ - struct rcu_head rcu_head; + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ }; - /* For tree user */ - struct list_head private_list; void __rcu *slots[RADIX_TREE_MAP_SIZE]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; @@ -130,6 +129,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root) } /** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: one beyond the last index for this chunk + * @tags: bit-mask for tag-iterating + * @node: node that contains current slot + * @shift: shift for the node that holds our slots + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. + */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; + struct radix_tree_node *node; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif +}; + +static inline unsigned int iter_shift(const struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + +/** * Radix-tree synchronization * * The radix-tree API requires that users provide all synchronisation (with @@ -248,20 +282,6 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -/** - * radix_tree_replace_slot - replace item in a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * @item: new item to store in the slot. - * - * For use with radix_tree_lookup_slot(). Caller must hold tree write locked - * across slot lookup and replacement. 
- */ -static inline void radix_tree_replace_slot(void **pslot, void *item) -{ - BUG_ON(radix_tree_is_internal_node(item)); - rcu_assign_pointer(*pslot, item); -} - int __radix_tree_create(struct radix_tree_root *root, unsigned long index, unsigned order, struct radix_tree_node **nodep, void ***slotp); @@ -276,8 +296,19 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void ***slotp); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); -bool __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node); +typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); +void __radix_tree_replace(struct radix_tree_root *root, + struct radix_tree_node *node, + void **slot, void *item, + radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_replace(struct radix_tree_root *, + const struct radix_tree_iter *, void **slot, void *item); +void radix_tree_replace_slot(struct radix_tree_root *root, + void **slot, void *item); +void __radix_tree_delete_node(struct radix_tree_root *root, + struct radix_tree_node *node, + radix_tree_update_node_t update_node, + void *private); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); void radix_tree_clear_tags(struct radix_tree_root *root, @@ -299,6 +330,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag); +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, @@ -307,50 +340,18 @@ unsigned int radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); -unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, - unsigned long *first_indexp, unsigned long last_index, - unsigned long nr_to_tag, - unsigned int fromtag, unsigned int totag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); -unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { preempt_enable(); } -/** - * struct radix_tree_iter - radix tree iterator state - * - * @index: index of current slot - * @next_index: one beyond the last index for this chunk - * @tags: bit-mask for tag-iterating - * @shift: shift for the node that holds our slots - * - * This radix tree iterator works in terms of "chunks" of slots. A chunk is a - * subinterval of slots contained within one radix tree leaf node. It is - * described by a pointer to its first slot and a struct radix_tree_iter - * which holds the chunk's position in the tree and its size. For tagged - * iteration radix_tree_iter also holds the slots' bit-mask for one chosen - * radix tree tag. 
- */ -struct radix_tree_iter { - unsigned long index; - unsigned long next_index; - unsigned long tags; -#ifdef CONFIG_RADIX_TREE_MULTIORDER - unsigned int shift; -#endif -}; - -static inline unsigned int iter_shift(struct radix_tree_iter *iter) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - return iter->shift; -#else - return 0; -#endif -} +int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); +int radix_tree_split(struct radix_tree_root *, unsigned long index, + unsigned new_order); +int radix_tree_join(struct radix_tree_root *, unsigned long index, + unsigned new_order, void *); #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ @@ -419,20 +420,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) } /** - * radix_tree_iter_next - resume iterating when the chunk may be invalid - * @iter: iterator state + * radix_tree_iter_resume - resume iterating when the chunk may be invalid + * @slot: pointer to current slot + * @iter: iterator state + * Returns: New slot pointer * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function - * to continue the iteration from the next index. + * before releasing the lock to continue the iteration from the next index. */ -static inline __must_check -void **radix_tree_iter_next(struct radix_tree_iter *iter) -{ - iter->next_index = __radix_tree_iter_add(iter, 1); - iter->tags = 0; - return NULL; -} +void **__must_check radix_tree_iter_resume(void **slot, + struct radix_tree_iter *iter); /** * radix_tree_chunk_size - get current chunk size @@ -446,10 +444,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) return (iter->next_index - iter->index) >> iter_shift(iter); } -static inline struct radix_tree_node *entry_to_node(void *ptr) +#ifdef CONFIG_RADIX_TREE_MULTIORDER +void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, + unsigned flags); +#else +/* Can't happen without sibling entries, but the compiler can't tell that */ +static inline void ** __radix_tree_next_slot(void **slot, + struct radix_tree_iter *iter, unsigned flags) { - return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); + return slot; } +#endif /** * radix_tree_next_slot - find next slot in chunk @@ -463,7 +468,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr) * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this - * function. These cases result from the use of radix_tree_iter_next() or + * function. These cases result from the use of radix_tree_iter_resume() or * radix_tree_iter_retry(). 
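
The replacement API targets scan loops that must drop a lock mid-walk: radix_tree_iter_resume() is called before unlocking so the iterator continues from the next index once the lock is retaken. A generic sketch, assuming the tree is protected by a caller-supplied spinlock:

#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void example_scan(struct radix_tree_root *root, spinlock_t *lock)
{
	struct radix_tree_iter iter;
	void **slot;

	spin_lock(lock);
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		/* ... examine *slot ... */
		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}
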
In these cases we don't end up dereferencing * 'slot' because either: * a) we are doing tagged iteration and iter->tags has been set to 0, or @@ -474,51 +479,31 @@ static __always_inline void ** radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { - void *canon = slot; - iter->tags >>= 1; if (unlikely(!iter->tags)) return NULL; - while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && - radix_tree_is_internal_node(slot[1])) { - if (entry_to_node(slot[1]) == canon) { - iter->tags >>= 1; - iter->index = __radix_tree_iter_add(iter, 1); - slot++; - continue; - } - iter->next_index = __radix_tree_iter_add(iter, 1); - return NULL; - } if (likely(iter->tags & 1ul)) { iter->index = __radix_tree_iter_add(iter, 1); - return slot + 1; + slot++; + goto found; } if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); - iter->tags >>= offset; - iter->index = __radix_tree_iter_add(iter, offset + 1); - return slot + offset + 1; + iter->tags >>= offset++; + iter->index = __radix_tree_iter_add(iter, offset); + slot += offset; + goto found; } } else { long count = radix_tree_chunk_size(iter); - void *canon = slot; while (--count > 0) { slot++; iter->index = __radix_tree_iter_add(iter, 1); - if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && - radix_tree_is_internal_node(*slot)) { - if (entry_to_node(*slot) == canon) - continue; - iter->next_index = iter->index; - break; - } - if (likely(*slot)) - return slot; + goto found; if (flags & RADIX_TREE_ITER_CONTIG) { /* forbid switching to the next chunk */ iter->next_index = 0; @@ -527,6 +512,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) } } return NULL; + + found: + if (unlikely(radix_tree_is_internal_node(*slot))) + return __radix_tree_next_slot(slot, iter, flags); + return slot; } /** @@ -577,6 +567,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) slot || (slot = radix_tree_next_chunk(root, iter, \ RADIX_TREE_ITER_TAGGED | tag)) ; \ slot = radix_tree_next_slot(slot, iter, \ - RADIX_TREE_ITER_TAGGED)) + RADIX_TREE_ITER_TAGGED | tag)) #endif /* _LINUX_RADIX_TREE_H */ diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 57c9e0622a38..56375edf2ed2 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func); #ifdef CONFIG_PRINTK -#define WARN_ON_RATELIMIT(condition, state) \ - WARN_ON((condition) && __ratelimit(state)) +#define WARN_ON_RATELIMIT(condition, state) ({ \ + bool __rtn_cond = !!(condition); \ + WARN_ON(__rtn_cond && __ratelimit(state)); \ + __rtn_cond; \ +}) #define WARN_RATELIMIT(condition, format, ...) \ ({ \ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 8beb98dcf14f..4f7a9561b8c4 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) * This is only for internal list manipulation where we know * the prev/next entries already! 
*/ -#ifndef CONFIG_DEBUG_LIST static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { + if (!__list_add_valid(new, prev, next)) + return; + new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } -#else -void __list_add_rcu(struct list_head *new, - struct list_head *prev, struct list_head *next); -#endif /** * list_add_rcu - add a new entry to rcu-protected list diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 321f9ed552a9..01f71e1d2e94 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -444,6 +444,10 @@ bool __rcu_is_watching(void); #error "Unknown RCU implementation specified to kernel configuration" #endif +#define RCU_SCHEDULER_INACTIVE 0 +#define RCU_SCHEDULER_INIT 1 +#define RCU_SCHEDULER_RUNNING 2 + /* * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic * initialization and destruction of rcu_head on the stack. rcu_head structures diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 9adc7b21903d..f6673132431d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/rbtree.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/bug.h> #include <linux/lockdep.h> @@ -116,22 +117,22 @@ struct reg_sequence { #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ - int ret; \ + int pollret; \ might_sleep_if(sleep_us); \ for (;;) { \ - ret = regmap_read((map), (addr), &(val)); \ - if (ret) \ + pollret = regmap_read((map), (addr), &(val)); \ + if (pollret) \ break; \ if (cond) \ break; \ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ - ret = regmap_read((map), (addr), &(val)); \ + pollret = regmap_read((map), (addr), &(val)); \ break; \ } \ if (sleep_us) \ usleep_range((sleep_us >> 2) + 1, sleep_us); \ } \ - ret ?: ((cond) ? 0 : -ETIMEDOUT); \ + pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ }) #ifdef CONFIG_REGMAP diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 692108222271..ea0fffa5faeb 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -120,6 +120,25 @@ struct regmap; #define REGULATOR_EVENT_PRE_DISABLE 0x400 #define REGULATOR_EVENT_ABORT_DISABLE 0x800 +/* + * Regulator errors that can be queried using regulator_get_error_flags + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * + * NOTE: These errors can be OR'ed together. 
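As a minimal consumer-side sketch of querying these error flags through regulator_get_error_flags(), which is declared a little further down in this hunk (reg and dev are illustrative names, not part of this patch):

	unsigned int flags;
	int ret;

	ret = regulator_get_error_flags(reg, &flags);
	if (!ret && (flags & REGULATOR_ERROR_OVER_TEMP))
		dev_warn(dev, "regulator reports over-temperature\n");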
+ */ + +#define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1) +#define REGULATOR_ERROR_OVER_CURRENT BIT(2) +#define REGULATOR_ERROR_REGULATION_OUT BIT(3) +#define REGULATOR_ERROR_FAIL BIT(4) +#define REGULATOR_ERROR_OVER_TEMP BIT(5) + + /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event * @@ -237,6 +256,8 @@ int regulator_get_current_limit(struct regulator *regulator); int regulator_set_mode(struct regulator *regulator, unsigned int mode); unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_get_error_flags(struct regulator *regulator, + unsigned int *flags); int regulator_set_load(struct regulator *regulator, int load_uA); int regulator_allow_bypass(struct regulator *regulator, bool allow); @@ -477,6 +498,12 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator) return REGULATOR_MODE_NORMAL; } +static inline int regulator_get_error_flags(struct regulator *regulator, + unsigned int *flags) +{ + return -EINVAL; +} + static inline int regulator_set_load(struct regulator *regulator, int load_uA) { return REGULATOR_MODE_NORMAL; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 37b532410528..dac8e7b16bc6 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -100,6 +100,7 @@ struct regulator_linear_range { * * @set_mode: Set the configured operating mode for the regulator. * @get_mode: Get the configured operating mode for the regulator. + * @get_error_flags: Get the current error(s) for the regulator. * @get_status: Return actual (not as-configured) status of regulator, as a * REGULATOR_STATUS value (or negative errno) * @get_optimum_mode: Get the most efficient operating mode for the regulator @@ -169,6 +170,9 @@ struct regulator_ops { int (*set_mode) (struct regulator_dev *, unsigned int mode); unsigned int (*get_mode) (struct regulator_dev *); + /* retrieve current error flags on the regulator */ + int (*get_error_flags)(struct regulator_dev *, unsigned int *flags); + /* Time taken to enable or set voltage on the regulator */ int (*enable_time) (struct regulator_dev *); int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 930023b7c825..8265d351c9f0 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -400,6 +400,7 @@ enum rproc_crash_type { * @firmware_loading_complete: marks e/o asynchronous firmware loading * @bootaddr: address of first instruction to boot rproc with (optional) * @rvdevs: list of remote virtio devices + * @subdevs: list of subdevices, to follow the running state * @notifyids: idr for dynamically assigning rproc-wide unique notify ids * @index: index of this rproc device * @crash_handler: workqueue for handling a crash @@ -415,7 +416,7 @@ struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; - const char *firmware; + char *firmware; void *priv; const struct rproc_ops *ops; struct device dev; @@ -431,6 +432,7 @@ struct rproc { struct completion firmware_loading_complete; u32 bootaddr; struct list_head rvdevs; + struct list_head subdevs; struct idr notifyids; int index; struct work_struct crash_handler; @@ -444,6 +446,19 @@ struct rproc { bool auto_boot; }; +/** + * struct rproc_subdev - subdevice tied to a remoteproc + * @node: list node related to the rproc subdevs list + * @probe: probe function, called as the rproc is started + * @remove: remove function, called as the rproc is stopped + */
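To make the subdevice hooks documented above concrete, here is a hedged sketch of a parent driver registering one via rproc_add_subdev(), whose declaration appears near the end of this header; the my_* names are illustrative, not part of this patch:

	static int my_subdev_probe(struct rproc_subdev *subdev)
	{
		/* the rproc has just booted: bring up the dependent function */
		return 0;
	}

	static void my_subdev_remove(struct rproc_subdev *subdev)
	{
		/* the rproc is about to stop: quiesce the dependent function */
	}

	/* with rproc and my_subdev in hand: */
	rproc_add_subdev(rproc, &my_subdev, my_subdev_probe, my_subdev_remove);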
+struct rproc_subdev { + struct list_head node; + + int (*probe)(struct rproc_subdev *subdev); + void (*remove)(struct rproc_subdev *subdev); +}; + /* we currently support only two vrings per rvdev */ #define RVDEV_NUM_VRINGS 2 @@ -472,6 +487,9 @@ struct rproc_vring { /** * struct rproc_vdev - remoteproc state for a supported virtio device + * @refcount: reference counter for the vdev and vring allocations + * @subdev: handle for registering the vdev as a rproc subdevice + * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle * @vdev: the virio device @@ -479,6 +497,11 @@ struct rproc_vring { * @rsc_offset: offset of the vdev's resource entry */ struct rproc_vdev { + struct kref refcount; + + struct rproc_subdev subdev; + + unsigned int id; struct list_head node; struct rproc *rproc; struct virtio_device vdev; @@ -511,4 +534,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) return rvdev->rproc; } +void rproc_add_subdev(struct rproc *rproc, + struct rproc_subdev *subdev, + int (*probe)(struct rproc_subdev *subdev), + void (*remove)(struct rproc_subdev *subdev)); + +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); + #endif /* REMOTEPROC_H */ diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h new file mode 100644 index 000000000000..4155556fa4b2 --- /dev/null +++ b/include/linux/remoteproc/st_slim_rproc.h @@ -0,0 +1,58 @@ +/* + * SLIM core rproc driver header + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin <peter.griffin@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef _ST_REMOTEPROC_SLIM_H +#define _ST_REMOTEPROC_SLIM_H + +#define ST_SLIM_MEM_MAX 2 +#define ST_SLIM_MAX_CLK 4 + +enum { + ST_SLIM_DMEM, + ST_SLIM_IMEM, +}; + +/** + * struct st_slim_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct st_slim_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + size_t size; +}; + +/** + * struct st_slim_rproc - SLIM slim core + * @rproc: rproc handle + * @mem: slim memory information + * @slimcore: slim slimcore regs + * @peri: slim peripheral regs + * @clks: slim clocks + */ +struct st_slim_rproc { + struct rproc *rproc; + struct st_slim_mem mem[ST_SLIM_MEM_MAX]; + void __iomem *slimcore; + void __iomem *peri; + + /* st_slim_rproc private */ + struct clk *clks[ST_SLIM_MAX_CLK]; +}; + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name); +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); + +#endif diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..d9706a6f5ae2 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -40,7 +40,7 @@ #define _LINUX_RESERVATION_H #include <linux/ww_mutex.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/rcupdate.h> @@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; - struct fence __rcu *shared[]; + struct dma_fence __rcu *shared[]; }; /** @@ -74,7 +74,7 @@ struct reservation_object { struct ww_mutex lock; seqcount_t seq; - struct fence __rcu *fence_excl; + struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; struct reservation_object_list *staged; }; @@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) { int i; struct reservation_object_list *fobj; - struct fence *excl; + struct dma_fence *excl; /* * This object should be dead and all references must have @@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) */ excl = rcu_dereference_protected(obj->fence_excl, 1); if (excl) - fence_put(excl); + dma_fence_put(excl); fobj = rcu_dereference_protected(obj->fence, 1); if (fobj) { for (i = 0; i < fobj->shared_count; ++i) - fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); kfree(fobj); } @@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl(struct reservation_object *obj) { return rcu_dereference_protected(obj->fence_excl, @@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL if none */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl_rcu(struct reservation_object *obj) { - struct fence *fence; - unsigned seq; -retry: - seq = read_seqcount_begin(&obj->seq); + struct dma_fence *fence; + + if (!rcu_access_pointer(obj->fence_excl)) + return NULL; + rcu_read_lock(); - fence = rcu_dereference(obj->fence_excl); - if (read_seqcount_retry(&obj->seq, seq)) { - rcu_read_unlock(); - goto retry; - } - fence = fence_get(fence); + fence = dma_fence_get_rcu_safe(&obj->fence_excl); 
rcu_read_unlock(); + return fence; } int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); void reservation_object_add_excl_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct fence **pfence_excl, + struct dma_fence **pfence_excl, unsigned *pshared_count, - struct fence ***pshared); + struct dma_fence ***pshared); long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h @@ -0,0 +1,51 @@ +/* + * Common syscall restarting data + */ +#ifndef __LINUX_RESTART_BLOCK_H +#define __LINUX_RESTART_BLOCK_H + +#include <linux/compiler.h> +#include <linux/types.h> + +struct timespec; +struct compat_timespec; +struct pollfd; + +/* + * System call restart block. + */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#endif /* __LINUX_RESTART_BLOCK_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 4acc552e9279..b6d4568795a7 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -198,4 +198,10 @@ enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, }; +#ifdef CONFIG_RING_BUFFER +int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); +#else +#define trace_rb_cpu_prepare NULL +#endif + #endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b46bb5620a76..15321fb1df6b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) * anon_vma helper functions. 
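Looking back at the reservation.h hunk above, reservation_object_get_fences_rcu() now traffics in struct dma_fence. A hedged sketch of snapshotting and then releasing an object's fences (obj is an illustrative reservation_object pointer, error handling trimmed):

	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	ret = reservation_object_get_fences_rcu(obj, &excl, &count, &shared);
	if (ret)
		return ret;

	/* ... inspect or wait on the snapshot ... */

	for (i = 0; i < count; i++)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* dma_fence_put() tolerates NULL */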
*/ void anon_vma_init(void); /* create anon_vma_cachep */ -int anon_vma_prepare(struct vm_area_struct *); +int __anon_vma_prepare(struct vm_area_struct *); void unlink_anon_vmas(struct vm_area_struct *); int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); +static inline int anon_vma_prepare(struct vm_area_struct *vma) +{ + if (likely(vma->anon_vma)) + return 0; + + return __anon_vma_prepare(vma); +} + static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { diff --git a/include/linux/rmi.h b/include/linux/rmi.h index e0aca1476001..64125443f8a6 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/input.h> +#include <linux/kfifo.h> #include <linux/list.h> #include <linux/module.h> #include <linux/types.h> @@ -99,6 +100,8 @@ struct rmi_2d_sensor_platform_data { bool topbuttonpad; bool kernel_tracking; int dmax; + int dribble; + int palm_detect; }; /** @@ -106,7 +109,7 @@ struct rmi_2d_sensor_platform_data { * @buttonpad - the touchpad is a buttonpad, so enable only the first actual * button that is found. * @trackstick_buttons - Set when the function 30 is handling the physical - * buttons of the trackstick (as a PD/2 passthrough device. + * buttons of the trackstick (as a PS/2 passthrough device). * @disable - the touchpad incorrectly reports F30 and it should be ignored. * This is a special case which is due to misconfigured firmware. */ @@ -116,14 +119,17 @@ struct rmi_f30_data { bool disable; }; -/** - * struct rmi_f01_power - override default power management settings. - * + +/* + * Set the state of a register + * DEFAULT - use the default value set by the firmware config + * OFF - explicitly disable the register + * ON - explicitly enable the register */ -enum rmi_f01_nosleep { - RMI_F01_NOSLEEP_DEFAULT = 0, - RMI_F01_NOSLEEP_OFF = 1, - RMI_F01_NOSLEEP_ON = 2 +enum rmi_reg_state { + RMI_REG_STATE_DEFAULT = 0, + RMI_REG_STATE_OFF = 1, + RMI_REG_STATE_ON = 2 }; /** @@ -143,7 +149,7 @@ enum rmi_f01_nosleep { * when the touch sensor is in doze mode, in units of 10ms. */ struct rmi_f01_power_management { - enum rmi_f01_nosleep nosleep; + enum rmi_reg_state nosleep; u8 wakeup_threshold; u8 doze_holdoff; u8 doze_interval; @@ -204,16 +210,18 @@ struct rmi_device_platform_data_spi { * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to * re-initialize. You can override the default wait period here.
+ * @irq: irq associated with the attn gpio line, or negative */ struct rmi_device_platform_data { int reset_delay_ms; + int irq; struct rmi_device_platform_data_spi spi_data; /* function handler pdata */ - struct rmi_2d_sensor_platform_data *sensor_pdata; + struct rmi_2d_sensor_platform_data sensor_pdata; struct rmi_f01_power_management power_management; - struct rmi_f30_data *f30_data; + struct rmi_f30_data f30_data; }; /** @@ -264,9 +272,6 @@ struct rmi_transport_dev { struct rmi_device_platform_data pdata; struct input_dev *input; - - void *attn_data; - int attn_size; }; /** @@ -324,17 +329,24 @@ struct rmi_device { }; +struct rmi4_attn_data { + unsigned long irq_status; + size_t size; + void *data; +}; + struct rmi_driver_data { struct list_head function_list; struct rmi_device *rmi_dev; struct rmi_function *f01_container; - bool f01_bootloader_mode; + struct rmi_function *f34_container; + bool bootloader_mode; - u32 attn_count; int num_of_irq_regs; int irq_count; + void *irq_memory; unsigned long *irq_status; unsigned long *fn_irq_bits; unsigned long *current_irq_mask; @@ -343,17 +355,23 @@ struct rmi_driver_data { struct input_dev *input; u8 pdt_props; - u8 bsr; + + u8 num_rx_electrodes; + u8 num_tx_electrodes; bool enabled; + struct mutex enabled_mutex; - void *data; + struct rmi4_attn_data attn_data; + DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); }; int rmi_register_transport_device(struct rmi_transport_dev *xport); void rmi_unregister_transport_device(struct rmi_transport_dev *xport); -int rmi_process_interrupt_requests(struct rmi_device *rmi_dev); -int rmi_driver_suspend(struct rmi_device *rmi_dev); -int rmi_driver_resume(struct rmi_device *rmi_dev); +void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, + void *data, size_t size); + +int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); +int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); #endif diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 452d393cc8dd..18f9e1ae4b7e 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/err.h> #include <linux/mod_devicetable.h> #include <linux/kref.h> #include <linux/mutex.h> @@ -64,6 +65,7 @@ struct rpmsg_channel_info { * rpmsg_device - device that belong to the rpmsg bus * @dev: the device struct * @id: device id (used to match between rpmsg drivers and devices) + * @driver_override: driver name to force a match * @src: local address * @dst: destination address * @ept: the rpmsg endpoint of this channel @@ -72,6 +74,7 @@ struct rpmsg_channel_info { struct rpmsg_device { struct device dev; struct rpmsg_device_id id; + char *driver_override; u32 src; u32 dst; struct rpmsg_endpoint *ept; @@ -132,6 +135,8 @@ struct rpmsg_driver { int (*callback)(struct rpmsg_device *, void *, int, void *, u32); }; +#if IS_ENABLED(CONFIG_RPMSG) + int register_rpmsg_device(struct rpmsg_device *dev); void unregister_rpmsg_device(struct rpmsg_device *dev); int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); @@ -141,6 +146,116 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo); +int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +int rpmsg_trysend(struct 
rpmsg_endpoint *ept, void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +#else + +static inline int register_rpmsg_device(struct rpmsg_device *dev) +{ + return -ENXIO; +} + +static inline void unregister_rpmsg_device(struct rpmsg_device *dev) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, + struct module *owner) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info chinfo) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return ERR_PTR(-ENXIO); +} + +static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, + u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; + +} + +static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, + int len, u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +#endif /* IS_ENABLED(CONFIG_RPMSG) */ + /* use a macro to avoid include chaining to get THIS_MODULE */ #define register_rpmsg_driver(drv) \ __register_rpmsg_driver(drv, THIS_MODULE) @@ -157,14 +272,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, module_driver(__rpmsg_driver, register_rpmsg_driver, \ unregister_rpmsg_driver) -int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - -int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - #endif /* _LINUX_RPMSG_H */ diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h new file mode 100644 index 000000000000..e674b2e3074b --- /dev/null +++ b/include/linux/rpmsg/qcom_smd.h @@ -0,0 +1,33 @@ + +#ifndef _LINUX_RPMSG_QCOM_SMD_H +#define _LINUX_RPMSG_QCOM_SMD_H + +#include <linux/device.h> + +struct qcom_smd_edge; + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD) + +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node); +int qcom_smd_unregister_edge(struct qcom_smd_edge 
*edge); + +#else + +static inline struct qcom_smd_edge * +qcom_smd_register_edge(struct device *parent, + struct device_node *node) +{ + return ERR_PTR(-ENXIO); +} + +static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + /* This shouldn't be possible */ + WARN_ON(1); + return -ENXIO; +} + +#endif + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 348f51b0ec92..ad3ec9ec61f7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!( #define set_task_state(tsk, state_value) \ do { \ (tsk)->task_state_change = _THIS_IP_; \ - smp_store_mb((tsk)->state, (state_value)); \ + smp_store_mb((tsk)->state, (state_value)); \ } while (0) -/* - * set_current_state() includes a barrier so that the write of current->state - * is correctly serialised wrt the caller's subsequent test of whether to - * actually sleep: - * - * set_current_state(TASK_UNINTERRUPTIBLE); - * if (do_i_need_to_sleep()) - * schedule(); - * - * If the caller does not need such serialisation then use __set_current_state() - */ #define __set_current_state(state_value) \ do { \ current->task_state_change = _THIS_IP_; \ @@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!( #define set_current_state(state_value) \ do { \ current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->state, (state_value)); \ + smp_store_mb(current->state, (state_value)); \ } while (0) #else +/* + * @tsk had better be current, or you get to keep the pieces. + * + * The only reason is that computing current can be more expensive than + * using a pointer that's already available. + * + * Therefore, see set_current_state(). + */ #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) #define set_task_state(tsk, state_value) \ @@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!( * is correctly serialised wrt the caller's subsequent test of whether to * actually sleep: * + * for (;;) { * set_current_state(TASK_UNINTERRUPTIBLE); - * if (do_i_need_to_sleep()) - * schedule(); + * if (!need_sleep) + * break; + * + * schedule(); + * } + * __set_current_state(TASK_RUNNING); * - * If the caller does not need such serialisation then use __set_current_state() + * If the caller does not need such serialisation (because, for instance, the + * condition test and condition change and wakeup are under the same lock) then + * use __set_current_state(). + * + * The above is typically ordered against the wakeup, which does: + * + * need_sleep = false; + * wake_up_state(p, TASK_UNINTERRUPTIBLE); + * + * Where wake_up_state() (and all other wakeup primitives) imply enough + * barriers to order the store of the variable against wakeup. + * + * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, + * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a + * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). + * + * This is obviously fine, since they both store the exact same value. + * + * Also see the comments of try_to_wake_up(). 
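A concrete rendering of the sleep/wake pairing the comment above describes, with done and waiter as illustrative stand-ins:

	/* sleeper */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(done))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* waker */
	WRITE_ONCE(done, true);
	wake_up_process(waiter);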
*/ #define __set_current_state(state_value) \ do { current->state = (state_value); } while (0) @@ -520,7 +540,11 @@ static inline int get_dumpable(struct mm_struct *mm) /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ @@ -830,6 +854,16 @@ struct signal_struct { #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + /* If true, all threads except ->group_exit_task have pending SIGKILL */ static inline int signal_group_exit(const struct signal_struct *sig) { @@ -989,7 +1023,7 @@ enum cpu_idle_type { * already in a wake queue, the wakeup will happen soon and the second * waker can just skip it. * - * The WAKE_Q macro declares and initializes the list head. + * The DEFINE_WAKE_Q macro declares and initializes the list head. * wake_up_q() does NOT reinitialize the list; it's expected to be * called near the end of a function, where the fact that the queue is * not used again will be easy to see by inspection. @@ -1009,7 +1043,7 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define WAKE_Q(name) \ +#define DEFINE_WAKE_Q(name) \ struct wake_q_head name = { WAKE_Q_TAIL, &name.first } extern void wake_q_add(struct wake_q_head *head, @@ -1057,6 +1091,8 @@ static inline int cpu_numa_flags(void) } #endif +extern int arch_asym_cpu_priority(int cpu); + struct sched_domain_attr { int relax_domain_level; }; @@ -1627,7 +1663,10 @@ struct task_struct { int __user *set_child_tid; /* CLONE_CHILD_SETTID */ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - cputime_t utime, stime, utimescaled, stimescaled; + cputime_t utime, stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + cputime_t utimescaled, stimescaled; +#endif cputime_t gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -1656,6 +1695,7 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ const struct cred __rcu *real_cred; /* objective and real subjective task * credentials (COW) */ const struct cred __rcu *cred; /* effective (overridable) subjective task @@ -1791,6 +1831,9 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock */ struct list_head cg_list; #endif +#ifdef CONFIG_INTEL_RDT_A + int closid; +#endif #ifdef CONFIG_FUTEX struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT @@ -2220,40 +2263,45 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); -extern void task_cputime_scaled(struct task_struct *t, - cputime_t *utimescaled, cputime_t *stimescaled); extern cputime_t task_gtime(struct task_struct *t); 
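The WAKE_Q() to DEFINE_WAKE_Q() rename above means call sites now read roughly as follows (task is an illustrative task_struct pointer):

	DEFINE_WAKE_Q(wake_q);

	/* while holding the lock: queue whoever must be woken */
	wake_q_add(&wake_q, task);

	/* after dropping the lock */
	wake_up_q(&wake_q);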
#else static inline void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime) { - if (utime) - *utime = t->utime; - if (stime) - *stime = t->stime; + *utime = t->utime; + *stime = t->stime; +} + +static inline cputime_t task_gtime(struct task_struct *t) +{ + return t->gtime; } +#endif +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME static inline void task_cputime_scaled(struct task_struct *t, cputime_t *utimescaled, cputime_t *stimescaled) { - if (utimescaled) - *utimescaled = t->utimescaled; - if (stimescaled) - *stimescaled = t->stimescaled; + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; } - -static inline cputime_t task_gtime(struct task_struct *t) +#else +static inline void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, + cputime_t *stimescaled) { - return t->gtime; + task_cputime(t, utimescaled, stimescaled); } #endif + extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); /* * Per process flags */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ @@ -2444,6 +2492,10 @@ static inline void calc_load_enter_idle(void) { } static inline void calc_load_exit_idle(void) { } #endif /* CONFIG_NO_HZ_COMMON */ +#ifndef cpu_relax_yield +#define cpu_relax_yield() cpu_relax() +#endif + /* * Do not use outside of architecture code which knows its limitations. * @@ -2567,6 +2619,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); #ifdef CONFIG_PROC_FS extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); @@ -2576,6 +2629,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { } static inline void sched_autogroup_detach(struct task_struct *p) { } static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif extern int yield_to(struct task_struct *p, bool preempt); @@ -2609,7 +2663,7 @@ extern struct task_struct *idle_task(int cpu); */ static inline bool is_idle_task(const struct task_struct *p) { - return p->pid == 0; + return !!(p->flags & PF_IDLE); } extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); @@ -3506,6 +3560,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +/* + * In order to reduce various lock holder preemption latencies provide an + * interface to see if a vCPU is currently running or not. + * + * This allows us to terminate optimistic spin loops and block, analogous to + * the native optimistic spin heuristic of testing if the lock owner task is + * running or not. 
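A hedged sketch of the optimistic-spin use this comment describes; owner stands for the lock-holding task, and the loop shape is illustrative rather than lifted from any particular lock implementation:

	while (READ_ONCE(owner->on_cpu)) {
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			break;	/* the holder's vCPU is preempted: block instead */
		cpu_relax();
	}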
+ */ +#ifndef vcpu_is_preempted +# define vcpu_is_preempted(cpu) false +#endif + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 22db1e63707e..441145351301 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; extern unsigned int sysctl_sched_time_avg; -extern unsigned int sysctl_sched_shares_window; int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, diff --git a/include/linux/seg6.h b/include/linux/seg6.h new file mode 100644 index 000000000000..7a66d2b4c5a6 --- /dev/null +++ b/include/linux/seg6.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_H +#define _LINUX_SEG6_H + +#include <uapi/linux/seg6.h> + +#endif diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h new file mode 100644 index 000000000000..d6c3fb4f3734 --- /dev/null +++ b/include/linux/seg6_genl.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_GENL_H +#define _LINUX_SEG6_GENL_H + +#include <uapi/linux/seg6_genl.h> + +#endif diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h new file mode 100644 index 000000000000..da437ebdc6cd --- /dev/null +++ b/include/linux/seg6_hmac.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_HMAC_H +#define _LINUX_SEG6_HMAC_H + +#include <uapi/linux/seg6_hmac.h> + +#endif diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h new file mode 100644 index 000000000000..5377cf6a5a02 --- /dev/null +++ b/include/linux/seg6_iptunnel.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_IPTUNNEL_H +#define _LINUX_SEG6_IPTUNNEL_H + +#include <uapi/linux/seg6_iptunnel.h> + +#endif diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h @@ -20,7 +20,7 @@ #ifndef __LINUX_SEQNO_FENCE_H #define __LINUX_SEQNO_FENCE_H -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/dma-buf.h> enum seqno_fence_condition { @@ -29,15 +29,15 @@ enum seqno_fence_condition { }; struct seqno_fence { - struct fence base; + struct dma_fence base; - const struct fence_ops *ops; + const struct dma_fence_ops *ops; struct dma_buf *sync_buf; uint32_t seqno_ofs; enum seqno_fence_condition condition; }; -extern const struct fence_ops seqno_fence_ops; +extern const struct dma_fence_ops seqno_fence_ops; /** * to_seqno_fence - cast a fence to a seqno_fence @@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; * or the seqno_fence otherwise. */ static inline struct seqno_fence * -to_seqno_fence(struct fence *fence) +to_seqno_fence(struct dma_fence *fence) { if (fence->ops != &seqno_fence_ops) return NULL; @@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the * device's vm can be expensive. * - * It is recommended for creators of seqno_fence to call fence_signal + * It is recommended for creators of seqno_fence to call dma_fence_signal() * before destruction. 
This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check fence_is_signaled + * time of issue vs time of check, since users can check dma_fence_is_signaled() * before submitting instructions for the hardware to wait on the fence. * However, when ops.enable_signaling is not called, it doesn't have to be * done as soon as possible, just before there's any real danger of seqno @@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, struct dma_buf *sync_buf, uint32_t context, uint32_t seqno_ofs, uint32_t seqno, enum seqno_fence_condition cond, - const struct fence_ops *ops) + const struct dma_fence_ops *ops) { BUG_ON(!fence || !sync_buf || !ops); BUG_ON(!ops->wait || !ops->enable_signaling || !ops->get_driver_name || !ops->get_timeline_name); /* - * ops is used in fence_init for get_driver_name, so needs to be + * ops is used in dma_fence_init for get_driver_name, so needs to be * initialized first */ fence->ops = ops; - fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); get_dma_buf(sync_buf); fence->sync_buf = sync_buf; fence->seqno_ofs = seqno_ofs; diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 48ec7651989b..61fbb440449c 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -36,6 +36,8 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, @@ -94,7 +96,7 @@ struct uart_8250_port { struct uart_port port; struct timer_list timer; /* "no irq" timer */ struct list_head list; /* ports on this IRQ */ - unsigned short capabilities; /* port capabilities */ + u32 capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ bool fifo_bug; /* min RX trigger if enabled */ unsigned int tx_loadsz; /* transmit fifo load size */ @@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern void serial8250_do_set_ldisc(struct uart_port *port, + struct ktermios *termios); extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); @@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe); extern void serial8250_set_isa_configurator(void (*v) (int port, struct uart_port *up, - unsigned short *capabilities)); + u32 *capabilities)); #endif diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 344201437017..5def8e830fb0 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -111,8 +111,8 @@ struct uart_icount { __u32 buf_overrun; }; -typedef unsigned int __bitwise__ upf_t; -typedef unsigned int __bitwise__ upstat_t; +typedef unsigned int __bitwise upf_t; +typedef unsigned int __bitwise upstat_t; struct uart_port { spinlock_t lock; /* port lock */ @@ -123,6 +123,8 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); unsigned int 
(*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); int (*startup)(struct uart_port *port); diff --git a/include/linux/signal.h b/include/linux/signal.h index b63f63eaa39c..5308304993be 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set) } } +static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) +{ + switch (_NSIG_WORDS) { + case 4: + return (set1->sig[3] == set2->sig[3]) && + (set1->sig[2] == set2->sig[2]) && + (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 2: + return (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 1: + return set1->sig[0] == set2->sig[0]; + } + return 0; +} + #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 601258f6e621..a410715bbef8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -645,8 +645,15 @@ struct sk_buff { struct rb_node rbnode; /* used in netem & tcp stack */ }; struct sock *sk; - struct net_device *dev; + union { + struct net_device *dev; + /* Some protocols might use this space to store information, + * while device pointer would be NULL. + * UDP receive path is one user. + */ + unsigned long dev_scratch; + }; /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. If you @@ -936,6 +943,7 @@ struct sk_buff_fclones { /** * skb_fclone_busy - check if fclone is busy + * @sk: socket * @skb: buffer * * Returns true if skb is a fast clone, and its clone is not freed. @@ -1086,7 +1094,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(const struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); @@ -1798,11 +1806,11 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb) return skb->len - skb->data_len; } -static inline int skb_pagelen(const struct sk_buff *skb) +static inline unsigned int skb_pagelen(const struct sk_buff *skb) { - int i, len = 0; + unsigned int i, len = 0; - for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) + for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) len += skb_frag_size(&skb_shinfo(skb)->frags[i]); return len + skb_headlen(skb); } @@ -1965,6 +1973,8 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } +void skb_condense(struct sk_buff *skb); + /** * skb_headroom - bytes at buffer head * @skb: buffer to check @@ -2470,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, static inline void skb_free_frag(void *addr) { - __free_page_frag(addr); + page_frag_free(addr); } void *napi_alloc_frag(unsigned int fragsz); @@ -2808,12 +2818,12 @@ static inline int skb_add_data(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_NONE) { __wsum csum = 0; - if (csum_and_copy_from_iter(skb_put(skb, copy), copy, - &csum, from) == copy) { + if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, + &csum, from)) { skb->csum = csum_block_add(skb->csum, csum, off); return 0; } - } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) + } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) 
return 0; __skb_trim(skb, off); @@ -3032,9 +3042,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb) int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, const struct sk_buff *skb); struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), int *peeked, int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); @@ -3213,7 +3227,7 @@ static inline ktime_t net_timedelta(ktime_t t) static inline ktime_t net_invalid_timestamp(void) { - return ktime_set(0, 0); + return 0; } struct sk_buff *skb_clone_sk(struct sk_buff *skb); diff --git a/include/linux/slab.h b/include/linux/slab.h index 084b12bad198..4c5363566815 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr, * (PAGE_SIZE*2). Larger requests are passed to the page allocator. */ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif @@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr, * be allocated from the same page. */ #define KMALLOC_SHIFT_HIGH PAGE_SHIFT -#define KMALLOC_SHIFT_MAX 30 +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index e302c447e057..129bc674dcf5 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -39,6 +39,7 @@ struct smc91x_platdata { unsigned long flags; unsigned char leda; unsigned char ledb; + bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h index a37bc5538f19..eab64976a73b 100644 --- a/include/linux/soc/qcom/wcnss_ctrl.h +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -3,6 +3,19 @@ #include <linux/soc/qcom/smd.h> +#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) + struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); +#else + +static inline struct qcom_smd_channel* +qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +{ + WARN_ON(1); + return ERR_PTR(-ENXIO); +} + +#endif + #endif diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h new file mode 100644 index 000000000000..a18e0783946b --- /dev/null +++ b/include/linux/soc/renesas/rcar-rst.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ +#define __LINUX_SOC_RENESAS_RCAR_RST_H__ + +int rcar_rst_read_mode_pins(u32 *mode); + +#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h new file mode 100644 index 000000000000..0ccbc138c26a --- /dev/null +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -0,0 +1,249 @@ +/* + * Texas Instruments System Control Interface Protocol + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __TISCI_PROTOCOL_H +#define __TISCI_PROTOCOL_H + +/** + * struct ti_sci_version_info - version information structure + * @abi_major: Major ABI version. Change here implies risk of backward + * compatibility break. + * @abi_minor: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @firmware_revision: Firmware revision (not usually used). + * @firmware_description: Firmware description (not usually used). + */ +struct ti_sci_version_info { + u8 abi_major; + u8 abi_minor; + u16 firmware_revision; + char firmware_description[32]; +}; + +struct ti_sci_handle; + +/** + * struct ti_sci_core_ops - SoC Core Operations + * @reboot_device: Reboot the SoC + * Returns 0 for successful request (ideally should never return), + * else returns corresponding error value. + */ +struct ti_sci_core_ops { + int (*reboot_device)(const struct ti_sci_handle *handle); +}; + +/** + * struct ti_sci_dev_ops - Device control operations + * @get_device: Command to request for device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @idle_device: Command to idle a device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @put_device: Command to release a device managed by TISCI + * Returns 0 for successful release, else returns corresponding + * error message. + * @is_valid: Check if the device ID is a valid ID. + * Returns 0 if the ID is valid, else returns corresponding error. + * @get_context_loss_count: Command to retrieve context loss counter - this + * increments every time the device loses context. Overflow + * is possible. + * - count: pointer to u32 which will retrieve counter + * Returns 0 for successful information request and count has + * proper data, else returns corresponding error message. + * @is_idle: Reports back about device idle state + * - req_state: Returns requested idle state + * Returns 0 for successful information request and req_state and + * current_state have proper data, else returns corresponding error + * message. + * @is_stop: Reports back about device stop state + * - req_state: Returns requested stop state + * - current_state: Returns current stop state + * Returns 0 for successful information request and req_state and + * current_state have proper data, else returns corresponding error + * message. + * @is_on: Reports back about device ON (or active) state + * - req_state: Returns requested ON state + * - current_state: Returns current ON state + * Returns 0 for successful information request and req_state and + * current_state have proper data, else returns corresponding error + * message. + * @is_transitioning: Reports back if the device is in the middle of transition + * of state. + * -current_state: Returns 'true' if currently transitioning. + * @set_device_resets: Command to configure resets for device managed by TISCI. + * -reset_state: Device specific reset bit field + * Returns 0 for successful request, else returns + * corresponding error message. + * @get_device_resets: Command to read state of resets for device managed + * by TISCI.
+ * -reset_state: pointer to u32 which will retrieve resets + * Returns 0 for successful request, else returns + * corresponding error message. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + */ +struct ti_sci_dev_ops { + int (*get_device)(const struct ti_sci_handle *handle, u32 id); + int (*idle_device)(const struct ti_sci_handle *handle, u32 id); + int (*put_device)(const struct ti_sci_handle *handle, u32 id); + int (*is_valid)(const struct ti_sci_handle *handle, u32 id); + int (*get_context_loss_count)(const struct ti_sci_handle *handle, + u32 id, u32 *count); + int (*is_idle)(const struct ti_sci_handle *handle, u32 id, + bool *requested_state); + int (*is_stop)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_on)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id, + bool *current_state); + int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 reset_state); + int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 *reset_state); +}; + +/** + * struct ti_sci_clk_ops - Clock control operations + * @get_clock: Request for activation of clock and management by processor + * - needs_ssc: 'true' if Spread Spectrum clock is desired. + * - can_change_freq: 'true' if frequency change is desired. + * - enable_input_term: 'true' if input termination is desired. + * @idle_clock: Request for Idling a clock managed by processor + * @put_clock: Release the clock to be auto managed by TISCI + * @is_auto: Is the clock being auto managed + * - req_state: state indicating if the clock is auto managed + * @is_on: Is the clock ON + * - req_state: if the clock is requested to be forced ON + * - current_state: if the clock is currently ON + * @is_off: Is the clock OFF + * - req_state: if the clock is requested to be forced OFF + * - current_state: if the clock is currently Gated + * @set_parent: Set the clock source of a specific device clock + * - parent_id: Parent clock identifier to set. + * @get_parent: Get the current clock source of a specific device clock + * - parent_id: Parent clock identifier which is the parent. + * @get_num_parents: Get the number of parents of the current clock source + * - num_parents: returns the number of parent clocks. + * @get_best_match_freq: Find a best matching frequency for a frequency + * range. + * - match_freq: Best matching frequency in Hz. + * @set_freq: Set the Clock frequency + * @get_freq: Get the Clock frequency + * - current_freq: Frequency in Hz that the clock is at. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -did: Device identifier this request is for + * -cid: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * -min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * -target_freq: The target clock frequency in Hz.
+ +/** + * struct ti_sci_ops - Function support for TI SCI + * @core_ops: Core specific operations + * @dev_ops: Device specific operations + * @clk_ops: Clock specific operations + */ +struct ti_sci_ops { + struct ti_sci_core_ops core_ops; + struct ti_sci_dev_ops dev_ops; + struct ti_sci_clk_ops clk_ops; +}; + +/** + * struct ti_sci_handle - Handle returned to TI SCI clients for usage. + * @version: structure containing version information + * @ops: operations that are made available to TI SCI clients + */ +struct ti_sci_handle { + struct ti_sci_version_info version; + struct ti_sci_ops ops; +}; + +#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) +const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); +int ti_sci_put_handle(const struct ti_sci_handle *handle); +const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); + +#else /* CONFIG_TI_SCI_PROTOCOL */ + +static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} + +static inline int ti_sci_put_handle(const struct ti_sci_handle *handle) +{ + return -EINVAL; +} + +static inline +const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} + +#endif /* CONFIG_TI_SCI_PROTOCOL */ + +#endif /* __TISCI_PROTOCOL_H */
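Note that the !CONFIG_TI_SCI_PROTOCOL stubs return ERR_PTR(-EINVAL), so callers are expected to IS_ERR()-check the handle rather than compare against NULL. A minimal probe-time sketch (hypothetical driver, assuming the usual platform-device includes):

static int example_probe(struct platform_device *pdev)
{
	const struct ti_sci_handle *handle;

	/* devm_ variant: the handle is put automatically on driver detach */
	handle = devm_ti_sci_get_handle(&pdev->dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	dev_info(&pdev->dev, "TI SCI ABI %u.%u\n",
		 handle->version.abi_major, handle->version.abi_minor);
	return 0;
}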
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4b743ac35396..75c6bd0ac605 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -442,6 +442,7 @@ struct spi_master { #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ +#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ /* * on some hardware transfer / message size may be constrained diff --git a/include/linux/stm.h b/include/linux/stm.h index 8369d8a8cabd..210ff2292361 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -133,7 +133,7 @@ int stm_source_register_device(struct device *parent, struct stm_source_data *data); void stm_source_unregister_device(struct stm_source_data *data); -int stm_source_write(struct stm_source_data *data, unsigned int chan, - const char *buf, size_t count); +int notrace stm_source_write(struct stm_source_data *data, unsigned int chan, + const char *buf, size_t count); #endif /* _STM_H_ */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 705840e0438f..266dab9ad782 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data { struct stmmac_dma_cfg { int pbl; + int txpbl; + int rxpbl; + bool pblx8; int fixed_burst; int mixed_burst; bool aal; @@ -135,8 +138,6 @@ struct plat_stmmacenet_data { void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); - void (*suspend)(struct platform_device *pdev, void *priv); - void (*resume)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; int has_gmac4; diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index cc3ae16eac68..757fb963696c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -79,7 +79,6 @@ struct svc_rdma_op_ctxt { struct ib_cqe reg_cqe; struct ib_cqe inv_cqe; struct list_head dto_q; - enum ib_wc_status wc_status; u32 byte_len; u32 position; struct svcxprt_rdma *xprt; @@ -139,7 +138,7 @@ struct svcxprt_rdma { int sc_max_sge_rd; /* max sge for read target */ bool sc_snd_w_inv; /* OK to use Send With Invalidate */ - atomic_t sc_sq_count; /* Number of SQ WR on queue */ + atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ unsigned int sc_rq_depth; /* Depth of RQ */ u32 sc_max_requests; /* Forward credits */ @@ -148,7 +147,6 @@ struct svcxprt_rdma { struct ib_pd *sc_pd; - atomic_t sc_dma_used; spinlock_t
sc_ctxt_lock; struct list_head sc_ctxts; int sc_ctxt_used; @@ -200,7 +198,6 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt) { ctxt->mapped_sges++; - atomic_inc(&rdma->sc_dma_used); } /* svc_rdma_backchannel.c */ @@ -236,8 +233,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, struct svc_rdma_req_map *, bool); extern int svc_rdma_sendto(struct svc_rqst *); -extern struct rpcrdma_read_chunk * - svc_rdma_get_read_chunk(struct rpcrdma_msg *); extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, int); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ab02a457da1f..7440290f64ac 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,6 +25,7 @@ struct svc_xprt_ops { void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); int (*xpo_secure_port)(struct svc_rqst *); + void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; struct svc_xprt_class { @@ -65,6 +66,7 @@ struct svc_xprt { #define XPT_LISTENER 10 /* listening endpoint */ #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ +#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/swap.h b/include/linux/swap.h index a56523cefb9b..7f47b7098b1b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -150,8 +150,9 @@ enum { SWP_FILE = (1 << 7), /* set after swap_activate success */ SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ + SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */ /* add others here before... 
*/ - SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */ + SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL @@ -246,39 +247,7 @@ struct swap_info_struct { void *workingset_eviction(struct address_space *mapping, struct page *page); bool workingset_refault(void *shadow); void workingset_activation(struct page *page); -extern struct list_lru workingset_shadow_nodes; - -static inline unsigned int workingset_node_pages(struct radix_tree_node *node) -{ - return node->count & RADIX_TREE_COUNT_MASK; -} - -static inline void workingset_node_pages_inc(struct radix_tree_node *node) -{ - node->count++; -} - -static inline void workingset_node_pages_dec(struct radix_tree_node *node) -{ - VM_WARN_ON_ONCE(!workingset_node_pages(node)); - node->count--; -} - -static inline unsigned int workingset_node_shadows(struct radix_tree_node *node) -{ - return node->count >> RADIX_TREE_COUNT_SHIFT; -} - -static inline void workingset_node_shadows_inc(struct radix_tree_node *node) -{ - node->count += 1U << RADIX_TREE_COUNT_SHIFT; -} - -static inline void workingset_node_shadows_dec(struct radix_tree_node *node) -{ - VM_WARN_ON_ONCE(!workingset_node_shadows(node)); - node->count -= 1U << RADIX_TREE_COUNT_SHIFT; -} +void workingset_update_node(struct radix_tree_node *node, void *private); /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; @@ -351,6 +320,9 @@ extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_SWAP + +#include <linux/blk_types.h> /* for bio_end_io_t */ + /* linux/mm/page_io.c */ extern int swap_readpage(struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5f81f8a187f2..4ee479f2f355 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -9,7 +9,13 @@ struct device; struct page; struct scatterlist; -extern int swiotlb_force; +enum swiotlb_force { + SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. 
*/ + SWIOTLB_FORCE, /* swiotlb=force */ + SWIOTLB_NO_FORCE, /* swiotlb=noforce */ +}; + +extern enum swiotlb_force swiotlb_force; /* * Maximum allowable number of contiguous slabs to map, @@ -44,11 +50,13 @@ enum dma_sync_target { extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, phys_addr_t phys, size_t size, - enum dma_data_direction dir); + enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); + size_t size, enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, @@ -73,14 +81,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, unsigned long attrs); extern int -swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); - -extern void -swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); - -extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs); @@ -114,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask); #ifdef CONFIG_SWIOTLB extern void __init swiotlb_free(void); +unsigned int swiotlb_max_segment(void); #else static inline void swiotlb_free(void) { } +static inline unsigned int swiotlb_max_segment(void) { return 0; } #endif extern void swiotlb_print_info(void); extern int is_swiotlb_buffer(phys_addr_t paddr); +extern void swiotlb_set_max_segment(unsigned int); #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -18,8 +18,8 @@ #include <linux/ktime.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/fence.h> -#include <linux/fence-array.h> +#include <linux/dma-fence.h> +#include <linux/dma-fence-array.h> /** * struct sync_file - sync file to export to the userspace @@ -41,13 +41,13 @@ struct sync_file { wait_queue_head_t wq; - struct fence *fence; - struct fence_cb cb; + struct dma_fence *fence; + struct dma_fence_cb cb; }; -#define POLL_ENABLED FENCE_FLAG_USER_BITS +#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS -struct sync_file *sync_file_create(struct fence *fence); -struct fence *sync_file_get_fence(int fd); +struct sync_file *sync_file_create(struct dma_fence *fence); +struct dma_fence *sync_file_get_fence(int fd); #endif /* _LINUX_SYNC_H */ diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index 2739ccb69571..bed223b70217 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h @@ -13,6 +13,7 @@ struct soc_device_attribute { const char *family; const char *revision; const char *soc_id; + const void *data; }; /** @@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev); */ struct device *soc_device_to_device(struct soc_device *soc); +#ifdef CONFIG_SOC_BUS +const struct soc_device_attribute *soc_device_match( + const struct soc_device_attribute *matches); +#else +static inline const struct soc_device_attribute *soc_device_match( + const struct soc_device_attribute *matches) { return NULL; } +#endif + #endif /* __SOC_BUS_H */
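soc_device_match() walks a table terminated by an empty entry and returns the first entry whose populated fields all match the registered SoC, which makes the new data pointer a convenient place to hang per-revision quirks. A sketch of the intended use (the family/revision strings and the quirk flag are invented for illustration):

static const struct soc_device_attribute example_quirks[] = {
	{ .family = "Example SoC", .revision = "1.0", .data = (void *)1UL },
	{ /* sentinel */ }
};

static void example_apply_quirks(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(example_quirks);
	if (match && match->data)
		pr_info("applying rev 1.0 errata workaround\n");
}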
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a17ae7b85218..c93f4b3a59cb 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { + union { + u8 val[TCP_FASTOPEN_COOKIE_MAX]; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr addr; +#endif + }; s8 len; - u8 val[TCP_FASTOPEN_COOKIE_MAX]; bool exp; /* In RFC6994 experimental option format */ }; @@ -123,6 +128,7 @@ struct tcp_request_sock { u32 txhash; u32 rcv_isn; u32 snt_isn; + u32 ts_off; u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# @@ -176,8 +182,6 @@ struct tcp_sock { * sum(delta(snd_una)), or how many bytes * were acked. */ - struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */ - u32 snd_una; /* First byte we want an ack for */ u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ @@ -187,7 +191,6 @@ struct tcp_sock { u32 tsoffset; /* timestamp offset */ struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ - unsigned long tsq_flags; /* Data for direct copy to user */ struct { @@ -213,8 +216,11 @@ struct tcp_sock { u8 reord; /* reordering detected */ } rack; u16 advmss; /* Advertised MSS */ - u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ - unused:7; + u32 chrono_start; /* Start time in jiffies of a TCP chrono */ + u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ + u8 chrono_type:2, /* current chronograph type */ + rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ + unused:5; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ thin_dupack : 1,/* Fast retransmit on first dupack */ @@ -362,7 +368,7 @@ struct tcp_sock { u32 *saved_syn; }; -enum tsq_flags { +enum tsq_enum { TSQ_THROTTLED, TSQ_QUEUED, TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ @@ -373,6 +379,15 @@ enum tsq_flags { */ }; +enum tsq_flags { + TSQF_THROTTLED = (1UL << TSQ_THROTTLED), + TSQF_QUEUED = (1UL << TSQ_QUEUED), + TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), + TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), + TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), + TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), +}; + static inline struct tcp_sock *tcp_sk(const struct sock *sk) { return (struct tcp_sock *)sk; @@ -427,4 +442,6 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) tp->saved_syn = NULL; } +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 511182a88e76..e275e98bdceb 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -28,6 +28,7 @@ #include <linux/of.h> #include <linux/idr.h> #include <linux/device.h> +#include <linux/sysfs.h> #include <linux/workqueue.h> #include <uapi/linux/thermal.h> @@ -204,6 +205,7 @@ struct thermal_zone_device { int id; char type[THERMAL_NAME_LENGTH]; struct device device; + struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 2873baf5372a..58373875e8ee 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -9,50 +9,17 @@ #include <linux/types.h> #include <linux/bug.h> - -struct timespec; -struct compat_timespec; +#include <linux/restart_block.h> #ifdef
CONFIG_THREAD_INFO_IN_TASK -#define current_thread_info() ((struct thread_info *)current) -#endif - /* - * System call restart block. + * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the + * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, + * including <asm/current.h> can cause a circular dependency on some platforms. */ -struct restart_block { - long (*fn)(struct restart_block *); - union { - /* For futex_wait and futex_wait_requeue_pi */ - struct { - u32 __user *uaddr; - u32 val; - u32 flags; - u32 bitset; - u64 time; - u32 __user *uaddr2; - } futex; - /* For nanosleep */ - struct { - clockid_t clockid; - struct timespec __user *rmtp; -#ifdef CONFIG_COMPAT - struct compat_timespec __user *compat_rmtp; +#include <asm/current.h> +#define current_thread_info() ((struct thread_info *)current) #endif - u64 expires; - } nanosleep; - /* For poll */ - struct { - struct pollfd __user *ufds; - int nfds; - int has_timeout; - unsigned long tv_sec; - unsigned long tv_nsec; - } poll; - }; -}; - -extern long do_no_restart_syscall(struct restart_block *parm); #include <linux/bitops.h> #include <asm/thread_info.h> diff --git a/include/linux/tick.h b/include/linux/tick.h index 62be0786d6d0..a04fea19676f 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -127,9 +127,7 @@ static inline void tick_nohz_idle_exit(void) { } static inline ktime_t tick_nohz_get_sleep_length(void) { - ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; - - return len; + return NSEC_PER_SEC / HZ; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } diff --git a/include/linux/time.h b/include/linux/time.h index 4cea09d94208..23f0f5ce3090 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); extern int do_getitimer(int which, struct itimerval *value); -extern unsigned int alarm_setitimer(unsigned int seconds); - extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); struct tms; diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 4382035a75bb..2496ad4cfc99 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -20,7 +20,7 @@ #include <linux/types.h> /* simplify initialization of mask field */ -#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) +#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) /** * struct cyclecounter - hardware abstraction for a free running counter @@ -37,8 +37,8 @@ * @shift: cycle to nanosecond divisor (power of two) */ struct cyclecounter { - cycle_t (*read)(const struct cyclecounter *cc); - cycle_t mask; + u64 (*read)(const struct cyclecounter *cc); + u64 mask; u32 mult; u32 shift; }; @@ -63,7 +63,7 @@ struct cyclecounter { */ struct timecounter { const struct cyclecounter *cc; - cycle_t cycle_last; + u64 cycle_last; u64 nsec; u64 mask; u64 frac; @@ -77,7 +77,7 @@ struct timecounter { * @frac: pointer to storage for the fractional nanoseconds. */ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, - cycle_t cycles, u64 mask, u64 *frac) + u64 cycles, u64 mask, u64 *frac) { u64 ns = (u64) cycles; @@ -134,6 +134,6 @@ extern u64 timecounter_read(struct timecounter *tc); * in the past. 
*/ extern u64 timecounter_cyc2time(struct timecounter *tc, - cycle_t cycle_tstamp); + u64 cycle_tstamp); #endif diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e88005459035..110f4532188c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,9 +29,9 @@ */ struct tk_read_base { struct clocksource *clock; - cycle_t (*read)(struct clocksource *cs); - cycle_t mask; - cycle_t cycle_last; + u64 (*read)(struct clocksource *cs); + u64 mask; + u64 cycle_last; u32 mult; u32 shift; u64 xtime_nsec; @@ -97,7 +97,7 @@ struct timekeeper { struct timespec64 raw_time; /* The following members are for timekeeping internal use */ - cycle_t cycle_interval; + u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u32 raw_interval; @@ -136,7 +136,7 @@ extern void update_vsyscall_tz(void); extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, struct clocksource *c, u32 mult, - cycle_t cycle_last); + u64 cycle_last); extern void update_vsyscall_tz(void); #else diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 09168c52ab64..d2e804e15c3e 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); +extern u64 ktime_get_boot_fast_ns(void); /* * Timespec interfaces utilizing the ktime based ones @@ -292,7 +293,7 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { - cycle_t cycles; + u64 cycles; ktime_t real; ktime_t raw; unsigned int clock_was_set_seq; @@ -320,7 +321,7 @@ struct system_device_crosststamp { * timekeeping code to verify comparability of two cycle values */ struct system_counterval_t { - cycle_t cycles; + u64 cycles; struct clocksource *cs; }; diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index bd36ce431e32..bab0b1ad0613 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -8,23 +8,7 @@ #ifndef _LINUX_TIMERFD_H #define _LINUX_TIMERFD_H -/* For O_CLOEXEC and O_NONBLOCK */ -#include <linux/fcntl.h> - -/* For _IO helpers */ -#include <linux/ioctl.h> - -/* - * CAREFUL: Check include/asm-generic/fcntl.h when defining - * new flags, since they might collide with O_* ones. We want - * to re-use O_* flags that couldn't possibly have a meaning - * from eventfd, in order to leave a free define-space for - * shared O_* flags. - */ -#define TFD_TIMER_ABSTIME (1 << 0) -#define TFD_TIMER_CANCEL_ON_SET (1 << 1) -#define TFD_CLOEXEC O_CLOEXEC -#define TFD_NONBLOCK O_NONBLOCK +#include <uapi/linux/timerfd.h> #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK) /* Flags for timerfd_create. */ @@ -32,6 +16,4 @@ /* Flags for timerfd_settime. */ #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) -#define TFD_IOC_SET_TICKS _IOW('T', 0, u64) - #endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/trace.h b/include/linux/trace.h new file mode 100644 index 000000000000..9330a58e2651 --- /dev/null +++ b/include/linux/trace.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_TRACE_H +#define _LINUX_TRACE_H + +#ifdef CONFIG_TRACING +/* + * The trace export - an export of Ftrace output. The trace_export + * can process traces and export them to a registered destination as + * an addition to the ring buffer, which is currently Ftrace's only output. + * + * If you want traces to be sent somewhere other than the ring buffer + * alone, you need only register a new trace_export and implement + * its own .write() function for writing traces to the storage. + * + * next - pointer to the next trace_export + * write - copy traces which have been dealt with by ->commit() to + * the destination + */ +struct trace_export { + struct trace_export __rcu *next; + void (*write)(const void *, unsigned int); +}; + +int register_ftrace_export(struct trace_export *export); +int unregister_ftrace_export(struct trace_export *export); + +#endif /* CONFIG_TRACING */ + +#endif /* _LINUX_TRACE_H */
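A sketch of wiring up such an export (the destination write-out is stubbed and all names are illustrative, not taken from the patch):

static void example_export_write(const void *buf, unsigned int len)
{
	/* copy the trace entry to the real destination, e.g. an MMIO FIFO */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}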
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4ac89acb6136..a03192052066 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -29,7 +29,7 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ struct static_key key; - void (*regfunc)(void); + int (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; }; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index be586c632a0c..f72fcfe0e66a 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -81,7 +81,7 @@ static inline void tracepoint_synchronize_unregister(void) } #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS -extern void syscall_regfunc(void); +extern int syscall_regfunc(void); extern void syscall_unregfunc(void); #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ diff --git a/include/linux/types.h b/include/linux/types.h index baf718324f4a..1e7bd24848fc 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -154,8 +154,8 @@ typedef u64 dma_addr_t; typedef u32 dma_addr_t; #endif -typedef unsigned __bitwise__ gfp_t; -typedef unsigned __bitwise__ fmode_t; +typedef unsigned __bitwise gfp_t; +typedef unsigned __bitwise fmode_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; @@ -228,8 +228,5 @@ struct callback_head { typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); -/* clocksource cycle base type */ -typedef u64 cycle_t; - #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/udp.h b/include/linux/udp.h index d1fd8cd39478..c0f530809d1f 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -79,6 +79,9 @@ struct udp_sock { int (*gro_complete)(struct sock *sk, struct sk_buff *skb, int nhoff); + + /* This field is dirtied by udp_recvmsg() */ + int forward_deficit; }; static inline struct udp_sock *udp_sk(const struct sock *sk) diff --git a/include/linux/uio.h b/include/linux/uio.h index 6e22b544d039..804e34c6f981 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); +bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); @@ -125,7 +127,7 @@ static inline bool iter_is_iovec(const struct iov_iter *i) * * The ?: is just for type
safety. */ -#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK) +#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE)) /* * Cap the iov_iter by given limit; note that the second argument is @@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) } size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int import_iovec(int type, const struct iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4a29c75b146e..0a294e950df8 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -27,6 +27,7 @@ #include <linux/errno.h> #include <linux/rbtree.h> #include <linux/types.h> +#include <linux/wait.h> struct vm_area_struct; struct mm_struct; diff --git a/include/linux/usb.h b/include/linux/usb.h index eba1f10e8cfd..7e68259360de 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type; * @minor_base: the start of the minor range for this driver. * * This structure is used for the usb_register_dev() and - * usb_unregister_dev() functions, to consolidate a number of the + * usb_deregister_dev() functions, to consolidate a number of the * parameters used for them. */ struct usb_class_driver { diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3a375d07d0dc..00d232406f18 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -81,7 +81,8 @@ #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) /* Driver flags */ -#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ +#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ +#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 8e81f9eb95e4..e4516e9ded0f 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) */ static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) { - return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize)); + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; + + return round_up(len, max_packet_size); } /** diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 66fc13705ab7..40edf6a8533e 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *); ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ +#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) /* GetBusState and SetHubDescriptor are optional, omitted */ -#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) -#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) -#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) -#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) -#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) -#define SetHubFeature 
(0x2000 | USB_REQ_SET_FEATURE) -#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) +#define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) +#define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) +#define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) +#define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) +#define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) +#define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) +#define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) /*-------------------------------------------------------------------------*/ /* class requests from USB 3.1 hub spec, table 10-7 */ -#define SetHubDepth (0x2000 | HUB_SET_DEPTH) -#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT) +#define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) +#define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) /* * Generic bandwidth allocation constants/support diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index dd66a952e8cd..11b92b047a1e 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -27,7 +27,7 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int handle_userfault(struct fault_env *fe, unsigned long reason); +extern int handle_userfault(struct vm_fault *vmf, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len); @@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline int handle_userfault(struct fault_env *fe, unsigned long reason) +static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason) { return VM_FAULT_SIGBUS; } diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 0ecae0b1cd34..edf9b2cad277 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -75,7 +75,16 @@ struct vfio_iommu_driver_ops { struct iommu_group *group); void (*detach_group)(void *iommu_data, struct iommu_group *group); - + int (*pin_pages)(void *iommu_data, unsigned long *user_pfn, + int npage, int prot, + unsigned long *phys_pfn); + int (*unpin_pages)(void *iommu_data, + unsigned long *user_pfn, int npage); + int (*register_notifier)(void *iommu_data, + unsigned long *events, + struct notifier_block *nb); + int (*unregister_notifier)(void *iommu_data, + struct notifier_block *nb); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); @@ -92,6 +101,36 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group); extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); +#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) + +extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, + int npage, int prot, unsigned long *phys_pfn); +extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, + int npage); + +/* each type has independent events */ +enum vfio_notify_type { + VFIO_IOMMU_NOTIFY = 0, + VFIO_GROUP_NOTIFY = 1, +}; + +/* events for VFIO_IOMMU_NOTIFY */ +#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) + +/* events for VFIO_GROUP_NOTIFY */ +#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0) + +extern int 
vfio_register_notifier(struct device *dev, + enum vfio_notify_type type, + unsigned long *required_events, + struct notifier_block *nb); +extern int vfio_unregister_notifier(struct device *dev, + enum vfio_notify_type type, + struct notifier_block *nb); + +struct kvm; +extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm); + /* * Sub-module helpers */ @@ -103,6 +142,13 @@ extern struct vfio_info_cap_header *vfio_info_cap_add( struct vfio_info_cap *caps, size_t size, u16 id, u16 version); extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); +extern int vfio_info_add_capability(struct vfio_info_cap *caps, + int cap_type_id, void *cap_type); + +extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, + int num_irqs, int max_irq_type, + size_t *data_size); + struct pci_dev; #ifdef CONFIG_EEH extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 1c912f85e041..66204007d7ac 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -58,7 +58,7 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, struct virtio_net_hdr *hdr, bool little_endian) { - memset(hdr, 0, sizeof(*hdr)); + memset(hdr, 0, sizeof(*hdr)); /* no info leak */ if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); @@ -98,4 +98,4 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, return 0; } -#endif /* _LINUX_VIRTIO_BYTEORDER */ +#endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3d9d786a943c..d68edffbf142 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, const void *caller); extern void vfree(const void *addr); +extern void vfree_atomic(const void *addr); extern void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot); diff --git a/include/linux/vme.h b/include/linux/vme.h index ea6095deba20..8c589176c2f8 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h @@ -113,7 +113,6 @@ struct vme_driver { int (*match)(struct vme_dev *); int (*probe)(struct vme_dev *); int (*remove)(struct vme_dev *); - void (*shutdown)(void); struct device_driver driver; struct list_head devices; }; diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 6abd24f258bc..833fdd4794a0 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds); extern void vt_kbd_con_start(int console); extern void vt_kbd_con_stop(int console); +void vc_scrolldelta_helper(struct vc_data *c, int lines, + unsigned int rolled_over, void *_base, unsigned int size); #endif /* _VT_KERN_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 2408e8d5c05c..1421132e9086 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -510,7 +510,7 @@ do { \ hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ hrtimer_init_sleeper(&__t, current); \ - if ((timeout).tv64 != KTIME_MAX) \ + if ((timeout) != KTIME_MAX) \ hrtimer_start_range_ns(&__t.timer, timeout, \ current->timer_slack_ns, \ HRTIMER_MODE_REL); \ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index fc6e22186405..a26cc437293c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -119,18 +119,30 @@ struct delayed_work { int cpu; }; -/* - * A struct for workqueue 
attributes. This can be used to change - attributes of an unbound workqueue. +/** + * struct workqueue_attrs - A struct for workqueue attributes. * - * Unlike other fields, ->no_numa isn't a property of a worker_pool. It - * only modifies how apply_workqueue_attrs() select pools and thus doesn't - * participate in pool hash calculations or equality comparisons. + * This can be used to change attributes of an unbound workqueue. */ struct workqueue_attrs { - int nice; /* nice level */ - cpumask_var_t cpumask; /* allowed CPUs */ - bool no_numa; /* disable NUMA affinity */ + /** + * @nice: nice level + */ + int nice; + + /** + * @cpumask: allowed CPUs + */ + cpumask_var_t cpumask; + + /** + * @no_numa: disable NUMA affinity + * + * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It + * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus + * doesn't participate in pool hash calculations or equality comparisons. + */ + bool no_numa; }; static inline struct delayed_work *to_delayed_work(struct work_struct *work) @@ -272,7 +284,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } /* * Workqueue flags and constants. For details, please refer to - * Documentation/workqueue.txt. + * Documentation/core-api/workqueue.rst. */ enum { WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ @@ -370,7 +382,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, * @args...: args for @fmt * * Allocate a workqueue with the specified parameters. For detailed - * information on WQ_* flags, please refer to Documentation/workqueue.txt. + * information on WQ_* flags, please refer to + * Documentation/core-api/workqueue.rst. * * The __lock_name macro dance is to guarantee that single lock_class_key * doesn't end up with different names, which isn't allowed by lockdep. @@ -590,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork, return queue_delayed_work(system_wq, dwork, delay); } -/** - * keventd_up - is workqueue initialized yet? - */ -static inline bool keventd_up(void) -{ - return system_wq != NULL; -} - #ifndef CONFIG_SMP static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) { @@ -632,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu); int workqueue_offline_cpu(unsigned int cpu); #endif +int __init workqueue_init_early(void); +int __init workqueue_init(void); + #endif diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 797100e10010..5527d910ba3d 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -9,6 +9,9 @@ #include <linux/fs.h> #include <linux/flex_proportions.h> #include <linux/backing-dev-defs.h> +#include <linux/blk_types.h> + +struct bio; DECLARE_PER_CPU(int, dirty_throttle_leaks); @@ -100,6 +103,16 @@ struct writeback_control { #endif }; +static inline int wbc_to_write_flags(struct writeback_control *wbc) +{ + if (wbc->sync_mode == WB_SYNC_ALL) + return REQ_SYNC; + else if (wbc->for_kupdate || wbc->for_background) + return REQ_BACKGROUND; + + return 0; +} +
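A sketch of how a filesystem's writeback path might fold this helper into a bio's flags (illustrative only; assumes the bio has already been built for the pages under writeback):

static void example_submit_wb_bio(struct bio *bio,
				  struct writeback_control *wbc)
{
	/*
	 * WB_SYNC_ALL writeback is tagged REQ_SYNC, while kupdate and
	 * background writeback get REQ_BACKGROUND, letting the block
	 * layer prioritize accordingly.
	 */
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(bio);
}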
/* * A wb_domain represents a domain that wb's (bdi_writeback's) belong to * and are measured against each other in. There always is one global @@ -362,7 +375,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); -void page_writeback_init(void); void balance_dirty_pages_ratelimited(struct address_space *mapping); bool wb_over_bg_thresh(struct bdi_writeback *wb); diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 2bb5deb0012e..7b0066814fa0 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) { ctx->task = current; - ctx->stamp = atomic_long_inc_return(&ww_class->stamp); + ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); ctx->acquired = 0; #ifdef CONFIG_DEBUG_MUTEXES ctx->ww_class = ww_class;