Diffstat (limited to 'include/linux')
61 files changed, 1094 insertions, 391 deletions
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 450717299928..58e6c3806c09 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -10,6 +10,8 @@ #include <linux/types.h> +struct amd_iommu; + /* * This is mainly used to communicate information back-and-forth * between SVM and IOMMU for setting up and tearing down posted @@ -33,24 +35,6 @@ extern int amd_iommu_detect(void); extern int amd_iommu_init_hardware(void); /** - * amd_iommu_enable_device_erratum() - Enable erratum workaround for device - * in the IOMMUv2 driver - * @pdev: The PCI device the workaround is necessary for - * @erratum: The erratum workaround to enable - * - * The function needs to be called before amd_iommu_init_device(). - * Possible values for the erratum number are for now: - * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI - * is enabled - * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI - * requests to one - */ -#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 -#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 - -extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); - -/** * amd_iommu_init_device() - Init device for use with IOMMUv2 driver * @pdev: The PCI device to initialize * @pasids: Number of PASIDs to support for this device @@ -212,4 +196,14 @@ static inline int amd_iommu_deactivate_guest_mode(void *data) } #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ +int amd_iommu_get_num_iommus(void); +bool amd_iommu_pc_supported(void); +u8 amd_iommu_pc_get_max_banks(unsigned int idx); +u8 amd_iommu_pc_get_max_counters(unsigned int idx); +int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +struct amd_iommu *get_amd_iommu(unsigned int idx); + #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 62c54234576c..6861489a1890 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -55,6 +55,8 @@ #define ARM_SMCCC_OWNER_TRUSTED_OS 50 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 +#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 + #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ @@ -87,8 +89,47 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_FUNC_QUERY_CALL_UID) + +/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU + +/* KVM "vendor specific" services */ +#define ARM_SMCCC_KVM_FUNC_FEATURES 0 +#define ARM_SMCCC_KVM_FUNC_PTP 1 +#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 +#define ARM_SMCCC_KVM_NUM_FUNCS 128 + +#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_FEATURES) + #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 +/* + * ptp_kvm is a feature used for time sync between vm and host. + * ptp_kvm module in guest kernel will get service from host using + * this hypercall ID. 
+ */ +#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_PTP) + +/* ptp_kvm counter type ID */ +#define KVM_PTP_VIRT_COUNTER 0 +#define KVM_PTP_PHYS_COUNTER 1 + /* Paravirtualised time calls (defined by ARM DEN0057A) */ #define ARM_SMCCC_HV_PV_TIME_FEATURES \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index f57f4473bbe4..a36cfcec4e77 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -5,10 +5,12 @@ #ifndef __ASSEMBLY__ #include <linux/align.h> -#include <linux/types.h> #include <linux/bitops.h> #include <linux/limits.h> #include <linux/string.h> +#include <linux/types.h> + +struct device; /* * bitmaps provide bit arrays that consume one or more unsigned @@ -119,54 +121,59 @@ * Allocation and deallocation of bitmap. * Provided in lib/bitmap.c to avoid circular dependency. */ -extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); -extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); -extern void bitmap_free(const unsigned long *bitmap); +unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +void bitmap_free(const unsigned long *bitmap); + +/* Managed variants of the above. */ +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags); +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags); /* * lib/bitmap.c provides these functions: */ -extern int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern bool __pure __bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits); -extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, - unsigned int nbits); -extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void bitmap_cut(unsigned long *dst, const unsigned long *src, - unsigned int first, unsigned int cut, - unsigned int nbits); -extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); +void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void bitmap_cut(unsigned long *dst, const unsigned long *src, + unsigned int first, unsigned int cut, unsigned int nbits); +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_xor(unsigned long *dst, const unsigned long 
*bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_replace(unsigned long *dst, + const unsigned long *old, const unsigned long *new, + const unsigned long *mask, unsigned int nbits); +int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_replace(unsigned long *dst, - const unsigned long *old, const unsigned long *new, - const unsigned long *mask, unsigned int nbits); -extern int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -extern void __bitmap_set(unsigned long *map, unsigned int start, int len); -extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); - -extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask, - unsigned long align_offset); +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +void __bitmap_set(unsigned long *map, unsigned int start, int len); +void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area @@ -191,33 +198,33 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_parse(const char *buf, unsigned int buflen, +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); -extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_parselist(const char *buf, unsigned long *maskp, +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); -extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern void bitmap_remap(unsigned long *dst, const unsigned long *src, +void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); -extern int bitmap_bitremap(int oldbit, +int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); -extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, +void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); -extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, +void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -extern int 
bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); #ifdef __BIG_ENDIAN -extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else #define bitmap_copy_le bitmap_copy #endif -extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); -extern int bitmap_print_to_pagebuf(bool list, char *buf, +unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) @@ -258,9 +265,9 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst, * therefore conversion is not needed when copying data from/to arrays of u32. */ #if BITS_PER_LONG == 64 -extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); -extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, +void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr32(bitmap, buf, nbits) \ diff --git a/include/linux/bug.h b/include/linux/bug.h index f639bd0122f3..348acf2558f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -36,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug) return bug->flags & BUGFLAG_WARNING; } +void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line); + struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); @@ -58,6 +61,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, return BUG_TRAP_TYPE_BUG; } +struct bug_entry; +static inline void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line) +{ + *file = NULL; + *line = 0; +} static inline void generic_bug_clear_once(void) {} diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a247b089ca78..d6ab416ee2d2 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -17,6 +17,7 @@ #include <linux/timer.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/clocksource_ids.h> #include <asm/div64.h> #include <asm/io.h> @@ -62,6 +63,10 @@ struct module; * 400-499: Perfect * The ideal clocksource. A must-use where * available. + * @id: Defaults to CSID_GENERIC. The id value is captured + * in certain snapshot functions to allow callers to + * validate the clocksource from which the snapshot was + * taken. 
* @flags: Flags describing special properties * @enable: Optional function to enable the clocksource * @disable: Optional function to disable the clocksource @@ -100,6 +105,7 @@ struct clocksource { const char *name; struct list_head list; int rating; + enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; unsigned long flags; diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h new file mode 100644 index 000000000000..16775d7d8f8d --- /dev/null +++ b/include/linux/clocksource_ids.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLOCKSOURCE_IDS_H +#define _LINUX_CLOCKSOURCE_IDS_H + +/* Enum to give clocksources a unique identifier */ +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER, + CSID_MAX, +}; + +#endif diff --git a/include/linux/compat.h b/include/linux/compat.h index cfac4ec9a7df..98dd7b324c35 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -466,6 +466,34 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsafe_put_user(__s->sig[0], &__c->sig[0], label); \ } \ } while (0) + +#define unsafe_get_compat_sigset(set, compat, label) do { \ + const compat_sigset_t __user *__c = compat; \ + compat_sigset_word hi, lo; \ + sigset_t *__s = set; \ + \ + switch (_NSIG_WORDS) { \ + case 4: \ + unsafe_get_user(lo, &__c->sig[7], label); \ + unsafe_get_user(hi, &__c->sig[6], label); \ + __s->sig[3] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 3: \ + unsafe_get_user(lo, &__c->sig[5], label); \ + unsafe_get_user(hi, &__c->sig[4], label); \ + __s->sig[2] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 2: \ + unsafe_get_user(lo, &__c->sig[3], label); \ + unsafe_get_user(hi, &__c->sig[2], label); \ + __s->sig[1] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 1: \ + unsafe_get_user(lo, &__c->sig[1], label); \ + unsafe_get_user(hi, &__c->sig[0], label); \ + __s->sig[0] = hi | (((long)lo) << 32); \ + } \ +} while (0) #else #define unsafe_put_compat_sigset(compat, set, label) do { \ compat_sigset_t __user *__c = compat; \ @@ -473,6 +501,13 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, \ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \ } while (0) + +#define unsafe_get_compat_sigset(set, compat, label) do { \ + const compat_sigset_t __user *__c = compat; \ + sigset_t *__s = set; \ + \ + unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \ +} while (0) #endif extern int compat_ptrace_request(struct task_struct *child, diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 976ec2697610..85008a65e21f 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -50,6 +50,7 @@ enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, + CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM, }; enum coresight_dev_subtype_link { @@ -455,6 +456,18 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o } #endif /* CONFIG_64BIT */ +static inline bool coresight_is_percpu_source(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC); +} + +static inline bool coresight_is_percpu_sink(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) && + (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM); +} + extern struct coresight_device * 
coresight_register(struct coresight_desc *desc); extern void coresight_unregister(struct coresight_device *csdev); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index db82ce5304f9..4a62b3980642 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -57,7 +57,7 @@ enum cpuhp_state { CPUHP_PAGE_ALLOC_DEAD, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, - CPUHP_IOMMU_INTEL_DEAD, + CPUHP_IOMMU_IOVA_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, @@ -169,6 +169,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c1e48014106f..9e23d33bb6f1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -59,6 +59,7 @@ struct qstr { extern const struct qstr empty_name; extern const struct qstr slash_name; +extern const struct qstr dotdot_name; struct dentry_stat_t { long nr_dentry; @@ -300,8 +301,8 @@ char *dynamic_dname(struct dentry *, char *, int, const char *, ...); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); -extern char *dentry_path_raw(struct dentry *, char *, int); -extern char *dentry_path(struct dentry *, char *, int); +extern char *dentry_path_raw(const struct dentry *, char *, int); +extern char *dentry_path(const struct dentry *, char *, int); /* Allocation counts.. */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 5c641f930caf..ff700fb6ce1d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, */ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); -/* - * A wrapper around vmalloc. - */ -void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); - /*----------------------------------------------------------------- * Macros. 
*---------------------------------------------------------------*/ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 706b68d1359b..6e75a2d689b4 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, struct iommu_domain *domain); +extern bool iommu_dma_forcedac; + #else /* CONFIG_IOMMU_DMA */ struct iommu_domain; @@ -81,10 +83,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } -static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, - struct iommu_domain *domain) -{ -} - #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 51872e736e7b..0d53a96a3d64 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -22,6 +22,11 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir); + struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + void (*free_noncontiguous)(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -198,6 +203,20 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ +/* + * This is the actual return value from the ->alloc_noncontiguous method. + * The users of the DMA API should only care about the sg_table, but to make + * the DMA-API internal vmaping and freeing easier we stash away the page + * array as well (except for the fallback case). This can go away any time, + * e.g. when a vmap-variant that takes a scatterlist comes along. 
+ */ +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; +#define sgt_handle(sgt) \ + container_of((sgt), struct dma_sgt_handle, sgt) + int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2a984cb4d1e0..183e7103a66d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -95,7 +95,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); - if (dma_addr == DMA_MAPPING_ERROR) + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) return -ENOMEM; return 0; } @@ -144,6 +144,15 @@ u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt); +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -257,12 +266,37 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; } +static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return NULL; +} +static inline void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ +} +static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + return NULL; +} +static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ +} +static inline int dma_mmap_noncontiguous(struct device *dev, + struct vm_area_struct *vma, size_t size, struct sg_table *sgt) +{ + return -EINVAL; +} #endif /* CONFIG_HAS_DMA */ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page); static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) @@ -401,7 +435,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? 
DMA_ATTR_NO_WARN : 0); } diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c6cc0a566ef5..5487a80617a3 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -168,7 +168,7 @@ struct f2fs_checkpoint { unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ - unsigned char sit_nat_version_bitmap[1]; + unsigned char sit_nat_version_bitmap[]; } __packed; #define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ diff --git a/include/linux/file.h b/include/linux/file.h index 225982792fa2..2de2e4613d7b 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -92,23 +92,20 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(int fd, struct file *file, int __user *ufd, +extern int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { if (ufd == NULL) return -EFAULT; - return __receive_fd(-1, file, ufd, o_flags); + return __receive_fd(file, ufd, o_flags); } static inline int receive_fd(struct file *file, unsigned int o_flags) { - return __receive_fd(-1, file, NULL, o_flags); -} -static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags) -{ - return __receive_fd(fd, file, NULL, o_flags); + return __receive_fd(file, NULL, o_flags); } +int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 66e2423d9feb..9d1a5c175065 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -72,6 +72,12 @@ enum pm_api_id { PM_FPGA_LOAD = 22, PM_FPGA_GET_STATUS = 23, PM_GET_CHIPID = 24, + PM_PINCTRL_REQUEST = 28, + PM_PINCTRL_RELEASE = 29, + PM_PINCTRL_GET_FUNCTION = 30, + PM_PINCTRL_SET_FUNCTION = 31, + PM_PINCTRL_CONFIG_PARAM_GET = 32, + PM_PINCTRL_CONFIG_PARAM_SET = 33, PM_IOCTL = 34, PM_QUERY_DATA = 35, PM_CLOCK_ENABLE = 36, @@ -122,6 +128,12 @@ enum pm_query_id { PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3, PM_QID_CLOCK_GET_PARENTS = 4, PM_QID_CLOCK_GET_ATTRIBUTES = 5, + PM_QID_PINCTRL_GET_NUM_PINS = 6, + PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7, + PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8, + PM_QID_PINCTRL_GET_FUNCTION_NAME = 9, + PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10, + PM_QID_PINCTRL_GET_PIN_GROUPS = 11, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; @@ -285,6 +297,44 @@ enum dll_reset_type { PM_DLL_RESET_PULSE = 2, }; +enum pm_pinctrl_config_param { + PM_PINCTRL_CONFIG_SLEW_RATE = 0, + PM_PINCTRL_CONFIG_BIAS_STATUS = 1, + PM_PINCTRL_CONFIG_PULL_CTRL = 2, + PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3, + PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4, + PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5, + PM_PINCTRL_CONFIG_TRI_STATE = 6, + PM_PINCTRL_CONFIG_MAX = 7, +}; + +enum pm_pinctrl_slew_rate { + PM_PINCTRL_SLEW_RATE_FAST = 0, + PM_PINCTRL_SLEW_RATE_SLOW = 1, +}; + +enum pm_pinctrl_bias_status { + PM_PINCTRL_BIAS_DISABLE = 0, + PM_PINCTRL_BIAS_ENABLE = 1, +}; + +enum pm_pinctrl_pull_ctrl { + PM_PINCTRL_BIAS_PULL_DOWN = 0, + PM_PINCTRL_BIAS_PULL_UP = 1, +}; + +enum pm_pinctrl_schmitt_cmos { + PM_PINCTRL_INPUT_TYPE_CMOS = 0, + PM_PINCTRL_INPUT_TYPE_SCHMITT = 1, +}; + +enum pm_pinctrl_drive_strength { + PM_PINCTRL_DRIVE_STRENGTH_2MA = 0, + PM_PINCTRL_DRIVE_STRENGTH_4MA = 1, + PM_PINCTRL_DRIVE_STRENGTH_8MA = 
2, + PM_PINCTRL_DRIVE_STRENGTH_12MA = 3, +}; + enum zynqmp_pm_shutdown_type { ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0, ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1, @@ -353,6 +403,14 @@ int zynqmp_pm_write_pggs(u32 index, u32 value); int zynqmp_pm_read_pggs(u32 index, u32 *value); int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); int zynqmp_pm_set_boot_health_status(u32 value); +int zynqmp_pm_pinctrl_request(const u32 pin); +int zynqmp_pm_pinctrl_release(const u32 pin); +int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id); +int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id); +int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value); +int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -532,6 +590,38 @@ static inline int zynqmp_pm_set_boot_health_status(u32 value) { return -ENODEV; } + +static inline int zynqmp_pm_pinctrl_request(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_release(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 86e5028bfa20..a69f363b61bf 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -33,7 +33,7 @@ /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that - * does. Or at least does enough to prevent any unwelcomed side effects. + * does. Or at least does enough to prevent any unwelcome side effects. */ #if !ARCH_SUPPORTS_FTRACE_OPS # define FTRACE_FORCE_LIST_FUNC 1 @@ -389,7 +389,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer); */ static inline void stack_tracer_disable(void) { - /* Preemption or interupts must be disabled */ + /* Preemption or interrupts must be disabled */ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); this_cpu_inc(disable_stack_tracer); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 286de0520574..3a268781fcec 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -227,7 +227,7 @@ struct gpio_irq_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be included + * If not %NULL, holds bitmask of GPIOs which are valid to be included * in IRQ domain of the chip. */ unsigned long *valid_mask; @@ -346,7 +346,7 @@ struct gpio_irq_chip { * output. * * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programing interface. + * they can all be accessed through a common programming interface. * Example sources would be SOC controllers, FPGAs, multifunction * chips, dedicated GPIO expanders, and so on. * @@ -435,15 +435,15 @@ struct gpio_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be used + * If not %NULL, holds bitmask of GPIOs which are valid to be used * from the chip. 
*/ unsigned long *valid_mask; #if defined(CONFIG_OF_GPIO) /* - * If CONFIG_OF is enabled, then all GPIO controllers described in the - * device tree automatically may have an OF translation + * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in + * the device tree automatically may have an OF translation */ /** @@ -508,7 +508,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, - * ie after core_initcall(). + * i.e. after core_initcall(). * * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain, bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); +#ifdef CONFIG_GPIOLIB_IRQCHIP int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain); +#else +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc, + struct irq_domain *domain) +{ + WARN_ON(1); + return -EINVAL; +} +#endif int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index ad76f3d0a6ba..334dd928042b 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -4,6 +4,7 @@ #define _LINUX_GPIO_REGMAP_H struct device; +struct fwnode_handle; struct gpio_regmap; struct irq_domain; struct regmap; @@ -16,6 +17,8 @@ struct regmap; * @parent: The parent device * @regmap: The regmap used to access the registers * given, the name of the device is used + * @fwnode: (Optional) The firmware node. + * If not given, the fwnode of the parent is used. * @label: (Optional) Descriptive name for GPIO controller. * If not given, the name of the device is used. * @ngpio: Number of GPIOs @@ -57,6 +60,7 @@ struct regmap; struct gpio_regmap_config { struct device *parent; struct regmap *regmap; + struct fwnode_handle *fwnode; const char *label; int ngpio; diff --git a/include/linux/hid.h b/include/linux/hid.h index ef702b3f56e3..271021e20a3f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -153,6 +153,7 @@ struct hid_item { #define HID_UP_CONSUMER 0x000c0000 #define HID_UP_DIGITIZER 0x000d0000 #define HID_UP_PID 0x000f0000 +#define HID_UP_BATTERY 0x00850000 #define HID_UP_HPVENDOR 0xff7f0000 #define HID_UP_HPVENDOR2 0xff010000 #define HID_UP_MSVENDOR 0xff000000 @@ -262,6 +263,8 @@ struct hid_item { #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_VOLUMEUP 0x000c00e9 +#define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 @@ -297,6 +300,8 @@ struct hid_item { #define HID_DG_TOOLSERIALNUMBER 0x000d005b #define HID_DG_LATENCYMODE 0x000d0060 +#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065 + #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 /* * HID report types --- Ouch! HID spec says 1 2 3! 
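Usage sketch (not part of the diff): the xlnx-zynqmp.h hunk above adds EEMI pinctrl wrappers (zynqmp_pm_pinctrl_request/release/set_function/set_config plus the PM_PINCTRL_* enums). A minimal, hypothetical caller might look like the following; the pin and function numbers are invented for illustration only.

#include <linux/firmware/xlnx-zynqmp.h>

/* Hypothetical helper: claim a pin, mux it, and slow its slew rate. */
static int example_zynqmp_mux_pin(u32 pin, u32 func_id)
{
	int ret;

	ret = zynqmp_pm_pinctrl_request(pin);		/* take ownership */
	if (ret)
		return ret;

	ret = zynqmp_pm_pinctrl_set_function(pin, func_id);
	if (!ret)
		ret = zynqmp_pm_pinctrl_set_config(pin,
						   PM_PINCTRL_CONFIG_SLEW_RATE,
						   PM_PINCTRL_SLEW_RATE_SLOW);
	if (ret)
		zynqmp_pm_pinctrl_release(pin);		/* undo on failure */
	return ret;
}

Per the stubs in the same hunk, all of these return -ENODEV when the firmware interface is not built in, so a caller can treat that as "pinctrl not available" rather than a hard error.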
diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 9eb77c87a83b..232e1bd507a7 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -25,14 +25,18 @@ u64 host1x_get_dma_mask(struct host1x *host1x); /** * struct host1x_client_ops - host1x client operations + * @early_init: host1x client early initialization code * @init: host1x client initialization code * @exit: host1x client tear down code + * @late_exit: host1x client late tear down code * @suspend: host1x client suspend code * @resume: host1x client resume code */ struct host1x_client_ops { + int (*early_init)(struct host1x_client *client); int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); + int (*late_exit)(struct host1x_client *client); int (*suspend)(struct host1x_client *client); int (*resume)(struct host1x_client *client); }; @@ -142,7 +146,9 @@ struct host1x_syncpt_base; struct host1x_syncpt; struct host1x; -struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp); u32 host1x_syncpt_id(struct host1x_syncpt *sp); u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); @@ -153,11 +159,17 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value); struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags); -void host1x_syncpt_free(struct host1x_syncpt *sp); +void host1x_syncpt_put(struct host1x_syncpt *sp); +struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, + unsigned long flags, + const char *name); struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); +void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, + u32 syncpt_id); + /* * host1x channel */ @@ -218,7 +230,7 @@ struct host1x_job { dma_addr_t *reloc_addr_phys; /* Sync point id, number of increments and end related to the submit */ - u32 syncpt_id; + struct host1x_syncpt *syncpt; u32 syncpt_incrs; u32 syncpt_end; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 56622658b215..e8f2ac8c9c3d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -51,6 +51,9 @@ struct module; struct property_entry; #if IS_ENABLED(CONFIG_I2C) +/* Return the Frequency mode string based on the bus frequency */ +const char *i2c_freq_mode_string(u32 bus_freq_hz); + /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to @@ -306,6 +309,8 @@ struct i2c_driver { * userspace_devices list * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter * calls it to pass on slave events to the slave driver. + * @devres_group_id: id of the devres group that will be created for resources + * acquired when probing this device. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. 
The behaviour exposed to Linux is defined by the driver @@ -334,6 +339,7 @@ struct i2c_client { #if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ #endif + void *devres_group_id; /* ID of probe devres group */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -391,7 +397,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * @platform_data: stored in i2c_client.dev.platform_data * @of_node: pointer to OpenFirmware device node * @fwnode: device node supplied by the platform firmware - * @properties: additional device properties for the device + * @swnode: software node for the device * @resources: resources associated with the device * @num_resources: number of resources in the @resources array * @irq: stored in i2c_client.irq @@ -415,7 +421,7 @@ struct i2c_board_info { void *platform_data; struct device_node *of_node; struct fwnode_handle *fwnode; - const struct property_entry *properties; + const struct software_node *swnode; const struct resource *resources; unsigned int num_resources; int irq; @@ -687,6 +693,8 @@ struct i2c_adapter_quirks { #define I2C_AQ_NO_ZERO_LEN_READ BIT(5) #define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6) #define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE) +/* adapter cannot do repeated START */ +#define I2C_AQ_NO_REP_START BIT(7) /* * i2c_adapter is the structure used to identify a physical i2c bus along @@ -844,6 +852,7 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) */ #if IS_ENABLED(CONFIG_I2C) int i2c_add_adapter(struct i2c_adapter *adap); +int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter); void i2c_del_adapter(struct i2c_adapter *adap); int i2c_add_numbered_adapter(struct i2c_adapter *adap); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1bc46b88711a..03faf20a6817 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -20,6 +20,7 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmar.h> #include <linux/ioasid.h> +#include <linux/bitfield.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -80,6 +81,7 @@ #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ +#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ @@ -126,6 +128,10 @@ #define DMAR_VCMD_REG 0xe10 /* Virtual command register */ #define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ +#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) +#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) +#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) + #define OFFSET_STRIDE (9) #define dmar_readq(a) readq(a) @@ -372,6 +378,7 @@ enum { /* PASID cache invalidation granu */ #define QI_PC_ALL_PASIDS 0 #define QI_PC_PASID_SEL 1 +#define QI_PC_GLOBAL 3 #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) @@ -763,14 +770,11 @@ u32 intel_svm_get_pasid(struct iommu_sva *handle); int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); -struct svm_dev_ops; - struct 
intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; struct intel_iommu *iommu; - struct svm_dev_ops *ops; struct iommu_sva sva; u32 pasid; int users; diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 39d368a810b8..10fa80eef13a 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -8,13 +8,6 @@ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ -struct device; - -struct svm_dev_ops { - void (*fault_cb)(struct device *dev, u32 pasid, u64 address, - void *private, int rwxp, int response); -}; - /* Values for rxwp in fault_cb callback */ #define SVM_REQ_READ (1<<3) #define SVM_REQ_WRITE (1<<2) @@ -22,16 +15,6 @@ struct svm_dev_ops { #define SVM_REQ_PRIV (1<<0) /* - * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" - * PASID for the current process. Even if a PASID already exists, a new one - * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID - * will not be given to subsequent callers. This facility allows a driver to - * disambiguate between multiple device contexts which access the same MM, - * if there is no other way to do so. It should be used sparingly, if at all. - */ -#define SVM_FLAG_PRIVATE_PASID (1<<0) - -/* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done * for kernel mappings; it is valid only for access to the kernel's static @@ -42,18 +25,18 @@ struct svm_dev_ops { * It is unlikely that we will ever hook into flush_tlb_kernel_range() to * do such IOTLB flushes automatically. */ -#define SVM_FLAG_SUPERVISOR_MODE (1<<1) +#define SVM_FLAG_SUPERVISOR_MODE BIT(0) /* * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest * processes. Compared to the host bind, the primary differences are: * 1. mm life cycle management * 2. fault reporting */ -#define SVM_FLAG_GUEST_MODE (1<<2) +#define SVM_FLAG_GUEST_MODE BIT(1) /* * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, * which requires guest and host PASID translation at both directions. */ -#define SVM_FLAG_GUEST_PASID (1<<3) +#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index a4c9ca2c31f1..4d40dfa75b55 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -204,10 +204,6 @@ struct io_pgtable { #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) -struct io_pgtable_domain_attr { - unsigned long quirks; -}; - static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) diff --git a/include/linux/iomap.h b/include/linux/iomap.h index d202fd2d0f91..c87d0cb0de6d 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -198,7 +198,6 @@ struct iomap_ioend { struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ - void *io_private; /* file system private data */ struct bio *io_bio; /* bio being built */ struct bio io_inline_bio; /* MUST BE LAST! 
*/ }; @@ -234,9 +233,7 @@ struct iomap_writepage_ctx { void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, - struct list_head *more_ioends, - void (*merge_private)(struct iomap_ioend *ioend, - struct iomap_ioend *next)); + struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); int iomap_writepage(struct page *page, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e7fe519430a..32d448050bf7 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -96,32 +96,6 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; -/* - * Following constraints are specifc to FSL_PAMUV1: - * -aperture must be power of 2, and naturally aligned - * -number of windows must be power of 2, and address space size - * of each window is determined by aperture size / # of windows - * -the actual size of the mapped region of a window must be power - * of 2 starting with 4KB and physical address must be naturally - * aligned. - * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. - * The caller can invoke iommu_domain_get_attr to check if the underlying - * iommu implementation supports these constraints. - */ - -enum iommu_attr { - DOMAIN_ATTR_GEOMETRY, - DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_WINDOWS, - DOMAIN_ATTR_FSL_PAMU_STASH, - DOMAIN_ATTR_FSL_PAMU_ENABLE, - DOMAIN_ATTR_FSL_PAMUV1, - DOMAIN_ATTR_NESTING, /* two stages of translation */ - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, - DOMAIN_ATTR_IO_PGTABLE_CFG, - DOMAIN_ATTR_MAX, -}; - /* These are the possible reserved region types */ enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ @@ -156,10 +130,24 @@ struct iommu_resv_region { enum iommu_resv_type type; }; -/* Per device IOMMU features */ +/** + * enum iommu_dev_features - Per device IOMMU features + * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature + * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses + * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally + * enabling %IOMMU_DEV_FEAT_SVA requires + * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page + * Faults themselves instead of relying on the IOMMU. When + * supported, this feature must be enabled before and + * disabled after %IOMMU_DEV_FEAT_SVA. + * + * Device drivers query whether a feature is supported using + * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). 
+ */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */ - IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */ + IOMMU_DEV_FEAT_AUX, + IOMMU_DEV_FEAT_SVA, + IOMMU_DEV_FEAT_IOPF, }; #define IOMMU_PASID_INVALID (-1U) @@ -203,13 +191,11 @@ struct iommu_iotlb_gather { * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @domain_get_attr: Query domain attributes - * @domain_set_attr: Change domain attributes + * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges - * @domain_window_enable: Configure and enable a particular window for a domain - * @domain_window_disable: Disable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -255,10 +241,9 @@ struct iommu_ops { void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*domain_get_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); - int (*domain_set_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); + int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -267,11 +252,6 @@ struct iommu_ops { struct iommu_domain *domain, struct iommu_resv_region *region); - /* Window handling functions */ - int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot); - void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); - int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); @@ -353,6 +333,7 @@ struct iommu_fault_param { * struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data + * @iopf_param: I/O Page Fault queue and data * @fwspec: IOMMU fwspec data * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data @@ -363,12 +344,15 @@ struct iommu_fault_param { struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; + struct iopf_device_param *iopf_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; }; -int iommu_device_register(struct iommu_device *iommu); +int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, @@ -379,25 +363,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); -static inline void __iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ - iommu->ops = ops; -} - -#define 
iommu_device_set_ops(iommu, ops) \ -do { \ - struct iommu_ops *__ops = (struct iommu_ops *)(ops); \ - __ops->owner = THIS_MODULE; \ - __iommu_device_set_ops(iommu, __ops); \ -} while (0) - -static inline void iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ - iommu->fwnode = fwnode; -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return (struct iommu_device *)dev_get_drvdata(dev); @@ -507,15 +472,12 @@ extern int iommu_page_response(struct device *dev, extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); -extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); +int iommu_enable_nesting(struct iommu_domain *domain); +int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks); -/* Window handling function prototypes */ -extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); +void iommu_set_dma_strict(bool val); +bool iommu_get_dma_strict(struct iommu_domain *domain); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -547,7 +509,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, * structure can be rewritten. */ if (gather->pgsize != size || - end < gather->start || start > gather->end) { + end + 1 < gather->start || start > gather->end + 1) { if (gather->pgsize) iommu_iotlb_sync(domain, gather); gather->pgsize = size; @@ -571,8 +533,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev); * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU - * @iommu_priv: IOMMU driver private data for this device - * @num_pasid_bits: number of PASID bits supported by this device + * @flags: IOMMU_FWSPEC_* flags * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU */ @@ -580,7 +541,6 @@ struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; - u32 num_pasid_bits; unsigned int num_ids; u32 ids[]; }; @@ -742,13 +702,6 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, { } -static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -{ - return -ENODEV; -} - static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; @@ -889,33 +842,19 @@ static inline int iommu_group_id(struct iommu_group *group) return -ENODEV; } -static inline int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - -static inline int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks) { - return -EINVAL; + return 0; } -static inline int iommu_device_register(struct iommu_device *iommu) +static inline int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev) { return -ENODEV; } -static inline void iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ -} - -static inline void 
iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return NULL; diff --git a/include/linux/iova.h b/include/linux/iova.h index c834c01c0a5b..71d8a2de6635 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -95,6 +95,7 @@ struct iova_domain { flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 when not */ + struct hlist_node cpuhp_dead; }; static inline unsigned long iova_size(struct iova *iova) @@ -156,7 +157,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); -void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #else static inline int iova_cache_get(void) { @@ -233,10 +233,6 @@ static inline void put_iova_domain(struct iova_domain *iovad) { } -static inline void free_cpu_cached_iovas(unsigned int cpu, - struct iova_domain *iovad) -{ -} #endif #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 7a1dd7b969b6..62a8e3d23829 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -256,11 +256,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, @@ -325,6 +325,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node) return d; } +static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); +} + /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 99d3cd051ac3..db0e1920cb12 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -61,7 +61,7 @@ void __jbd2_debug(int level, const char *file, const char *func, #define jbd_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else -#define jbd_debug(n, fmt, a...) /**/ +#define jbd_debug(n, fmt, a...) 
no_printk(fmt, ##a) #endif extern void *jbd2_alloc(size_t size, gfp_t flags); @@ -594,18 +594,22 @@ struct transaction_s */ unsigned long t_log_start; - /* Number of buffers on the t_buffers list [j_list_lock] */ + /* + * Number of buffers on the t_buffers list [j_list_lock, no locks + * needed for jbd2 thread] + */ int t_nr_buffers; /* * Doubly-linked circular list of all buffers reserved but not yet - * modified by this transaction [j_list_lock] + * modified by this transaction [j_list_lock, no locks needed fo + * jbd2 thread] */ struct journal_head *t_reserved_list; /* * Doubly-linked circular list of all metadata buffers owned by this - * transaction [j_list_lock] + * transaction [j_list_lock, no locks needed for jbd2 thread] */ struct journal_head *t_buffers; @@ -629,9 +633,11 @@ struct transaction_s struct journal_head *t_checkpoint_io_list; /* - * Doubly-linked circular list of metadata buffers being shadowed by log - * IO. The IO buffers on the iobuf list and the shadow buffers on this - * list match each other one for one at all times. [j_list_lock] + * Doubly-linked circular list of metadata buffers being + * shadowed by log IO. The IO buffers on the iobuf list and + * the shadow buffers on this list match each other one for + * one at all times. [j_list_lock, no locks needed for jbd2 + * thread] */ struct journal_head *t_shadow_list; @@ -768,7 +774,8 @@ enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; struct journal_s { /** - * @j_flags: General journaling state flags [j_state_lock] + * @j_flags: General journaling state flags [j_state_lock, + * no lock for quick racy checks] */ unsigned long j_flags; @@ -808,7 +815,8 @@ struct journal_s /** * @j_barrier_count: * - * Number of processes waiting to create a barrier lock [j_state_lock] + * Number of processes waiting to create a barrier lock [j_state_lock, + * no lock for quick racy checks] */ int j_barrier_count; @@ -821,7 +829,8 @@ struct journal_s * @j_running_transaction: * * Transactions: The current running transaction... - * [j_state_lock] [caller holding open handle] + * [j_state_lock, no lock for quick racy checks] [caller holding + * open handle] */ transaction_t *j_running_transaction; @@ -1033,7 +1042,7 @@ struct journal_s * @j_commit_sequence: * * Sequence number of the most recently committed transaction - * [j_state_lock]. 
+ * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_sequence; @@ -1041,7 +1050,7 @@ struct journal_s * @j_commit_request: * * Sequence number of the most recent transaction wanting commit - * [j_state_lock] + * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_request; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1b65e7204344..8895b95b6a22 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); @@ -218,6 +218,20 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +struct kvm_gfn_range { + struct kvm_memory_slot *slot; + gfn_t start; + gfn_t end; + pte_t pte; + bool may_block; +}; +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +#endif + enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, @@ -640,6 +654,7 @@ void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); +bool file_is_kvm(struct file *file); void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) @@ -886,7 +901,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot); + const struct kvm_memory_slot *memslot); #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, @@ -945,6 +960,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); @@ -1116,7 +1132,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) } static inline unsigned long -__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; } diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 61f04f7dc1a4..04c01794de83 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -59,6 +59,7 @@ LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb) 
LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index ba2ccd950833..5c4c5c0602cb 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -108,6 +108,9 @@ * allocated. * @sb contains the super_block structure to be modified. * Return 0 if operation was successful. + * @sb_delete: + * Release objects tied to a superblock (e.g. inodes). + * @sb contains the super_block structure being released. * @sb_free_security: * Deallocate and clear the sb->s_security field. * @sb contains the super_block structure to be modified. @@ -1585,6 +1588,7 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_superblock; int lbs_ipc; int lbs_msg_msg; int lbs_task; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 4e531c2aab52..f8e8d7e90616 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1236,7 +1236,7 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 52b7cabcde08..6d16eed6850e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -133,6 +133,7 @@ enum { MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_ALLOC_MEMIC = 0x205, MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, + MLX5_CMD_OP_MODIFY_MEMIC = 0x207, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -1031,7 +1032,11 @@ struct mlx5_ifc_device_mem_cap_bits { u8 header_modify_sw_icm_start_address[0x40]; - u8 reserved_at_180[0x680]; + u8 reserved_at_180[0x80]; + + u8 memic_operations[0x20]; + + u8 reserved_at_220[0x5e0]; }; struct mlx5_ifc_device_event_cap_bits { @@ -10498,6 +10503,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; +enum { + MLX5_MODIFY_MEMIC_OP_MOD_ALLOC, + MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC, +}; + +struct mlx5_ifc_modify_memic_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x18]; + u8 memic_operation_type[0x8]; + + u8 memic_start_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_modify_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 memic_operation_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + struct mlx5_ifc_alloc_memic_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; diff --git a/include/linux/msi.h b/include/linux/msi.h index aef35fd1cf11..6aff469e511d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -240,8 +240,7 @@ void pci_msi_unmask_irq(struct irq_data *data); /* * The arch hooks to setup up msi irqs. Default functions are implemented * as weak symbols so that they /can/ be overriden by architecture specific - * code if needed. These hooks must be enabled by the architecture or by - * drivers which depend on them via msi_controller based MSI handling. + * code if needed. These hooks can only be enabled by the architecture. * * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by * stubs with warnings. 
@@ -251,7 +250,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -void default_teardown_msi_irqs(struct pci_dev *dev); #else static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { @@ -272,19 +270,6 @@ static inline void arch_teardown_msi_irqs(struct pci_dev *dev) void arch_restore_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; - - int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); - int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, - int nvec, int type); - void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); -}; - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include <linux/irqhandler.h> diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 65d3d83015c3..fbdadd4d8377 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -85,6 +85,7 @@ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci.h b/include/linux/pci.h index 0fa104ee13d9..c20211e59a57 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -458,7 +458,6 @@ struct pci_dev { u32 saved_config_space[16]; /* Config space saved at suspend time */ struct hlist_head saved_cap_space; - struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ int rom_attr_enabled; /* Display of ROM attribute enabled? 
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ @@ -540,7 +539,6 @@ struct pci_host_bridge { int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; - struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ unsigned int native_aer:1; /* OS may use PCIe AER */ @@ -551,6 +549,7 @@ struct pci_host_bridge { unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ unsigned int size_windows:1; /* Enable root bus sizing */ + unsigned int msi_domain:1; /* Bridge wants MSI domain */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, @@ -621,7 +620,6 @@ struct pci_bus { struct resource busn_res; /* Bus numbers routed to this bus */ struct pci_ops *ops; /* Configuration access functions */ - struct msi_controller *msi; /* MSI controller */ void *sysdata; /* Hook for sys-specific extension */ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ @@ -1085,6 +1083,7 @@ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); u16 pci_find_ext_capability(struct pci_dev *dev, int cap); u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); u64 pci_get_dsn(struct pci_dev *dev); @@ -1209,6 +1208,7 @@ int __must_check pci_set_mwi(struct pci_dev *dev); int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); +void pci_disable_parity(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); @@ -1310,7 +1310,6 @@ void pci_unlock_rescan_remove(void); /* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); -int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); @@ -2319,14 +2318,13 @@ static inline u8 pci_vpd_info_field_size(const u8 *info_field) /** * pci_vpd_find_tag - Locates the Resource Data Type tag provided * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search * @len: The length of the vpd buffer * @rdt: The Resource Data Type to search for * * Returns the index where the Resource Data Type was found or * -ENOENT otherwise. 
*/ -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); /** * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a763928a0e41..f5a6a2f069ed 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -954,8 +954,6 @@ extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); -extern int perf_num_counters(void); -extern const char *perf_pmu_name(void); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 6aeb711f7cd1..e18ab3d5908f 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -76,10 +76,11 @@ struct pinctrl_map; * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. - * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power + * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. + * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current @@ -90,6 +91,7 @@ struct pinctrl_map; * value on the line. Use argument 1 to indicate high level, argument 0 to * indicate low level. (Please see Documentation/driver-api/pinctl.rst, * section "GPIO mode pitfalls" for a discussion around this parameter.) + * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. @@ -101,7 +103,6 @@ struct pinctrl_map; * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. - * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. 
@@ -124,14 +125,15 @@ enum pin_config_param { PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, - PIN_CONFIG_LOW_POWER_MODE, + PIN_CONFIG_MODE_LOW_POWER, + PIN_CONFIG_MODE_PWM, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, PIN_CONFIG_SKEW_DELAY, - PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h deleted file mode 100644 index 2107d0d992dd..000000000000 --- a/include/linux/platform_data/asoc-mx27vis.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H -#define __PLATFORM_DATA_ASOC_MX27VIS_H - -struct snd_mx27vis_platform_data { - int amp_gain0_gpio; - int amp_gain1_gpio; - int amp_mutel_gpio; - int amp_muter_gpio; -}; - -#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index b801ead1e2bb..d48a7192e881 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -73,6 +73,7 @@ enum sev_cmd { SEV_CMD_SEND_UPDATE_DATA = 0x041, SEV_CMD_SEND_UPDATE_VMSA = 0x042, SEV_CMD_SEND_FINISH = 0x043, + SEV_CMD_SEND_CANCEL = 0x044, /* Guest migration commands (incoming) */ SEV_CMD_RECEIVE_START = 0x050, @@ -326,11 +327,11 @@ struct sev_data_send_start { u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; - u64 plat_cert_address; /* In */ - u32 plat_cert_len; /* In */ + u64 plat_certs_address; /* In */ + u32 plat_certs_len; /* In */ u32 reserved2; - u64 amd_cert_address; /* In */ - u32 amd_cert_len; /* In */ + u64 amd_certs_address; /* In */ + u32 amd_certs_len; /* In */ u32 reserved3; u64 session_address; /* In */ u32 session_len; /* In/Out */ @@ -393,6 +394,15 @@ struct sev_data_send_finish { } __packed; /** + * struct sev_data_send_cancel - SEND_CANCEL command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_send_cancel { + u32 handle; /* In */ +} __packed; + +/** * struct sev_data_receive_start - RECEIVE_START command parameters * * @handle: handle of the VM to perform receive operation diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h new file mode 100644 index 000000000000..f960a719f0d5 --- /dev/null +++ b/include/linux/ptp_kvm.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Virtual PTP 1588 clock for use with KVM guests + * + * Copyright (C) 2017 Red Hat Inc. + */ + +#ifndef _PTP_KVM_H_ +#define _PTP_KVM_H_ + +struct timespec64; +struct clocksource; + +int kvm_arch_ptp_init(void); +int kvm_arch_ptp_get_clock(struct timespec64 *ts); +int kvm_arch_ptp_get_crosststamp(u64 *cycle, + struct timespec64 *tspec, struct clocksource **cs); + +#endif /* _PTP_KVM_H_ */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e4d84d4db293..5bb90af4997e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -91,6 +91,11 @@ struct pwm_device { * pwm_get_state() - retrieve the current PWM state * @pwm: PWM device * @state: state to fill with the current PWM state + * + * The returned PWM state represents the state that was applied by a previous call to + * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to + * hardware. If pwm_apply_state() was never called, this returns either the current hardware + * state (if supported) or the default settings. 
*/ static inline void pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state) @@ -392,8 +397,6 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity); int pwmchip_add(struct pwm_chip *chip); int pwmchip_remove(struct pwm_chip *chip); struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index f28ee75d1005..8b795b544f75 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -315,6 +315,7 @@ struct rproc; /** * struct rproc_mem_entry - memory entry descriptor * @va: virtual address + * @is_iomem: io memory * @dma: dma address * @len: length, in bytes * @da: device address @@ -329,6 +330,7 @@ struct rproc; */ struct rproc_mem_entry { void *va; + bool is_iomem; dma_addr_t dma; size_t len; u32 da; @@ -361,6 +363,7 @@ enum rsc_handling_status { * @start: power on the device and boot it * @stop: power off the device * @attach: attach to a device that his already powered up + * @detach: detach from a device, leaving it powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -368,7 +371,9 @@ enum rsc_handling_status { * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a * negative value on error * @load_rsc_table: load resource table from firmware image - * @find_loaded_rsc_table: find the loaded resouce table + * @find_loaded_rsc_table: find the loaded resource table from firmware image + * @get_loaded_rsc_table: get resource table installed in memory + * by external entity * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image @@ -383,13 +388,16 @@ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); int (*attach)(struct rproc *rproc); + int (*detach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*get_loaded_rsc_table)( + struct rproc *rproc, size_t *size); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); @@ -405,6 +413,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_ATTACHED: device has been booted by another entity and the core + * has attached to it * @RPROC_DETACHED: device has been booted by another entity and waiting * for the core to attach to it * @RPROC_LAST: just keep this one at the end @@ -421,8 +431,9 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_DETACHED = 5, - RPROC_LAST = 6, + RPROC_ATTACHED = 5, + RPROC_DETACHED = 6, + RPROC_LAST = 7, }; /** 
@@ -505,11 +516,12 @@ struct rproc_dump_segment { * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id. * @table_ptr: pointer to the resource table in effect + * @clean_table: copy of the resource table without modifications. Used + * when a remote processor is attached or detached from the core * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started - * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc * @char_dev: character device of the rproc @@ -542,11 +554,11 @@ struct rproc { bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; + struct resource_table *clean_table; struct resource_table *cached_table; size_t table_sz; bool has_iommu; bool auto_boot; - bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; @@ -655,6 +667,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); +int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); void rproc_coredump_using_sections(struct rproc *rproc); diff --git a/include/linux/reset.h b/include/linux/reset.h index b9109efa2a5c..db0e6115a2f6 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -10,6 +10,21 @@ struct device; struct device_node; struct reset_control; +/** + * struct reset_control_bulk_data - Data used for bulk reset control operations. + * + * @id: reset control consumer ID + * @rstc: struct reset_control * to store the associated reset control + * + * The reset APIs provide a series of reset_control_bulk_*() API calls as + * a convenience to consumers which require multiple reset controls. + * This structure is used to manage data for these calls. 
+ */ +struct reset_control_bulk_data { + const char *id; + struct reset_control *rstc; +}; + #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); @@ -20,6 +35,12 @@ int reset_control_status(struct reset_control *rstc); int reset_control_acquire(struct reset_control *rstc); void reset_control_release(struct reset_control *rstc); +int reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs); +int reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs); +int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs); +int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs); +void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs); + struct reset_control *__of_reset_control_get(struct device_node *node, const char *id, int index, bool shared, bool optional, bool acquired); @@ -27,10 +48,18 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired); void reset_control_put(struct reset_control *rstc); +int __reset_control_bulk_get(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs, + bool shared, bool optional, bool acquired); +void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs); + int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired); +int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs, + bool shared, bool optional, bool acquired); struct reset_control *devm_reset_control_array_get(struct device *dev, bool shared, bool optional); @@ -47,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc) return 0; } +static inline int reset_control_rearm(struct reset_control *rstc) +{ + return 0; +} + static inline int reset_control_assert(struct reset_control *rstc) { return 0; @@ -96,6 +130,48 @@ static inline struct reset_control *__reset_control_get( return optional ? NULL : ERR_PTR(-ENOTSUPP); } +static inline int +reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ + return 0; +} + +static inline int +reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ + return 0; +} + +static inline int +reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ + return 0; +} + +static inline int +reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ + return 0; +} + +static inline void +reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ +} + +static inline int +__reset_control_bulk_get(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs, + bool shared, bool optional, bool acquired) +{ + return optional ? 0 : -EOPNOTSUPP; +} + +static inline void +reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) +{ +} + static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional, @@ -104,6 +180,14 @@ static inline struct reset_control *__devm_reset_control_get( return optional ? 
NULL : ERR_PTR(-ENOTSUPP); } +static inline int +__devm_reset_control_bulk_get(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs, + bool shared, bool optional, bool acquired) +{ + return optional ? 0 : -EOPNOTSUPP; +} + static inline struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) { @@ -156,6 +240,23 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) } /** + * reset_control_bulk_get_exclusive - Lookup and obtain exclusive references to + * multiple reset controllers. + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Fills the rstcs array with pointers to exclusive reset controls and + * returns 0, or an IS_ERR() condition containing errno. + */ +static inline int __must_check +reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); +} + +/** * reset_control_get_exclusive_released - Lookup and obtain a temoprarily * exclusive reference to a reset * controller. @@ -177,6 +278,48 @@ __must_check reset_control_get_exclusive_released(struct device *dev, } /** + * reset_control_bulk_get_exclusive_released - Lookup and obtain temporarily + * exclusive references to multiple reset + * controllers. + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Fills the rstcs array with pointers to exclusive reset controls and + * returns 0, or an IS_ERR() condition containing errno. + * reset-controls returned by this function must be acquired via + * reset_control_bulk_acquire() before they can be used and should be released + * via reset_control_bulk_release() afterwards. + */ +static inline int __must_check +reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); +} + +/** + * reset_control_bulk_get_optional_exclusive_released - Lookup and obtain optional + * temporarily exclusive references to multiple + * reset controllers. + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Optional variant of reset_control_bulk_get_exclusive_released(). If the + * requested reset is not specified in the device tree, this function returns 0 + * instead of an error and missing rtsc is set to NULL. + * + * See reset_control_bulk_get_exclusive_released() for more information. + */ +static inline int __must_check +reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); +} + +/** * reset_control_get_shared - Lookup and obtain a shared reference to a * reset controller. * @dev: device to be reset by the controller @@ -205,6 +348,23 @@ static inline struct reset_control *reset_control_get_shared( } /** + * reset_control_bulk_get_shared - Lookup and obtain shared references to + * multiple reset controllers. 
+ * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Fills the rstcs array with pointers to shared reset controls and + * returns 0, or an IS_ERR() condition containing errno. + */ +static inline int __must_check +reset_control_bulk_get_shared(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); +} + +/** * reset_control_get_optional_exclusive - optional reset_control_get_exclusive() * @dev: device to be reset by the controller * @id: reset line name @@ -222,6 +382,26 @@ static inline struct reset_control *reset_control_get_optional_exclusive( } /** + * reset_control_bulk_get_optional_exclusive - optional + * reset_control_bulk_get_exclusive() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Optional variant of reset_control_bulk_get_exclusive(). If any of the + * requested resets are not specified in the device tree, this function sets + * them to NULL instead of returning an error. + * + * See reset_control_bulk_get_exclusive() for more information. + */ +static inline int __must_check +reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); +} + +/** * reset_control_get_optional_shared - optional reset_control_get_shared() * @dev: device to be reset by the controller * @id: reset line name @@ -239,6 +419,26 @@ static inline struct reset_control *reset_control_get_optional_shared( } /** + * reset_control_bulk_get_optional_shared - optional + * reset_control_bulk_get_shared() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Optional variant of reset_control_bulk_get_shared(). If the requested resets + * are not specified in the device tree, this function sets them to NULL + * instead of returning an error. + * + * See reset_control_bulk_get_shared() for more information. + */ +static inline int __must_check +reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); +} + +/** * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. * @node: device to be reset by the controller @@ -344,6 +544,26 @@ __must_check devm_reset_control_get_exclusive(struct device *dev, } /** + * devm_reset_control_bulk_get_exclusive - resource managed + * reset_control_bulk_get_exclusive() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_exclusive(). For reset controllers returned + * from this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_bulk_get_exclusive() for more information. 
+ */ +static inline int __must_check +devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); +} + +/** * devm_reset_control_get_exclusive_released - resource managed * reset_control_get_exclusive_released() * @dev: device to be reset by the controller @@ -363,6 +583,26 @@ __must_check devm_reset_control_get_exclusive_released(struct device *dev, } /** + * devm_reset_control_bulk_get_exclusive_released - resource managed + * reset_control_bulk_get_exclusive_released() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_exclusive_released(). For reset controllers + * returned from this function, reset_control_put() is called automatically on + * driver detach. + * + * See reset_control_bulk_get_exclusive_released() for more information. + */ +static inline int __must_check +devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); +} + +/** * devm_reset_control_get_optional_exclusive_released - resource managed * reset_control_get_optional_exclusive_released() * @dev: device to be reset by the controller @@ -382,6 +622,26 @@ __must_check devm_reset_control_get_optional_exclusive_released(struct device *d } /** + * devm_reset_control_bulk_get_optional_exclusive_released - resource managed + * reset_control_bulk_optional_get_exclusive_released() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_optional_get_exclusive_released(). For reset + * controllers returned from this function, reset_control_put() is called + * automatically on driver detach. + * + * See reset_control_bulk_optional_get_exclusive_released() for more information. + */ +static inline int __must_check +devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); +} + +/** * devm_reset_control_get_shared - resource managed reset_control_get_shared() * @dev: device to be reset by the controller * @id: reset line name @@ -397,6 +657,26 @@ static inline struct reset_control *devm_reset_control_get_shared( } /** + * devm_reset_control_bulk_get_shared - resource managed + * reset_control_bulk_get_shared() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_shared(). For reset controllers returned + * from this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_bulk_get_shared() for more information. 
+ */ +static inline int __must_check +devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); +} + +/** * devm_reset_control_get_optional_exclusive - resource managed * reset_control_get_optional_exclusive() * @dev: device to be reset by the controller @@ -415,6 +695,26 @@ static inline struct reset_control *devm_reset_control_get_optional_exclusive( } /** + * devm_reset_control_bulk_get_optional_exclusive - resource managed + * reset_control_bulk_get_optional_exclusive() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_optional_exclusive(). For reset controllers + * returned from this function, reset_control_put() is called automatically on + * driver detach. + * + * See reset_control_bulk_get_optional_exclusive() for more information. + */ +static inline int __must_check +devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true); +} + +/** * devm_reset_control_get_optional_shared - resource managed * reset_control_get_optional_shared() * @dev: device to be reset by the controller @@ -433,6 +733,26 @@ static inline struct reset_control *devm_reset_control_get_optional_shared( } /** + * devm_reset_control_bulk_get_optional_shared - resource managed + * reset_control_bulk_get_optional_shared() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_optional_shared(). For reset controllers + * returned from this function, reset_control_put() is called automatically on + * driver detach. + * + * See reset_control_bulk_get_optional_shared() for more information. 
+ */ +static inline int __must_check +devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); +} + +/** * devm_reset_control_get_exclusive_by_index - resource managed * reset_control_get_exclusive() * @dev: device to be reset by the controller diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 136ea0997e6d..dac53fd3afea 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -61,7 +61,8 @@ enum ring_buffer_type { unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); -u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, + struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not @@ -180,7 +181,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cp unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); -u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu); +u64 ring_buffer_time_stamp(struct trace_buffer *buffer); void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, int cpu, u64 *ts); void ring_buffer_set_clock(struct trace_buffer *buffer, diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index a5db828b2420..d97dcd049f18 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -18,8 +18,7 @@ #include <linux/mutex.h> #include <linux/poll.h> #include <linux/rpmsg/byteorder.h> - -#define RPMSG_ADDR_ANY 0xFFFFFFFF +#include <uapi/linux/rpmsg.h> struct rpmsg_device; struct rpmsg_endpoint; diff --git a/include/linux/security.h b/include/linux/security.h index 9aeda3f9e838..06f7c50ce77f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -291,6 +291,7 @@ void security_bprm_committed_creds(struct linux_binprm *bprm); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); +void security_sb_delete(struct super_block *sb); void security_sb_free(struct super_block *sb); void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); @@ -633,6 +634,9 @@ static inline int security_sb_alloc(struct super_block *sb) return 0; } +static inline void security_sb_delete(struct super_block *sb) +{ } + static inline void security_sb_free(struct super_block *sb) { } diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 9d6c28cc4d8f..5b31c5147969 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -72,6 +72,31 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) } /** + * seq_buf_terminate - Make sure buffer is nul terminated + * @s: the seq_buf descriptor to terminate. + * + * This makes sure that the buffer in @s is nul terminated and + * safe to read as a string. + * + * Note, if this is called when the buffer has overflowed, then + * the last byte of the buffer is zeroed, and the len will still + * point passed it. + * + * After this function is called, s->buffer is safe to use + * in string operations. 
+ */ +static inline void seq_buf_terminate(struct seq_buf *s) +{ + if (WARN_ON(s->size == 0)) + return; + + if (seq_buf_buffer_left(s)) + s->buffer[s->len] = 0; + else + s->buffer[s->size - 1] = 0; +} + +/** * seq_buf_get_buf - get buffer to write arbitrary data to * @s: the seq_buf handle * @bufp: the beginning of the buffer is stored here diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5857a937c637..216854a5e513 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -6,6 +6,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/limits.h> +#include <linux/spinlock.h> struct device; struct page; @@ -36,20 +37,11 @@ enum swiotlb_force { extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); -extern unsigned long swiotlb_nr_tbl(void); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_default_size(size_t default_size); extern void __init swiotlb_update_mem_attributes(void); -/* - * Enumeration for sync targets - */ -enum dma_sync_target { - SYNC_FOR_CPU = 0, - SYNC_FOR_DEVICE = 1, -}; - phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); @@ -57,32 +49,70 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, - size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); -extern void swiotlb_tbl_sync_single(struct device *hwdev, - phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target); - +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); #ifdef CONFIG_SWIOTLB extern enum swiotlb_force swiotlb_force; -extern phys_addr_t io_tlb_start, io_tlb_end; + +/** + * struct io_tlb_mem - IO TLB Memory Pool Descriptor + * + * @start: The start address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @end: The end address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and + * @end. This is command line adjustable via setup_io_tlb_npages. + * @used: The number of used IO TLB block. + * @list: The free list describing the number of free entries available + * from each index. + * @index: The index to start searching in the next round. + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + * @debugfs: The dentry to debugfs. 
+ * @late_alloc: %true if allocated using the page allocator + */ +struct io_tlb_mem { + phys_addr_t start; + phys_addr_t end; + unsigned long nslabs; + unsigned long used; + unsigned int index; + spinlock_t lock; + struct dentry *debugfs; + bool late_alloc; + struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; + } slots[]; +}; +extern struct io_tlb_mem *io_tlb_default_mem; static inline bool is_swiotlb_buffer(phys_addr_t paddr) { - return paddr >= io_tlb_start && paddr < io_tlb_end; + struct io_tlb_mem *mem = io_tlb_default_mem; + + return mem && paddr >= mem->start && paddr < mem->end; } void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_active(void); -void __init swiotlb_adjust_size(unsigned long new_size); +void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE static inline bool is_swiotlb_buffer(phys_addr_t paddr) @@ -106,7 +136,7 @@ static inline bool is_swiotlb_active(void) return false; } -static inline void swiotlb_adjust_size(unsigned long new_size) +static inline void swiotlb_adjust_size(unsigned long size) { } #endif /* CONFIG_SWIOTLB */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a672bbe28577..050511e8f1f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -69,6 +69,8 @@ struct io_uring_params; struct clone_args; struct open_how; struct mount_attr; +struct landlock_ruleset_attr; +enum landlock_rule_type; #include <linux/types.h> #include <linux/aio_abi.h> @@ -1043,6 +1045,11 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, siginfo_t __user *info, unsigned int flags); asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags); +asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr, + size_t size, __u32 flags); +asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, + const void __user *rule_attr, __u32 flags); +asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); /* * Architecture-specific system calls diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 6ac7bb1d2b1f..d296f3b88fb9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -91,7 +91,7 @@ struct thermal_cooling_device_ops { struct thermal_cooling_device { int id; - char type[THERMAL_NAME_LENGTH]; + char *type; struct device device; struct device_node *np; void *devdata; @@ -390,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); @@ -436,10 +435,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline void thermal_notify_framework(struct thermal_zone_device *tz, - int trip) -{ } - static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) { return -ENODEV; } diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index c6792cf01bc7..78a98bdff76d 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -3,6 +3,7 @@ #define _LINUX_TIMEKEEPING_H 
#include <linux/errno.h> +#include <linux/clocksource_ids.h> /* Included from linux/ktime.h */ @@ -243,11 +244,12 @@ struct ktime_timestamps { * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { - u64 cycles; - ktime_t real; - ktime_t raw; - unsigned int clock_was_set_seq; - u8 cs_was_changed_seq; + u64 cycles; + ktime_t real; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; }; /** diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 28e7af1406f2..ad413b382a3c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -206,7 +206,7 @@ static inline unsigned int tracing_gen_ctx_dec(void) trace_ctx = tracing_gen_ctx(); /* - * Subtract one from the preeption counter if preemption is enabled, + * Subtract one from the preemption counter if preemption is enabled, * see trace_event_buffer_reserve()for details. */ if (IS_ENABLED(CONFIG_PREEMPTION)) @@ -404,7 +404,6 @@ trace_get_fields(struct trace_event_call *event_call) return event_call->class->get_fields(event_call); } -struct trace_array; struct trace_subsystem_dir; enum { @@ -640,7 +639,8 @@ enum event_trigger_type { extern int filter_match_preds(struct event_filter *filter, void *rec); extern enum event_trigger_type -event_triggers_call(struct trace_event_file *file, void *rec, +event_triggers_call(struct trace_event_file *file, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event); extern void event_triggers_post_call(struct trace_event_file *file, @@ -664,7 +664,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL, NULL); + event_triggers_call(file, NULL, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 9cfb099da58f..13f65420f188 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -465,7 +465,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_prid) is equivalent to a standard declariton: + * * __field(pid_t, prev_prid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c7c6e8b8344d..c05e903cef02 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -397,6 +397,7 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count); #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) +#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 15fa085fab05..f311d227aa1b 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -8,7 +8,7 @@ #include <linux/vhost_iotlb.h> /** - * vDPA callback definition. + * struct vdpa_calllback - vDPA callback definition. 
* @callback: interrupt callback function * @private: the data passed to the callback function */ @@ -18,7 +18,7 @@ struct vdpa_callback { }; /** - * vDPA notification area + * struct vdpa_notification_area - vDPA notification area * @addr: base address of the notification area * @size: size of the notification area */ @@ -28,7 +28,7 @@ struct vdpa_notification_area { }; /** - * vDPA vq_state definition + * struct vdpa_vq_state - vDPA vq_state definition * @avail_index: available index */ struct vdpa_vq_state { @@ -38,7 +38,7 @@ struct vdpa_vq_state { struct vdpa_mgmt_dev; /** - * vDPA device - representation of a vDPA device + * struct vdpa_device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. @@ -59,7 +59,7 @@ struct vdpa_device { }; /** - * vDPA IOVA range - the IOVA range support by the device + * struct vdpa_iova_range - the IOVA range support by the device * @first: start of the IOVA range * @last: end of the IOVA range */ @@ -69,7 +69,7 @@ struct vdpa_iova_range { }; /** - * vDPA_config_ops - operations for configuring a vDPA device. + * struct vdpa_config_ops - operations for configuring a vDPA device. * Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following * list. @@ -150,6 +150,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @get_config_size: Get the size of the configuration space + * @vdev: vdpa device + * Returns size_t: configuration size * @get_config: Read from device specific configuration space * @vdev: vdpa device * @offset: offset from the beginning of @@ -231,6 +234,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); void (*set_config)(struct vdpa_device *vdev, unsigned int offset, @@ -267,7 +271,7 @@ int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** - * vdpa_driver - operations for a vDPA driver + * struct vdpa_driver - operations for a vDPA driver * @driver: underlying device driver * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. @@ -344,18 +348,18 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, } /** - * vdpa_mgmtdev_ops - vdpa device ops - * @dev_add: Add a vdpa device using alloc and register - * @mdev: parent device to use for device addition - * @name: name of the new vdpa device - * Driver need to add a new device using _vdpa_register_device() - * after fully initializing the vdpa device. Driver must return 0 - * on success or appropriate error code. - * @dev_del: Remove a vdpa device using unregister - * @mdev: parent device to use for device removal - * @dev: vdpa device to remove - * Driver need to remove the specified device by calling - * _vdpa_unregister_device(). 
+ * struct vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver need to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. + * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver need to remove the specified device by calling + * _vdpa_unregister_device(). */ struct vdpa_mgmtdev_ops { int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index f26acbeec965..6a95b58fd0f4 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -13,6 +13,8 @@ struct virtio_pci_modern_device { void __iomem *device; /* Base of vq notifications (non-legacy mode). */ void __iomem *notify_base; + /* Physical base of vq notifications */ + resource_size_t notify_pa; /* Where to read and clear interrupt */ u8 __iomem *isr; @@ -99,13 +101,8 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, - u16 idx); -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len); +void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 59bd50f99291..84db7b8f912f 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -46,6 +46,9 @@ struct vringh { /* IOTLB for this vring */ struct vhost_iotlb *iotlb; + /* spinlock to synchronize IOTLB accesses */ + spinlock_t *iotlb_lock; + /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; @@ -196,6 +199,19 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) kiov->iov = NULL; } +static inline size_t vringh_kiov_length(struct vringh_kiov *kiov) +{ + size_t len = 0; + int i; + + for (i = kiov->i; i < kiov->used; i++) + len += kiov->iov[i].iov_len; + + return len; +} + +void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len); + int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, @@ -258,7 +274,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) #if IS_REACHABLE(CONFIG_VHOST_IOTLB) -void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, + spinlock_t *iotlb_lock); int vringh_init_iotlb(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, |